diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json
index ec964d080..3cb12b7a8 100644
--- a/Godeps/Godeps.json
+++ b/Godeps/Godeps.json
@@ -24,8 +24,8 @@
},
{
"ImportPath": "github.com/minio/minio-go",
- "Comment": "v0.2.5-62-g61f6570",
- "Rev": "61f6570da0edd761974216c9ed5da485d3cc0c99"
+ "Comment": "v0.2.5-177-g691a38d",
+ "Rev": "691a38d161d6dfc0e8e78dc5360bc39f48a8626d"
},
{
"ImportPath": "github.com/pkg/sftp",
diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/INSTALLGO.md b/Godeps/_workspace/src/github.com/minio/minio-go/INSTALLGO.md
new file mode 100644
index 000000000..c3762bbfc
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/minio/minio-go/INSTALLGO.md
@@ -0,0 +1,83 @@
+## Ubuntu (Kylin) 14.04
+### Build Dependencies
+This installation guide assumes Ubuntu 14.04+ on the x86-64 platform.
+
+##### Install Git, GCC
+```sh
+$ sudo apt-get install git build-essential
+```
+
+##### Install Go 1.5+
+
+Download Go 1.5+ from [https://golang.org/dl/](https://golang.org/dl/).
+
+```sh
+$ wget https://storage.googleapis.com/golang/go1.5.1.linux-amd64.tar.gz
+$ mkdir -p ${HOME}/bin/
+$ mkdir -p ${HOME}/go/
+$ tar -C ${HOME}/bin/ -xzf go1.5.1.linux-amd64.tar.gz
+```
+##### Setup GOROOT and GOPATH
+
+Add the following exports to your ``~/.bashrc``. The GOROOT environment variable specifies the location of your Go binaries,
+and GOPATH specifies the location of your project workspace.
+
+```sh
+export GOROOT=${HOME}/bin/go
+export GOPATH=${HOME}/go
+export PATH=$PATH:${HOME}/bin/go/bin:${GOPATH}/bin
+```
+```sh
+$ source ~/.bashrc
+```
+
+##### Testing it all
+```sh
+$ go env
+```
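+
+As an optional further check, compile and run a tiny program end to end (a minimal sketch; the file name is arbitrary):
+
+```sh
+$ cat > /tmp/hello.go <<'EOF'
+package main
+
+import "fmt"
+
+func main() {
+	fmt.Println("hello from go")
+}
+EOF
+$ go run /tmp/hello.go
+hello from go
+```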
+
+## OS X (Yosemite) 10.10
+### Build Dependencies
+This installation document assumes OS X Yosemite 10.10+ on the x86-64 platform.
+
+##### Install brew
+```sh
+$ ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
+```
+
+##### Install Git, Python
+```sh
+$ brew install git python
+```
+
+##### Install Go 1.5+
+
+Install golang binaries using `brew`
+
+```sh
+$ brew install go
+$ mkdir -p $HOME/go
+```
+
+##### Setup GOROOT and GOPATH
+
+Add the following exports to your ``~/.bash_profile``. The GOROOT environment variable specifies the location of your Go binaries,
+and GOPATH specifies the location of your project workspace.
+
+```sh
+export GOPATH=${HOME}/go
+export GOVERSION=$(brew list go | head -n 1 | cut -d '/' -f 6)
+export GOROOT=$(brew --prefix)/Cellar/go/${GOVERSION}/libexec
+export PATH=$PATH:${GOPATH}/bin
+```
+
+##### Source the new environment
+
+```sh
+$ source ~/.bash_profile
+```
+
+##### Testing it all
+```sh
+$ go env
+```
diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/MAINTAINERS.md b/Godeps/_workspace/src/github.com/minio/minio-go/MAINTAINERS.md
new file mode 100644
index 000000000..6dbef6265
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/minio/minio-go/MAINTAINERS.md
@@ -0,0 +1,19 @@
+# For maintainers only
+
+## Responsibilities
+
+Please read through this guide: [Maintainer Responsibility](https://gist.github.com/abperiasamy/f4d9b31d3186bbd26522).
+
+### Making new releases
+
+Edit `libraryVersion` constant in `api.go`.
+
+```
+$ grep libraryVersion api.go
+ libraryVersion = "0.3.0"
+```
+
+```
+$ git tag 0.3.0
+$ git push --tags
+```
\ No newline at end of file
diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/README.md b/Godeps/_workspace/src/github.com/minio/minio-go/README.md
index bda9123a5..5417d8f14 100644
--- a/Godeps/_workspace/src/github.com/minio/minio-go/README.md
+++ b/Godeps/_workspace/src/github.com/minio/minio-go/README.md
@@ -1,12 +1,35 @@
# Minio Go Library for Amazon S3 Compatible Cloud Storage [![Gitter](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/minio/minio?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
+## Description
+
+The Minio Go library is a simple client library for Amazon S3 compatible cloud storage servers. It supports AWS Signature Version 4 and Version 2; Signature Version 4 is used by default.
+
+Supported cloud storage providers:
+
+ - AWS Signature Version 4
+ - Amazon S3
+ - Minio
+
+ - AWS Signature Version 2
+ - Google Cloud Storage (Compatibility Mode)
+ - Openstack Swift + Swift3 middleware
+ - Ceph Object Gateway
+ - Riak CS
+
## Install
+If you do not have a working Golang environment, please follow [Install Golang](./INSTALLGO.md).
+
```sh
$ go get github.com/minio/minio-go
```
+
## Example
+### ListBuckets()
+
+This example shows how to list your buckets.
+
```go
package main
@@ -17,47 +40,51 @@ import (
)
func main() {
- config := minio.Config{
- AccessKeyID: "YOUR-ACCESS-KEY-HERE",
- SecretAccessKey: "YOUR-PASSWORD-HERE",
- Endpoint: "https://s3.amazonaws.com",
- }
- s3Client, err := minio.New(config)
+ // Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access.
+ // This boolean value is the last argument for New().
+
+ // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
+ // determined based on the Endpoint value.
+ s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESS-KEY-HERE", "YOUR-SECRET-KEY-HERE", false)
if err != nil {
log.Fatalln(err)
}
- for bucket := range s3Client.ListBuckets() {
- if bucket.Err != nil {
- log.Fatalln(bucket.Err)
- }
- log.Println(bucket.Stat)
+ buckets, err := s3Client.ListBuckets()
+ if err != nil {
+ log.Fatalln(err)
+ }
+ for _, bucket := range buckets {
+ log.Println(bucket)
}
}
```
## Documentation
-### Bucket Level
-* [MakeBucket(bucket, acl) error](examples/s3/makebucket.go)
-* [BucketExists(bucket) error](examples/s3/bucketexists.go)
-* [RemoveBucket(bucket) error](examples/s3/removebucket.go)
-* [GetBucketACL(bucket) (BucketACL, error)](examples/s3/getbucketacl.go)
-* [SetBucketACL(bucket, BucketACL) error)](examples/s3/setbucketacl.go)
-* [ListBuckets() <-chan BucketStat](examples/s3/listbuckets.go)
-* [ListObjects(bucket, prefix, recursive) <-chan ObjectStat](examples/s3/listobjects.go)
-* [ListIncompleteUploads(bucket, prefix, recursive) <-chan ObjectMultipartStat](examples/s3/listincompleteuploads.go)
+### Bucket Operations
+* [MakeBucket(bucketName, BucketACL, location) error](examples/s3/makebucket.go)
+* [BucketExists(bucketName) error](examples/s3/bucketexists.go)
+* [RemoveBucket(bucketName) error](examples/s3/removebucket.go)
+* [GetBucketACL(bucketName) (BucketACL, error)](examples/s3/getbucketacl.go)
+* [SetBucketACL(bucketName, BucketACL) error)](examples/s3/setbucketacl.go)
+* [ListBuckets() []BucketStat](examples/s3/listbuckets.go)
+* [ListObjects(bucketName, objectPrefix, recursive, chan<- struct{}) <-chan ObjectStat](examples/s3/listobjects.go)
+* [ListIncompleteUploads(bucketName, prefix, recursive, chan<- struct{}) <-chan ObjectMultipartStat](examples/s3/listincompleteuploads.go)
-### Object Level
-* [PutObject(bucket, object, size, io.Reader) error](examples/s3/putobject.go)
-* [GetObject(bucket, object) (io.Reader, ObjectStat, error)](examples/s3/getobject.go)
-* [GetPartialObject(bucket, object, offset, length) (io.Reader, ObjectStat, error)](examples/s3/getpartialobject.go)
-* [StatObject(bucket, object) (ObjectStat, error)](examples/s3/statobject.go)
-* [RemoveObject(bucket, object) error](examples/s3/removeobject.go)
-* [RemoveIncompleteUpload(bucket, object) <-chan error](examples/s3/removeincompleteupload.go)
+### Object Operations
+* [PutObject(bucketName, objectName, io.Reader, size, contentType) error](examples/s3/putobject.go)
+* [GetObject(bucketName, objectName) (io.ReadCloser, ObjectStat, error)](examples/s3/getobject.go)
+* [StatObject(bucketName, objectName) (ObjectStat, error)](examples/s3/statobject.go)
+* [RemoveObject(bucketName, objectName) error](examples/s3/removeobject.go)
+* [RemoveIncompleteUpload(bucketName, objectName) <-chan error](examples/s3/removeincompleteupload.go)
-### Presigned Bucket/Object Level
-* [PresignedGetObject(bucket, object, time.Duration) (string, error)](examples/s3/presignedgetobject.go)
-* [PresignedPutObject(bucket, object, time.Duration) (string, error)](examples/s3/presignedputobject.go)
+### File Object Operations
+* [FPutObject(bucketName, objectName, filePath, contentType) (size, error)](examples/s3/fputobject.go)
+* [FGetObject(bucketName, objectName, filePath) error](examples/s3/fgetobject.go)
+
+### Presigned Operations
+* [PresignedGetObject(bucketName, objectName, time.Duration) (string, error)](examples/s3/presignedgetobject.go)
+* [PresignedPutObject(bucketName, objectName, time.Duration) (string, error)](examples/s3/presignedputobject.go)
* [PresignedPostPolicy(NewPostPolicy()) (map[string]string, error)](examples/s3/presignedpostpolicy.go)
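+
+For example, a time-limited download URL can be generated with PresignedGetObject. This is a sketch based on the signature above; it reuses the s3Client from the earlier example, and the bucket and object names are hypothetical:
+
+```go
+presignedURL, err := s3Client.PresignedGetObject("my-bucketname", "my-objectname", time.Duration(1000)*time.Second)
+if err != nil {
+	log.Fatalln(err)
+}
+log.Println(presignedURL)
+```
+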
### API Reference
diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/api-core.go b/Godeps/_workspace/src/github.com/minio/minio-go/api-core.go
deleted file mode 100644
index 73fffbd29..000000000
--- a/Godeps/_workspace/src/github.com/minio/minio-go/api-core.go
+++ /dev/null
@@ -1,906 +0,0 @@
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package minio
-
-import (
- "bytes"
- "encoding/base64"
- "encoding/json"
- "encoding/xml"
- "fmt"
- "io"
- "net/http"
- "strconv"
- "strings"
- "time"
-)
-
-const (
- separator = "/"
-)
-
-// apiCore container to hold unexported internal functions
-type apiCore struct {
- config *Config
-}
-
-// closeResp close non nil response with any response Body
-func closeResp(resp *http.Response) {
- if resp != nil && resp.Body != nil {
- resp.Body.Close()
- }
-}
-
-// putBucketRequest wrapper creates a new putBucket request
-func (a apiCore) putBucketRequest(bucket, acl, location string) (*request, error) {
- var r *request
- var err error
- op := &operation{
- HTTPServer: a.config.Endpoint,
- HTTPMethod: "PUT",
- HTTPPath: separator + bucket,
- }
- var createBucketConfigBuffer *bytes.Reader
- // If location is set use it and create proper bucket configuration
- switch {
- case location != "":
- createBucketConfig := new(createBucketConfiguration)
- createBucketConfig.Location = location
- var createBucketConfigBytes []byte
- switch {
- case a.config.AcceptType == "application/xml":
- createBucketConfigBytes, err = xml.Marshal(createBucketConfig)
- case a.config.AcceptType == "application/json":
- createBucketConfigBytes, err = json.Marshal(createBucketConfig)
- default:
- createBucketConfigBytes, err = xml.Marshal(createBucketConfig)
- }
- if err != nil {
- return nil, err
- }
- createBucketConfigBuffer = bytes.NewReader(createBucketConfigBytes)
- }
- switch {
- case createBucketConfigBuffer == nil:
- r, err = newRequest(op, a.config, nil)
- if err != nil {
- return nil, err
- }
- default:
- r, err = newRequest(op, a.config, createBucketConfigBuffer)
- if err != nil {
- return nil, err
- }
- r.req.ContentLength = int64(createBucketConfigBuffer.Len())
- }
- // by default bucket is private
- switch {
- case acl != "":
- r.Set("x-amz-acl", acl)
- default:
- r.Set("x-amz-acl", "private")
- }
-
- return r, nil
-}
-
-/// Bucket Write Operations
-
-// putBucket create a new bucket
-//
-// Requires valid AWS Access Key ID to authenticate requests
-// Anonymous requests are never allowed to create buckets
-//
-// optional arguments are acl and location - by default all buckets are created
-// with ``private`` acl and location set to US Standard if one wishes to set
-// different ACLs and Location one can set them properly.
-//
-// ACL valid values
-// ------------------
-// private - owner gets full access [DEFAULT]
-// public-read - owner gets full access, others get read access
-// public-read-write - owner gets full access, others get full access too
-// authenticated-read - owner gets full access, authenticated users get read access
-// ------------------
-//
-// Location valid values
-// ------------------
-// [ us-west-1 | us-west-2 | eu-west-1 | eu-central-1 | ap-southeast-1 | ap-northeast-1 | ap-southeast-2 | sa-east-1 ]
-//
-// Default - US standard
-func (a apiCore) putBucket(bucket, acl, location string) error {
- req, err := a.putBucketRequest(bucket, acl, location)
- if err != nil {
- return err
- }
- resp, err := req.Do()
- defer closeResp(resp)
- if err != nil {
- return err
- }
- if resp != nil {
- if resp.StatusCode != http.StatusOK {
- return BodyToErrorResponse(resp.Body, a.config.AcceptType)
- }
- }
- return nil
-}
-
-// putBucketRequestACL wrapper creates a new putBucketACL request
-func (a apiCore) putBucketACLRequest(bucket, acl string) (*request, error) {
- op := &operation{
- HTTPServer: a.config.Endpoint,
- HTTPMethod: "PUT",
- HTTPPath: separator + bucket + "?acl",
- }
- req, err := newRequest(op, a.config, nil)
- if err != nil {
- return nil, err
- }
- req.Set("x-amz-acl", acl)
- return req, nil
-}
-
-// putBucketACL set the permissions on an existing bucket using Canned ACL's
-func (a apiCore) putBucketACL(bucket, acl string) error {
- req, err := a.putBucketACLRequest(bucket, acl)
- if err != nil {
- return err
- }
- resp, err := req.Do()
- defer closeResp(resp)
- if err != nil {
- return err
- }
- if resp != nil {
- if resp.StatusCode != http.StatusOK {
- return BodyToErrorResponse(resp.Body, a.config.AcceptType)
- }
- }
- return nil
-}
-
-// getBucketACLRequest wrapper creates a new getBucketACL request
-func (a apiCore) getBucketACLRequest(bucket string) (*request, error) {
- op := &operation{
- HTTPServer: a.config.Endpoint,
- HTTPMethod: "GET",
- HTTPPath: separator + bucket + "?acl",
- }
- req, err := newRequest(op, a.config, nil)
- if err != nil {
- return nil, err
- }
- return req, nil
-}
-
-// getBucketACL get the acl information on an existing bucket
-func (a apiCore) getBucketACL(bucket string) (accessControlPolicy, error) {
- req, err := a.getBucketACLRequest(bucket)
- if err != nil {
- return accessControlPolicy{}, err
- }
- resp, err := req.Do()
- defer closeResp(resp)
- if err != nil {
- return accessControlPolicy{}, err
- }
- if resp != nil {
- if resp.StatusCode != http.StatusOK {
- return accessControlPolicy{}, BodyToErrorResponse(resp.Body, a.config.AcceptType)
- }
- }
- policy := accessControlPolicy{}
- err = acceptTypeDecoder(resp.Body, a.config.AcceptType, &policy)
- if err != nil {
- return accessControlPolicy{}, err
- }
- // In-case of google private bucket policy doesn't have any Grant list
- if a.config.Region == "google" {
- return policy, nil
- }
- if policy.AccessControlList.Grant == nil {
- errorResponse := ErrorResponse{
- Code: "InternalError",
- Message: "Access control Grant list is empty, please report this at https://github.com/minio/minio-go/issues.",
- Resource: separator + bucket,
- RequestID: resp.Header.Get("x-amz-request-id"),
- HostID: resp.Header.Get("x-amz-id-2"),
- }
- return accessControlPolicy{}, errorResponse
- }
- return policy, nil
-}
-
-// getBucketLocationRequest wrapper creates a new getBucketLocation request
-func (a apiCore) getBucketLocationRequest(bucket string) (*request, error) {
- op := &operation{
- HTTPServer: a.config.Endpoint,
- HTTPMethod: "GET",
- HTTPPath: separator + bucket + "?location",
- }
- req, err := newRequest(op, a.config, nil)
- if err != nil {
- return nil, err
- }
- return req, nil
-}
-
-// getBucketLocation uses location subresource to return a bucket's region
-func (a apiCore) getBucketLocation(bucket string) (string, error) {
- req, err := a.getBucketLocationRequest(bucket)
- if err != nil {
- return "", err
- }
- resp, err := req.Do()
- defer closeResp(resp)
- if err != nil {
- return "", err
- }
- if resp != nil {
- if resp.StatusCode != http.StatusOK {
- return "", BodyToErrorResponse(resp.Body, a.config.AcceptType)
- }
- }
- var locationConstraint string
- err = acceptTypeDecoder(resp.Body, a.config.AcceptType, &locationConstraint)
- if err != nil {
- return "", err
- }
- return locationConstraint, nil
-}
-
-// listObjectsRequest wrapper creates a new listObjects request
-func (a apiCore) listObjectsRequest(bucket, marker, prefix, delimiter string, maxkeys int) (*request, error) {
- // resourceQuery - get resources properly escaped and lined up before using them in http request
- resourceQuery := func() (*string, error) {
- switch {
- case marker != "":
- marker = fmt.Sprintf("&marker=%s", getURLEncodedPath(marker))
- fallthrough
- case prefix != "":
- prefix = fmt.Sprintf("&prefix=%s", getURLEncodedPath(prefix))
- fallthrough
- case delimiter != "":
- delimiter = fmt.Sprintf("&delimiter=%s", delimiter)
- }
- query := fmt.Sprintf("?max-keys=%d", maxkeys) + marker + prefix + delimiter
- return &query, nil
- }
- query, err := resourceQuery()
- if err != nil {
- return nil, err
- }
- op := &operation{
- HTTPServer: a.config.Endpoint,
- HTTPMethod: "GET",
- HTTPPath: separator + bucket + *query,
- }
- r, err := newRequest(op, a.config, nil)
- if err != nil {
- return nil, err
- }
- return r, nil
-}
-
-/// Bucket Read Operations
-
-// listObjects - (List Objects) - List some or all (up to 1000) of the objects in a bucket.
-//
-// You can use the request parameters as selection criteria to return a subset of the objects in a bucket.
-// request paramters :-
-// ---------
-// ?marker - Specifies the key to start with when listing objects in a bucket.
-// ?delimiter - A delimiter is a character you use to group keys.
-// ?prefix - Limits the response to keys that begin with the specified prefix.
-// ?max-keys - Sets the maximum number of keys returned in the response body.
-func (a apiCore) listObjects(bucket, marker, prefix, delimiter string, maxkeys int) (listBucketResult, error) {
- if err := invalidBucketError(bucket); err != nil {
- return listBucketResult{}, err
- }
- req, err := a.listObjectsRequest(bucket, marker, prefix, delimiter, maxkeys)
- if err != nil {
- return listBucketResult{}, err
- }
- resp, err := req.Do()
- defer closeResp(resp)
- if err != nil {
- return listBucketResult{}, err
- }
- if resp != nil {
- if resp.StatusCode != http.StatusOK {
- return listBucketResult{}, BodyToErrorResponse(resp.Body, a.config.AcceptType)
- }
- }
- listBucketResult := listBucketResult{}
- err = acceptTypeDecoder(resp.Body, a.config.AcceptType, &listBucketResult)
- if err != nil {
- return listBucketResult, err
- }
- // close body while returning, along with any error
- return listBucketResult, nil
-}
-
-// headBucketRequest wrapper creates a new headBucket request
-func (a apiCore) headBucketRequest(bucket string) (*request, error) {
- op := &operation{
- HTTPServer: a.config.Endpoint,
- HTTPMethod: "HEAD",
- HTTPPath: separator + bucket,
- }
- return newRequest(op, a.config, nil)
-}
-
-// headBucket useful to determine if a bucket exists and you have permission to access it.
-func (a apiCore) headBucket(bucket string) error {
- if err := invalidBucketError(bucket); err != nil {
- return err
- }
- req, err := a.headBucketRequest(bucket)
- if err != nil {
- return err
- }
- resp, err := req.Do()
- defer closeResp(resp)
- if err != nil {
- return err
- }
- if resp != nil {
- if resp.StatusCode != http.StatusOK {
- // Head has no response body, handle it
- var errorResponse ErrorResponse
- switch resp.StatusCode {
- case http.StatusNotFound:
- errorResponse = ErrorResponse{
- Code: "NoSuchBucket",
- Message: "The specified bucket does not exist.",
- Resource: separator + bucket,
- RequestID: resp.Header.Get("x-amz-request-id"),
- HostID: resp.Header.Get("x-amz-id-2"),
- }
- case http.StatusForbidden:
- errorResponse = ErrorResponse{
- Code: "AccessDenied",
- Message: "Access Denied.",
- Resource: separator + bucket,
- RequestID: resp.Header.Get("x-amz-request-id"),
- HostID: resp.Header.Get("x-amz-id-2"),
- }
- default:
- errorResponse = ErrorResponse{
- Code: resp.Status,
- Message: resp.Status,
- Resource: separator + bucket,
- RequestID: resp.Header.Get("x-amz-request-id"),
- HostID: resp.Header.Get("x-amz-id-2"),
- }
- }
- return errorResponse
- }
- }
- return nil
-}
-
-// deleteBucketRequest wrapper creates a new deleteBucket request
-func (a apiCore) deleteBucketRequest(bucket string) (*request, error) {
- op := &operation{
- HTTPServer: a.config.Endpoint,
- HTTPMethod: "DELETE",
- HTTPPath: separator + bucket,
- }
- return newRequest(op, a.config, nil)
-}
-
-// deleteBucket deletes the bucket named in the URI
-//
-// NOTE: -
-// All objects (including all object versions and delete markers)
-// in the bucket must be deleted before successfully attempting this request
-func (a apiCore) deleteBucket(bucket string) error {
- if err := invalidBucketError(bucket); err != nil {
- return err
- }
- req, err := a.deleteBucketRequest(bucket)
- if err != nil {
- return err
- }
- resp, err := req.Do()
- defer closeResp(resp)
- if err != nil {
- return err
- }
- if resp != nil {
- if resp.StatusCode != http.StatusNoContent {
- var errorResponse ErrorResponse
- switch resp.StatusCode {
- case http.StatusNotFound:
- errorResponse = ErrorResponse{
- Code: "NoSuchBucket",
- Message: "The specified bucket does not exist.",
- Resource: separator + bucket,
- RequestID: resp.Header.Get("x-amz-request-id"),
- HostID: resp.Header.Get("x-amz-id-2"),
- }
- case http.StatusForbidden:
- errorResponse = ErrorResponse{
- Code: "AccessDenied",
- Message: "Access Denied.",
- Resource: separator + bucket,
- RequestID: resp.Header.Get("x-amz-request-id"),
- HostID: resp.Header.Get("x-amz-id-2"),
- }
- case http.StatusConflict:
- errorResponse = ErrorResponse{
- Code: "Conflict",
- Message: "Bucket not empty.",
- Resource: separator + bucket,
- RequestID: resp.Header.Get("x-amz-request-id"),
- HostID: resp.Header.Get("x-amz-id-2"),
- }
- default:
- errorResponse = ErrorResponse{
- Code: resp.Status,
- Message: resp.Status,
- Resource: separator + bucket,
- RequestID: resp.Header.Get("x-amz-request-id"),
- HostID: resp.Header.Get("x-amz-id-2"),
- }
- }
- return errorResponse
- }
- }
- return nil
-}
-
-/// Object Read/Write/Stat Operations
-
-func (a apiCore) putObjectUnAuthenticatedRequest(bucket, object, contentType string, size int64, body io.Reader) (*request, error) {
- if strings.TrimSpace(contentType) == "" {
- contentType = "application/octet-stream"
- }
- op := &operation{
- HTTPServer: a.config.Endpoint,
- HTTPMethod: "PUT",
- HTTPPath: separator + bucket + separator + object,
- }
- r, err := newUnauthenticatedRequest(op, a.config, body)
- if err != nil {
- return nil, err
- }
- // Content-MD5 is not set consciously
- r.Set("Content-Type", contentType)
- r.req.ContentLength = size
- return r, nil
-}
-
-// putObjectUnAuthenticated - add an object to a bucket
-// NOTE: You must have WRITE permissions on a bucket to add an object to it.
-func (a apiCore) putObjectUnAuthenticated(bucket, object, contentType string, size int64, body io.Reader) (ObjectStat, error) {
- req, err := a.putObjectUnAuthenticatedRequest(bucket, object, contentType, size, body)
- if err != nil {
- return ObjectStat{}, err
- }
- resp, err := req.Do()
- defer closeResp(resp)
- if err != nil {
- return ObjectStat{}, err
- }
- if resp != nil {
- if resp.StatusCode != http.StatusOK {
- return ObjectStat{}, BodyToErrorResponse(resp.Body, a.config.AcceptType)
- }
- }
- var metadata ObjectStat
- metadata.ETag = strings.Trim(resp.Header.Get("ETag"), "\"") // trim off the odd double quotes
- return metadata, nil
-}
-
-// putObjectRequest wrapper creates a new PutObject request
-func (a apiCore) putObjectRequest(bucket, object, contentType string, md5SumBytes []byte, size int64, body io.ReadSeeker) (*request, error) {
- if strings.TrimSpace(contentType) == "" {
- contentType = "application/octet-stream"
- }
- op := &operation{
- HTTPServer: a.config.Endpoint,
- HTTPMethod: "PUT",
- HTTPPath: separator + bucket + separator + object,
- }
- r, err := newRequest(op, a.config, body)
- if err != nil {
- return nil, err
- }
- // set Content-MD5 as base64 encoded md5
- if md5SumBytes != nil {
- r.Set("Content-MD5", base64.StdEncoding.EncodeToString(md5SumBytes))
- }
- r.Set("Content-Type", contentType)
- r.req.ContentLength = size
- return r, nil
-}
-
-// putObject - add an object to a bucket
-// NOTE: You must have WRITE permissions on a bucket to add an object to it.
-func (a apiCore) putObject(bucket, object, contentType string, md5SumBytes []byte, size int64, body io.ReadSeeker) (ObjectStat, error) {
- req, err := a.putObjectRequest(bucket, object, contentType, md5SumBytes, size, body)
- if err != nil {
- return ObjectStat{}, err
- }
- resp, err := req.Do()
- defer closeResp(resp)
- if err != nil {
- return ObjectStat{}, err
- }
- if resp != nil {
- if resp.StatusCode != http.StatusOK {
- return ObjectStat{}, BodyToErrorResponse(resp.Body, a.config.AcceptType)
- }
- }
- var metadata ObjectStat
- metadata.ETag = strings.Trim(resp.Header.Get("ETag"), "\"") // trim off the odd double quotes
- return metadata, nil
-}
-
-func (a apiCore) presignedPostPolicy(p *PostPolicy) map[string]string {
- t := time.Now().UTC()
- r := new(request)
- r.config = a.config
- if r.config.Signature.isV2() {
- policyBase64 := p.base64()
- p.formData["policy"] = policyBase64
- p.formData["AWSAccessKeyId"] = r.config.AccessKeyID
- p.formData["signature"] = r.PostPresignSignatureV2(policyBase64)
- return p.formData
- }
- credential := getCredential(r.config.AccessKeyID, r.config.Region, t)
- p.addNewPolicy(policy{"eq", "$x-amz-date", t.Format(iso8601DateFormat)})
- p.addNewPolicy(policy{"eq", "$x-amz-algorithm", authHeader})
- p.addNewPolicy(policy{"eq", "$x-amz-credential", credential})
-
- policyBase64 := p.base64()
- p.formData["policy"] = policyBase64
- p.formData["x-amz-algorithm"] = authHeader
- p.formData["x-amz-credential"] = credential
- p.formData["x-amz-date"] = t.Format(iso8601DateFormat)
- p.formData["x-amz-signature"] = r.PostPresignSignatureV4(policyBase64, t)
- return p.formData
-}
-
-func (a apiCore) presignedPutObject(bucket, object string, expires int64) (string, error) {
- op := &operation{
- HTTPServer: a.config.Endpoint,
- HTTPMethod: "PUT",
- HTTPPath: separator + bucket + separator + object,
- }
- r, err := newPresignedRequest(op, a.config, expires)
- if err != nil {
- return "", err
- }
- if r.config.Signature.isV2() {
- return r.PreSignV2()
- }
- return r.PreSignV4()
-}
-
-func (a apiCore) presignedGetObjectRequest(bucket, object string, expires, offset, length int64) (*request, error) {
- op := &operation{
- HTTPServer: a.config.Endpoint,
- HTTPMethod: "GET",
- HTTPPath: separator + bucket + separator + object,
- }
- r, err := newPresignedRequest(op, a.config, expires)
- if err != nil {
- return nil, err
- }
- switch {
- case length > 0 && offset > 0:
- r.Set("Range", fmt.Sprintf("bytes=%d-%d", offset, offset+length-1))
- case offset > 0 && length == 0:
- r.Set("Range", fmt.Sprintf("bytes=%d-", offset))
- case length > 0 && offset == 0:
- r.Set("Range", fmt.Sprintf("bytes=-%d", length))
- }
- return r, nil
-}
-
-func (a apiCore) presignedGetObject(bucket, object string, expires, offset, length int64) (string, error) {
- if err := invalidArgumentError(object); err != nil {
- return "", err
- }
- r, err := a.presignedGetObjectRequest(bucket, object, expires, offset, length)
- if err != nil {
- return "", err
- }
- if r.config.Signature.isV2() {
- return r.PreSignV2()
- }
- return r.PreSignV4()
-}
-
-// getObjectRequest wrapper creates a new getObject request
-func (a apiCore) getObjectRequest(bucket, object string, offset, length int64) (*request, error) {
- op := &operation{
- HTTPServer: a.config.Endpoint,
- HTTPMethod: "GET",
- HTTPPath: separator + bucket + separator + object,
- }
- r, err := newRequest(op, a.config, nil)
- if err != nil {
- return nil, err
- }
- switch {
- case length > 0 && offset >= 0:
- r.Set("Range", fmt.Sprintf("bytes=%d-%d", offset, offset+length-1))
- case offset > 0 && length == 0:
- r.Set("Range", fmt.Sprintf("bytes=%d-", offset))
- // The final length bytes
- case length < 0 && offset == 0:
- r.Set("Range", fmt.Sprintf("bytes=%d", length))
- }
- return r, nil
-}
-
-// getObject - retrieve object from Object Storage
-//
-// Additionally this function also takes range arguments to download the specified
-// range bytes of an object. Setting offset and length = 0 will download the full object.
-//
-// For more information about the HTTP Range header.
-// go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35.
-func (a apiCore) getObject(bucket, object string, offset, length int64) (io.ReadCloser, ObjectStat, error) {
- if err := invalidArgumentError(object); err != nil {
- return nil, ObjectStat{}, err
- }
- req, err := a.getObjectRequest(bucket, object, offset, length)
- if err != nil {
- return nil, ObjectStat{}, err
- }
- resp, err := req.Do()
- if err != nil {
- return nil, ObjectStat{}, err
- }
- if resp != nil {
- switch resp.StatusCode {
- case http.StatusOK:
- case http.StatusPartialContent:
- default:
- return nil, ObjectStat{}, BodyToErrorResponse(resp.Body, a.config.AcceptType)
- }
- }
- md5sum := strings.Trim(resp.Header.Get("ETag"), "\"") // trim off the odd double quotes
- date, err := time.Parse(http.TimeFormat, resp.Header.Get("Last-Modified"))
- if err != nil {
- return nil, ObjectStat{}, ErrorResponse{
- Code: "InternalError",
- Message: "Last-Modified time format not recognized, please report this issue at https://github.com/minio/minio-go/issues.",
- RequestID: resp.Header.Get("x-amz-request-id"),
- HostID: resp.Header.Get("x-amz-id-2"),
- }
- }
- contentType := strings.TrimSpace(resp.Header.Get("Content-Type"))
- if contentType == "" {
- contentType = "application/octet-stream"
- }
- var objectstat ObjectStat
- objectstat.ETag = md5sum
- objectstat.Key = object
- objectstat.Size = resp.ContentLength
- objectstat.LastModified = date
- objectstat.ContentType = contentType
-
- // do not close body here, caller will close
- return resp.Body, objectstat, nil
-}
-
-// deleteObjectRequest wrapper creates a new deleteObject request
-func (a apiCore) deleteObjectRequest(bucket, object string) (*request, error) {
- op := &operation{
- HTTPServer: a.config.Endpoint,
- HTTPMethod: "DELETE",
- HTTPPath: separator + bucket + separator + object,
- }
- return newRequest(op, a.config, nil)
-}
-
-// deleteObject deletes a given object from a bucket
-func (a apiCore) deleteObject(bucket, object string) error {
- if err := invalidBucketError(bucket); err != nil {
- return err
- }
- if err := invalidArgumentError(object); err != nil {
- return err
- }
- req, err := a.deleteObjectRequest(bucket, object)
- if err != nil {
- return err
- }
- resp, err := req.Do()
- defer closeResp(resp)
- if err != nil {
- return err
- }
- if resp != nil {
- if resp.StatusCode != http.StatusNoContent {
- var errorResponse ErrorResponse
- switch resp.StatusCode {
- case http.StatusNotFound:
- errorResponse = ErrorResponse{
- Code: "NoSuchKey",
- Message: "The specified key does not exist.",
- Resource: separator + bucket + separator + object,
- RequestID: resp.Header.Get("x-amz-request-id"),
- HostID: resp.Header.Get("x-amz-id-2"),
- }
- case http.StatusForbidden:
- errorResponse = ErrorResponse{
- Code: "AccessDenied",
- Message: "Access Denied.",
- Resource: separator + bucket + separator + object,
- RequestID: resp.Header.Get("x-amz-request-id"),
- HostID: resp.Header.Get("x-amz-id-2"),
- }
- default:
- errorResponse = ErrorResponse{
- Code: resp.Status,
- Message: resp.Status,
- Resource: separator + bucket + separator + object,
- RequestID: resp.Header.Get("x-amz-request-id"),
- HostID: resp.Header.Get("x-amz-id-2"),
- }
- }
- return errorResponse
- }
- }
- return nil
-}
-
-// headObjectRequest wrapper creates a new headObject request
-func (a apiCore) headObjectRequest(bucket, object string) (*request, error) {
- op := &operation{
- HTTPServer: a.config.Endpoint,
- HTTPMethod: "HEAD",
- HTTPPath: separator + bucket + separator + object,
- }
- return newRequest(op, a.config, nil)
-}
-
-// headObject retrieves metadata from an object without returning the object itself
-func (a apiCore) headObject(bucket, object string) (ObjectStat, error) {
- if err := invalidBucketError(bucket); err != nil {
- return ObjectStat{}, err
- }
- if err := invalidArgumentError(object); err != nil {
- return ObjectStat{}, err
- }
- req, err := a.headObjectRequest(bucket, object)
- if err != nil {
- return ObjectStat{}, err
- }
- resp, err := req.Do()
- defer closeResp(resp)
- if err != nil {
- return ObjectStat{}, err
- }
- if resp != nil {
- if resp.StatusCode != http.StatusOK {
- var errorResponse ErrorResponse
- switch resp.StatusCode {
- case http.StatusNotFound:
- errorResponse = ErrorResponse{
- Code: "NoSuchKey",
- Message: "The specified key does not exist.",
- Resource: separator + bucket + separator + object,
- RequestID: resp.Header.Get("x-amz-request-id"),
- HostID: resp.Header.Get("x-amz-id-2"),
- }
- case http.StatusForbidden:
- errorResponse = ErrorResponse{
- Code: "AccessDenied",
- Message: "Access Denied.",
- Resource: separator + bucket + separator + object,
- RequestID: resp.Header.Get("x-amz-request-id"),
- HostID: resp.Header.Get("x-amz-id-2"),
- }
- default:
- errorResponse = ErrorResponse{
- Code: resp.Status,
- Message: resp.Status,
- Resource: separator + bucket + separator + object,
- RequestID: resp.Header.Get("x-amz-request-id"),
- HostID: resp.Header.Get("x-amz-id-2"),
- }
-
- }
- return ObjectStat{}, errorResponse
- }
- }
- md5sum := strings.Trim(resp.Header.Get("ETag"), "\"") // trim off the odd double quotes
- size, err := strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64)
- if err != nil {
- return ObjectStat{}, ErrorResponse{
- Code: "InternalError",
- Message: "Content-Length not recognized, please report this issue at https://github.com/minio/minio-go/issues.",
- RequestID: resp.Header.Get("x-amz-request-id"),
- HostID: resp.Header.Get("x-amz-id-2"),
- }
- }
- date, err := time.Parse(http.TimeFormat, resp.Header.Get("Last-Modified"))
- if err != nil {
- return ObjectStat{}, ErrorResponse{
- Code: "InternalError",
- Message: "Last-Modified time format not recognized, please report this issue at https://github.com/minio/minio-go/issues.",
- RequestID: resp.Header.Get("x-amz-request-id"),
- HostID: resp.Header.Get("x-amz-id-2"),
- }
- }
- contentType := strings.TrimSpace(resp.Header.Get("Content-Type"))
- if contentType == "" {
- contentType = "application/octet-stream"
- }
-
- var objectstat ObjectStat
- objectstat.ETag = md5sum
- objectstat.Key = object
- objectstat.Size = size
- objectstat.LastModified = date
- objectstat.ContentType = contentType
- return objectstat, nil
-}
-
-/// Service Operations
-
-// listBucketRequest wrapper creates a new listBuckets request
-func (a apiCore) listBucketsRequest() (*request, error) {
- op := &operation{
- HTTPServer: a.config.Endpoint,
- HTTPMethod: "GET",
- HTTPPath: separator,
- }
- return newRequest(op, a.config, nil)
-}
-
-// listBuckets list of all buckets owned by the authenticated sender of the request
-func (a apiCore) listBuckets() (listAllMyBucketsResult, error) {
- req, err := a.listBucketsRequest()
- if err != nil {
- return listAllMyBucketsResult{}, err
- }
- resp, err := req.Do()
- defer closeResp(resp)
- if err != nil {
- return listAllMyBucketsResult{}, err
- }
- if resp != nil {
- // for un-authenticated requests, amazon sends a redirect handle it
- if resp.StatusCode == http.StatusTemporaryRedirect {
- return listAllMyBucketsResult{}, ErrorResponse{
- Code: "AccessDenied",
- Message: "Anonymous access is forbidden for this operation.",
- RequestID: resp.Header.Get("x-amz-request-id"),
- HostID: resp.Header.Get("x-amz-id-2"),
- }
- }
- if resp.StatusCode != http.StatusOK {
- return listAllMyBucketsResult{}, BodyToErrorResponse(resp.Body, a.config.AcceptType)
- }
- }
- listAllMyBucketsResult := listAllMyBucketsResult{}
- err = acceptTypeDecoder(resp.Body, a.config.AcceptType, &listAllMyBucketsResult)
- if err != nil {
- return listAllMyBucketsResult, err
- }
- return listAllMyBucketsResult, nil
-}
diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/api-definitions.go b/Godeps/_workspace/src/github.com/minio/minio-go/api-definitions.go
new file mode 100644
index 000000000..7667645a1
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/minio/minio-go/api-definitions.go
@@ -0,0 +1,93 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "io"
+ "time"
+)
+
+// BucketStat container for bucket metadata.
+type BucketStat struct {
+ // The name of the bucket.
+ Name string
+ // Date the bucket was created.
+ CreationDate time.Time
+}
+
+// ObjectStat container for object metadata.
+type ObjectStat struct {
+ ETag string
+ Key string
+ LastModified time.Time
+ Size int64
+ ContentType string
+
+ // Owner name.
+ Owner struct {
+ DisplayName string
+ ID string
+ }
+
+ // The class of storage used to store the object.
+ StorageClass string
+
+ // Error
+ Err error
+}
+
+// ObjectMultipartStat container for multipart object metadata.
+type ObjectMultipartStat struct {
+ // Date and time at which the multipart upload was initiated.
+ Initiated time.Time `type:"timestamp" timestampFormat:"iso8601"`
+
+ Initiator initiator
+ Owner owner
+
+ StorageClass string
+
+ // Key of the object for which the multipart upload was initiated.
+ Key string
+ Size int64
+
+ // Upload ID that identifies the multipart upload.
+ UploadID string `xml:"UploadId"`
+
+ // Error
+ Err error
+}
+
+// partMetadata - container for each part's metadata.
+type partMetadata struct {
+ MD5Sum []byte
+ Sha256Sum []byte
+ ReadCloser io.ReadCloser
+ Size int64
+ Number int // partMetadata number.
+
+ // Error
+ Err error
+}
+
+// putObjectMetadata - container for each single PUT operation.
+type putObjectMetadata struct {
+ MD5Sum []byte
+ Sha256Sum []byte
+ ReadCloser io.ReadCloser
+ Size int64
+ ContentType string
+}
diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/api-error-response.go b/Godeps/_workspace/src/github.com/minio/minio-go/api-error-response.go
new file mode 100644
index 000000000..0d2496507
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/minio/minio-go/api-error-response.go
@@ -0,0 +1,232 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "encoding/json"
+ "encoding/xml"
+ "fmt"
+ "net/http"
+ "strconv"
+)
+
+/* **** SAMPLE ERROR RESPONSE ****
+   <?xml version="1.0" encoding="UTF-8"?>
+   <Error>
+      <Code>AccessDenied</Code>
+      <Message>Access Denied</Message>
+      <BucketName>bucketName</BucketName>
+      <Key>objectName</Key>
+      <RequestId>F19772218238A85A</RequestId>
+      <HostId>GuWkjyviSiGHizehqpmsD1ndz5NClSP19DOT+s2mv7gXGQ8/X1lhbDGiIJEXpGFD</HostId>
+   </Error>
+*/
+
+// ErrorResponse is the type error returned by some API operations.
+type ErrorResponse struct {
+ XMLName xml.Name `xml:"Error" json:"-"`
+ Code string
+ Message string
+ BucketName string
+ Key string
+ RequestID string `xml:"RequestId"`
+ HostID string `xml:"HostId"`
+
+ // This is a new undocumented field, set only if available.
+ AmzBucketRegion string
+}
+
+// ToErrorResponse returns a parsed ErrorResponse struct. If the input is nil or not an ErrorResponse, a zero-value ErrorResponse is returned.
+// This function is useful when someone wants to dig deeper into the error structures over the network.
+//
+// For example:
+//
+// import s3 "github.com/minio/minio-go"
+// ...
+// ...
+// reader, stat, err := s3.GetObject(...)
+// if err != nil {
+// resp := s3.ToErrorResponse(err)
+// fmt.Println(resp.ToXML())
+// }
+// ...
+func ToErrorResponse(err error) ErrorResponse {
+ switch err := err.(type) {
+ case ErrorResponse:
+ return err
+ default:
+ return ErrorResponse{}
+ }
+}
+
+// ToXML returns the error response marshalled as an XML string.
+func (e ErrorResponse) ToXML() string {
+ b, err := xml.Marshal(&e)
+ if err != nil {
+ panic(err)
+ }
+ return string(b)
+}
+
+// ToJSON returns the error response marshalled as a JSON string.
+func (e ErrorResponse) ToJSON() string {
+ b, err := json.Marshal(&e)
+ if err != nil {
+ panic(err)
+ }
+ return string(b)
+}
+
+// Error formats the HTTP error string; ErrorResponse satisfies the error interface.
+func (e ErrorResponse) Error() string {
+ return e.Message
+}
+
+// Common reporting string
+const (
+ reportIssue = "Please report this issue at https://github.com/minio/minio-go/issues."
+)
+
+// HTTPRespToErrorResponse returns a new encoded ErrorResponse structure
+func HTTPRespToErrorResponse(resp *http.Response, bucketName, objectName string) error {
+ if resp == nil {
+ msg := "Response is empty. " + reportIssue
+ return ErrInvalidArgument(msg)
+ }
+ var errorResponse ErrorResponse
+ err := xmlDecoder(resp.Body, &errorResponse)
+ if err != nil {
+ switch resp.StatusCode {
+ case http.StatusNotFound:
+ if objectName == "" {
+ errorResponse = ErrorResponse{
+ Code: "NoSuchBucket",
+ Message: "The specified bucket does not exist.",
+ BucketName: bucketName,
+ RequestID: resp.Header.Get("x-amz-request-id"),
+ HostID: resp.Header.Get("x-amz-id-2"),
+ AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"),
+ }
+ } else {
+ errorResponse = ErrorResponse{
+ Code: "NoSuchKey",
+ Message: "The specified key does not exist.",
+ BucketName: bucketName,
+ Key: objectName,
+ RequestID: resp.Header.Get("x-amz-request-id"),
+ HostID: resp.Header.Get("x-amz-id-2"),
+ AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"),
+ }
+ }
+ case http.StatusForbidden:
+ errorResponse = ErrorResponse{
+ Code: "AccessDenied",
+ Message: "Access Denied.",
+ BucketName: bucketName,
+ Key: objectName,
+ RequestID: resp.Header.Get("x-amz-request-id"),
+ HostID: resp.Header.Get("x-amz-id-2"),
+ AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"),
+ }
+ case http.StatusConflict:
+ errorResponse = ErrorResponse{
+ Code: "Conflict",
+ Message: "Bucket not empty.",
+ BucketName: bucketName,
+ RequestID: resp.Header.Get("x-amz-request-id"),
+ HostID: resp.Header.Get("x-amz-id-2"),
+ AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"),
+ }
+ default:
+ errorResponse = ErrorResponse{
+ Code: resp.Status,
+ Message: resp.Status,
+ BucketName: bucketName,
+ RequestID: resp.Header.Get("x-amz-request-id"),
+ HostID: resp.Header.Get("x-amz-id-2"),
+ AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"),
+ }
+ }
+ }
+ return errorResponse
+}
+
+// ErrEntityTooLarge input size is larger than supported maximum.
+func ErrEntityTooLarge(totalSize int64, bucketName, objectName string) error {
+ msg := fmt.Sprintf("Your proposed upload size ‘%d’ exceeds the maximum allowed object size '5GiB' for single PUT operation.", totalSize)
+ return ErrorResponse{
+ Code: "EntityTooLarge",
+ Message: msg,
+ BucketName: bucketName,
+ Key: objectName,
+ }
+}
+
+// ErrUnexpectedShortRead unexpected shorter read of input buffer from target.
+func ErrUnexpectedShortRead(totalRead, totalSize int64, bucketName, objectName string) error {
+ msg := fmt.Sprintf("Data read ‘%s’ is shorter than the size ‘%s’ of input buffer.",
+ strconv.FormatInt(totalRead, 10), strconv.FormatInt(totalSize, 10))
+ return ErrorResponse{
+ Code: "UnexpectedShortRead",
+ Message: msg,
+ BucketName: bucketName,
+ Key: objectName,
+ }
+}
+
+// ErrUnexpectedEOF unexpected end of file reached.
+func ErrUnexpectedEOF(totalRead, totalSize int64, bucketName, objectName string) error {
+ msg := fmt.Sprintf("Data read ‘%s’ is not equal to the size ‘%s’ of the input Reader.",
+ strconv.FormatInt(totalRead, 10), strconv.FormatInt(totalSize, 10))
+ return ErrorResponse{
+ Code: "UnexpectedEOF",
+ Message: msg,
+ BucketName: bucketName,
+ Key: objectName,
+ }
+}
+
+// ErrInvalidBucketName - invalid bucket name response.
+func ErrInvalidBucketName(message string) error {
+ return ErrorResponse{
+ Code: "InvalidBucketName",
+ Message: message,
+ RequestID: "minio",
+ }
+}
+
+// ErrInvalidObjectName - invalid object name response.
+func ErrInvalidObjectName(message string) error {
+ return ErrorResponse{
+ Code: "NoSuchKey",
+ Message: message,
+ RequestID: "minio",
+ }
+}
+
+// ErrInvalidObjectPrefix - invalid object prefix response is
+// similar to object name response.
+var ErrInvalidObjectPrefix = ErrInvalidObjectName
+
+// ErrInvalidArgument - invalid argument response.
+func ErrInvalidArgument(message string) error {
+ return ErrorResponse{
+ Code: "InvalidArgument",
+ Message: message,
+ RequestID: "minio",
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/api-fget-object.go b/Godeps/_workspace/src/github.com/minio/minio-go/api-fget-object.go
new file mode 100644
index 000000000..ee96a6cb9
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/minio/minio-go/api-fget-object.go
@@ -0,0 +1,102 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "io"
+ "os"
+ "path/filepath"
+)
+
+// FGetObject - downloads an object into a local file.
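+// Interrupted downloads are resumed through a temporary ".part.minio" file.
+//
+// A usage sketch (assumes an s3Client obtained from New(); all names are hypothetical):
+//
+//	err := s3Client.FGetObject("my-bucketname", "my-objectname", "/tmp/my-object")
+//	if err != nil {
+//		log.Fatalln(err)
+//	}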
+func (c Client) FGetObject(bucketName, objectName, filePath string) error {
+ // Input validation.
+ if err := isValidBucketName(bucketName); err != nil {
+ return err
+ }
+ if err := isValidObjectName(objectName); err != nil {
+ return err
+ }
+
+ // Verify if destination already exists.
+ st, err := os.Stat(filePath)
+ if err == nil {
+ // If the destination exists and is a directory.
+ if st.IsDir() {
+ return ErrInvalidArgument("fileName is a directory.")
+ }
+ }
+
+ // Proceed if the file does not exist; return for all other errors.
+ if err != nil {
+ if !os.IsNotExist(err) {
+ return err
+ }
+ }
+
+ // Extract the top level directory.
+ objectDir, _ := filepath.Split(filePath)
+ if objectDir != "" {
+ // Create any missing top level directories.
+ if err := os.MkdirAll(objectDir, 0700); err != nil {
+ return err
+ }
+ }
+
+ // Stat the object to gather its md5sum (ETag).
+ objectStat, err := c.StatObject(bucketName, objectName)
+ if err != nil {
+ return err
+ }
+
+ // Write to a temporary file "fileName.<ETag>.part.minio" before renaming to the final name.
+ filePartPath := filePath + objectStat.ETag + ".part.minio"
+
+ // If exists, open in append mode. If not create it as a part file.
+ filePart, err := os.OpenFile(filePartPath, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0600)
+ if err != nil {
+ return err
+ }
+
+ // Issue Stat to get the current offset.
+ st, err = filePart.Stat()
+ if err != nil {
+ return err
+ }
+
+ // Request the object starting from the part file's current size, resuming the download.
+ objectReader, objectStat, err := c.getObject(bucketName, objectName, st.Size(), 0)
+ if err != nil {
+ return err
+ }
+
+ // Write to the part file.
+ if _, err = io.CopyN(filePart, objectReader, objectStat.Size); err != nil {
+ return err
+ }
+
+ // Close the file before renaming; this is specifically needed on Windows.
+ filePart.Close()
+
+ // Safely completed. Now commit by renaming to actual filename.
+ if err = os.Rename(filePartPath, filePath); err != nil {
+ return err
+ }
+
+ // Return.
+ return nil
+}
diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/api-fput-object.go b/Godeps/_workspace/src/github.com/minio/minio-go/api-fput-object.go
new file mode 100644
index 000000000..059710038
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/minio/minio-go/api-fput-object.go
@@ -0,0 +1,281 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "crypto/md5"
+ "crypto/sha256"
+ "encoding/hex"
+ "fmt"
+ "hash"
+ "io"
+ "io/ioutil"
+ "os"
+ "sort"
+)
+
+// getUploadID returns the upload id of a previously initiated upload for this object, or initiates a new multipart upload and returns its upload id.
+func (c Client) getUploadID(bucketName, objectName, contentType string) (string, error) {
+ // Input validation.
+ if err := isValidBucketName(bucketName); err != nil {
+ return "", err
+ }
+ if err := isValidObjectName(objectName); err != nil {
+ return "", err
+ }
+
+ // Set content Type to default if empty string.
+ if contentType == "" {
+ contentType = "application/octet-stream"
+ }
+
+ // Find upload id for previous upload for an object.
+ uploadID, err := c.findUploadID(bucketName, objectName)
+ if err != nil {
+ return "", err
+ }
+ if uploadID == "" {
+ // Initiate multipart upload for an object.
+ initMultipartUploadResult, err := c.initiateMultipartUpload(bucketName, objectName, contentType)
+ if err != nil {
+ return "", err
+ }
+ // Save the new upload id.
+ uploadID = initMultipartUploadResult.UploadID
+ }
+ return uploadID, nil
+}
+
+// FPutObject - uploads a file to a bucket as an object.
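+// Files of 5MiB or more are uploaded as resumable multipart uploads, except on
+// endpoints where that is unsupported (Google Cloud Storage, anonymous Amazon S3 requests).
+//
+// A usage sketch (assumes an s3Client obtained from New(); all names are hypothetical):
+//
+//	n, err := s3Client.FPutObject("my-bucketname", "my-objectname", "/tmp/my-file", "application/octet-stream")
+//	if err != nil {
+//		log.Fatalln(err)
+//	}
+//	log.Println("uploaded", n, "bytes")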
+func (c Client) FPutObject(bucketName, objectName, filePath, contentType string) (int64, error) {
+ // Input validation.
+ if err := isValidBucketName(bucketName); err != nil {
+ return 0, err
+ }
+ if err := isValidObjectName(objectName); err != nil {
+ return 0, err
+ }
+
+ // Open the referenced file.
+ fileData, err := os.Open(filePath)
+ // If any error fail quickly here.
+ if err != nil {
+ return 0, err
+ }
+ defer fileData.Close()
+
+ // Save the file stat.
+ fileStat, err := fileData.Stat()
+ if err != nil {
+ return 0, err
+ }
+
+ // Save the file size.
+ fileSize := fileStat.Size()
+ if fileSize > int64(maxMultipartPutObjectSize) {
+ return 0, ErrInvalidArgument("Input file size is bigger than the supported maximum of 5TiB.")
+ }
+
+ // NOTE: Google Cloud Storage multipart Put is not compatible with Amazon S3 APIs.
+ // Current implementation will only upload a maximum of 5GiB to Google Cloud Storage servers.
+ if isGoogleEndpoint(c.endpointURL) {
+ if fileSize <= -1 || fileSize > int64(maxSinglePutObjectSize) {
+ return 0, ErrorResponse{
+ Code: "NotImplemented",
+ Message: fmt.Sprintf("Invalid Content-Length %d for file uploads to Google Cloud Storage.", fileSize),
+ Key: objectName,
+ BucketName: bucketName,
+ }
+ }
+ // Do not compute MD5 for Google Cloud Storage. Uploads up to 5GiB in size.
+ n, err := c.putNoChecksum(bucketName, objectName, fileData, fileSize, contentType)
+ return n, err
+ }
+
+ // NOTE: S3 doesn't allow anonymous multipart requests.
+ if isAmazonEndpoint(c.endpointURL) && c.anonymous {
+ if fileSize <= -1 || fileSize > int64(maxSinglePutObjectSize) {
+ return 0, ErrorResponse{
+ Code: "NotImplemented",
+ Message: fmt.Sprintf("For anonymous requests Content-Length cannot be %d.", fileSize),
+ Key: objectName,
+ BucketName: bucketName,
+ }
+ }
+ // Do not compute MD5 for anonymous requests to Amazon S3. Uploads up to 5GiB in size.
+ n, err := c.putAnonymous(bucketName, objectName, fileData, fileSize, contentType)
+ return n, err
+ }
+
+ // Large file upload is initiated when the input data size
+ // is greater than 5MiB, or when the data size is negative.
+ if fileSize >= minimumPartSize || fileSize < 0 {
+ n, err := c.fputLargeObject(bucketName, objectName, fileData, fileSize, contentType)
+ return n, err
+ }
+ n, err := c.putSmallObject(bucketName, objectName, fileData, fileSize, contentType)
+ return n, err
+}
+
+// computeHash - calculates MD5 and Sha256 for an input read Seeker.
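+// The reader is rewound to the beginning before returning, so the same reader can be uploaded afterwards.
+//
+// A usage sketch (any io.ReadSeeker works, e.g. a bytes.Reader over hypothetical data):
+//
+//	md5Sum, sha256Sum, size, err := c.computeHash(bytes.NewReader(data))
+//	// sha256Sum is nil unless the client uses Signature V4.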
+func (c Client) computeHash(reader io.ReadSeeker) (md5Sum, sha256Sum []byte, size int64, err error) {
+ // MD5 and Sha256 hashers.
+ var hashMD5, hashSha256 hash.Hash
+ hashMD5 = md5.New()
+ hashWriter := io.MultiWriter(hashMD5)
+ if c.signature.isV4() {
+ hashSha256 = sha256.New()
+ hashWriter = io.MultiWriter(hashMD5, hashSha256)
+ }
+
+ size, err = io.Copy(hashWriter, reader)
+ if err != nil {
+ return nil, nil, 0, err
+ }
+
+ // Seek back reader to the beginning location.
+ if _, err := reader.Seek(0, 0); err != nil {
+ return nil, nil, 0, err
+ }
+
+ // Finalize md5sum and sha256 sum.
+ md5Sum = hashMD5.Sum(nil)
+ if c.signature.isV4() {
+ sha256Sum = hashSha256.Sum(nil)
+ }
+ return md5Sum, sha256Sum, size, nil
+}
+
+func (c Client) fputLargeObject(bucketName, objectName string, fileData *os.File, fileSize int64, contentType string) (int64, error) {
+ // Input validation.
+ if err := isValidBucketName(bucketName); err != nil {
+ return 0, err
+ }
+ if err := isValidObjectName(objectName); err != nil {
+ return 0, err
+ }
+
+ // getUploadID for an object, initiates a new multipart request
+ // if it cannot find any previously partially uploaded object.
+ uploadID, err := c.getUploadID(bucketName, objectName, contentType)
+ if err != nil {
+ return 0, err
+ }
+
+ // Total data read and written to the server. Should equal fileSize at the end of the call.
+ var totalUploadedSize int64
+
+ // Complete multipart upload.
+ var completeMultipartUpload completeMultipartUpload
+
+ // Fetch previously upload parts and save the total size.
+ partsInfo, err := c.listObjectParts(bucketName, objectName, uploadID)
+ if err != nil {
+ return 0, err
+ }
+ // Previous maximum part size
+ var prevMaxPartSize int64
+ // Loop through all parts and calculate totalUploadedSize.
+ for _, partInfo := range partsInfo {
+ totalUploadedSize += partInfo.Size
+ // Choose the maximum part size.
+ if partInfo.Size >= prevMaxPartSize {
+ prevMaxPartSize = partInfo.Size
+ }
+ }
+
+ // Calculate the optimal part size for a given file size.
+ partSize := optimalPartSize(fileSize)
+ // If prevMaxPartSize is set use that.
+ if prevMaxPartSize != 0 {
+ partSize = prevMaxPartSize
+ }
+
+ // Part number always starts with '1'.
+ partNumber := 1
+
+ // Loop through until EOF.
+ for totalUploadedSize < fileSize {
+ // Get a section reader on a particular offset.
+ sectionReader := io.NewSectionReader(fileData, totalUploadedSize, partSize)
+
+ // Calculates MD5 and Sha256 sum for a section reader.
+ md5Sum, sha256Sum, size, err := c.computeHash(sectionReader)
+ if err != nil {
+ return 0, err
+ }
+
+ // Save all the part metadata.
+ partMdata := partMetadata{
+ ReadCloser: ioutil.NopCloser(sectionReader),
+ Size: size,
+ MD5Sum: md5Sum,
+ Sha256Sum: sha256Sum,
+ Number: partNumber, // Part number to be uploaded.
+ }
+
+ // If part number already uploaded, move to the next one.
+ if isPartUploaded(objectPart{
+ ETag: hex.EncodeToString(partMdata.MD5Sum),
+ PartNumber: partMdata.Number,
+ }, partsInfo) {
+ // Close the read closer.
+ partMdata.ReadCloser.Close()
+ continue
+ }
+
+ // Upload the part.
+ objPart, err := c.uploadPart(bucketName, objectName, uploadID, partMdata)
+ if err != nil {
+ partMdata.ReadCloser.Close()
+ return totalUploadedSize, err
+ }
+
+ // Save successfully uploaded size.
+ totalUploadedSize += partMdata.Size
+
+ // Save successfully uploaded part metadata.
+ partsInfo[partMdata.Number] = objPart
+
+ // Increment to next part number.
+ partNumber++
+ }
+
+ // If totalUploadedSize differs from the file size, do not complete the request; return an error.
+ if totalUploadedSize != fileSize {
+ return totalUploadedSize, ErrUnexpectedEOF(totalUploadedSize, fileSize, bucketName, objectName)
+ }
+
+ // Loop over uploaded parts to save them in a Parts array before completing the multipart request.
+ for _, part := range partsInfo {
+ var complPart completePart
+ complPart.ETag = part.ETag
+ complPart.PartNumber = part.PartNumber
+ completeMultipartUpload.Parts = append(completeMultipartUpload.Parts, complPart)
+ }
+
+ // Sort all completed parts.
+ sort.Sort(completedParts(completeMultipartUpload.Parts))
+ _, err = c.completeMultipartUpload(bucketName, objectName, uploadID, completeMultipartUpload)
+ if err != nil {
+ return totalUploadedSize, err
+ }
+
+ // Return final size.
+ return totalUploadedSize, nil
+}
diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/api-get.go b/Godeps/_workspace/src/github.com/minio/minio-go/api-get.go
new file mode 100644
index 000000000..b331fb44c
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/minio/minio-go/api-get.go
@@ -0,0 +1,379 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "math"
+ "net/http"
+ "net/url"
+ "strings"
+ "sync"
+ "time"
+)
+
+// GetBucketACL get the permissions on an existing bucket.
+//
+// Returned values are:
+//
+// private - owner gets full access.
+// public-read - owner gets full access, others get read access.
+// public-read-write - owner gets full access, others get full access too.
+// authenticated-read - owner gets full access, authenticated users get read access.
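+//
+// A minimal usage sketch (hypothetical bucket name):
+//
+// acl, err := c.GetBucketACL("mybucket")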
+func (c Client) GetBucketACL(bucketName string) (BucketACL, error) {
+ if err := isValidBucketName(bucketName); err != nil {
+ return "", err
+ }
+
+ // Set acl query.
+ urlValues := make(url.Values)
+ urlValues.Set("acl", "")
+
+ // Instantiate a new request.
+ req, err := c.newRequest("GET", requestMetadata{
+ bucketName: bucketName,
+ queryValues: urlValues,
+ })
+ if err != nil {
+ return "", err
+ }
+
+ // Initiate the request.
+ resp, err := c.httpClient.Do(req)
+ defer closeResponse(resp)
+ if err != nil {
+ return "", err
+ }
+ if resp != nil {
+ if resp.StatusCode != http.StatusOK {
+ return "", HTTPRespToErrorResponse(resp, bucketName, "")
+ }
+ }
+
+ // Decode access control policy.
+ policy := accessControlPolicy{}
+ err = xmlDecoder(resp.Body, &policy)
+ if err != nil {
+ return "", err
+ }
+
+ // We need to avoid the following de-serialization check for Google Cloud Storage.
+ // On Google Cloud Storage a "private" canned ACL policy does not carry a grant list.
+ // Treat it as a valid case; check for all other vendors.
+ if !isGoogleEndpoint(c.endpointURL) {
+ if policy.AccessControlList.Grant == nil {
+ errorResponse := ErrorResponse{
+ Code: "InternalError",
+ Message: "Access control Grant list is empty. " + reportIssue,
+ BucketName: bucketName,
+ RequestID: resp.Header.Get("x-amz-request-id"),
+ HostID: resp.Header.Get("x-amz-id-2"),
+ AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"),
+ }
+ return "", errorResponse
+ }
+ }
+
+ // Boolean flags to identify the right canned ACLs.
+ var publicRead, publicWrite bool
+
+ // Handle grants.
+ grants := policy.AccessControlList.Grant
+ for _, g := range grants {
+ if g.Grantee.URI == "" && g.Permission == "FULL_CONTROL" {
+ continue
+ }
+ if g.Grantee.URI == "http://acs.amazonaws.com/groups/global/AuthenticatedUsers" && g.Permission == "READ" {
+ return BucketACL("authenticated-read"), nil
+ } else if g.Grantee.URI == "http://acs.amazonaws.com/groups/global/AllUsers" && g.Permission == "WRITE" {
+ publicWrite = true
+ } else if g.Grantee.URI == "http://acs.amazonaws.com/groups/global/AllUsers" && g.Permission == "READ" {
+ publicRead = true
+ }
+ }
+
+ // Neither public write nor public read is enabled; the bucket is private.
+ if !publicWrite && !publicRead {
+ return BucketACL("private"), nil
+ }
+ // Public read is enabled but public write is not.
+ if !publicWrite && publicRead {
+ return BucketACL("public-read"), nil
+ }
+ // Both public read and public write are enabled.
+ if publicRead && publicWrite {
+ return BucketACL("public-read-write"), nil
+ }
+
+ return "", ErrorResponse{
+ Code: "NoSuchBucketPolicy",
+ Message: "The specified bucket does not have a bucket policy.",
+ BucketName: bucketName,
+ RequestID: "minio",
+ }
+}
+
+// GetObject gets object content from the specified bucket.
+// You may also look at GetObjectPartial.
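+//
+// A minimal usage sketch (hypothetical bucket and object names);
+// the caller is responsible for closing the returned reader:
+//
+// reader, stat, err := c.GetObject("mybucket", "myobject")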
+func (c Client) GetObject(bucketName, objectName string) (io.ReadCloser, ObjectStat, error) {
+ if err := isValidBucketName(bucketName); err != nil {
+ return nil, ObjectStat{}, err
+ }
+ if err := isValidObjectName(objectName); err != nil {
+ return nil, ObjectStat{}, err
+ }
+ // get the whole object as a stream, no seek or resume supported for this.
+ return c.getObject(bucketName, objectName, 0, 0)
+}
+
+// ReadAtCloser is the interface that groups io.ReaderAt and io.Closer.
+type ReadAtCloser interface {
+ io.ReaderAt
+ io.Closer
+}
+
+// GetObjectPartial returns an io.ReaderAt for reading sparse ranges of an object.
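+//
+// A minimal usage sketch (hypothetical names; reads 1KiB starting at offset 512):
+//
+// reader, stat, err := c.GetObjectPartial("mybucket", "myobject")
+// defer reader.Close()
+// buf := make([]byte, 1024)
+// n, err := reader.ReadAt(buf, 512)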
+func (c Client) GetObjectPartial(bucketName, objectName string) (ReadAtCloser, ObjectStat, error) {
+ if err := isValidBucketName(bucketName); err != nil {
+ return nil, ObjectStat{}, err
+ }
+ if err := isValidObjectName(objectName); err != nil {
+ return nil, ObjectStat{}, err
+ }
+ // Send an explicit stat to get the actual object size.
+ objectStat, err := c.StatObject(bucketName, objectName)
+ if err != nil {
+ return nil, ObjectStat{}, err
+ }
+
+ // Create request channel.
+ reqCh := make(chan readAtRequest)
+ // Create response channel.
+ resCh := make(chan readAtResponse)
+ // Create done channel.
+ doneCh := make(chan struct{})
+
+ // This routine feeds partial object data as and when the caller reads.
+ go func() {
+ defer close(reqCh)
+ defer close(resCh)
+
+ // Loop through the incoming control messages and read data.
+ for {
+ select {
+ // When the done channel is closed exit our routine.
+ case <-doneCh:
+ return
+ // Request message.
+ case req := <-reqCh:
+ // Get shortest length.
+ // NOTE: Last remaining bytes are usually smaller than
+ // req.Buffer size. Use that as the final length.
+ length := math.Min(float64(len(req.Buffer)), float64(objectStat.Size-req.Offset))
+ httpReader, _, err := c.getObject(bucketName, objectName, req.Offset, int64(length))
+ if err != nil {
+ resCh <- readAtResponse{
+ Error: err,
+ }
+ return
+ }
+ size, err := httpReader.Read(req.Buffer)
+ // Close the body so the underlying connection is released;
+ // a fresh GET request is issued for every incoming ReadAt.
+ httpReader.Close()
+ resCh <- readAtResponse{
+ Size: size,
+ Error: err,
+ }
+ }
+ }
+ }()
+ // Return the readerAt backed by routine.
+ return newObjectReadAtCloser(reqCh, resCh, doneCh, objectStat.Size), objectStat, nil
+}
+
+// Response message container used to reply to a read request.
+type readAtResponse struct {
+ Size int
+ Error error
+}
+
+// Request message container used to communicate with the internal goroutine.
+type readAtRequest struct {
+ Buffer []byte // requested bytes.
+ Offset int64 // readAt offset.
+}
+
+// objectReadAtCloser container for io.ReadAtCloser.
+type objectReadAtCloser struct {
+ // mutex.
+ mutex *sync.Mutex
+
+ // User allocated and defined.
+ reqCh chan<- readAtRequest
+ resCh <-chan readAtResponse
+ doneCh chan<- struct{}
+ objectSize int64
+
+ // Previous error saved for future calls.
+ prevErr error
+}
+
+// newObjectReadAtCloser returns an io.ReaderAt and io.Closer backed by an HTTP stream.
+func newObjectReadAtCloser(reqCh chan<- readAtRequest, resCh <-chan readAtResponse, doneCh chan<- struct{}, objectSize int64) *objectReadAtCloser {
+ return &objectReadAtCloser{
+ mutex: new(sync.Mutex),
+ reqCh: reqCh,
+ resCh: resCh,
+ doneCh: doneCh,
+ objectSize: objectSize,
+ }
+}
+
+// ReadAt reads len(b) bytes from the File starting at byte offset off.
+// It returns the number of bytes read and the error, if any.
+// ReadAt always returns a non-nil error when n < len(b).
+// At end of file, that error is io.EOF.
+func (r *objectReadAtCloser) ReadAt(p []byte, offset int64) (int, error) {
+ // Locking.
+ r.mutex.Lock()
+ defer r.mutex.Unlock()
+
+ // Return any error saved from a previous operation.
+ if r.prevErr != nil {
+ return 0, r.prevErr
+ }
+
+ // Send current information over control channel to indicate we are ready.
+ reqMsg := readAtRequest{}
+
+ // Send the current offset and bytes requested.
+ reqMsg.Buffer = p
+ reqMsg.Offset = offset
+
+ // Send read request over the control channel.
+ r.reqCh <- reqMsg
+
+ // Get data over the response channel.
+ dataMsg := <-r.resCh
+
+ // Save any error.
+ r.prevErr = dataMsg.Error
+ if dataMsg.Error != nil {
+ if dataMsg.Error == io.EOF {
+ return dataMsg.Size, dataMsg.Error
+ }
+ return 0, dataMsg.Error
+ }
+ return dataMsg.Size, nil
+}
+
+// Close closes the objectReadAtCloser and terminates its feeding
+// goroutine. Any call after the first returns an error.
+func (r *objectReadAtCloser) Close() (err error) {
+ // Locking.
+ r.mutex.Lock()
+ defer r.mutex.Unlock()
+
+ // Return any error saved from a previous operation.
+ if r.prevErr != nil {
+ return r.prevErr
+ }
+
+ // Close successfully.
+ close(r.doneCh)
+
+ // Save this for any subsequent frivolous reads.
+ errMsg := "objectReadAtCloser: is already closed. Bad file descriptor."
+ r.prevErr = errors.New(errMsg)
+ return
+}
+
+// getObject - retrieve object from Object Storage.
+//
+// Additionally this function also takes range arguments to download the specified
+// range bytes of an object. Setting offset and length = 0 will download the full object.
+//
+// For more information about the HTTP Range header,
+// see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35.
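+//
+// For illustration (hypothetical values): offset=0, length=100 sets
+// "Range: bytes=0-99"; offset=100, length=0 sets "Range: bytes=100-";
+// offset=0, length=-100 sets "Range: bytes=-100", i.e. the last 100 bytes.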
+func (c Client) getObject(bucketName, objectName string, offset, length int64) (io.ReadCloser, ObjectStat, error) {
+ // Validate input arguments.
+ if err := isValidBucketName(bucketName); err != nil {
+ return nil, ObjectStat{}, err
+ }
+ if err := isValidObjectName(objectName); err != nil {
+ return nil, ObjectStat{}, err
+ }
+
+ customHeader := make(http.Header)
+ // Set ranges if length and offset are valid.
+ if length > 0 && offset >= 0 {
+ customHeader.Set("Range", fmt.Sprintf("bytes=%d-%d", offset, offset+length-1))
+ } else if offset > 0 && length == 0 {
+ customHeader.Set("Range", fmt.Sprintf("bytes=%d-", offset))
+ } else if length < 0 && offset == 0 {
+ customHeader.Set("Range", fmt.Sprintf("bytes=%d", length))
+ }
+
+ // Instantiate a new request.
+ req, err := c.newRequest("GET", requestMetadata{
+ bucketName: bucketName,
+ objectName: objectName,
+ customHeader: customHeader,
+ })
+ if err != nil {
+ return nil, ObjectStat{}, err
+ }
+ // Execute the request.
+ resp, err := c.httpClient.Do(req)
+ if err != nil {
+ return nil, ObjectStat{}, err
+ }
+ if resp != nil {
+ if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusPartialContent {
+ return nil, ObjectStat{}, HTTPRespToErrorResponse(resp, bucketName, objectName)
+ }
+ }
+ // trim off the odd double quotes.
+ md5sum := strings.Trim(resp.Header.Get("ETag"), "\"")
+ // parse the date.
+ date, err := time.Parse(http.TimeFormat, resp.Header.Get("Last-Modified"))
+ if err != nil {
+ msg := "Last-Modified time format not recognized. " + reportIssue
+ return nil, ObjectStat{}, ErrorResponse{
+ Code: "InternalError",
+ Message: msg,
+ RequestID: resp.Header.Get("x-amz-request-id"),
+ HostID: resp.Header.Get("x-amz-id-2"),
+ AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"),
+ }
+ }
+ contentType := strings.TrimSpace(resp.Header.Get("Content-Type"))
+ if contentType == "" {
+ contentType = "application/octet-stream"
+ }
+ var objectStat ObjectStat
+ objectStat.ETag = md5sum
+ objectStat.Key = objectName
+ objectStat.Size = resp.ContentLength
+ objectStat.LastModified = date
+ objectStat.ContentType = contentType
+
+ // do not close body here, caller will close
+ return resp.Body, objectStat, nil
+}
diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/api-list.go b/Godeps/_workspace/src/github.com/minio/minio-go/api-list.go
new file mode 100644
index 000000000..180a28a9a
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/minio/minio-go/api-list.go
@@ -0,0 +1,486 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "fmt"
+ "net/http"
+ "net/url"
+)
+
+// ListBuckets lists all buckets owned by this authenticated user.
+//
+// This call requires explicit authentication, no anonymous requests are
+// allowed for listing buckets.
+//
+// api := client.New(....)
+// buckets, err := api.ListBuckets()
+// if err != nil {
+// fmt.Println(err)
+// return
+// }
+// for _, bucket := range buckets {
+// fmt.Println(bucket)
+// }
+//
+func (c Client) ListBuckets() ([]BucketStat, error) {
+ // Instantiate a new request.
+ req, err := c.newRequest("GET", requestMetadata{})
+ if err != nil {
+ return nil, err
+ }
+ // Initiate the request.
+ resp, err := c.httpClient.Do(req)
+ defer closeResponse(resp)
+ if err != nil {
+ return nil, err
+ }
+ if resp != nil {
+ if resp.StatusCode != http.StatusOK {
+ return nil, HTTPRespToErrorResponse(resp, "", "")
+ }
+ }
+ listAllMyBucketsResult := listAllMyBucketsResult{}
+ err = xmlDecoder(resp.Body, &listAllMyBucketsResult)
+ if err != nil {
+ return nil, err
+ }
+ return listAllMyBucketsResult.Buckets.Bucket, nil
+}
+
+// ListObjects - (List Objects) - List some objects or all recursively.
+//
+// ListObjects lists all objects matching the objectPrefix from
+// the specified bucket. If recursion is enabled it would list
+// all subdirectories and all their contents.
+//
+// Your input parameters are just bucketName, objectPrefix, recursive and a
+// done channel for canceling the listing. If you enable recursive as 'true'
+// this function will return back all the objects in a given bucket name and
+// object prefix.
+//
+// api := client.New(....)
+// recursive := true
+// doneCh := make(chan struct{})
+// defer close(doneCh)
+// for message := range api.ListObjects("mytestbucket", "starthere", recursive, doneCh) {
+// fmt.Println(message)
+// }
+//
+func (c Client) ListObjects(bucketName, objectPrefix string, recursive bool, doneCh <-chan struct{}) <-chan ObjectStat {
+ // Allocate new list objects channel.
+ objectStatCh := make(chan ObjectStat, 1000)
+ // Default listing is delimited at "/"
+ delimiter := "/"
+ if recursive {
+ // If recursive we do not delimit.
+ delimiter = ""
+ }
+ // Validate bucket name.
+ if err := isValidBucketName(bucketName); err != nil {
+ defer close(objectStatCh)
+ objectStatCh <- ObjectStat{
+ Err: err,
+ }
+ return objectStatCh
+ }
+ // Validate incoming object prefix.
+ if err := isValidObjectPrefix(objectPrefix); err != nil {
+ defer close(objectStatCh)
+ objectStatCh <- ObjectStat{
+ Err: err,
+ }
+ return objectStatCh
+ }
+
+ // Initiate list objects goroutine here.
+ go func(objectStatCh chan<- ObjectStat) {
+ defer close(objectStatCh)
+ // Save marker for next request.
+ var marker string
+ for {
+ // Get list of objects a maximum of 1000 per request.
+ result, err := c.listObjectsQuery(bucketName, objectPrefix, marker, delimiter, 1000)
+ if err != nil {
+ objectStatCh <- ObjectStat{
+ Err: err,
+ }
+ return
+ }
+
+ // If contents are available loop through and send over channel.
+ for _, object := range result.Contents {
+ // Save the marker.
+ marker = object.Key
+ select {
+ // Send object content.
+ case objectStatCh <- object:
+ // If receives done from the caller, return here.
+ case <-doneCh:
+ return
+ }
+ }
+
+ // Send all common prefixes if any.
+ // NOTE: prefixes are only present if the request is delimited.
+ for _, obj := range result.CommonPrefixes {
+ object := ObjectStat{}
+ object.Key = obj.Prefix
+ object.Size = 0
+ select {
+ // Send object prefixes.
+ case objectStatCh <- object:
+ // If receives done from the caller, return here.
+ case <-doneCh:
+ return
+ }
+ }
+
+ // If next marker present, save it for next request.
+ if result.NextMarker != "" {
+ marker = result.NextMarker
+ }
+
+ // Listing ends when the result is not truncated; return right here.
+ if !result.IsTruncated {
+ return
+ }
+ }
+ }(objectStatCh)
+ return objectStatCh
+}
+
+/// Bucket Read Operations.
+
+// listObjectsQuery - (List Objects) - List some or all (up to 1000) of the objects in a bucket.
+//
+// You can use the request parameters as selection criteria to return a subset of the objects in a bucket.
+// request parameters :-
+// ---------
+// ?marker - Specifies the key to start with when listing objects in a bucket.
+// ?delimiter - A delimiter is a character you use to group keys.
+// ?prefix - Limits the response to keys that begin with the specified prefix.
+// ?max-keys - Sets the maximum number of keys returned in the response body.
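+//
+// A minimal (hypothetical) call sketch:
+//
+// result, err := c.listObjectsQuery("mybucket", "photos/", "", "/", 1000)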
+func (c Client) listObjectsQuery(bucketName, objectPrefix, objectMarker, delimiter string, maxkeys int) (listBucketResult, error) {
+ // Validate bucket name.
+ if err := isValidBucketName(bucketName); err != nil {
+ return listBucketResult{}, err
+ }
+ // Validate object prefix.
+ if err := isValidObjectPrefix(objectPrefix); err != nil {
+ return listBucketResult{}, err
+ }
+ // Get resources properly escaped and lined up before
+ // using them in http request.
+ urlValues := make(url.Values)
+ // Set object prefix.
+ urlValues.Set("prefix", urlEncodePath(objectPrefix))
+ // Set object marker.
+ urlValues.Set("marker", urlEncodePath(objectMarker))
+ // Set delimiter.
+ urlValues.Set("delimiter", delimiter)
+ // Set max keys.
+ urlValues.Set("max-keys", fmt.Sprintf("%d", maxkeys))
+
+ // Initialize a new request.
+ req, err := c.newRequest("GET", requestMetadata{
+ bucketName: bucketName,
+ queryValues: urlValues,
+ })
+ if err != nil {
+ return listBucketResult{}, err
+ }
+ // Execute the list objects request.
+ resp, err := c.httpClient.Do(req)
+ defer closeResponse(resp)
+ if err != nil {
+ return listBucketResult{}, err
+ }
+ if resp != nil {
+ if resp.StatusCode != http.StatusOK {
+ return listBucketResult{}, HTTPRespToErrorResponse(resp, bucketName, "")
+ }
+ }
+ // Decode the list objects XML response.
+ listBucketResult := listBucketResult{}
+ err = xmlDecoder(resp.Body, &listBucketResult)
+ if err != nil {
+ return listBucketResult, err
+ }
+ return listBucketResult, nil
+}
+
+// ListIncompleteUploads - List incompletely uploaded multipart objects.
+//
+// ListIncompleteUploads lists all incomplete objects matching the
+// objectPrefix from the specified bucket. If recursion is enabled
+// it would list all subdirectories and all their contents.
+//
+// Your input parameters are just bucketName, objectPrefix, recursive and a
+// done channel for canceling the listing. If you enable recursive as 'true'
+// this function will return back all the multipart objects in a given bucket
+// name.
+//
+// api := client.New(....)
+// recursive := true
+// doneCh := make(chan struct{})
+// defer close(doneCh)
+// for message := range api.ListIncompleteUploads("mytestbucket", "starthere", recursive, doneCh) {
+// fmt.Println(message)
+// }
+//
+func (c Client) ListIncompleteUploads(bucketName, objectPrefix string, recursive bool, doneCh <-chan struct{}) <-chan ObjectMultipartStat {
+ // Turn on size aggregation of individual parts.
+ isAggregateSize := true
+ return c.listIncompleteUploads(bucketName, objectPrefix, recursive, isAggregateSize, doneCh)
+}
+
+// listIncompleteUploads lists all incomplete uploads.
+func (c Client) listIncompleteUploads(bucketName, objectPrefix string, recursive, aggregateSize bool, doneCh <-chan struct{}) <-chan ObjectMultipartStat {
+ // Allocate channel for multipart uploads.
+ objectMultipartStatCh := make(chan ObjectMultipartStat, 1000)
+ // Delimiter is set to "/" by default.
+ delimiter := "/"
+ if recursive {
+ // If recursive do not delimit.
+ delimiter = ""
+ }
+ // Validate bucket name.
+ if err := isValidBucketName(bucketName); err != nil {
+ defer close(objectMultipartStatCh)
+ objectMultipartStatCh <- ObjectMultipartStat{
+ Err: err,
+ }
+ return objectMultipartStatCh
+ }
+ // Validate incoming object prefix.
+ if err := isValidObjectPrefix(objectPrefix); err != nil {
+ defer close(objectMultipartStatCh)
+ objectMultipartStatCh <- ObjectMultipartStat{
+ Err: err,
+ }
+ return objectMultipartStatCh
+ }
+ go func(objectMultipartStatCh chan<- ObjectMultipartStat) {
+ defer close(objectMultipartStatCh)
+ // object and upload ID marker for future requests.
+ var objectMarker string
+ var uploadIDMarker string
+ for {
+ // list all multipart uploads.
+ result, err := c.listMultipartUploadsQuery(bucketName, objectMarker, uploadIDMarker, objectPrefix, delimiter, 1000)
+ if err != nil {
+ objectMultipartStatCh <- ObjectMultipartStat{
+ Err: err,
+ }
+ return
+ }
+ // Save objectMarker and uploadIDMarker for next request.
+ objectMarker = result.NextKeyMarker
+ uploadIDMarker = result.NextUploadIDMarker
+ // Send all multipart uploads.
+ for _, obj := range result.Uploads {
+ // Calculate total size of the uploaded parts if 'aggregateSize' is enabled.
+ if aggregateSize {
+ // Get total multipart size.
+ obj.Size, err = c.getTotalMultipartSize(bucketName, obj.Key, obj.UploadID)
+ if err != nil {
+ objectMultipartStatCh <- ObjectMultipartStat{
+ Err: err,
+ }
+ }
+ }
+ select {
+ // Send individual uploads here.
+ case objectMultipartStatCh <- obj:
+ // If done channel return here.
+ case <-doneCh:
+ return
+ }
+ }
+ // Send all common prefixes if any.
+ // NOTE: prefixes are only present if the request is delimited.
+ for _, obj := range result.CommonPrefixes {
+ object := ObjectMultipartStat{}
+ object.Key = obj.Prefix
+ object.Size = 0
+ select {
+ // Send delimited prefixes here.
+ case objectMultipartStatCh <- object:
+ // If done channel return here.
+ case <-doneCh:
+ return
+ }
+ }
+ // Listing ends if the result is not truncated; return right here.
+ if !result.IsTruncated {
+ return
+ }
+ }
+ }(objectMultipartStatCh)
+ // return.
+ return objectMultipartStatCh
+}
+
+// listMultipartUploads - (List Multipart Uploads).
+// - Lists some or all (up to 1000) in-progress multipart uploads in a bucket.
+//
+// You can use the request parameters as selection criteria to return a subset of the uploads in a bucket.
+// request parameters :-
+// ---------
+// ?key-marker - Specifies the multipart upload after which listing should begin.
+// ?upload-id-marker - Together with key-marker specifies the multipart upload after which listing should begin.
+// ?delimiter - A delimiter is a character you use to group keys.
+// ?prefix - Limits the response to keys that begin with the specified prefix.
+// ?max-uploads - Sets the maximum number of multipart uploads returned in the response body.
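+//
+// A minimal (hypothetical) call sketch:
+//
+// result, err := c.listMultipartUploadsQuery("mybucket", "", "", "photos/", "/", 1000)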
+func (c Client) listMultipartUploadsQuery(bucketName, keyMarker, uploadIDMarker, prefix, delimiter string, maxUploads int) (listMultipartUploadsResult, error) {
+ // Get resources properly escaped and lined up before using them in http request.
+ urlValues := make(url.Values)
+ // Set uploads.
+ urlValues.Set("uploads", "")
+ // Set object key marker.
+ urlValues.Set("key-marker", urlEncodePath(keyMarker))
+ // Set upload id marker.
+ urlValues.Set("upload-id-marker", uploadIDMarker)
+ // Set prefix marker.
+ urlValues.Set("prefix", urlEncodePath(prefix))
+ // Set delimiter.
+ urlValues.Set("delimiter", delimiter)
+ // Set max-uploads.
+ urlValues.Set("max-uploads", fmt.Sprintf("%d", maxUploads))
+
+ // Instantiate a new request.
+ req, err := c.newRequest("GET", requestMetadata{
+ bucketName: bucketName,
+ queryValues: urlValues,
+ })
+ if err != nil {
+ return listMultipartUploadsResult{}, err
+ }
+ // Execute list multipart uploads request.
+ resp, err := c.httpClient.Do(req)
+ defer closeResponse(resp)
+ if err != nil {
+ return listMultipartUploadsResult{}, err
+ }
+ if resp != nil {
+ if resp.StatusCode != http.StatusOK {
+ return listMultipartUploadsResult{}, HTTPRespToErrorResponse(resp, bucketName, "")
+ }
+ }
+ // Decode response body.
+ listMultipartUploadsResult := listMultipartUploadsResult{}
+ err = xmlDecoder(resp.Body, &listMultipartUploadsResult)
+ if err != nil {
+ return listMultipartUploadsResult, err
+ }
+ return listMultipartUploadsResult, nil
+}
+
+// listObjectParts lists all object parts, paging through results 1000 at a time.
+func (c Client) listObjectParts(bucketName, objectName, uploadID string) (partsInfo map[int]objectPart, err error) {
+ // Part number marker for the next batch of request.
+ var nextPartNumberMarker int
+ partsInfo = make(map[int]objectPart)
+ for {
+ // Get list of uploaded parts a maximum of 1000 per request.
+ listObjPartsResult, err := c.listObjectPartsQuery(bucketName, objectName, uploadID, nextPartNumberMarker, 1000)
+ if err != nil {
+ return nil, err
+ }
+ // Append to parts info.
+ for _, part := range listObjPartsResult.ObjectParts {
+ partsInfo[part.PartNumber] = part
+ }
+ // Keep part number marker, for the next iteration.
+ nextPartNumberMarker = listObjPartsResult.NextPartNumberMarker
+ // Listing ends when the result is not truncated; stop here.
+ if !listObjPartsResult.IsTruncated {
+ break
+ }
+ }
+
+ // Return all the parts.
+ return partsInfo, nil
+}
+
+// findUploadID lists all incomplete uploads and finds the uploadID of the matching object name.
+func (c Client) findUploadID(bucketName, objectName string) (string, error) {
+ // Make list incomplete uploads recursive.
+ isRecursive := true
+ // Turn off size aggregation of individual parts, in this request.
+ isAggregateSize := false
+ // NOTE: the done channel is set to nil; this drains the goroutine until exhaustion.
+ for mpUpload := range c.listIncompleteUploads(bucketName, objectName, isRecursive, isAggregateSize, nil) {
+ if mpUpload.Err != nil {
+ return "", mpUpload.Err
+ }
+ // if object name found, return the upload id.
+ if objectName == mpUpload.Key {
+ return mpUpload.UploadID, nil
+ }
+ }
+ // No upload id was found, return success and empty upload id.
+ return "", nil
+}
+
+// getTotalMultipartSize - calculate total uploaded size for a given multipart object.
+func (c Client) getTotalMultipartSize(bucketName, objectName, uploadID string) (size int64, err error) {
+ // Iterate over all parts and aggregate the size.
+ partsInfo, err := c.listObjectParts(bucketName, objectName, uploadID)
+ if err != nil {
+ return 0, err
+ }
+ for _, partInfo := range partsInfo {
+ size += partInfo.Size
+ }
+ return size, nil
+}
+
+// listObjectPartsQuery (List Parts query)
+// - lists some or all (up to 1000) parts that have been uploaded for a specific multipart upload
+//
+// You can use the request parameters as selection criteria to return a subset of the uploads in a bucket.
+// request parameters :-
+// ---------
+// ?part-number-marker - Specifies the part after which listing should begin.
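+//
+// A minimal (hypothetical) call sketch:
+//
+// result, err := c.listObjectPartsQuery("mybucket", "myobject", uploadID, 0, 1000)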
+func (c Client) listObjectPartsQuery(bucketName, objectName, uploadID string, partNumberMarker, maxParts int) (listObjectPartsResult, error) {
+ // Get resources properly escaped and lined up before using them in http request.
+ urlValues := make(url.Values)
+ // Set part number marker.
+ urlValues.Set("part-number-marker", fmt.Sprintf("%d", partNumberMarker))
+ // Set upload id.
+ urlValues.Set("uploadId", uploadID)
+ // Set max parts.
+ urlValues.Set("max-parts", fmt.Sprintf("%d", maxParts))
+
+ req, err := c.newRequest("GET", requestMetadata{
+ bucketName: bucketName,
+ objectName: objectName,
+ queryValues: urlValues,
+ })
+ if err != nil {
+ return listObjectPartsResult{}, err
+ }
+ // Execute the list object parts request.
+ resp, err := c.httpClient.Do(req)
+ defer closeResponse(resp)
+ if err != nil {
+ return listObjectPartsResult{}, err
+ }
+ if resp != nil {
+ if resp.StatusCode != http.StatusOK {
+ return listObjectPartsResult{}, HTTPRespToErrorResponse(resp, bucketName, objectName)
+ }
+ }
+ // Decode list object parts XML.
+ listObjectPartsResult := listObjectPartsResult{}
+ err = xmlDecoder(resp.Body, &listObjectPartsResult)
+ if err != nil {
+ return listObjectPartsResult, err
+ }
+ return listObjectPartsResult, nil
+}
diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/api-multipart-core.go b/Godeps/_workspace/src/github.com/minio/minio-go/api-multipart-core.go
deleted file mode 100644
index 1236058cd..000000000
--- a/Godeps/_workspace/src/github.com/minio/minio-go/api-multipart-core.go
+++ /dev/null
@@ -1,331 +0,0 @@
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package minio
-
-import (
- "bytes"
- "encoding/base64"
- "encoding/hex"
- "encoding/json"
- "encoding/xml"
- "fmt"
- "io"
- "net/http"
- "strconv"
-)
-
-// listMultipartUploadsRequest wrapper creates a new listMultipartUploads request
-func (a apiCore) listMultipartUploadsRequest(bucket, keymarker, uploadIDMarker, prefix, delimiter string, maxuploads int) (*request, error) {
- // resourceQuery - get resources properly escaped and lined up before using them in http request
- resourceQuery := func() (string, error) {
- switch {
- case keymarker != "":
- keymarker = fmt.Sprintf("&key-marker=%s", getURLEncodedPath(keymarker))
- fallthrough
- case uploadIDMarker != "":
- uploadIDMarker = fmt.Sprintf("&upload-id-marker=%s", uploadIDMarker)
- fallthrough
- case prefix != "":
- prefix = fmt.Sprintf("&prefix=%s", getURLEncodedPath(prefix))
- fallthrough
- case delimiter != "":
- delimiter = fmt.Sprintf("&delimiter=%s", delimiter)
- }
- query := fmt.Sprintf("?uploads&max-uploads=%d", maxuploads) + keymarker + uploadIDMarker + prefix + delimiter
- return query, nil
- }
- query, err := resourceQuery()
- if err != nil {
- return nil, err
- }
- op := &operation{
- HTTPServer: a.config.Endpoint,
- HTTPMethod: "GET",
- HTTPPath: separator + bucket + query,
- }
- r, err := newRequest(op, a.config, nil)
- if err != nil {
- return nil, err
- }
- return r, nil
-}
-
-// listMultipartUploads - (List Multipart Uploads) - Lists some or all (up to 1000) in-progress multipart uploads in a bucket.
-//
-// You can use the request parameters as selection criteria to return a subset of the uploads in a bucket.
-// request paramters :-
-// ---------
-// ?key-marker - Specifies the multipart upload after which listing should begin
-// ?upload-id-marker - Together with key-marker specifies the multipart upload after which listing should begin
-// ?delimiter - A delimiter is a character you use to group keys.
-// ?prefix - Limits the response to keys that begin with the specified prefix.
-// ?max-uploads - Sets the maximum number of multipart uploads returned in the response body.
-func (a apiCore) listMultipartUploads(bucket, keymarker, uploadIDMarker, prefix, delimiter string, maxuploads int) (listMultipartUploadsResult, error) {
- req, err := a.listMultipartUploadsRequest(bucket, keymarker, uploadIDMarker, prefix, delimiter, maxuploads)
- if err != nil {
- return listMultipartUploadsResult{}, err
- }
- resp, err := req.Do()
- defer closeResp(resp)
- if err != nil {
- return listMultipartUploadsResult{}, err
- }
- if resp != nil {
- if resp.StatusCode != http.StatusOK {
- return listMultipartUploadsResult{}, BodyToErrorResponse(resp.Body, a.config.AcceptType)
- }
- }
- listMultipartUploadsResult := listMultipartUploadsResult{}
- err = acceptTypeDecoder(resp.Body, a.config.AcceptType, &listMultipartUploadsResult)
- if err != nil {
- return listMultipartUploadsResult, err
- }
- // close body while returning, along with any error
- return listMultipartUploadsResult, nil
-}
-
-// initiateMultipartRequest wrapper creates a new initiateMultiPart request
-func (a apiCore) initiateMultipartRequest(bucket, object string) (*request, error) {
- op := &operation{
- HTTPServer: a.config.Endpoint,
- HTTPMethod: "POST",
- HTTPPath: separator + bucket + separator + object + "?uploads",
- }
- return newRequest(op, a.config, nil)
-}
-
-// initiateMultipartUpload initiates a multipart upload and returns an upload ID
-func (a apiCore) initiateMultipartUpload(bucket, object string) (initiateMultipartUploadResult, error) {
- req, err := a.initiateMultipartRequest(bucket, object)
- if err != nil {
- return initiateMultipartUploadResult{}, err
- }
- resp, err := req.Do()
- defer closeResp(resp)
- if err != nil {
- return initiateMultipartUploadResult{}, err
- }
- if resp != nil {
- if resp.StatusCode != http.StatusOK {
- return initiateMultipartUploadResult{}, BodyToErrorResponse(resp.Body, a.config.AcceptType)
- }
- }
- initiateMultipartUploadResult := initiateMultipartUploadResult{}
- err = acceptTypeDecoder(resp.Body, a.config.AcceptType, &initiateMultipartUploadResult)
- if err != nil {
- return initiateMultipartUploadResult, err
- }
- return initiateMultipartUploadResult, nil
-}
-
-// completeMultipartUploadRequest wrapper creates a new CompleteMultipartUpload request
-func (a apiCore) completeMultipartUploadRequest(bucket, object, uploadID string, complete completeMultipartUpload) (*request, error) {
- op := &operation{
- HTTPServer: a.config.Endpoint,
- HTTPMethod: "POST",
- HTTPPath: separator + bucket + separator + object + "?uploadId=" + uploadID,
- }
- var completeMultipartUploadBytes []byte
- var err error
- switch {
- case a.config.AcceptType == "application/xml":
- completeMultipartUploadBytes, err = xml.Marshal(complete)
- case a.config.AcceptType == "application/json":
- completeMultipartUploadBytes, err = json.Marshal(complete)
- default:
- completeMultipartUploadBytes, err = xml.Marshal(complete)
- }
- if err != nil {
- return nil, err
- }
- completeMultipartUploadBuffer := bytes.NewReader(completeMultipartUploadBytes)
- r, err := newRequest(op, a.config, completeMultipartUploadBuffer)
- if err != nil {
- return nil, err
- }
- r.req.ContentLength = int64(completeMultipartUploadBuffer.Len())
- return r, nil
-}
-
-// completeMultipartUpload completes a multipart upload by assembling previously uploaded parts.
-func (a apiCore) completeMultipartUpload(bucket, object, uploadID string, c completeMultipartUpload) (completeMultipartUploadResult, error) {
- req, err := a.completeMultipartUploadRequest(bucket, object, uploadID, c)
- if err != nil {
- return completeMultipartUploadResult{}, err
- }
- resp, err := req.Do()
- defer closeResp(resp)
- if err != nil {
- return completeMultipartUploadResult{}, err
- }
- if resp != nil {
- if resp.StatusCode != http.StatusOK {
- return completeMultipartUploadResult{}, BodyToErrorResponse(resp.Body, a.config.AcceptType)
- }
- }
- completeMultipartUploadResult := completeMultipartUploadResult{}
- err = acceptTypeDecoder(resp.Body, a.config.AcceptType, &completeMultipartUploadResult)
- if err != nil {
- return completeMultipartUploadResult, err
- }
- return completeMultipartUploadResult, nil
-}
-
-// abortMultipartUploadRequest wrapper creates a new AbortMultipartUpload request
-func (a apiCore) abortMultipartUploadRequest(bucket, object, uploadID string) (*request, error) {
- op := &operation{
- HTTPServer: a.config.Endpoint,
- HTTPMethod: "DELETE",
- HTTPPath: separator + bucket + separator + object + "?uploadId=" + uploadID,
- }
- return newRequest(op, a.config, nil)
-}
-
-// abortMultipartUpload aborts a multipart upload for the given uploadID, all parts are deleted
-func (a apiCore) abortMultipartUpload(bucket, object, uploadID string) error {
- req, err := a.abortMultipartUploadRequest(bucket, object, uploadID)
- if err != nil {
- return err
- }
- resp, err := req.Do()
- defer closeResp(resp)
- if err != nil {
- return err
- }
- if resp != nil {
- if resp.StatusCode != http.StatusNoContent {
- // Abort has no response body, handle it
- var errorResponse ErrorResponse
- switch resp.StatusCode {
- case http.StatusNotFound:
- errorResponse = ErrorResponse{
- Code: "NoSuchUpload",
- Message: "The specified multipart upload does not exist.",
- Resource: separator + bucket + separator + object,
- RequestID: resp.Header.Get("x-amz-request-id"),
- }
- case http.StatusForbidden:
- errorResponse = ErrorResponse{
- Code: "AccessDenied",
- Message: "Access Denied.",
- Resource: separator + bucket + separator + object,
- RequestID: resp.Header.Get("x-amz-request-id"),
- }
- default:
- errorResponse = ErrorResponse{
- Code: resp.Status,
- Message: "Unknown error, please report this at https://github.com/minio/minio-go-legacy/issues.",
- Resource: separator + bucket + separator + object,
- RequestID: resp.Header.Get("x-amz-request-id"),
- }
- }
- return errorResponse
- }
- }
- return nil
-}
-
-// listObjectPartsRequest wrapper creates a new ListObjectParts request
-func (a apiCore) listObjectPartsRequest(bucket, object, uploadID string, partNumberMarker, maxParts int) (*request, error) {
- // resourceQuery - get resources properly escaped and lined up before using them in http request
- resourceQuery := func() string {
- var partNumberMarkerStr string
- switch {
- case partNumberMarker != 0:
- partNumberMarkerStr = fmt.Sprintf("&part-number-marker=%d", partNumberMarker)
- }
- return fmt.Sprintf("?uploadId=%s&max-parts=%d", uploadID, maxParts) + partNumberMarkerStr
- }
- op := &operation{
- HTTPServer: a.config.Endpoint,
- HTTPMethod: "GET",
- HTTPPath: separator + bucket + separator + object + resourceQuery(),
- }
- return newRequest(op, a.config, nil)
-}
-
-// listObjectParts (List Parts) - lists some or all (up to 1000) parts that have been uploaded for a specific multipart upload
-//
-// You can use the request parameters as selection criteria to return a subset of the uploads in a bucket.
-// request paramters :-
-// ---------
-// ?part-number-marker - Specifies the part after which listing should begin.
-func (a apiCore) listObjectParts(bucket, object, uploadID string, partNumberMarker, maxParts int) (listObjectPartsResult, error) {
- req, err := a.listObjectPartsRequest(bucket, object, uploadID, partNumberMarker, maxParts)
- if err != nil {
- return listObjectPartsResult{}, err
- }
- resp, err := req.Do()
- defer closeResp(resp)
- if err != nil {
- return listObjectPartsResult{}, err
- }
- if resp != nil {
- if resp.StatusCode != http.StatusOK {
- return listObjectPartsResult{}, BodyToErrorResponse(resp.Body, a.config.AcceptType)
- }
- }
- listObjectPartsResult := listObjectPartsResult{}
- err = acceptTypeDecoder(resp.Body, a.config.AcceptType, &listObjectPartsResult)
- if err != nil {
- return listObjectPartsResult, err
- }
- return listObjectPartsResult, nil
-}
-
-// uploadPartRequest wrapper creates a new UploadPart request
-func (a apiCore) uploadPartRequest(bucket, object, uploadID string, md5SumBytes []byte, partNumber int, size int64, body io.ReadSeeker) (*request, error) {
- op := &operation{
- HTTPServer: a.config.Endpoint,
- HTTPMethod: "PUT",
- HTTPPath: separator + bucket + separator + object + "?partNumber=" + strconv.Itoa(partNumber) + "&uploadId=" + uploadID,
- }
- r, err := newRequest(op, a.config, body)
- if err != nil {
- return nil, err
- }
- // set Content-MD5 as base64 encoded md5
- if md5SumBytes != nil {
- r.Set("Content-MD5", base64.StdEncoding.EncodeToString(md5SumBytes))
- }
- r.req.ContentLength = size
- return r, nil
-}
-
-// uploadPart uploads a part in a multipart upload.
-func (a apiCore) uploadPart(bucket, object, uploadID string, md5SumBytes []byte, partNumber int, size int64, body io.ReadSeeker) (completePart, error) {
- req, err := a.uploadPartRequest(bucket, object, uploadID, md5SumBytes, partNumber, size, body)
- if err != nil {
- return completePart{}, err
- }
- cPart := completePart{}
- cPart.PartNumber = partNumber
- cPart.ETag = "\"" + hex.EncodeToString(md5SumBytes) + "\""
-
- // initiate the request
- resp, err := req.Do()
- defer closeResp(resp)
- if err != nil {
- return completePart{}, err
- }
- if resp != nil {
- if resp.StatusCode != http.StatusOK {
- return completePart{}, BodyToErrorResponse(resp.Body, a.config.AcceptType)
- }
- }
- return cPart, nil
-}
diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/api-presigned.go b/Godeps/_workspace/src/github.com/minio/minio-go/api-presigned.go
new file mode 100644
index 000000000..d46623631
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/minio/minio-go/api-presigned.go
@@ -0,0 +1,147 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "errors"
+ "time"
+)
+
+// PresignedGetObject returns a presigned URL to access an object without credentials.
+// Expires maximum is 7 days, i.e. 604800 seconds, and minimum is 1 second.
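+//
+// A minimal usage sketch (hypothetical bucket and object names):
+//
+// presignedURL, err := c.PresignedGetObject("mybucket", "myobject", time.Hour*24)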
+func (c Client) PresignedGetObject(bucketName, objectName string, expires time.Duration) (string, error) {
+ // Input validation.
+ if err := isValidBucketName(bucketName); err != nil {
+ return "", err
+ }
+ if err := isValidObjectName(objectName); err != nil {
+ return "", err
+ }
+ if err := isValidExpiry(expires); err != nil {
+ return "", err
+ }
+
+ expireSeconds := int64(expires / time.Second)
+ // Instantiate a new request.
+ // Since expires is set newRequest will presign the request.
+ req, err := c.newRequest("GET", requestMetadata{
+ presignURL: true,
+ bucketName: bucketName,
+ objectName: objectName,
+ expires: expireSeconds,
+ })
+ if err != nil {
+ return "", err
+ }
+ return req.URL.String(), nil
+}
+
+// PresignedPutObject returns a presigned URL to upload an object without credentials.
+// Expires maximum is 7 days, i.e. 604800 seconds, and minimum is 1 second.
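+//
+// A minimal usage sketch (hypothetical bucket and object names):
+//
+// presignedURL, err := c.PresignedPutObject("mybucket", "myobject", time.Hour*24)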
+func (c Client) PresignedPutObject(bucketName, objectName string, expires time.Duration) (string, error) {
+ // Input validation.
+ if err := isValidBucketName(bucketName); err != nil {
+ return "", err
+ }
+ if err := isValidObjectName(objectName); err != nil {
+ return "", err
+ }
+ if err := isValidExpiry(expires); err != nil {
+ return "", err
+ }
+
+ expireSeconds := int64(expires / time.Second)
+ // Instantiate a new request.
+ // Since expires is set newRequest will presign the request.
+ req, err := c.newRequest("PUT", requestMetadata{
+ presignURL: true,
+ bucketName: bucketName,
+ objectName: objectName,
+ expires: expireSeconds,
+ })
+ if err != nil {
+ return "", err
+ }
+ return req.URL.String(), nil
+}
+
+// PresignedPostPolicy returns POST form data to upload an object at a location.
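+//
+// A usage sketch, assuming the usual PostPolicy helpers (NewPostPolicy,
+// SetBucket, SetKey, SetExpires) are available:
+//
+// policy := NewPostPolicy()
+// policy.SetBucket("mybucket")
+// policy.SetKey("myobject")
+// policy.SetExpires(time.Now().UTC().Add(time.Hour * 24))
+// formData, err := c.PresignedPostPolicy(policy)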
+func (c Client) PresignedPostPolicy(p *PostPolicy) (map[string]string, error) {
+ // Validate input arguments.
+ if p.expiration.IsZero() {
+ return nil, errors.New("Expiration time must be specified")
+ }
+ if _, ok := p.formData["key"]; !ok {
+ return nil, errors.New("object key must be specified")
+ }
+ if _, ok := p.formData["bucket"]; !ok {
+ return nil, errors.New("bucket name must be specified")
+ }
+
+ bucketName := p.formData["bucket"]
+ // Fetch the location.
+ location, err := c.getBucketLocation(bucketName)
+ if err != nil {
+ return nil, err
+ }
+
+ // Keep time.
+ t := time.Now().UTC()
+ if c.signature.isV2() {
+ policyBase64 := p.base64()
+ p.formData["policy"] = policyBase64
+ // For Google endpoint set this value to be 'GoogleAccessId'.
+ if isGoogleEndpoint(c.endpointURL) {
+ p.formData["GoogleAccessId"] = c.accessKeyID
+ } else {
+ // For all other endpoints set this value to be 'AWSAccessKeyId'.
+ p.formData["AWSAccessKeyId"] = c.accessKeyID
+ }
+ // Sign the policy.
+ p.formData["signature"] = PostPresignSignatureV2(policyBase64, c.secretAccessKey)
+ return p.formData, nil
+ }
+
+ // Add date policy.
+ p.addNewPolicy(policyCondition{
+ matchType: "eq",
+ condition: "$x-amz-date",
+ value: t.Format(iso8601DateFormat),
+ })
+ // Add algorithm policy.
+ p.addNewPolicy(policyCondition{
+ matchType: "eq",
+ condition: "$x-amz-algorithm",
+ value: signV4Algorithm,
+ })
+ // Add a credential policy.
+ credential := getCredential(c.accessKeyID, location, t)
+ p.addNewPolicy(policyCondition{
+ matchType: "eq",
+ condition: "$x-amz-credential",
+ value: credential,
+ })
+ // get base64 encoded policy.
+ policyBase64 := p.base64()
+ // Fill in the form data.
+ p.formData["policy"] = policyBase64
+ p.formData["x-amz-algorithm"] = signV4Algorithm
+ p.formData["x-amz-credential"] = credential
+ p.formData["x-amz-date"] = t.Format(iso8601DateFormat)
+ p.formData["x-amz-signature"] = PostPresignSignatureV4(policyBase64, t, c.secretAccessKey, location)
+ return p.formData, nil
+}
diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/api-put-bucket.go b/Godeps/_workspace/src/github.com/minio/minio-go/api-put-bucket.go
new file mode 100644
index 000000000..97f54f782
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/minio/minio-go/api-put-bucket.go
@@ -0,0 +1,219 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "bytes"
+ "encoding/hex"
+ "encoding/xml"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+)
+
+/// Bucket operations
+
+// MakeBucket makes a new bucket.
+//
+// Optional arguments are acl and location - by default all buckets are created
+// with ``private`` acl and in US Standard region.
+//
+// ACL valid values - http://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html
+//
+// private - owner gets full access [default].
+// public-read - owner gets full access, all others get read access.
+// public-read-write - owner gets full access, all others get full access too.
+// authenticated-read - owner gets full access, authenticated users get read access.
+//
+// For more supported regions on Amazon S3, see http://docs.aws.amazon.com/general/latest/gr/rande.html
+// For more supported regions on Google Cloud Storage, see https://cloud.google.com/storage/docs/bucket-locations
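+//
+// A minimal usage sketch (hypothetical bucket name):
+//
+// err := c.MakeBucket("mybucket", BucketACL("private"), "us-east-1")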
+func (c Client) MakeBucket(bucketName string, acl BucketACL, location string) error {
+ // Reject the request if it is made with anonymous credentials.
+ if c.anonymous {
+ return ErrInvalidArgument("Make bucket cannot be issued with anonymous credentials.")
+ }
+
+ // Validate the input arguments.
+ if err := isValidBucketName(bucketName); err != nil {
+ return err
+ }
+ if !acl.isValidBucketACL() {
+ return ErrInvalidArgument("Unrecognized ACL " + acl.String())
+ }
+
+ // If location is empty, treat it as the default region 'us-east-1'.
+ if location == "" {
+ location = "us-east-1"
+ }
+
+ // Instantiate the request.
+ req, err := c.makeBucketRequest(bucketName, acl, location)
+ if err != nil {
+ return err
+ }
+
+ // Execute the request.
+ resp, err := c.httpClient.Do(req)
+ defer closeResponse(resp)
+ if err != nil {
+ return err
+ }
+
+ if resp != nil {
+ if resp.StatusCode != http.StatusOK {
+ return HTTPRespToErrorResponse(resp, bucketName, "")
+ }
+ }
+
+ // Save the location into cache on a successful makeBucket response.
+ c.bucketLocCache.Set(bucketName, location)
+
+ // Return.
+ return nil
+}
+
+// makeBucketRequest constructs request for makeBucket.
+func (c Client) makeBucketRequest(bucketName string, acl BucketACL, location string) (*http.Request, error) {
+ // Validate input arguments.
+ if err := isValidBucketName(bucketName); err != nil {
+ return nil, err
+ }
+ if !acl.isValidBucketACL() {
+ return nil, ErrInvalidArgument("Unrecognized ACL " + acl.String())
+ }
+
+ // Construct the target URL for the bucket.
+ targetURL := c.endpointURL
+ if bucketName != "" {
+ // If endpoint supports virtual host style use that always.
+ // Currently only S3 and Google Cloud Storage would support this.
+ if isVirtualHostSupported(c.endpointURL) {
+ targetURL.Host = bucketName + "." + c.endpointURL.Host
+ targetURL.Path = "/"
+ } else {
+ // If not fall back to using path style.
+ targetURL.Path = "/" + bucketName
+ }
+ }
+
+ // get a new HTTP request for the method.
+ req, err := http.NewRequest("PUT", targetURL.String(), nil)
+ if err != nil {
+ return nil, err
+ }
+
+ // by default bucket acl is set to private.
+ req.Header.Set("x-amz-acl", "private")
+ if acl != "" {
+ req.Header.Set("x-amz-acl", string(acl))
+ }
+
+ // set UserAgent for the request.
+ c.setUserAgent(req)
+
+ // set sha256 sum for signature calculation only with signature version '4'.
+ if c.signature.isV4() {
+ req.Header.Set("X-Amz-Content-Sha256", hex.EncodeToString(sum256([]byte{})))
+ }
+
+ // If location is not 'us-east-1' create bucket location config.
+ if location != "us-east-1" && location != "" {
+ createBucketConfig := new(createBucketConfiguration)
+ createBucketConfig.Location = location
+ var createBucketConfigBytes []byte
+ createBucketConfigBytes, err = xml.Marshal(createBucketConfig)
+ if err != nil {
+ return nil, err
+ }
+ createBucketConfigBuffer := bytes.NewBuffer(createBucketConfigBytes)
+ req.Body = ioutil.NopCloser(createBucketConfigBuffer)
+ req.ContentLength = int64(createBucketConfigBuffer.Len())
+ if c.signature.isV4() {
+ req.Header.Set("X-Amz-Content-Sha256", hex.EncodeToString(sum256(createBucketConfigBuffer.Bytes())))
+ }
+ }
+
+ // Sign the request.
+ if c.signature.isV4() {
+ // Signature calculated for MakeBucket request should be for 'us-east-1',
+ // regardless of the bucket's location constraint.
+ req = SignV4(*req, c.accessKeyID, c.secretAccessKey, "us-east-1")
+ } else if c.signature.isV2() {
+ req = SignV2(*req, c.accessKeyID, c.secretAccessKey)
+ }
+
+ // Return signed request.
+ return req, nil
+}
+
+// SetBucketACL set the permissions on an existing bucket using access control lists (ACL).
+//
+// For example
+//
+// private - owner gets full access [default].
+// public-read - owner gets full access, all others get read access.
+// public-read-write - owner gets full access, all others get full access too.
+// authenticated-read - owner gets full access, authenticated users get read access.
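+//
+// A minimal usage sketch (hypothetical bucket name):
+//
+// err := c.SetBucketACL("mybucket", BucketACL("public-read"))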
+func (c Client) SetBucketACL(bucketName string, acl BucketACL) error {
+ // Input validation.
+ if err := isValidBucketName(bucketName); err != nil {
+ return err
+ }
+ if !acl.isValidBucketACL() {
+ return ErrInvalidArgument("Unrecognized ACL " + acl.String())
+ }
+
+ // Set acl query.
+ urlValues := make(url.Values)
+ urlValues.Set("acl", "")
+
+ // Add misc headers.
+ customHeader := make(http.Header)
+
+ if acl != "" {
+ customHeader.Set("x-amz-acl", acl.String())
+ } else {
+ customHeader.Set("x-amz-acl", "private")
+ }
+
+ // Instantiate a new request.
+ req, err := c.newRequest("PUT", requestMetadata{
+ bucketName: bucketName,
+ queryValues: urlValues,
+ customHeader: customHeader,
+ })
+ if err != nil {
+ return err
+ }
+
+ // Initiate the request.
+ resp, err := c.httpClient.Do(req)
+ defer closeResponse(resp)
+ if err != nil {
+ return err
+ }
+
+ if resp != nil {
+ // if error return.
+ if resp.StatusCode != http.StatusOK {
+ return HTTPRespToErrorResponse(resp, bucketName, "")
+ }
+ }
+
+ // return
+ return nil
+}
diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/api-put-object-partial.go b/Godeps/_workspace/src/github.com/minio/minio-go/api-put-object-partial.go
new file mode 100644
index 000000000..3b7a5b733
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/minio/minio-go/api-put-object-partial.go
@@ -0,0 +1,197 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "crypto/md5"
+ "crypto/sha256"
+ "errors"
+ "hash"
+ "io"
+ "sort"
+)
+
+// PutObjectPartial uploads an object in parts, reading input through the ReadAtCloser interface.
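+//
+// A minimal usage sketch (hypothetical names; 'file' is any ReadAtCloser,
+// for example an *os.File):
+//
+// n, err := c.PutObjectPartial("mybucket", "myobject", file, fileSize, "application/octet-stream")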
+func (c Client) PutObjectPartial(bucketName, objectName string, data ReadAtCloser, size int64, contentType string) (n int64, err error) {
+ // Input validation.
+ if err := isValidBucketName(bucketName); err != nil {
+ return 0, err
+ }
+ if err := isValidObjectName(objectName); err != nil {
+ return 0, err
+ }
+
+ // Cleanup any previously left stale files, as the function exits.
+ defer cleanupStaleTempfiles("multiparts$-putobject-partial")
+
+ // getUploadID for an object, initiates a new multipart request
+ // if it cannot find any previously partially uploaded object.
+ uploadID, err := c.getUploadID(bucketName, objectName, contentType)
+ if err != nil {
+ return 0, err
+ }
+
+ // total data read and written to server. should be equal to 'size' at the end of the call.
+ var totalUploadedSize int64
+
+ // Complete multipart upload.
+ var completeMultipartUpload completeMultipartUpload
+
+ // Fetch previously uploaded parts and save the total size.
+ partsInfo, err := c.listObjectParts(bucketName, objectName, uploadID)
+ if err != nil {
+ return 0, err
+ }
+
+ // Previous maximum part size
+ var prevMaxPartSize int64
+ // Previous part number, i.e. the highest part number seen so far.
+ var prevPartNumber int
+ // Loop through all parts and calculate totalUploadedSize.
+ for _, partInfo := range partsInfo {
+ totalUploadedSize += partInfo.Size
+ // Choose the maximum part size.
+ if partInfo.Size >= prevMaxPartSize {
+ prevMaxPartSize = partInfo.Size
+ }
+ // Save the highest part number seen. NOTE: map iteration order is
+ // randomized, so a plain assignment would pick an arbitrary part.
+ if partInfo.PartNumber > prevPartNumber {
+ prevPartNumber = partInfo.PartNumber
+ }
+ }
+
+ // Calculate the optimal part size for a given file size.
+ partSize := optimalPartSize(size)
+ // If prevMaxPartSize is set use that.
+ if prevMaxPartSize != 0 {
+ partSize = prevMaxPartSize
+ }
+
+ // MD5 and Sha256 hasher.
+ var hashMD5, hashSha256 hash.Hash
+
+ // Part number always starts at prevPartNumber + 1, i.e. the next part number.
+ partNumber := prevPartNumber + 1
+
+ // Loop through until EOF.
+ for totalUploadedSize < size {
+ // Initialize a new temporary file.
+ tmpFile, err := newTempFile("multiparts$-putobject-partial")
+ if err != nil {
+ return 0, err
+ }
+
+ // Create a hash multiwriter.
+ hashMD5 = md5.New()
+ hashWriter := io.MultiWriter(hashMD5)
+ if c.signature.isV4() {
+ hashSha256 = sha256.New()
+ hashWriter = io.MultiWriter(hashMD5, hashSha256)
+ }
+ writer := io.MultiWriter(tmpFile, hashWriter)
+
+ // totalUploadedSize is the current readAtOffset.
+ readAtOffset := totalUploadedSize
+
+ // Read until partSize.
+ var totalReadPartSize int64
+
+ // ReadAt defaults to reading with a 5MiB buffer.
+ readAtBuffer := make([]byte, optimalReadAtBufferSize)
+
+ // Loop through until partSize.
+ for totalReadPartSize < partSize {
+ readAtSize, rerr := data.ReadAt(readAtBuffer, readAtOffset)
+ if rerr != nil {
+ if rerr != io.EOF {
+ return 0, rerr
+ }
+ }
+ writeSize, werr := writer.Write(readAtBuffer[:readAtSize])
+ if werr != nil {
+ return 0, werr
+ }
+ if readAtSize != writeSize {
+ return 0, errors.New("Something really bad happened here. " + reportIssue)
+ }
+ readAtOffset += int64(writeSize)
+ totalReadPartSize += int64(writeSize)
+ if rerr == io.EOF {
+ break
+ }
+ }
+
+ // Seek back to beginning of the temporary file.
+ if _, err := tmpFile.Seek(0, 0); err != nil {
+ return 0, err
+ }
+
+ // Save all the part metadata.
+ partMdata := partMetadata{
+ ReadCloser: tmpFile,
+ MD5Sum: hashMD5.Sum(nil),
+ Size: totalReadPartSize,
+ }
+
+ // Signature version '4'.
+ if c.signature.isV4() {
+ partMdata.Sha256Sum = hashSha256.Sum(nil)
+ }
+
+ // Current part number to be uploaded.
+ partMdata.Number = partNumber
+
+ // execute upload part.
+ objPart, err := c.uploadPart(bucketName, objectName, uploadID, partMdata)
+ if err != nil {
+ // Close the read closer.
+ partMdata.ReadCloser.Close()
+ return totalUploadedSize, err
+ }
+
+ // Save successfully uploaded size.
+ totalUploadedSize += partMdata.Size
+
+ // Save successfully uploaded part metadata.
+ partsInfo[partMdata.Number] = objPart
+
+ // Move to next part.
+ partNumber++
+ }
+
+	// Verify that totalUploadedSize matches the input 'size'.
+	// If it differs, do not complete the request and return an error instead.
+ if totalUploadedSize != size {
+ return totalUploadedSize, ErrUnexpectedEOF(totalUploadedSize, size, bucketName, objectName)
+ }
+
+ // Loop over uploaded parts to save them in a Parts array before completing the multipart request.
+ for _, part := range partsInfo {
+ var complPart completePart
+ complPart.ETag = part.ETag
+ complPart.PartNumber = part.PartNumber
+ completeMultipartUpload.Parts = append(completeMultipartUpload.Parts, complPart)
+ }
+
+ // Sort all completed parts.
+ sort.Sort(completedParts(completeMultipartUpload.Parts))
+ _, err = c.completeMultipartUpload(bucketName, objectName, uploadID, completeMultipartUpload)
+ if err != nil {
+ return totalUploadedSize, err
+ }
+
+ // Return final size.
+ return totalUploadedSize, nil
+}
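+
+// An illustrative usage sketch (not part of the library): resuming an upload
+// with PutObjectPartial. It assumes a concrete Client value 'c' and relies on
+// *os.File satisfying the ReadAtCloser interface (io.ReaderAt + io.Closer).
+// Bucket, object and file names are placeholders.
+//
+//	file, err := os.Open("/tmp/backup.tar.gz")
+//	if err != nil {
+//		log.Fatalln(err)
+//	}
+//	defer file.Close()
+//	st, err := file.Stat()
+//	if err != nil {
+//		log.Fatalln(err)
+//	}
+//	n, err := c.PutObjectPartial("mybucket", "backup.tar.gz", file, st.Size(), "application/gzip")
+//	if err != nil {
+//		log.Fatalln(err)
+//	}
+//	log.Printf("uploaded %d bytes", n)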
diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/api-put-object.go b/Godeps/_workspace/src/github.com/minio/minio-go/api-put-object.go
new file mode 100644
index 000000000..a02df778a
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/minio/minio-go/api-put-object.go
@@ -0,0 +1,559 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "bytes"
+ "crypto/md5"
+ "crypto/sha256"
+ "encoding/hex"
+ "encoding/xml"
+ "fmt"
+ "hash"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "sort"
+ "strconv"
+ "strings"
+)
+
+// completedParts is a collection of parts sortable by their part numbers.
+// Used for sorting the uploaded parts before completing the multipart request.
+type completedParts []completePart
+
+func (a completedParts) Len() int { return len(a) }
+func (a completedParts) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+func (a completedParts) Less(i, j int) bool { return a[i].PartNumber < a[j].PartNumber }
+
+// PutObject creates an object in a bucket.
+//
+// You must have WRITE permissions on a bucket to create an object.
+//
+// - For size smaller than 5MiB PutObject automatically does a single atomic Put operation.
+// - For size larger than 5MiB PutObject automatically does a resumable multipart Put operation.
+// - For size input as -1 PutObject does a multipart Put operation until input stream reaches EOF.
+// Maximum object size that can be uploaded through this operation will be 5TiB.
+//
+// NOTE: Google Cloud Storage multipart Put is not compatible with Amazon S3 APIs.
+// Current implementation will only upload a maximum of 5GiB to Google Cloud Storage servers.
+//
+// NOTE: For anonymous requests Amazon S3 doesn't allow multipart upload. So we fall back to single PUT operation.
+func (c Client) PutObject(bucketName, objectName string, data io.Reader, size int64, contentType string) (n int64, err error) {
+ // Input validation.
+ if err := isValidBucketName(bucketName); err != nil {
+ return 0, err
+ }
+ if err := isValidObjectName(objectName); err != nil {
+ return 0, err
+ }
+
+ // NOTE: Google Cloud Storage multipart Put is not compatible with Amazon S3 APIs.
+ // Current implementation will only upload a maximum of 5GiB to Google Cloud Storage servers.
+ if isGoogleEndpoint(c.endpointURL) {
+ if size <= -1 {
+ return 0, ErrorResponse{
+ Code: "NotImplemented",
+ Message: "Content-Length cannot be negative for file uploads to Google Cloud Storage.",
+ Key: objectName,
+ BucketName: bucketName,
+ }
+ }
+		// Do not compute MD5 for Google Cloud Storage. Uploads up to 5GiB in size.
+ return c.putNoChecksum(bucketName, objectName, data, size, contentType)
+ }
+
+ // NOTE: S3 doesn't allow anonymous multipart requests.
+ if isAmazonEndpoint(c.endpointURL) && c.anonymous {
+ if size <= -1 || size > int64(maxSinglePutObjectSize) {
+ return 0, ErrorResponse{
+ Code: "NotImplemented",
+ Message: fmt.Sprintf("For anonymous requests Content-Length cannot be %d.", size),
+ Key: objectName,
+ BucketName: bucketName,
+ }
+ }
+		// Do not compute MD5 for anonymous requests to Amazon S3. Uploads up to 5GiB in size.
+ return c.putAnonymous(bucketName, objectName, data, size, contentType)
+ }
+
+	// A large file upload is initiated when the input data size
+	// is greater than 5MiB, or is negative (i.e. unknown).
+ if size >= minimumPartSize || size < 0 {
+ return c.putLargeObject(bucketName, objectName, data, size, contentType)
+ }
+ return c.putSmallObject(bucketName, objectName, data, size, contentType)
+}
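+
+// An illustrative usage sketch (not part of the library): a simple PutObject
+// call with an in-memory reader. The endpoint and credentials are
+// placeholders, and New is assumed to expose PutObject on the returned
+// CloudStorageClient interface.
+//
+//	c, err := minio.New("play.minio.io:9000", "ACCESSKEY", "SECRETKEY", false)
+//	if err != nil {
+//		log.Fatalln(err)
+//	}
+//	reader := strings.NewReader("hello world")
+//	n, err := c.PutObject("mybucket", "hello.txt", reader, reader.Size(), "text/plain")
+//	if err != nil {
+//		log.Fatalln(err)
+//	}
+//	log.Printf("uploaded %d bytes", n)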
+
+// putNoChecksum uploads data without computing any checksums. This special function
+// is used for Google Cloud Storage since Google's multipart API is not S3 compatible.
+func (c Client) putNoChecksum(bucketName, objectName string, data io.Reader, size int64, contentType string) (n int64, err error) {
+ // Input validation.
+ if err := isValidBucketName(bucketName); err != nil {
+ return 0, err
+ }
+ if err := isValidObjectName(objectName); err != nil {
+ return 0, err
+ }
+ if size > maxPartSize {
+ return 0, ErrEntityTooLarge(size, bucketName, objectName)
+ }
+	// Do not calculate sha256 or md5sum for this request.
+ putObjMetadata := putObjectMetadata{
+ MD5Sum: nil,
+ Sha256Sum: nil,
+ ReadCloser: ioutil.NopCloser(data),
+ Size: size,
+ ContentType: contentType,
+ }
+ // Execute put object.
+ if _, err := c.putObject(bucketName, objectName, putObjMetadata); err != nil {
+ return 0, err
+ }
+ return size, nil
+}
+
+// putAnonymous is a special function for uploading content as anonymous request.
+// This special function is necessary since Amazon S3 doesn't allow anonymous
+// multipart uploads.
+func (c Client) putAnonymous(bucketName, objectName string, data io.Reader, size int64, contentType string) (n int64, err error) {
+ // Input validation.
+ if err := isValidBucketName(bucketName); err != nil {
+ return 0, err
+ }
+ if err := isValidObjectName(objectName); err != nil {
+ return 0, err
+ }
+ return c.putNoChecksum(bucketName, objectName, data, size, contentType)
+}
+
+// putSmallObject uploads objects smaller than 5MiB with a single PUT operation.
+func (c Client) putSmallObject(bucketName, objectName string, data io.Reader, size int64, contentType string) (n int64, err error) {
+ // Input validation.
+ if err := isValidBucketName(bucketName); err != nil {
+ return 0, err
+ }
+ if err := isValidObjectName(objectName); err != nil {
+ return 0, err
+ }
+ // Read input data fully into buffer.
+ dataBytes, err := ioutil.ReadAll(data)
+ if err != nil {
+ return 0, err
+ }
+ if int64(len(dataBytes)) != size {
+ return 0, ErrUnexpectedEOF(int64(len(dataBytes)), size, bucketName, objectName)
+ }
+ // Construct a new PUT object metadata.
+ putObjMetadata := putObjectMetadata{
+ MD5Sum: sumMD5(dataBytes),
+ Sha256Sum: sum256(dataBytes),
+ ReadCloser: ioutil.NopCloser(bytes.NewReader(dataBytes)),
+ Size: size,
+ ContentType: contentType,
+ }
+ // Single part use case, use putObject directly.
+ if _, err := c.putObject(bucketName, objectName, putObjMetadata); err != nil {
+ return 0, err
+ }
+ return size, nil
+}
+
+// hashCopy - calculates MD5 and SHA256 sums while copying up to partSize bytes into the writer.
+func (c Client) hashCopy(writer io.ReadWriteSeeker, data io.Reader, partSize int64) (md5Sum, sha256Sum []byte, size int64, err error) {
+	// MD5 and Sha256 hashers.
+	var hashMD5, hashSha256 hash.Hash
+	hashMD5 = md5.New()
+ hashWriter := io.MultiWriter(writer, hashMD5)
+ if c.signature.isV4() {
+ hashSha256 = sha256.New()
+ hashWriter = io.MultiWriter(writer, hashMD5, hashSha256)
+ }
+
+	// Copy the input into the writer, hashing as it streams.
+ size, err = io.CopyN(hashWriter, data, partSize)
+ if err != nil {
+ if err != io.EOF {
+ return nil, nil, 0, err
+ }
+ }
+
+ // Seek back to beginning of input.
+ if _, err := writer.Seek(0, 0); err != nil {
+ return nil, nil, 0, err
+ }
+
+	// Finalize md5 sum and sha256 sum.
+ md5Sum = hashMD5.Sum(nil)
+ if c.signature.isV4() {
+ sha256Sum = hashSha256.Sum(nil)
+ }
+ return md5Sum, sha256Sum, size, nil
+}
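+
+// The hashing pattern above in isolation (an illustrative sketch using only
+// the standard library): io.CopyN through an io.MultiWriter spools data to a
+// destination while feeding the MD5 and SHA256 hashers in a single pass.
+//
+//	hashMD5 := md5.New()
+//	hashSha256 := sha256.New()
+//	var buf bytes.Buffer
+//	w := io.MultiWriter(&buf, hashMD5, hashSha256)
+//	if _, err := io.CopyN(w, strings.NewReader("example data"), 12); err != nil && err != io.EOF {
+//		log.Fatalln(err)
+//	}
+//	md5Sum := hashMD5.Sum(nil)       // 16-byte digest of the copied bytes.
+//	sha256Sum := hashSha256.Sum(nil) // 32-byte digest of the same bytes.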
+
+// putLargeObject uploads objects larger than 5MiB as resumable multipart uploads.
+func (c Client) putLargeObject(bucketName, objectName string, data io.Reader, size int64, contentType string) (n int64, err error) {
+ // Input validation.
+ if err := isValidBucketName(bucketName); err != nil {
+ return 0, err
+ }
+ if err := isValidObjectName(objectName); err != nil {
+ return 0, err
+ }
+
+	// Clean up any previously left stale files when the function exits.
+ defer cleanupStaleTempfiles("multiparts$-putobject")
+
+ // getUploadID for an object, initiates a new multipart request
+ // if it cannot find any previously partially uploaded object.
+ uploadID, err := c.getUploadID(bucketName, objectName, contentType)
+ if err != nil {
+ return 0, err
+ }
+
+	// Total data read and written to the server; should equal 'size' at the end of the call.
+ var totalUploadedSize int64
+
+ // Complete multipart upload.
+ var completeMultipartUpload completeMultipartUpload
+
+	// Fetch previously uploaded parts and save the total size.
+ partsInfo, err := c.listObjectParts(bucketName, objectName, uploadID)
+ if err != nil {
+ return 0, err
+ }
+ // Previous maximum part size
+ var prevMaxPartSize int64
+ // Loop through all parts and calculate totalUploadedSize.
+ for _, partInfo := range partsInfo {
+ totalUploadedSize += partInfo.Size
+ // Choose the maximum part size.
+ if partInfo.Size >= prevMaxPartSize {
+ prevMaxPartSize = partInfo.Size
+ }
+ }
+
+ // Calculate the optimal part size for a given size.
+ partSize := optimalPartSize(size)
+ // If prevMaxPartSize is set use that.
+ if prevMaxPartSize != 0 {
+ partSize = prevMaxPartSize
+ }
+
+ // Part number always starts with '1'.
+ partNumber := 1
+
+ // Loop through until EOF.
+ for {
+ // We have reached EOF, break out.
+		// We have uploaded all of the input, break out.
+ break
+ }
+
+ // Initialize a new temporary file.
+ tmpFile, err := newTempFile("multiparts$-putobject")
+ if err != nil {
+ return 0, err
+ }
+
+		// Calculates MD5 and Sha256 sum while copying partSize bytes into tmpFile.
+		// The returned size is named partWrittenSize so it does not shadow the
+		// function argument 'size'.
+		md5Sum, sha256Sum, partWrittenSize, err := c.hashCopy(tmpFile, data, partSize)
+		if err != nil {
+			if err != io.EOF {
+				return 0, err
+			}
+		}
+
+		// Save all the part metadata.
+		partMdata := partMetadata{
+			ReadCloser: tmpFile,
+			Size:       partWrittenSize,
+			MD5Sum:     md5Sum,
+			Sha256Sum:  sha256Sum,
+			Number:     partNumber, // Current part number to be uploaded.
+		}
+
+ // If part number already uploaded, move to the next one.
+ if isPartUploaded(objectPart{
+ ETag: hex.EncodeToString(partMdata.MD5Sum),
+ PartNumber: partNumber,
+ }, partsInfo) {
+ // Close the read closer.
+ partMdata.ReadCloser.Close()
+ continue
+ }
+
+ // execute upload part.
+ objPart, err := c.uploadPart(bucketName, objectName, uploadID, partMdata)
+ if err != nil {
+ // Close the read closer.
+ partMdata.ReadCloser.Close()
+ return totalUploadedSize, err
+ }
+
+ // Save successfully uploaded size.
+ totalUploadedSize += partMdata.Size
+
+ // Save successfully uploaded part metadata.
+ partsInfo[partMdata.Number] = objPart
+
+ // Move to next part.
+ partNumber++
+ }
+
+	// If size is greater than zero, verify that totalUploadedSize matches it.
+	// If they differ, do not complete the request and return an error instead.
+ if size > 0 {
+ if totalUploadedSize != size {
+ return totalUploadedSize, ErrUnexpectedEOF(totalUploadedSize, size, bucketName, objectName)
+ }
+ }
+
+ // Loop over uploaded parts to save them in a Parts array before completing the multipart request.
+ for _, part := range partsInfo {
+ var complPart completePart
+ complPart.ETag = part.ETag
+ complPart.PartNumber = part.PartNumber
+ completeMultipartUpload.Parts = append(completeMultipartUpload.Parts, complPart)
+ }
+
+ // Sort all completed parts.
+ sort.Sort(completedParts(completeMultipartUpload.Parts))
+ _, err = c.completeMultipartUpload(bucketName, objectName, uploadID, completeMultipartUpload)
+ if err != nil {
+ return totalUploadedSize, err
+ }
+
+ // Return final size.
+ return totalUploadedSize, nil
+}
+
+// putObject - add an object to a bucket.
+// NOTE: You must have WRITE permissions on a bucket to add an object to it.
+func (c Client) putObject(bucketName, objectName string, putObjMetadata putObjectMetadata) (ObjectStat, error) {
+ // Input validation.
+ if err := isValidBucketName(bucketName); err != nil {
+ return ObjectStat{}, err
+ }
+ if err := isValidObjectName(objectName); err != nil {
+ return ObjectStat{}, err
+ }
+
+ if strings.TrimSpace(putObjMetadata.ContentType) == "" {
+ putObjMetadata.ContentType = "application/octet-stream"
+ }
+
+ // Set headers.
+ customHeader := make(http.Header)
+ customHeader.Set("Content-Type", putObjMetadata.ContentType)
+
+ // Populate request metadata.
+ reqMetadata := requestMetadata{
+ bucketName: bucketName,
+ objectName: objectName,
+ customHeader: customHeader,
+ contentBody: putObjMetadata.ReadCloser,
+ contentLength: putObjMetadata.Size,
+ contentSha256Bytes: putObjMetadata.Sha256Sum,
+ contentMD5Bytes: putObjMetadata.MD5Sum,
+ }
+ // Initiate new request.
+ req, err := c.newRequest("PUT", reqMetadata)
+ if err != nil {
+ return ObjectStat{}, err
+ }
+ // Execute the request.
+ resp, err := c.httpClient.Do(req)
+ defer closeResponse(resp)
+ if err != nil {
+ return ObjectStat{}, err
+ }
+ if resp != nil {
+ if resp.StatusCode != http.StatusOK {
+ return ObjectStat{}, HTTPRespToErrorResponse(resp, bucketName, objectName)
+ }
+ }
+ var metadata ObjectStat
+ // Trim off the odd double quotes from ETag.
+ metadata.ETag = strings.Trim(resp.Header.Get("ETag"), "\"")
+ // A success here means data was written to server successfully.
+ metadata.Size = putObjMetadata.Size
+ return metadata, nil
+}
+
+// initiateMultipartUpload initiates a multipart upload and returns an upload ID.
+func (c Client) initiateMultipartUpload(bucketName, objectName, contentType string) (initiateMultipartUploadResult, error) {
+ // Input validation.
+ if err := isValidBucketName(bucketName); err != nil {
+ return initiateMultipartUploadResult{}, err
+ }
+ if err := isValidObjectName(objectName); err != nil {
+ return initiateMultipartUploadResult{}, err
+ }
+
+ // Initialize url queries.
+ urlValues := make(url.Values)
+ urlValues.Set("uploads", "")
+
+ if contentType == "" {
+ contentType = "application/octet-stream"
+ }
+
+ // set ContentType header.
+ customHeader := make(http.Header)
+ customHeader.Set("Content-Type", contentType)
+
+ reqMetadata := requestMetadata{
+ bucketName: bucketName,
+ objectName: objectName,
+ queryValues: urlValues,
+ customHeader: customHeader,
+ }
+
+ // Instantiate the request.
+ req, err := c.newRequest("POST", reqMetadata)
+ if err != nil {
+ return initiateMultipartUploadResult{}, err
+ }
+ // Execute the request.
+ resp, err := c.httpClient.Do(req)
+ defer closeResponse(resp)
+ if err != nil {
+ return initiateMultipartUploadResult{}, err
+ }
+ if resp != nil {
+ if resp.StatusCode != http.StatusOK {
+ return initiateMultipartUploadResult{}, HTTPRespToErrorResponse(resp, bucketName, objectName)
+ }
+ }
+ // Decode xml initiate multipart.
+ initiateMultipartUploadResult := initiateMultipartUploadResult{}
+ err = xmlDecoder(resp.Body, &initiateMultipartUploadResult)
+ if err != nil {
+ return initiateMultipartUploadResult, err
+ }
+ return initiateMultipartUploadResult, nil
+}
+
+// uploadPart uploads a part in a multipart upload.
+func (c Client) uploadPart(bucketName, objectName, uploadID string, uploadingPart partMetadata) (objectPart, error) {
+ // Input validation.
+ if err := isValidBucketName(bucketName); err != nil {
+ return objectPart{}, err
+ }
+ if err := isValidObjectName(objectName); err != nil {
+ return objectPart{}, err
+ }
+
+ // Get resources properly escaped and lined up before using them in http request.
+ urlValues := make(url.Values)
+ // Set part number.
+ urlValues.Set("partNumber", strconv.Itoa(uploadingPart.Number))
+ // Set upload id.
+ urlValues.Set("uploadId", uploadID)
+
+ reqMetadata := requestMetadata{
+ bucketName: bucketName,
+ objectName: objectName,
+ queryValues: urlValues,
+ contentBody: uploadingPart.ReadCloser,
+ contentLength: uploadingPart.Size,
+ contentSha256Bytes: uploadingPart.Sha256Sum,
+ contentMD5Bytes: uploadingPart.MD5Sum,
+ }
+
+ // Instantiate a request.
+ req, err := c.newRequest("PUT", reqMetadata)
+ if err != nil {
+ return objectPart{}, err
+ }
+ // Execute the request.
+ resp, err := c.httpClient.Do(req)
+ defer closeResponse(resp)
+ if err != nil {
+ return objectPart{}, err
+ }
+ if resp != nil {
+ if resp.StatusCode != http.StatusOK {
+ return objectPart{}, HTTPRespToErrorResponse(resp, bucketName, objectName)
+ }
+ }
+ // Once successfully uploaded, return completed part.
+ objPart := objectPart{}
+ objPart.PartNumber = uploadingPart.Number
+ objPart.ETag = resp.Header.Get("ETag")
+ return objPart, nil
+}
+
+// completeMultipartUpload completes a multipart upload by assembling previously uploaded parts.
+func (c Client) completeMultipartUpload(bucketName, objectName, uploadID string, complete completeMultipartUpload) (completeMultipartUploadResult, error) {
+ // Input validation.
+ if err := isValidBucketName(bucketName); err != nil {
+ return completeMultipartUploadResult{}, err
+ }
+ if err := isValidObjectName(objectName); err != nil {
+ return completeMultipartUploadResult{}, err
+ }
+
+ // Initialize url queries.
+ urlValues := make(url.Values)
+ urlValues.Set("uploadId", uploadID)
+
+ // Marshal complete multipart body.
+ completeMultipartUploadBytes, err := xml.Marshal(complete)
+ if err != nil {
+ return completeMultipartUploadResult{}, err
+ }
+
+	// Instantiate the complete multipart upload body buffer.
+ completeMultipartUploadBuffer := bytes.NewBuffer(completeMultipartUploadBytes)
+ reqMetadata := requestMetadata{
+ bucketName: bucketName,
+ objectName: objectName,
+ queryValues: urlValues,
+ contentBody: ioutil.NopCloser(completeMultipartUploadBuffer),
+ contentLength: int64(completeMultipartUploadBuffer.Len()),
+ contentSha256Bytes: sum256(completeMultipartUploadBuffer.Bytes()),
+ }
+
+ // Instantiate the request.
+ req, err := c.newRequest("POST", reqMetadata)
+ if err != nil {
+ return completeMultipartUploadResult{}, err
+ }
+
+ // Execute the request.
+ resp, err := c.httpClient.Do(req)
+ defer closeResponse(resp)
+ if err != nil {
+ return completeMultipartUploadResult{}, err
+ }
+ if resp != nil {
+ if resp.StatusCode != http.StatusOK {
+ return completeMultipartUploadResult{}, HTTPRespToErrorResponse(resp, bucketName, objectName)
+ }
+ }
+ // If successful response, decode the body.
+ completeMultipartUploadResult := completeMultipartUploadResult{}
+ err = xmlDecoder(resp.Body, &completeMultipartUploadResult)
+ if err != nil {
+ return completeMultipartUploadResult, err
+ }
+ return completeMultipartUploadResult, nil
+}
diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/api-remove.go b/Godeps/_workspace/src/github.com/minio/minio-go/api-remove.go
new file mode 100644
index 000000000..0330c9538
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/minio/minio-go/api-remove.go
@@ -0,0 +1,169 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "net/http"
+ "net/url"
+)
+
+// RemoveBucket deletes the bucket.
+//
+// All objects in the bucket (including all object versions and delete markers)
+// must be deleted before this request can succeed.
+func (c Client) RemoveBucket(bucketName string) error {
+ if err := isValidBucketName(bucketName); err != nil {
+ return err
+ }
+ req, err := c.newRequest("DELETE", requestMetadata{
+ bucketName: bucketName,
+ })
+ if err != nil {
+ return err
+ }
+ resp, err := c.httpClient.Do(req)
+ defer closeResponse(resp)
+ if err != nil {
+ return err
+ }
+ if resp != nil {
+ if resp.StatusCode != http.StatusNoContent {
+ return HTTPRespToErrorResponse(resp, bucketName, "")
+ }
+ }
+
+ // Remove the location from cache on a successful delete.
+ c.bucketLocCache.Delete(bucketName)
+
+ return nil
+}
+
+// RemoveObject removes an object from a bucket.
+func (c Client) RemoveObject(bucketName, objectName string) error {
+ if err := isValidBucketName(bucketName); err != nil {
+ return err
+ }
+ if err := isValidObjectName(objectName); err != nil {
+ return err
+ }
+ req, err := c.newRequest("DELETE", requestMetadata{
+ bucketName: bucketName,
+ objectName: objectName,
+ })
+ if err != nil {
+ return err
+ }
+ resp, err := c.httpClient.Do(req)
+ defer closeResponse(resp)
+ if err != nil {
+ return err
+ }
+ // DeleteObject always responds with http '204' even for
+ // objects which do not exist. So no need to handle them
+ // specifically.
+ return nil
+}
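+
+// An illustrative usage sketch (not part of the library): RemoveBucket only
+// succeeds on an empty bucket, so objects are removed first. Names are
+// placeholders and 'c' is an assumed Client value.
+//
+//	if err := c.RemoveObject("mybucket", "hello.txt"); err != nil {
+//		log.Fatalln(err)
+//	}
+//	if err := c.RemoveBucket("mybucket"); err != nil {
+//		log.Fatalln(err)
+//	}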
+
+// RemoveIncompleteUpload aborts a partially uploaded object.
+// Requires explicit authentication, no anonymous requests are allowed for multipart API.
+func (c Client) RemoveIncompleteUpload(bucketName, objectName string) error {
+ // Validate input arguments.
+ if err := isValidBucketName(bucketName); err != nil {
+ return err
+ }
+ if err := isValidObjectName(objectName); err != nil {
+ return err
+ }
+ errorCh := make(chan error)
+ go func(errorCh chan<- error) {
+ defer close(errorCh)
+ // Find multipart upload id of the object.
+ uploadID, err := c.findUploadID(bucketName, objectName)
+ if err != nil {
+ errorCh <- err
+ return
+ }
+ if uploadID != "" {
+ // If uploadID is not an empty string, initiate the request.
+ err := c.abortMultipartUpload(bucketName, objectName, uploadID)
+ if err != nil {
+ errorCh <- err
+ return
+ }
+ return
+ }
+ }(errorCh)
+ err, ok := <-errorCh
+ if ok && err != nil {
+ return err
+ }
+ return nil
+}
+
+// abortMultipartUpload aborts a multipart upload for the given uploadID; all previously uploaded parts are deleted.
+func (c Client) abortMultipartUpload(bucketName, objectName, uploadID string) error {
+ // Validate input arguments.
+ if err := isValidBucketName(bucketName); err != nil {
+ return err
+ }
+ if err := isValidObjectName(objectName); err != nil {
+ return err
+ }
+
+ // Initialize url queries.
+ urlValues := make(url.Values)
+ urlValues.Set("uploadId", uploadID)
+
+ // Instantiate a new DELETE request.
+ req, err := c.newRequest("DELETE", requestMetadata{
+ bucketName: bucketName,
+ objectName: objectName,
+ queryValues: urlValues,
+ })
+ if err != nil {
+ return err
+ }
+ // execute the request.
+ resp, err := c.httpClient.Do(req)
+ defer closeResponse(resp)
+ if err != nil {
+ return err
+ }
+ if resp != nil {
+ if resp.StatusCode != http.StatusNoContent {
+ // Abort has no response body, handle it.
+ var errorResponse ErrorResponse
+ switch resp.StatusCode {
+ case http.StatusNotFound:
+			// This case is specific to Abort and cannot be handled by the generic error response converter.
+ errorResponse = ErrorResponse{
+ Code: "NoSuchUpload",
+ Message: "The specified multipart upload does not exist.",
+ BucketName: bucketName,
+ Key: objectName,
+ RequestID: resp.Header.Get("x-amz-request-id"),
+ HostID: resp.Header.Get("x-amz-id-2"),
+ AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"),
+ }
+ default:
+ return HTTPRespToErrorResponse(resp, bucketName, objectName)
+ }
+ return errorResponse
+ }
+ }
+ return nil
+}
diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/definitions.go b/Godeps/_workspace/src/github.com/minio/minio-go/api-s3-definitions.go
similarity index 92%
rename from Godeps/_workspace/src/github.com/minio/minio-go/definitions.go
rename to Godeps/_workspace/src/github.com/minio/minio-go/api-s3-definitions.go
index a9a69db6b..16d87a70e 100644
--- a/Godeps/_workspace/src/github.com/minio/minio-go/definitions.go
+++ b/Godeps/_workspace/src/github.com/minio/minio-go/api-s3-definitions.go
@@ -90,8 +90,8 @@ type initiator struct {
DisplayName string
}
-// partMetadata container for particular part of an object
-type partMetadata struct {
+// objectPart container for particular part of an object
+type objectPart struct {
// Part number identifies the part.
PartNumber int
@@ -103,6 +103,9 @@ type partMetadata struct {
// Size of the uploaded part data.
Size int64
+
+ // Error
+ Err error
}
// listObjectPartsResult container for ListObjectParts response.
@@ -121,7 +124,7 @@ type listObjectPartsResult struct {
// Indicates whether the returned list of parts is truncated.
IsTruncated bool
- Parts []partMetadata `xml:"Part"`
+ ObjectParts []objectPart `xml:"Part"`
EncodingType string
}
@@ -162,7 +165,9 @@ type createBucketConfiguration struct {
Location string `xml:"LocationConstraint"`
}
+// grant container for the grantee and his or her permissions.
type grant struct {
+ // grantee container for DisplayName and ID of the person being granted permissions.
Grantee struct {
ID string
DisplayName string
@@ -173,7 +178,9 @@ type grant struct {
Permission string
}
+// accessControlPolicy contains the elements providing ACL permissions for a bucket.
type accessControlPolicy struct {
+ // accessControlList container for ACL information.
AccessControlList struct {
Grant []grant
}
diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/api-stat.go b/Godeps/_workspace/src/github.com/minio/minio-go/api-stat.go
new file mode 100644
index 000000000..29bd83fd9
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/minio/minio-go/api-stat.go
@@ -0,0 +1,113 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "net/http"
+ "strconv"
+ "strings"
+ "time"
+)
+
+// BucketExists verifies whether a bucket exists and you have permission to access it.
+func (c Client) BucketExists(bucketName string) error {
+ if err := isValidBucketName(bucketName); err != nil {
+ return err
+ }
+ req, err := c.newRequest("HEAD", requestMetadata{
+ bucketName: bucketName,
+ })
+ if err != nil {
+ return err
+ }
+ resp, err := c.httpClient.Do(req)
+ defer closeResponse(resp)
+ if err != nil {
+ return err
+ }
+ if resp != nil {
+ if resp.StatusCode != http.StatusOK {
+ return HTTPRespToErrorResponse(resp, bucketName, "")
+ }
+ }
+ return nil
+}
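+
+// An illustrative usage sketch (not part of the library): since BucketExists
+// returns a typed error on failure, a nil error means the bucket exists and
+// is accessible. The bucket name is a placeholder.
+//
+//	if err := c.BucketExists("mybucket"); err != nil {
+//		log.Fatalln(err)
+//	}
+//	log.Println("mybucket exists and is accessible")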
+
+// StatObject verifies whether an object exists and you have permission to access it.
+func (c Client) StatObject(bucketName, objectName string) (ObjectStat, error) {
+ if err := isValidBucketName(bucketName); err != nil {
+ return ObjectStat{}, err
+ }
+ if err := isValidObjectName(objectName); err != nil {
+ return ObjectStat{}, err
+ }
+ // Instantiate a new request.
+ req, err := c.newRequest("HEAD", requestMetadata{
+ bucketName: bucketName,
+ objectName: objectName,
+ })
+ if err != nil {
+ return ObjectStat{}, err
+ }
+ resp, err := c.httpClient.Do(req)
+ defer closeResponse(resp)
+ if err != nil {
+ return ObjectStat{}, err
+ }
+ if resp != nil {
+ if resp.StatusCode != http.StatusOK {
+ return ObjectStat{}, HTTPRespToErrorResponse(resp, bucketName, objectName)
+ }
+ }
+ md5sum := strings.Trim(resp.Header.Get("ETag"), "\"") // trim off the odd double quotes
+ size, err := strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64)
+ if err != nil {
+ return ObjectStat{}, ErrorResponse{
+ Code: "InternalError",
+ Message: "Content-Length is invalid. " + reportIssue,
+ BucketName: bucketName,
+ Key: objectName,
+ RequestID: resp.Header.Get("x-amz-request-id"),
+ HostID: resp.Header.Get("x-amz-id-2"),
+ AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"),
+ }
+ }
+ date, err := time.Parse(http.TimeFormat, resp.Header.Get("Last-Modified"))
+ if err != nil {
+ return ObjectStat{}, ErrorResponse{
+ Code: "InternalError",
+ Message: "Last-Modified time format is invalid. " + reportIssue,
+ BucketName: bucketName,
+ Key: objectName,
+ RequestID: resp.Header.Get("x-amz-request-id"),
+ HostID: resp.Header.Get("x-amz-id-2"),
+ AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"),
+ }
+ }
+ contentType := strings.TrimSpace(resp.Header.Get("Content-Type"))
+ if contentType == "" {
+ contentType = "application/octet-stream"
+ }
+ // Save object metadata info.
+ var objectStat ObjectStat
+ objectStat.ETag = md5sum
+ objectStat.Key = objectName
+ objectStat.Size = size
+ objectStat.LastModified = date
+ objectStat.ContentType = contentType
+ return objectStat, nil
+}
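+
+// An illustrative usage sketch (not part of the library): StatObject issues a
+// HEAD request, so object metadata is retrieved without downloading the body.
+// Names are placeholders.
+//
+//	stat, err := c.StatObject("mybucket", "hello.txt")
+//	if err != nil {
+//		log.Fatalln(err)
+//	}
+//	log.Printf("%s: %d bytes, %s, modified %s", stat.Key, stat.Size, stat.ContentType, stat.LastModified)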
diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/api.go b/Godeps/_workspace/src/github.com/minio/minio-go/api.go
index 27ad4ca94..788a74d4d 100644
--- a/Godeps/_workspace/src/github.com/minio/minio-go/api.go
+++ b/Godeps/_workspace/src/github.com/minio/minio-go/api.go
@@ -17,1152 +17,331 @@
package minio
import (
+ "encoding/base64"
"encoding/hex"
- "errors"
"io"
"net/http"
"net/url"
- "path/filepath"
"runtime"
- "sort"
- "strconv"
- "strings"
- "sync"
"time"
)
-// API - Cloud Storage API interface
-type API interface {
- // Bucket Read/Write/Stat operations
- BucketAPI
+// Client implements Amazon S3 compatible methods.
+type Client struct {
+ /// Standard options.
+ accessKeyID string // AccessKeyID required for authorized requests.
+ secretAccessKey string // SecretAccessKey required for authorized requests.
+ signature SignatureType // Choose a signature type if necessary.
+ anonymous bool // Set to 'true' if Client has no access and secret keys.
- // Object Read/Write/Stat operations
- ObjectAPI
-
- // Presigned API
- PresignedAPI
-}
-
-// BucketAPI - bucket specific Read/Write/Stat interface
-type BucketAPI interface {
- MakeBucket(bucket string, cannedACL BucketACL) error
- BucketExists(bucket string) error
- RemoveBucket(bucket string) error
- SetBucketACL(bucket string, cannedACL BucketACL) error
- GetBucketACL(bucket string) (BucketACL, error)
-
- ListBuckets() <-chan BucketStatCh
- ListObjects(bucket, prefix string, recursive bool) <-chan ObjectStatCh
- ListIncompleteUploads(bucket, prefix string, recursive bool) <-chan ObjectMultipartStatCh
-}
-
-// ObjectAPI - object specific Read/Write/Stat interface
-type ObjectAPI interface {
- GetObject(bucket, object string) (io.ReadCloser, ObjectStat, error)
- GetPartialObject(bucket, object string, offset, length int64) (io.ReadCloser, ObjectStat, error)
- PutObject(bucket, object, contentType string, size int64, data io.Reader) error
- StatObject(bucket, object string) (ObjectStat, error)
- RemoveObject(bucket, object string) error
-
- RemoveIncompleteUpload(bucket, object string) <-chan error
-}
-
-// PresignedAPI - object specific for now
-type PresignedAPI interface {
- PresignedGetObject(bucket, object string, expires time.Duration) (string, error)
- PresignedPutObject(bucket, object string, expires time.Duration) (string, error)
- PresignedPostPolicy(*PostPolicy) (map[string]string, error)
-}
-
-// BucketStatCh - bucket metadata over read channel
-type BucketStatCh struct {
- Stat BucketStat
- Err error
-}
-
-// ObjectStatCh - object metadata over read channel
-type ObjectStatCh struct {
- Stat ObjectStat
- Err error
-}
-
-// ObjectMultipartStatCh - multipart object metadata over read channel
-type ObjectMultipartStatCh struct {
- Stat ObjectMultipartStat
- Err error
-}
-
-// BucketStat container for bucket metadata
-type BucketStat struct {
- // The name of the bucket.
- Name string
- // Date the bucket was created.
- CreationDate time.Time
-}
-
-// ObjectStat container for object metadata
-type ObjectStat struct {
- ETag string
- Key string
- LastModified time.Time
- Size int64
- ContentType string
-
- Owner struct {
- DisplayName string
- ID string
+ // User supplied.
+ appInfo struct {
+ appName string
+ appVersion string
}
+ endpointURL *url.URL
- // The class of storage used to store the object.
- StorageClass string
+ // Needs allocation.
+ httpClient *http.Client
+ bucketLocCache *bucketLocationCache
}
-// ObjectMultipartStat container for multipart object metadata
-type ObjectMultipartStat struct {
- // Date and time at which the multipart upload was initiated.
- Initiated time.Time `type:"timestamp" timestampFormat:"iso8601"`
-
- Initiator initiator
- Owner owner
-
- StorageClass string
-
- // Key of the object for which the multipart upload was initiated.
- Key string
- Size int64
-
- // Upload ID that identifies the multipart upload.
- UploadID string `xml:"UploadId"`
-}
-
-// Regions s3 region map used by bucket location constraint
-var regions = map[string]string{
- "s3-fips-us-gov-west-1.amazonaws.com": "us-gov-west-1",
- "s3.amazonaws.com": "us-east-1",
- "s3-external-1.amazonaws.com": "us-east-1",
- "s3-us-west-1.amazonaws.com": "us-west-1",
- "s3-us-west-2.amazonaws.com": "us-west-2",
- "s3-eu-west-1.amazonaws.com": "eu-west-1",
- "s3-eu-central-1.amazonaws.com": "eu-central-1",
- "s3-ap-southeast-1.amazonaws.com": "ap-southeast-1",
- "s3-ap-southeast-2.amazonaws.com": "ap-southeast-2",
- "s3-ap-northeast-1.amazonaws.com": "ap-northeast-1",
- "s3-sa-east-1.amazonaws.com": "sa-east-1",
- "s3.cn-north-1.amazonaws.com.cn": "cn-north-1",
-
- // Add google cloud storage as one of the regions
- "storage.googleapis.com": "google",
-}
-
-// getRegion returns a region based on its endpoint mapping.
-func getRegion(host string) (region string) {
- if _, ok := regions[host]; ok {
- return regions[host]
- }
- // Region cannot be empty according to Amazon S3.
- // So we address all the four quadrants of our galaxy.
- return "milkyway"
-}
-
-// SignatureType is type of signature to be used for a request
-type SignatureType int
-
-// Different types of supported signatures - default is Latest i.e SignatureV4
+// Global constants.
const (
- Latest SignatureType = iota
- SignatureV4
- SignatureV2
+ libraryName = "minio-go"
+ libraryVersion = "0.2.5"
)
-// isV2 - is signature SignatureV2?
-func (s SignatureType) isV2() bool {
- return s == SignatureV2
+// User Agent should always follow the below style.
+// Please open an issue to discuss any new changes here.
+//
+// Minio (OS; ARCH) LIB/VER APP/VER
+const (
+ libraryUserAgentPrefix = "Minio (" + runtime.GOOS + "; " + runtime.GOARCH + ") "
+ libraryUserAgent = libraryUserAgentPrefix + libraryName + "/" + libraryVersion
+)
+
+// NewV2 - instantiate minio client with Amazon S3 signature version '2' compatibility.
+func NewV2(endpoint string, accessKeyID, secretAccessKey string, insecure bool) (CloudStorageClient, error) {
+ clnt, err := privateNew(endpoint, accessKeyID, secretAccessKey, insecure)
+ if err != nil {
+ return nil, err
+ }
+ // Set to use signature version '2'.
+ clnt.signature = SignatureV2
+ return clnt, nil
}
-// isV4 - is signature SignatureV4?
-func (s SignatureType) isV4() bool {
- return s == SignatureV4
+// NewV4 - instantiate minio client with Amazon S3 signature version '4' compatibility.
+func NewV4(endpoint string, accessKeyID, secretAccessKey string, insecure bool) (CloudStorageClient, error) {
+ clnt, err := privateNew(endpoint, accessKeyID, secretAccessKey, insecure)
+ if err != nil {
+ return nil, err
+ }
+ // Set to use signature version '4'.
+ clnt.signature = SignatureV4
+ return clnt, nil
}
-// isLatest - is signature Latest?
-func (s SignatureType) isLatest() bool {
- return s == Latest
+// New - instantiate a minio Client, automatically selecting the appropriate signature version.
+func New(endpoint string, accessKeyID, secretAccessKey string, insecure bool) (CloudStorageClient, error) {
+ clnt, err := privateNew(endpoint, accessKeyID, secretAccessKey, insecure)
+ if err != nil {
+ return nil, err
+ }
+ // Google cloud storage should be set to signature V2, force it if not.
+ if isGoogleEndpoint(clnt.endpointURL) {
+ clnt.signature = SignatureV2
+ }
+	// Amazon S3 should be set to signature V4, force it if not.
+ if isAmazonEndpoint(clnt.endpointURL) {
+ clnt.signature = SignatureV4
+ }
+ return clnt, nil
}
-// Config - main configuration struct used by all to set endpoint, credentials, and other options for requests.
-type Config struct {
- // Standard options
- AccessKeyID string
- SecretAccessKey string
- Endpoint string
- Signature SignatureType
+func privateNew(endpoint, accessKeyID, secretAccessKey string, insecure bool) (*Client, error) {
+ // construct endpoint.
+ endpointURL, err := getEndpointURL(endpoint, insecure)
+ if err != nil {
+ return nil, err
+ }
- // Advanced options
- // Specify this to get server response in non XML style if server supports it
- AcceptType string
- // Optional field. If empty, region is determined automatically.
- Region string
+ // instantiate new Client.
+ clnt := new(Client)
+ clnt.accessKeyID = accessKeyID
+ clnt.secretAccessKey = secretAccessKey
+ if clnt.accessKeyID == "" || clnt.secretAccessKey == "" {
+ clnt.anonymous = true
+ }
- // Expert options
- //
- // Set this to override default transport ``http.DefaultTransport``
+	// Save endpoint URL for future uses.
+ clnt.endpointURL = endpointURL
+
+ // Instantiate http client and bucket location cache.
+ clnt.httpClient = &http.Client{}
+ clnt.bucketLocCache = newBucketLocationCache()
+
+ // Return.
+ return clnt, nil
+}
+
+// SetAppInfo - add application details to user agent.
+func (c *Client) SetAppInfo(appName string, appVersion string) {
+	// If app name and version are not set, we do not set a new user agent.
+ if appName != "" && appVersion != "" {
+ c.appInfo = struct {
+ appName string
+ appVersion string
+ }{}
+ c.appInfo.appName = appName
+ c.appInfo.appVersion = appVersion
+ }
+}
+
+// SetCustomTransport - set new custom transport.
+func (c *Client) SetCustomTransport(customHTTPTransport http.RoundTripper) {
+ // Set this to override default transport ``http.DefaultTransport``.
//
// This transport is usually needed for debugging OR to add your own
// custom TLS certificates on the client transport, for custom CA's and
- // certs which are not part of standard certificate authority
+ // certs which are not part of standard certificate authority follow this
+ // example :-
//
- // For example :-
+ // tr := &http.Transport{
+ // TLSClientConfig: &tls.Config{RootCAs: pool},
+ // DisableCompression: true,
+ // }
+ // api.SetTransport(tr)
//
- // tr := &http.Transport{
- // TLSClientConfig: &tls.Config{RootCAs: pool},
- // DisableCompression: true,
- // }
- //
- Transport http.RoundTripper
-
- // internal
- // use SetUserAgent append to default, useful when minio-go is used with in your application
- userAgent string
- isUserAgentSet bool // allow user agent's to be set only once
- isVirtualStyle bool // set when virtual hostnames are on
-}
-
-// Global constants
-const (
- LibraryName = "minio-go"
- LibraryVersion = "0.2.5"
-)
-
-// SetUserAgent - append to a default user agent
-func (c *Config) SetUserAgent(name string, version string, comments ...string) {
- if c.isUserAgentSet {
- // if user agent already set do not set it
- return
- }
- // if no name and version is set we do not add new user agents
- if name != "" && version != "" {
- c.userAgent = c.userAgent + " " + name + "/" + version + " (" + strings.Join(comments, "; ") + ") "
- c.isUserAgentSet = true
+ if c.httpClient != nil {
+ c.httpClient.Transport = customHTTPTransport
}
}
-type api struct {
- apiCore
+// requestMetadata - container for all the values needed to make a request.
+type requestMetadata struct {
+ // If set newRequest presigns the URL.
+ presignURL bool
+
+ // User supplied.
+ bucketName string
+ objectName string
+ queryValues url.Values
+ customHeader http.Header
+ expires int64
+
+ // Generated by our internal code.
+ contentBody io.ReadCloser
+ contentLength int64
+ contentSha256Bytes []byte
+ contentMD5Bytes []byte
}
-// New - instantiate a new minio api client
-func New(config Config) (API, error) {
- if strings.TrimSpace(config.Region) == "" || len(config.Region) == 0 {
- u, err := url.Parse(config.Endpoint)
+func (c Client) newRequest(method string, metadata requestMetadata) (*http.Request, error) {
+ // If no method is supplied default to 'POST'.
+ if method == "" {
+ method = "POST"
+ }
+
+ // construct a new target URL.
+ targetURL, err := c.makeTargetURL(metadata.bucketName, metadata.objectName, metadata.queryValues)
+ if err != nil {
+ return nil, err
+ }
+
+ // get a new HTTP request for the method.
+ req, err := http.NewRequest(method, targetURL.String(), nil)
+ if err != nil {
+ return nil, err
+ }
+
+ // Gather location only if bucketName is present.
+ location := "us-east-1" // Default all other requests to "us-east-1".
+ if metadata.bucketName != "" {
+ location, err = c.getBucketLocation(metadata.bucketName)
if err != nil {
- return api{}, err
- }
- match, _ := filepath.Match("*.s3*.amazonaws.com", u.Host)
- if match {
- config.isVirtualStyle = true
- hostSplits := strings.SplitN(u.Host, ".", 2)
- u.Host = hostSplits[1]
- }
- matchGoogle, _ := filepath.Match("*.storage.googleapis.com", u.Host)
- if matchGoogle {
- config.isVirtualStyle = true
- hostSplits := strings.SplitN(u.Host, ".", 2)
- u.Host = hostSplits[1]
- }
- config.Region = getRegion(u.Host)
- if config.Region == "google" {
- // Google cloud storage is signature V2
- config.Signature = SignatureV2
+ return nil, err
}
}
- config.SetUserAgent(LibraryName, LibraryVersion, runtime.GOOS, runtime.GOARCH)
- config.isUserAgentSet = false // default
- return api{apiCore{&config}}, nil
-}
-// PresignedPostPolicy return POST form data that can be used for object upload
-func (a api) PresignedPostPolicy(p *PostPolicy) (map[string]string, error) {
- if p.expiration.IsZero() {
- return nil, errors.New("Expiration time must be specified")
- }
- if _, ok := p.formData["key"]; !ok {
- return nil, errors.New("object key must be specified")
- }
- if _, ok := p.formData["bucket"]; !ok {
- return nil, errors.New("bucket name must be specified")
- }
- return a.presignedPostPolicy(p), nil
-}
-
-/// Object operations
-
-/// Expires maximum is 7days - ie. 604800 and minimum is 1
-
-// PresignedPutObject get a presigned URL to upload an object
-func (a api) PresignedPutObject(bucket, object string, expires time.Duration) (string, error) {
- expireSeconds := int64(expires / time.Second)
- if expireSeconds < 1 || expireSeconds > 604800 {
- return "", invalidArgumentError("")
- }
- return a.presignedPutObject(bucket, object, expireSeconds)
-}
-
-// PresignedGetObject get a presigned URL to retrieve an object for third party apps
-func (a api) PresignedGetObject(bucket, object string, expires time.Duration) (string, error) {
- expireSeconds := int64(expires / time.Second)
- if expireSeconds < 1 || expireSeconds > 604800 {
- return "", invalidArgumentError("")
- }
- return a.presignedGetObject(bucket, object, expireSeconds, 0, 0)
-}
-
-// GetObject retrieve object
-
-// Downloads full object with no ranges, if you need ranges use GetPartialObject
-func (a api) GetObject(bucket, object string) (io.ReadCloser, ObjectStat, error) {
- if err := invalidBucketError(bucket); err != nil {
- return nil, ObjectStat{}, err
- }
- if err := invalidObjectError(object); err != nil {
- return nil, ObjectStat{}, err
- }
- // get object
- return a.getObject(bucket, object, 0, 0)
-}
-
-// GetPartialObject retrieve partial object
-//
-// Takes range arguments to download the specified range bytes of an object.
-// Setting offset and length = 0 will download the full object.
-// For more information about the HTTP Range header, go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35.
-func (a api) GetPartialObject(bucket, object string, offset, length int64) (io.ReadCloser, ObjectStat, error) {
- if err := invalidBucketError(bucket); err != nil {
- return nil, ObjectStat{}, err
- }
- if err := invalidObjectError(object); err != nil {
- return nil, ObjectStat{}, err
- }
- // get partial object
- return a.getObject(bucket, object, offset, length)
-}
-
-// completedParts is a wrapper to make parts sortable by their part number
-// multi part completion requires list of multi parts to be sorted
-type completedParts []completePart
-
-func (a completedParts) Len() int { return len(a) }
-func (a completedParts) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
-func (a completedParts) Less(i, j int) bool { return a[i].PartNumber < a[j].PartNumber }
-
-// minimumPartSize minimum part size per object after which PutObject behaves internally as multipart
-var minimumPartSize int64 = 1024 * 1024 * 5
-
-// maxParts - unexported right now
-var maxParts = int64(10000)
-
-// maxPartSize - unexported right now
-var maxPartSize int64 = 1024 * 1024 * 1024 * 5
-
-// maxConcurrentQueue - max concurrent upload queue
-var maxConcurrentQueue int64 = 4
-
-// calculatePartSize - calculate the optimal part size for the given objectSize
-//
-// NOTE: Assumption here is that for any given object upload to a S3 compatible object
-// storage it will have the following parameters as constants
-//
-// maxParts
-// maximumPartSize
-// minimumPartSize
-//
-// if a the partSize after division with maxParts is greater than minimumPartSize
-// then choose that to be the new part size, if not return MinimumPartSize
-//
-// special case where it happens to be that partSize is indeed bigger than the
-// maximum part size just return maxPartSize back
-func calculatePartSize(objectSize int64) int64 {
- // make sure last part has enough buffer and handle this poperly
- partSize := (objectSize / (maxParts - 1))
- if partSize > minimumPartSize {
- if partSize > maxPartSize {
- return maxPartSize
+ // If presigned request, return quickly.
+ if metadata.expires != 0 {
+ if c.anonymous {
+ return nil, ErrInvalidArgument("Requests cannot be presigned with anonymous credentials.")
}
- return partSize
+ if c.signature.isV2() {
+ // Presign URL with signature v2.
+ req = PreSignV2(*req, c.accessKeyID, c.secretAccessKey, metadata.expires)
+ } else {
+ // Presign URL with signature v4.
+ req = PreSignV4(*req, c.accessKeyID, c.secretAccessKey, location, metadata.expires)
+ }
+ return req, nil
}
- return minimumPartSize
+
+ // Set content body if available.
+ if metadata.contentBody != nil {
+ req.Body = metadata.contentBody
+ }
+
+ // set UserAgent for the request.
+ c.setUserAgent(req)
+
+ // Set all headers.
+ for k, v := range metadata.customHeader {
+ req.Header.Set(k, v[0])
+ }
+
+ // set incoming content-length.
+ if metadata.contentLength > 0 {
+ req.ContentLength = metadata.contentLength
+ }
+
+ // Set sha256 sum only for non anonymous credentials.
+ if !c.anonymous {
+ // set sha256 sum for signature calculation only with signature version '4'.
+ if c.signature.isV4() {
+ req.Header.Set("X-Amz-Content-Sha256", hex.EncodeToString(sum256([]byte{})))
+ if metadata.contentSha256Bytes != nil {
+ req.Header.Set("X-Amz-Content-Sha256", hex.EncodeToString(metadata.contentSha256Bytes))
+ }
+ }
+ }
+
+ // set md5Sum for content protection.
+ if metadata.contentMD5Bytes != nil {
+ req.Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(metadata.contentMD5Bytes))
+ }
+
+ // Sign the request if not anonymous.
+ if !c.anonymous {
+ if c.signature.isV2() {
+ // Add signature version '2' authorization header.
+ req = SignV2(*req, c.accessKeyID, c.secretAccessKey)
+ } else if c.signature.isV4() {
+ // Add signature version '4' authorization header.
+ req = SignV4(*req, c.accessKeyID, c.secretAccessKey, location)
+ }
+ }
+ // return request.
+ return req, nil
}
-func (a api) newObjectUpload(bucket, object, contentType string, size int64, data io.Reader) error {
- initMultipartUploadResult, err := a.initiateMultipartUpload(bucket, object)
+func (c Client) setUserAgent(req *http.Request) {
+ req.Header.Set("User-Agent", libraryUserAgent)
+ if c.appInfo.appName != "" && c.appInfo.appVersion != "" {
+ req.Header.Set("User-Agent", libraryUserAgent+" "+c.appInfo.appName+"/"+c.appInfo.appVersion)
+ }
+}
+
+func (c Client) makeTargetURL(bucketName, objectName string, queryValues url.Values) (*url.URL, error) {
+ urlStr := c.endpointURL.Scheme + "://" + c.endpointURL.Host + "/"
+ // Make URL only if bucketName is available, otherwise use the endpoint URL.
+ if bucketName != "" {
+ // If endpoint supports virtual host style use that always.
+ // Currently only S3 and Google Cloud Storage would support this.
+ if isVirtualHostSupported(c.endpointURL) {
+ urlStr = c.endpointURL.Scheme + "://" + bucketName + "." + c.endpointURL.Host + "/"
+ if objectName != "" {
+ urlStr = urlStr + urlEncodePath(objectName)
+ }
+ } else {
+ // If not fall back to using path style.
+ urlStr = urlStr + bucketName
+ if objectName != "" {
+ urlStr = urlStr + "/" + urlEncodePath(objectName)
+ }
+ }
+ }
+ // If there are any query values, add them to the end.
+ if len(queryValues) > 0 {
+ urlStr = urlStr + "?" + queryValues.Encode()
+ }
+ u, err := url.Parse(urlStr)
if err != nil {
- return err
+ return nil, err
}
- uploadID := initMultipartUploadResult.UploadID
- complMultipartUpload := completeMultipartUpload{}
- var totalLength int64
- // Calculate optimal part size for a given size
- partSize := calculatePartSize(size)
- // Allocate bufferred error channel for maximum parts
- errCh := make(chan error, maxParts)
- // Limit multi part queue size to concurrent
- mpQueueCh := make(chan struct{}, maxConcurrentQueue)
- defer close(errCh)
- defer close(mpQueueCh)
- // Allocate a new wait group
- wg := new(sync.WaitGroup)
-
- for p := range chopper(data, partSize, nil) {
- // This check is primarily for last part
- // This verifies if the part.Len was an unexpected read i.e if we lost few bytes
- if p.Len < partSize && size > 0 {
- expectedPartLen := size - totalLength
- if expectedPartLen != p.Len {
- return ErrorResponse{
- Code: "UnexpectedShortRead",
- Message: "Data read ‘" + strconv.FormatInt(expectedPartLen, 10) + "’ is not equal to expected size ‘" + strconv.FormatInt(p.Len, 10) + "’",
- Resource: separator + bucket + separator + object,
- }
- }
- }
- // Limit to 4 parts a given time
- mpQueueCh <- struct{}{}
- // Account for all parts uploaded simultaneousy
- wg.Add(1)
- go func(errCh chan<- error, mpQueueCh <-chan struct{}, p part) {
- defer wg.Done()
- defer func() {
- <-mpQueueCh
- }()
- if p.Err != nil {
- errCh <- p.Err
- return
- }
- var complPart completePart
- complPart, err = a.uploadPart(bucket, object, uploadID, p.MD5Sum, p.Num, p.Len, p.ReadSeeker)
- if err != nil {
- errCh <- err
- return
- }
- complMultipartUpload.Parts = append(complMultipartUpload.Parts, complPart)
- errCh <- nil
- }(errCh, mpQueueCh, p)
- totalLength += p.Len
- }
- wg.Wait()
- if err := <-errCh; err != nil {
- return err
- }
- sort.Sort(completedParts(complMultipartUpload.Parts))
- _, err = a.completeMultipartUpload(bucket, object, uploadID, complMultipartUpload)
- if err != nil {
- return err
- }
- return nil
+ return u, nil
}
-type partCh struct {
- Metadata partMetadata
- Err error
-}
-
-func (a api) listObjectPartsRecursive(bucket, object, uploadID string) <-chan partCh {
- partCh := make(chan partCh, 1000)
- go a.listObjectPartsRecursiveInRoutine(bucket, object, uploadID, partCh)
- return partCh
-}
-
-func (a api) listObjectPartsRecursiveInRoutine(bucket, object, uploadID string, ch chan partCh) {
- defer close(ch)
- listObjPartsResult, err := a.listObjectParts(bucket, object, uploadID, 0, 1000)
- if err != nil {
- ch <- partCh{
- Metadata: partMetadata{},
- Err: err,
- }
- return
- }
- for _, uploadedPart := range listObjPartsResult.Parts {
- ch <- partCh{
- Metadata: uploadedPart,
- Err: nil,
- }
- }
- for {
- if !listObjPartsResult.IsTruncated {
- break
- }
- listObjPartsResult, err = a.listObjectParts(bucket, object, uploadID, listObjPartsResult.NextPartNumberMarker, 1000)
- if err != nil {
- ch <- partCh{
- Metadata: partMetadata{},
- Err: err,
- }
- return
- }
- for _, uploadedPart := range listObjPartsResult.Parts {
- ch <- partCh{
- Metadata: uploadedPart,
- Err: nil,
- }
- }
- }
-}
-
-func (a api) getMultipartSize(bucket, object, uploadID string) (int64, error) {
- var size int64
- for part := range a.listObjectPartsRecursive(bucket, object, uploadID) {
- if part.Err != nil {
- return 0, part.Err
- }
- size += part.Metadata.Size
- }
- return size, nil
-}
-
-func (a api) continueObjectUpload(bucket, object, uploadID string, size int64, data io.Reader) error {
- var skipParts []skipPart
- completeMultipartUpload := completeMultipartUpload{}
- var totalLength int64
- for part := range a.listObjectPartsRecursive(bucket, object, uploadID) {
- if part.Err != nil {
- return part.Err
- }
- var completedPart completePart
- completedPart.PartNumber = part.Metadata.PartNumber
- completedPart.ETag = part.Metadata.ETag
- completeMultipartUpload.Parts = append(completeMultipartUpload.Parts, completedPart)
- md5SumBytes, err := hex.DecodeString(strings.Trim(part.Metadata.ETag, "\"")) // trim off the odd double quotes
- if err != nil {
- return err
- }
- totalLength += part.Metadata.Size
- skipParts = append(skipParts, skipPart{
- md5sum: md5SumBytes,
- partNumber: part.Metadata.PartNumber,
- })
- }
-
- // Calculate the optimal part size for a given size
- partSize := calculatePartSize(size)
- // Allocate bufferred error channel for maximum parts
- errCh := make(chan error, maxParts)
- // Limit multipart queue size to concurrent
- mpQueueCh := make(chan struct{}, maxConcurrentQueue)
- defer close(errCh)
- defer close(mpQueueCh)
- // Allocate a new wait group
- wg := new(sync.WaitGroup)
-
- for p := range chopper(data, partSize, skipParts) {
- // This check is primarily for last part
- // This verifies if the part.Len was an unexpected read i.e if we lost few bytes
- if p.Len < partSize && size > 0 {
- expectedPartLen := size - totalLength
- if expectedPartLen != p.Len {
- return ErrorResponse{
- Code: "UnexpectedShortRead",
- Message: "Data read ‘" + strconv.FormatInt(expectedPartLen, 10) + "’ is not equal to expected size ‘" + strconv.FormatInt(p.Len, 10) + "’",
- Resource: separator + bucket + separator + object,
- }
- }
- }
- // Limit to 4 parts a given time
- mpQueueCh <- struct{}{}
- // Account for all parts uploaded simultaneousy
- wg.Add(1)
- go func(errCh chan<- error, mpQueueCh <-chan struct{}, p part) {
- defer wg.Done()
- defer func() {
- <-mpQueueCh
- }()
- if p.Err != nil {
- errCh <- p.Err
- return
- }
- completedPart, err := a.uploadPart(bucket, object, uploadID, p.MD5Sum, p.Num, p.Len, p.ReadSeeker)
- if err != nil {
- errCh <- err
- return
- }
- completeMultipartUpload.Parts = append(completeMultipartUpload.Parts, completedPart)
- errCh <- nil
- }(errCh, mpQueueCh, p)
- totalLength += p.Len
- }
- wg.Wait()
- if err := <-errCh; err != nil {
- return err
- }
- sort.Sort(completedParts(completeMultipartUpload.Parts))
- _, err := a.completeMultipartUpload(bucket, object, uploadID, completeMultipartUpload)
- if err != nil {
- return err
- }
- return nil
-}
-
-// PutObject creates an object in a bucket.
-//
-// You must have WRITE permissions on a bucket to create an object.
-//
-// This version of PutObject automatically performs a multipart upload for more than 5MiB worth of data.
-func (a api) PutObject(bucket, object, contentType string, size int64, data io.Reader) error {
- if err := invalidBucketError(bucket); err != nil {
- return err
- }
- if err := invalidArgumentError(object); err != nil {
- return err
- }
- // for unauthenticated requests, do not initiate a multipart operation
- //
- // NOTE: this behavior is only kept valid for S3, since S3 doesn't
- // allow unauthenticated multipart requests
- if a.config.Region != "milkyway" {
- if a.config.AccessKeyID == "" || a.config.SecretAccessKey == "" {
- _, err := a.putObjectUnAuthenticated(bucket, object, contentType, size, data)
- if err != nil {
- return err
- }
- return nil
- }
- }
- // Special handling just for Google Cloud Storage.
- // TODO - we should remove this in future when we fully implement Resumable object upload.
- if a.config.Region == "google" {
- if size > maxPartSize {
- return ErrorResponse{
- Code: "EntityTooLarge",
- Message: "Your proposed upload exceeds the maximum allowed object size.",
- Resource: separator + bucket + separator + object,
- }
- }
- if _, err := a.putObject(bucket, object, contentType, nil, size, ReadSeekCloser(data)); err != nil {
- return err
- }
- return nil
- }
- switch {
- case size < minimumPartSize && size > 0:
- // Single Part use case, use PutObject directly
- for part := range chopper(data, minimumPartSize, nil) {
- if part.Err != nil {
- return part.Err
- }
- // This verifies whether part.Len was an unexpected short read, i.e. whether we lost a few bytes
- if part.Len != size {
- return ErrorResponse{
- Code: "MethodUnexpectedEOF",
- Message: "Data read is less than the requested size",
- Resource: separator + bucket + separator + object,
- }
- }
- _, err := a.putObject(bucket, object, contentType, part.MD5Sum, part.Len, part.ReadSeeker)
- if err != nil {
- return err
- }
- return nil
- }
- default:
- var inProgress bool
- var inProgressUploadID string
- for mpUpload := range a.listMultipartUploadsRecursive(bucket, object) {
- if mpUpload.Err != nil {
- return mpUpload.Err
- }
- if mpUpload.Metadata.Key == object {
- inProgress = true
- inProgressUploadID = mpUpload.Metadata.UploadID
- break
- }
- }
- if !inProgress {
- return a.newObjectUpload(bucket, object, contentType, size, data)
- }
- return a.continueObjectUpload(bucket, object, inProgressUploadID, size, data)
- }
- return errors.New("Unexpected control flow, please report this error at https://github.com/minio/minio-go/issues")
-}
-
-// StatObject verifies that the object exists and that you have permission to access it
-func (a api) StatObject(bucket, object string) (ObjectStat, error) {
- if err := invalidBucketError(bucket); err != nil {
- return ObjectStat{}, err
- }
- if err := invalidObjectError(object); err != nil {
- return ObjectStat{}, err
- }
- return a.headObject(bucket, object)
-}
-
-// RemoveObject removes an object from a bucket
-func (a api) RemoveObject(bucket, object string) error {
- if err := invalidBucketError(bucket); err != nil {
- return err
- }
- if err := invalidObjectError(object); err != nil {
- return err
- }
- return a.deleteObject(bucket, object)
-}
-
-/// Bucket operations
-
-// MakeBucket makes a new bucket.
-//
-// Optional arguments are acl and location. By default all buckets are created
-// with ``private`` acl and location set to US Standard. If one wishes to set
-// different ACLs and a different location, they can be set explicitly.
-//
-// ACL valid values
-//
-// private - owner gets full access [default]
-// public-read - owner gets full access, all others get read access
-// public-read-write - owner gets full access, all others get full access too
-// authenticated-read - owner gets full access, authenticated users get read access
-//
-// Valid location values, automatically derived from the config endpoint:
-//
-// [ us-west-1 | us-west-2 | eu-west-1 | eu-central-1 | ap-southeast-1 | ap-northeast-1 | ap-southeast-2 | sa-east-1 ]
-// Default - US standard
-func (a api) MakeBucket(bucket string, acl BucketACL) error {
- if err := invalidBucketError(bucket); err != nil {
- return err
- }
- if !acl.isValidBucketACL() {
- return invalidArgumentError("")
- }
- location := a.config.Region
- if location == "milkyway" {
- location = ""
- }
- if location == "us-east-1" {
- location = ""
- }
- return a.putBucket(bucket, string(acl), location)
-}
-
-// SetBucketACL sets the permissions on an existing bucket using access control lists (ACL)
-//
-// For example
-//
-// private - owner gets full access [default]
-// public-read - owner gets full access, all others get read access
-// public-read-write - owner gets full access, all others get full access too
-// authenticated-read - owner gets full access, authenticated users get read access
-//
-func (a api) SetBucketACL(bucket string, acl BucketACL) error {
- if err := invalidBucketError(bucket); err != nil {
- return err
- }
- if !acl.isValidBucketACL() {
- return invalidArgumentError("")
- }
- return a.putBucketACL(bucket, string(acl))
-}
-
-// GetBucketACL gets the permissions on an existing bucket
-//
-// Returned values are:
-//
-// private - owner gets full access
-// public-read - owner gets full access, others get read access
-// public-read-write - owner gets full access, others get full access too
-// authenticated-read - owner gets full access, authenticated users get read access
-//
-func (a api) GetBucketACL(bucket string) (BucketACL, error) {
- if err := invalidBucketError(bucket); err != nil {
- return "", err
- }
- policy, err := a.getBucketACL(bucket)
- if err != nil {
- return "", err
- }
- grants := policy.AccessControlList.Grant
- switch {
- case len(grants) == 1:
- if grants[0].Grantee.URI == "" && grants[0].Permission == "FULL_CONTROL" {
- return BucketACL("private"), nil
- }
- case len(grants) == 2:
- for _, g := range grants {
- if g.Grantee.URI == "http://acs.amazonaws.com/groups/global/AuthenticatedUsers" && g.Permission == "READ" {
- return BucketACL("authenticated-read"), nil
- }
- if g.Grantee.URI == "http://acs.amazonaws.com/groups/global/AllUsers" && g.Permission == "READ" {
- return BucketACL("public-read"), nil
- }
- }
- case len(grants) == 3:
- for _, g := range grants {
- if g.Grantee.URI == "http://acs.amazonaws.com/groups/global/AllUsers" && g.Permission == "WRITE" {
- return BucketACL("public-read-write"), nil
- }
- }
- }
- return "", ErrorResponse{
- Code: "NoSuchBucketPolicy",
- Message: "The specified bucket does not have a bucket policy.",
- Resource: "/" + bucket,
- RequestID: "minio",
- }
-}
-
-// BucketExists verifies that the bucket exists and that you have permission to access it
-func (a api) BucketExists(bucket string) error {
- if err := invalidBucketError(bucket); err != nil {
- return err
- }
- return a.headBucket(bucket)
-}
-
-// RemoveBucket deletes the bucket named in the URI.
-//
-// NOTE: All objects (including all object versions and delete markers)
-// in the bucket must be deleted before this request can succeed.
-func (a api) RemoveBucket(bucket string) error {
- if err := invalidBucketError(bucket); err != nil {
- return err
- }
- return a.deleteBucket(bucket)
-}
-
-type multiPartUploadCh struct {
- Metadata ObjectMultipartStat
- Err error
-}
-
-func (a api) listMultipartUploadsRecursive(bucket, object string) <-chan multiPartUploadCh {
- ch := make(chan multiPartUploadCh, 1000)
- go a.listMultipartUploadsRecursiveInRoutine(bucket, object, ch)
- return ch
-}
-
-func (a api) listMultipartUploadsRecursiveInRoutine(bucket, object string, ch chan multiPartUploadCh) {
- defer close(ch)
- listMultipartUplResult, err := a.listMultipartUploads(bucket, "", "", object, "", 1000)
- if err != nil {
- ch <- multiPartUploadCh{
- Metadata: ObjectMultipartStat{},
- Err: err,
- }
- return
- }
- for _, multiPartUpload := range listMultipartUplResult.Uploads {
- ch <- multiPartUploadCh{
- Metadata: multiPartUpload,
- Err: nil,
- }
- }
- for {
- if !listMultipartUplResult.IsTruncated {
- break
- }
- listMultipartUplResult, err = a.listMultipartUploads(bucket,
- listMultipartUplResult.NextKeyMarker, listMultipartUplResult.NextUploadIDMarker, object, "", 1000)
- if err != nil {
- ch <- multiPartUploadCh{
- Metadata: ObjectMultipartStat{},
- Err: err,
- }
- return
- }
- for _, multiPartUpload := range listMultipartUplResult.Uploads {
- ch <- multiPartUploadCh{
- Metadata: multiPartUpload,
- Err: nil,
- }
- }
- }
-}
-
-// listIncompleteUploadsInRoutine is an internal goroutine function called for listing incomplete multipart uploads.
-// This function feeds data into the channel.
-func (a api) listIncompleteUploadsInRoutine(bucket, prefix string, recursive bool, ch chan ObjectMultipartStatCh) {
- defer close(ch)
- if err := invalidBucketError(bucket); err != nil {
- ch <- ObjectMultipartStatCh{
- Stat: ObjectMultipartStat{},
- Err: err,
- }
- return
- }
- switch {
- case recursive == true:
- var multipartMarker string
- var uploadIDMarker string
- for {
- result, err := a.listMultipartUploads(bucket, multipartMarker, uploadIDMarker, prefix, "", 1000)
- if err != nil {
- ch <- ObjectMultipartStatCh{
- Stat: ObjectMultipartStat{},
- Err: err,
- }
- return
- }
- for _, objectSt := range result.Uploads {
- objectSt.Size, err = a.getMultipartSize(bucket, objectSt.Key, objectSt.UploadID)
- if err != nil {
- ch <- ObjectMultipartStatCh{
- Stat: ObjectMultipartStat{},
- Err: err,
- }
- }
- ch <- ObjectMultipartStatCh{
- Stat: objectSt,
- Err: nil,
- }
- multipartMarker = result.NextKeyMarker
- uploadIDMarker = result.NextUploadIDMarker
- }
- if !result.IsTruncated {
- break
- }
- }
- default:
- var multipartMarker string
- var uploadIDMarker string
- for {
- result, err := a.listMultipartUploads(bucket, multipartMarker, uploadIDMarker, prefix, "/", 1000)
- if err != nil {
- ch <- ObjectMultipartStatCh{
- Stat: ObjectMultipartStat{},
- Err: err,
- }
- return
- }
- multipartMarker = result.NextKeyMarker
- uploadIDMarker = result.NextUploadIDMarker
- for _, objectSt := range result.Uploads {
- objectSt.Size, err = a.getMultipartSize(bucket, objectSt.Key, objectSt.UploadID)
- if err != nil {
- ch <- ObjectMultipartStatCh{
- Stat: ObjectMultipartStat{},
- Err: err,
- }
- }
- ch <- ObjectMultipartStatCh{
- Stat: objectSt,
- Err: nil,
- }
- }
- for _, prefix := range result.CommonPrefixes {
- object := ObjectMultipartStat{}
- object.Key = prefix.Prefix
- object.Size = 0
- ch <- ObjectMultipartStatCh{
- Stat: object,
- Err: nil,
- }
- }
- if !result.IsTruncated {
- break
- }
- }
- }
-}
-
-// ListIncompleteUploads - (List incompletely uploaded multipart objects) - List some multipart objects or all recursively
-//
-// ListIncompleteUploads is a channel based API implemented to facilitate ease of usage of S3 API ListMultipartUploads()
-// by automatically recursively traversing all multipart objects on a given bucket if specified.
-//
-// Your input parameters are just bucket, prefix and recursive.
-//
-// If recursive is set to 'true', this function returns all the multipart objects in the given bucket.
-//
-// eg:-
-// api := client.New(....)
-// for message := range api.ListIncompleteUploads("mytestbucket", "starthere", true) {
-// fmt.Println(message.Stat)
-// }
-//
-func (a api) ListIncompleteUploads(bucket, prefix string, recursive bool) <-chan ObjectMultipartStatCh {
- ch := make(chan ObjectMultipartStatCh, 1000)
- go a.listIncompleteUploadsInRoutine(bucket, prefix, recursive, ch)
- return ch
-}
-
-// listObjectsInRoutine is an internal goroutine function called for listing objects.
-// This function feeds data into the channel.
-func (a api) listObjectsInRoutine(bucket, prefix string, recursive bool, ch chan ObjectStatCh) {
- defer close(ch)
- if err := invalidBucketError(bucket); err != nil {
- ch <- ObjectStatCh{
- Stat: ObjectStat{},
- Err: err,
- }
- return
- }
- switch {
- case recursive == true:
- var marker string
- for {
- result, err := a.listObjects(bucket, marker, prefix, "", 1000)
- if err != nil {
- ch <- ObjectStatCh{
- Stat: ObjectStat{},
- Err: err,
- }
- return
- }
- for _, object := range result.Contents {
- ch <- ObjectStatCh{
- Stat: object,
- Err: nil,
- }
- marker = object.Key
- }
- if !result.IsTruncated {
- break
- }
- }
- default:
- var marker string
- for {
- result, err := a.listObjects(bucket, marker, prefix, "/", 1000)
- if err != nil {
- ch <- ObjectStatCh{
- Stat: ObjectStat{},
- Err: err,
- }
- return
- }
- marker = result.NextMarker
- for _, object := range result.Contents {
- ch <- ObjectStatCh{
- Stat: object,
- Err: nil,
- }
- }
- for _, prefix := range result.CommonPrefixes {
- object := ObjectStat{}
- object.Key = prefix.Prefix
- object.Size = 0
- ch <- ObjectStatCh{
- Stat: object,
- Err: nil,
- }
- }
- if !result.IsTruncated {
- break
- }
- }
- }
-}
-
-// ListObjects - (List Objects) - List some objects or all recursively
-//
-// ListObjects is a channel based API implemented to facilitate ease of usage of S3 API ListObjects()
-// by automatically recursively traversing all objects on a given bucket if specified.
-//
-// Your input parameters are just bucket, prefix and recursive.
-//
-// If recursive is set to 'true', this function returns all the objects in the given bucket.
-//
-// eg:-
-// api := client.New(....)
-// for message := range api.ListObjects("mytestbucket", "starthere", true) {
-// fmt.Println(message.Stat)
-// }
-//
-func (a api) ListObjects(bucket string, prefix string, recursive bool) <-chan ObjectStatCh {
- ch := make(chan ObjectStatCh, 1000)
- go a.listObjectsInRoutine(bucket, prefix, recursive, ch)
- return ch
-}
-
-// listBucketsInRoutine is an internal goroutine function called for listing buckets.
-// This function feeds data into the channel.
-func (a api) listBucketsInRoutine(ch chan BucketStatCh) {
- defer close(ch)
- listAllMyBucketListResults, err := a.listBuckets()
- if err != nil {
- ch <- BucketStatCh{
- Stat: BucketStat{},
- Err: err,
- }
- return
- }
- for _, bucket := range listAllMyBucketListResults.Buckets.Bucket {
- ch <- BucketStatCh{
- Stat: bucket,
- Err: nil,
- }
- }
-}
-
-// ListBuckets lists all buckets owned by the authenticated sender of the request
-//
-// NOTE:
-// This call requires explicit authentication, no anonymous
-// requests are allowed for listing buckets
-//
-// eg:-
-// api := client.New(....)
-// for message := range api.ListBuckets() {
-// fmt.Println(message.Stat)
-// }
-//
-func (a api) ListBuckets() <-chan BucketStatCh {
- ch := make(chan BucketStatCh, 100)
- go a.listBucketsInRoutine(ch)
- return ch
-}
-
-func (a api) removeIncompleteUploadInRoutine(bucket, object string, errorCh chan error) {
- defer close(errorCh)
- if err := invalidBucketError(bucket); err != nil {
- errorCh <- err
- return
- }
- if err := invalidObjectError(object); err != nil {
- errorCh <- err
- return
- }
- listMultipartUplResult, err := a.listMultipartUploads(bucket, "", "", object, "", 1000)
- if err != nil {
- errorCh <- err
- return
- }
- for _, multiPartUpload := range listMultipartUplResult.Uploads {
- if object == multiPartUpload.Key {
- err := a.abortMultipartUpload(bucket, multiPartUpload.Key, multiPartUpload.UploadID)
- if err != nil {
- errorCh <- err
- return
- }
- return
- }
- }
- for {
- if !listMultipartUplResult.IsTruncated {
- break
- }
- listMultipartUplResult, err = a.listMultipartUploads(bucket,
- listMultipartUplResult.NextKeyMarker, listMultipartUplResult.NextUploadIDMarker, object, "", 1000)
- if err != nil {
- errorCh <- err
- return
- }
- for _, multiPartUpload := range listMultipartUplResult.Uploads {
- if object == multiPartUpload.Key {
- err := a.abortMultipartUpload(bucket, multiPartUpload.Key, multiPartUpload.UploadID)
- if err != nil {
- errorCh <- err
- return
- }
- return
- }
- }
- }
-}
-
-// RemoveIncompleteUpload - aborts a specific in-progress multipart upload.
-// Requires explicit authentication; no anonymous requests are allowed for the multipart API.
-func (a api) RemoveIncompleteUpload(bucket, object string) <-chan error {
- errorCh := make(chan error)
- go a.removeIncompleteUploadInRoutine(bucket, object, errorCh)
- return errorCh
+// CloudStorageClient - Cloud Storage Client interface.
+type CloudStorageClient interface {
+ // Bucket Read/Write/Stat operations.
+ MakeBucket(bucketName string, cannedACL BucketACL, location string) error
+ BucketExists(bucketName string) error
+ RemoveBucket(bucketName string) error
+ SetBucketACL(bucketName string, cannedACL BucketACL) error
+ GetBucketACL(bucketName string) (BucketACL, error)
+
+ ListBuckets() ([]BucketStat, error)
+ ListObjects(bucket, prefix string, recursive bool, doneCh <-chan struct{}) <-chan ObjectStat
+ ListIncompleteUploads(bucket, prefix string, recursive bool, doneCh <-chan struct{}) <-chan ObjectMultipartStat
+
+ // Object Read/Write/Stat operations.
+ GetObject(bucketName, objectName string) (reader io.ReadCloser, stat ObjectStat, err error)
+ PutObject(bucketName, objectName string, data io.Reader, size int64, contentType string) (n int64, err error)
+ StatObject(bucketName, objectName string) (ObjectStat, error)
+ RemoveObject(bucketName, objectName string) error
+ RemoveIncompleteUpload(bucketName, objectName string) error
+
+ // Object Read/Write for sparse upload.
+ GetObjectPartial(bucketName, objectName string) (reader ReadAtCloser, stat ObjectStat, err error)
+ PutObjectPartial(bucketName, objectName string, data ReadAtCloser, size int64, contentType string) (n int64, err error)
+
+ // File to Object API.
+ FPutObject(bucketName, objectName, filePath, contentType string) (n int64, err error)
+ FGetObject(bucketName, objectName, filePath string) error
+
+ // Presigned operations.
+ PresignedGetObject(bucketName, objectName string, expires time.Duration) (presignedURL string, err error)
+ PresignedPutObject(bucketName, objectName string, expires time.Duration) (presignedURL string, err error)
+ PresignedPostPolicy(*PostPolicy) (formData map[string]string, err error)
+
+ // Application info.
+ SetAppInfo(appName, appVersion string)
+
+ // Set custom transport.
+ SetCustomTransport(customTransport http.RoundTripper)
}
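For orientation, here is a minimal usage sketch of the new channel-less `CloudStorageClient` surface. It follows the four-argument `minio.New(endpoint, accessKeyID, secretAccessKey, insecure)` constructor exercised by the functional test below; the endpoint, credentials, and bucket name are placeholders, and the assumption that `minio.New` returns a `CloudStorageClient`-compatible client is mine, not the diff's.

```go
package main

import (
	"log"

	"github.com/minio/minio-go"
)

func main() {
	// Assumption: minio.New returns a client satisfying CloudStorageClient.
	c, err := minio.New("play.minio.io:9002", "ACCESS-KEY", "SECRET-KEY", false)
	if err != nil {
		log.Fatalln(err)
	}
	// Bucket operations now take the location explicitly.
	if err := c.MakeBucket("my-bucket", "private", "us-east-1"); err != nil {
		log.Fatalln(err)
	}
	// Listing APIs take a done channel instead of returning Err-carrying
	// channel wrappers; closing it ends the listing goroutine early.
	doneCh := make(chan struct{})
	defer close(doneCh)
	for object := range c.ListObjects("my-bucket", "", true, doneCh) {
		log.Println(object.Key)
	}
}
```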
diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/api_functional_test.go b/Godeps/_workspace/src/github.com/minio/minio-go/api_functional_test.go
new file mode 100644
index 000000000..9e1d60cf9
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/minio/minio-go/api_functional_test.go
@@ -0,0 +1,158 @@
+package minio_test
+
+import (
+ "bytes"
+ "io/ioutil"
+ "math/rand"
+ "os"
+ "testing"
+ "time"
+
+ "github.com/minio/minio-go"
+)
+
+const letterBytes = "abcdefghijklmnopqrstuvwxyz01234569"
+const (
+ letterIdxBits = 6 // 6 bits to represent a letter index
+ letterIdxMask = 1<<letterIdxBits - 1 // All 1-bits, as many as letterIdxBits
+ letterIdxMax = 63 / letterIdxBits // # of letter indices fitting in 63 bits
+)
+
+func randString(n int, src rand.Source) string {
+ b := make([]byte, n)
+ // A src.Int63() generates 63 random bits, enough for letterIdxMax characters!
+ for i, cache, remain := n-1, src.Int63(), letterIdxMax; i >= 0; {
+ if remain == 0 {
+ cache, remain = src.Int63(), letterIdxMax
+ }
+ if idx := int(cache & letterIdxMask); idx < len(letterBytes) {
+ b[i] = letterBytes[idx]
+ i--
+ }
+ cache >>= letterIdxBits
+ remain--
+ }
+ return string(b[0:30])
+}
+
+func TestFunctional(t *testing.T) {
+ c, err := minio.New(
+ "play.minio.io:9002",
+ "Q3AM3UQ867SPQQA43P2F",
+ "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG",
+ false,
+ )
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+
+ // Set user agent.
+ c.SetAppInfo("Test", "0.1.0")
+
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()))
+ err = c.MakeBucket(bucketName, "private", "us-east-1")
+ if err != nil {
+ t.Fatal("Error:", err, bucketName)
+ }
+
+ fileName := randString(60, rand.NewSource(time.Now().UnixNano()))
+ file, err := os.Create(fileName)
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+ for i := 0; i < 10; i++ {
+ file.WriteString(fileName)
+ }
+ file.Close()
+
+ err = c.BucketExists(bucketName)
+ if err != nil {
+ t.Fatal("Error:", err, bucketName)
+ }
+
+ err = c.SetBucketACL(bucketName, "public-read-write")
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+
+ acl, err := c.GetBucketACL(bucketName)
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+ if acl != minio.BucketACL("public-read-write") {
+ t.Fatal("Error:", acl)
+ }
+
+ _, err = c.ListBuckets()
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+
+ objectName := bucketName + "Minio"
+ reader := bytes.NewReader([]byte("Hello World!"))
+
+ n, err := c.PutObject(bucketName, objectName, reader, int64(reader.Len()), "")
+ if err != nil {
+ t.Fatal("Error: ", err)
+ }
+ if n != int64(len([]byte("Hello World!"))) {
+ t.Fatal("Error: bad length ", n, reader.Len())
+ }
+
+ newReader, _, err := c.GetObject(bucketName, objectName)
+ if err != nil {
+ t.Fatal("Error: ", err)
+ }
+
+ n, err = c.FPutObject(bucketName, objectName+"-f", fileName, "text/plain")
+ if err != nil {
+ t.Fatal("Error: ", err)
+ }
+ if n != int64(10*len(fileName)) {
+ t.Fatal("Error: bad length ", n, int64(10*len(fileName)))
+ }
+
+ err = c.FGetObject(bucketName, objectName+"-f", fileName+"-f")
+ if err != nil {
+ t.Fatal("Error: ", err)
+ }
+
+ newReadBytes, err := ioutil.ReadAll(newReader)
+ if err != nil {
+ t.Fatal("Error: ", err)
+ }
+
+ if !bytes.Equal(newReadBytes, []byte("Hello World!")) {
+ t.Fatal("Error: bytes invalid.")
+ }
+
+ err = c.RemoveObject(bucketName, objectName)
+ if err != nil {
+ t.Fatal("Error: ", err)
+ }
+ err = c.RemoveObject(bucketName, objectName+"-f")
+ if err != nil {
+ t.Fatal("Error: ", err)
+ }
+
+ err = c.RemoveBucket(bucketName)
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+
+ err = c.RemoveBucket("bucket1")
+ if err == nil {
+ t.Fatal("Error:")
+ }
+
+ if err.Error() != "The specified bucket does not exist." {
+ t.Fatal("Error: ", err)
+ }
+
+ if err = os.Remove(fileName); err != nil {
+ t.Fatal("Error: ", err)
+ }
+ if err = os.Remove(fileName + "-f"); err != nil {
+ t.Fatal("Error: ", err)
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/api_handlers_test.go b/Godeps/_workspace/src/github.com/minio/minio-go/api_handlers_test.go
deleted file mode 100644
index 146f4d6e1..000000000
--- a/Godeps/_workspace/src/github.com/minio/minio-go/api_handlers_test.go
+++ /dev/null
@@ -1,170 +0,0 @@
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package minio_test
-
-// bucketHandler is an http.Handler that verifies bucket responses and validates incoming requests
-import (
- "bytes"
- "io"
- "net/http"
- "strconv"
- "time"
-)
-
-type bucketHandler struct {
- resource string
-}
-
-func (h bucketHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
- switch {
- case r.Method == "GET":
- switch {
- case r.URL.Path == "/":
- response := []byte("bucket2015-05-20T23:05:09.230Zminiominio")
- w.Header().Set("Content-Length", strconv.Itoa(len(response)))
- w.Write(response)
- case r.URL.Path == "/bucket":
- _, ok := r.URL.Query()["acl"]
- if ok {
- response := []byte("75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06aCustomersName@amazon.com75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06aCustomersName@amazon.comFULL_CONTROL")
- w.Header().Set("Content-Length", strconv.Itoa(len(response)))
- w.Write(response)
- return
- }
- fallthrough
- case r.URL.Path == "/bucket":
- response := []byte("\"259d04a13802ae09c7e41be50ccc6baa\"object2015-05-21T18:24:21.097Z22061miniominioSTANDARDfalse1000testbucket")
- w.Header().Set("Content-Length", strconv.Itoa(len(response)))
- w.Write(response)
- }
- case r.Method == "PUT":
- switch {
- case r.URL.Path == h.resource:
- _, ok := r.URL.Query()["acl"]
- if ok {
- switch r.Header.Get("x-amz-acl") {
- case "public-read-write":
- fallthrough
- case "public-read":
- fallthrough
- case "private":
- fallthrough
- case "authenticated-read":
- w.WriteHeader(http.StatusOK)
- return
- default:
- w.WriteHeader(http.StatusNotImplemented)
- return
- }
- }
- w.WriteHeader(http.StatusOK)
- default:
- w.WriteHeader(http.StatusBadRequest)
- }
- case r.Method == "HEAD":
- switch {
- case r.URL.Path == h.resource:
- w.WriteHeader(http.StatusOK)
- default:
- w.WriteHeader(http.StatusForbidden)
- }
- case r.Method == "DELETE":
- switch {
- case r.URL.Path != h.resource:
- w.WriteHeader(http.StatusNotFound)
- default:
- h.resource = ""
- w.WriteHeader(http.StatusNoContent)
- }
- }
-}
-
-// objectHandler is an http.Handler that verifies object responses and validates incoming requests
-type objectHandler struct {
- resource string
- data []byte
-}
-
-func (h objectHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
- switch {
- case r.Method == "PUT":
- length, err := strconv.Atoi(r.Header.Get("Content-Length"))
- if err != nil {
- w.WriteHeader(http.StatusBadRequest)
- return
- }
- var buffer bytes.Buffer
- _, err = io.CopyN(&buffer, r.Body, int64(length))
- if err != nil {
- w.WriteHeader(http.StatusInternalServerError)
- return
- }
- if !bytes.Equal(h.data, buffer.Bytes()) {
- w.WriteHeader(http.StatusInternalServerError)
- return
- }
- w.Header().Set("ETag", "\"9af2f8218b150c351ad802c6f3d66abe\"")
- w.WriteHeader(http.StatusOK)
- case r.Method == "HEAD":
- if r.URL.Path != h.resource {
- w.WriteHeader(http.StatusNotFound)
- return
- }
- w.Header().Set("Content-Length", strconv.Itoa(len(h.data)))
- w.Header().Set("Last-Modified", time.Now().UTC().Format(http.TimeFormat))
- w.Header().Set("ETag", "\"9af2f8218b150c351ad802c6f3d66abe\"")
- w.WriteHeader(http.StatusOK)
- case r.Method == "POST":
- _, ok := r.URL.Query()["uploads"]
- if ok {
- response := []byte("example-bucketobjectXXBsb2FkIElEIGZvciBlbHZpbmcncyVcdS1tb3ZpZS5tMnRzEEEwbG9hZA")
- w.Header().Set("Content-Length", strconv.Itoa(len(response)))
- w.Write(response)
- return
- }
- case r.Method == "GET":
- _, ok := r.URL.Query()["uploadId"]
- if ok {
- uploadID := r.URL.Query().Get("uploadId")
- if uploadID != "XXBsb2FkIElEIGZvciBlbHZpbmcncyVcdS1tb3ZpZS5tMnRzEEEwbG9hZA" {
- w.WriteHeader(http.StatusNotFound)
- return
- }
- response := []byte("example-bucketexample-objectXXBsb2FkIElEIGZvciBlbHZpbmcncyVcdS1tb3ZpZS5tMnRzEEEwbG9hZAarn:aws:iam::111122223333:user/some-user-11116a31-17b5-4fb7-9df5-b288870f11xxumat-user-11116a31-17b5-4fb7-9df5-b288870f11xx75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06asomeNameSTANDARD132true22010-11-10T20:48:34.000Z\"7778aef83f66abc1fa1e8477f296d394\"1048576032010-11-10T20:48:33.000Z\"aaaa18db4cc2f85cedef654fccc4a4x8\"10485760")
- w.Header().Set("Content-Length", strconv.Itoa(len(response)))
- w.Write(response)
- return
- }
- if r.URL.Path != h.resource {
- w.WriteHeader(http.StatusNotFound)
- return
- }
- w.Header().Set("Content-Length", strconv.Itoa(len(h.data)))
- w.Header().Set("Last-Modified", time.Now().UTC().Format(http.TimeFormat))
- w.Header().Set("ETag", "\"9af2f8218b150c351ad802c6f3d66abe\"")
- w.WriteHeader(http.StatusOK)
- io.Copy(w, bytes.NewReader(h.data))
- case r.Method == "DELETE":
- if r.URL.Path != h.resource {
- w.WriteHeader(http.StatusNotFound)
- return
- }
- h.resource = ""
- h.data = nil
- w.WriteHeader(http.StatusNoContent)
- }
-}
diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/api_private_test.go b/Godeps/_workspace/src/github.com/minio/minio-go/api_private_test.go
index 23d1832a2..2bda99f47 100644
--- a/Godeps/_workspace/src/github.com/minio/minio-go/api_private_test.go
+++ b/Godeps/_workspace/src/github.com/minio/minio-go/api_private_test.go
@@ -17,25 +17,25 @@
package minio
import (
- "strings"
+ "net/url"
"testing"
)
func TestSignature(t *testing.T) {
- conf := new(Config)
- if !conf.Signature.isLatest() {
- t.Fatalf("Error")
+ clnt := Client{}
+ if !clnt.signature.isV4() {
+ t.Fatal("Error")
}
- conf.Signature = SignatureV2
- if !conf.Signature.isV2() {
- t.Fatalf("Error")
+ clnt.signature = SignatureV2
+ if !clnt.signature.isV2() {
+ t.Fatal("Error")
}
- if conf.Signature.isV4() {
- t.Fatalf("Error")
+ if clnt.signature.isV4() {
+ t.Fatal("Error")
}
- conf.Signature = SignatureV4
- if !conf.Signature.isV4() {
- t.Fatalf("Error")
+ clnt.signature = SignatureV4
+ if !clnt.signature.isV4() {
+ t.Fatal("Error")
}
}
@@ -54,36 +54,17 @@ func TestACLTypes(t *testing.T) {
}
}
-func TestUserAgent(t *testing.T) {
- conf := new(Config)
- conf.SetUserAgent("minio", "1.0", "amd64")
- if !strings.Contains(conf.userAgent, "minio") {
- t.Fatalf("Error")
- }
-}
-
-func TestGetRegion(t *testing.T) {
- region := getRegion("s3.amazonaws.com")
- if region != "us-east-1" {
- t.Fatalf("Error")
- }
- region = getRegion("localhost:9000")
- if region != "milkyway" {
- t.Fatalf("Error")
- }
-}
-
func TestPartSize(t *testing.T) {
var maxPartSize int64 = 1024 * 1024 * 1024 * 5
- partSize := calculatePartSize(5000000000000000000)
+ partSize := optimalPartSize(5000000000000000000)
if partSize > minimumPartSize {
if partSize > maxPartSize {
- t.Fatal("invalid result, cannot be bigger than maxPartSize 5GB")
+ t.Fatal("invalid result, cannot be bigger than maxPartSize 5GiB")
}
}
- partSize = calculatePartSize(50000000000)
+ partSize = optimalPartSize(50000000000)
if partSize > minimumPartSize {
- t.Fatal("invalid result, cannot be bigger than minimumPartSize 5MB")
+ t.Fatal("invalid result, cannot be bigger than minimumPartSize 5MiB")
}
}
@@ -121,8 +102,148 @@ func TestURLEncoding(t *testing.T) {
}
for _, u := range want {
- if u.encodedName != getURLEncodedPath(u.name) {
- t.Errorf("Error")
+ if u.encodedName != urlEncodePath(u.name) {
+ t.Fatal("Error")
+ }
+ }
+}
+
+func TestGetEndpointURL(t *testing.T) {
+ if _, err := getEndpointURL("s3.amazonaws.com", false); err != nil {
+ t.Fatal("Error:", err)
+ }
+ if _, err := getEndpointURL("192.168.1.1", false); err != nil {
+ t.Fatal("Error:", err)
+ }
+ if _, err := getEndpointURL("13333.123123.-", false); err == nil {
+ t.Fatal("Error")
+ }
+ if _, err := getEndpointURL("s3.aamzza.-", false); err == nil {
+ t.Fatal("Error")
+ }
+ if _, err := getEndpointURL("s3.amazonaws.com:443", false); err == nil {
+ t.Fatal("Error")
+ }
+}
+
+func TestValidIP(t *testing.T) {
+ type validIP struct {
+ ip string
+ valid bool
+ }
+
+ want := []validIP{
+ {
+ ip: "192.168.1.1",
+ valid: true,
+ },
+ {
+ ip: "192.1.8",
+ valid: false,
+ },
+ {
+ ip: "..192.",
+ valid: false,
+ },
+ {
+ ip: "192.168.1.1.1",
+ valid: false,
+ },
+ }
+ for _, w := range want {
+ valid := isValidIP(w.ip)
+ if valid != w.valid {
+ t.Fatal("Error")
+ }
+ }
+}
+
+func TestValidEndpointDomain(t *testing.T) {
+ type validEndpoint struct {
+ endpointDomain string
+ valid bool
+ }
+
+ want := []validEndpoint{
+ {
+ endpointDomain: "s3.amazonaws.com",
+ valid: true,
+ },
+ {
+ endpointDomain: "s3.amazonaws.com_",
+ valid: false,
+ },
+ {
+ endpointDomain: "%$$$",
+ valid: false,
+ },
+ {
+ endpointDomain: "s3.amz.test.com",
+ valid: true,
+ },
+ {
+ endpointDomain: "s3.%%",
+ valid: false,
+ },
+ {
+ endpointDomain: "localhost",
+ valid: true,
+ },
+ {
+ endpointDomain: "-localhost",
+ valid: false,
+ },
+ {
+ endpointDomain: "",
+ valid: false,
+ },
+ {
+ endpointDomain: "\n \t",
+ valid: false,
+ },
+ {
+ endpointDomain: " ",
+ valid: false,
+ },
+ }
+ for _, w := range want {
+ valid := isValidDomain(w.endpointDomain)
+ if valid != w.valid {
+ t.Fatal("Error:", w.endpointDomain)
+ }
+ }
+}
+
+func TestValidEndpointURL(t *testing.T) {
+ type validURL struct {
+ url string
+ valid bool
+ }
+ want := []validURL{
+ {
+ url: "https://s3.amazonaws.com",
+ valid: true,
+ },
+ {
+ url: "https://s3.amazonaws.com/bucket/object",
+ valid: false,
+ },
+ {
+ url: "192.168.1.1",
+ valid: false,
+ },
+ }
+ for _, w := range want {
+ u, err := url.Parse(w.url)
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+ valid := false
+ if err := isValidEndpointURL(u); err == nil {
+ valid = true
+ }
+ if valid != w.valid {
+ t.Fatal("Error")
}
}
}
diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/api_public_test.go b/Godeps/_workspace/src/github.com/minio/minio-go/api_public_test.go
deleted file mode 100644
index 674f5d770..000000000
--- a/Godeps/_workspace/src/github.com/minio/minio-go/api_public_test.go
+++ /dev/null
@@ -1,287 +0,0 @@
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package minio_test
-
-import (
- "bytes"
- "io"
- "net/http/httptest"
- "testing"
- "time"
-
- "github.com/minio/minio-go"
-)
-
-func TestBucketOperations(t *testing.T) {
- bucket := bucketHandler(bucketHandler{
- resource: "/bucket",
- })
- server := httptest.NewServer(bucket)
- defer server.Close()
-
- a, err := minio.New(minio.Config{Endpoint: server.URL})
- if err != nil {
- t.Fatal("Error")
- }
- err = a.MakeBucket("bucket", "private")
- if err != nil {
- t.Fatal("Error")
- }
-
- err = a.BucketExists("bucket")
- if err != nil {
- t.Fatal("Error")
- }
-
- err = a.BucketExists("bucket1")
- if err == nil {
- t.Fatal("Error")
- }
- if err.Error() != "Access Denied." {
- t.Fatal("Error")
- }
-
- err = a.SetBucketACL("bucket", "public-read-write")
- if err != nil {
- t.Fatal("Error")
- }
-
- acl, err := a.GetBucketACL("bucket")
- if err != nil {
- t.Fatal("Error")
- }
- if acl != minio.BucketACL("private") {
- t.Fatal("Error")
- }
-
- for b := range a.ListBuckets() {
- if b.Err != nil {
- t.Fatal(b.Err.Error())
- }
- if b.Stat.Name != "bucket" {
- t.Fatal("Error")
- }
- }
-
- for o := range a.ListObjects("bucket", "", true) {
- if o.Err != nil {
- t.Fatal(o.Err.Error())
- }
- if o.Stat.Key != "object" {
- t.Fatal("Error")
- }
- }
-
- err = a.RemoveBucket("bucket")
- if err != nil {
- t.Fatal("Error")
- }
-
- err = a.RemoveBucket("bucket1")
- if err == nil {
- t.Fatal("Error")
- }
- if err.Error() != "The specified bucket does not exist." {
- t.Fatal("Error")
- }
-}
-
-func TestBucketOperationsFail(t *testing.T) {
- bucket := bucketHandler(bucketHandler{
- resource: "/bucket",
- })
- server := httptest.NewServer(bucket)
- defer server.Close()
-
- a, err := minio.New(minio.Config{Endpoint: server.URL})
- if err != nil {
- t.Fatal("Error")
- }
- err = a.MakeBucket("bucket$$$", "private")
- if err == nil {
- t.Fatal("Error")
- }
-
- err = a.BucketExists("bucket.")
- if err == nil {
- t.Fatal("Error")
- }
-
- err = a.SetBucketACL("bucket-.", "public-read-write")
- if err == nil {
- t.Fatal("Error")
- }
-
- _, err = a.GetBucketACL("bucket??")
- if err == nil {
- t.Fatal("Error")
- }
-
- for o := range a.ListObjects("bucket??", "", true) {
- if o.Err == nil {
- t.Fatal(o.Err.Error())
- }
- }
-
- err = a.RemoveBucket("bucket??")
- if err == nil {
- t.Fatal("Error")
- }
-
- if err.Error() != "The specified bucket is not valid." {
- t.Fatal("Error")
- }
-}
-
-func TestObjectOperations(t *testing.T) {
- object := objectHandler(objectHandler{
- resource: "/bucket/object",
- data: []byte("Hello, World"),
- })
- server := httptest.NewServer(object)
- defer server.Close()
-
- a, err := minio.New(minio.Config{Endpoint: server.URL})
- if err != nil {
- t.Fatal("Error")
- }
- data := []byte("Hello, World")
- err = a.PutObject("bucket", "object", "", int64(len(data)), bytes.NewReader(data))
- if err != nil {
- t.Fatal("Error")
- }
- metadata, err := a.StatObject("bucket", "object")
- if err != nil {
- t.Fatal("Error")
- }
- if metadata.Key != "object" {
- t.Fatal("Error")
- }
- if metadata.ETag != "9af2f8218b150c351ad802c6f3d66abe" {
- t.Fatal("Error")
- }
-
- reader, metadata, err := a.GetObject("bucket", "object")
- if err != nil {
- t.Fatal("Error")
- }
- if metadata.Key != "object" {
- t.Fatal("Error")
- }
- if metadata.ETag != "9af2f8218b150c351ad802c6f3d66abe" {
- t.Fatal("Error")
- }
-
- var buffer bytes.Buffer
- _, err = io.Copy(&buffer, reader)
- if !bytes.Equal(buffer.Bytes(), data) {
- t.Fatal("Error")
- }
-
- err = a.RemoveObject("bucket", "object")
- if err != nil {
- t.Fatal("Error")
- }
- err = a.RemoveObject("bucket", "object1")
- if err == nil {
- t.Fatal("Error")
- }
- if err.Error() != "The specified key does not exist." {
- t.Fatal("Error")
- }
-}
-
-func TestPresignedURL(t *testing.T) {
- object := objectHandler(objectHandler{
- resource: "/bucket/object",
- data: []byte("Hello, World"),
- })
- server := httptest.NewServer(object)
- defer server.Close()
-
- a, err := minio.New(minio.Config{Endpoint: server.URL})
- if err != nil {
- t.Fatal("Error")
- }
- // should error out for invalid access keys
- _, err = a.PresignedGetObject("bucket", "object", time.Duration(1000)*time.Second)
- if err == nil {
- t.Fatal("Error")
- }
-
- a, err = minio.New(minio.Config{
- Endpoint: server.URL,
- AccessKeyID: "accessKey",
- SecretAccessKey: "secretKey",
- })
- if err != nil {
- t.Fatal("Error")
- }
- url, err := a.PresignedGetObject("bucket", "object", time.Duration(1000)*time.Second)
- if err != nil {
- t.Fatal("Error")
- }
- if url == "" {
- t.Fatal("Error")
- }
- _, err = a.PresignedGetObject("bucket", "object", time.Duration(0)*time.Second)
- if err == nil {
- t.Fatal("Error")
- }
- _, err = a.PresignedGetObject("bucket", "object", time.Duration(604801)*time.Second)
- if err == nil {
- t.Fatal("Error")
- }
-}
-
-func TestErrorResponse(t *testing.T) {
- errorResponse := []byte("AccessDenied
Access Denied/mybucket/myphoto.jpgF19772218238A85AGuWkjyviSiGHizehqpmsD1ndz5NClSP19DOT+s2mv7gXGQ8/X1lhbDGiIJEXpGFD")
- errorReader := bytes.NewReader(errorResponse)
- err := minio.BodyToErrorResponse(errorReader, "application/xml")
- if err == nil {
- t.Fatal("Error")
- }
- if err.Error() != "Access Denied" {
- t.Fatal("Error")
- }
- resp := minio.ToErrorResponse(err)
- // valid all fields
- if resp == nil {
- t.Fatal("Error")
- }
- if resp.Code != "AccessDenied" {
- t.Fatal("Error")
- }
- if resp.RequestID != "F19772218238A85A" {
- t.Fatal("Error")
- }
- if resp.Message != "Access Denied" {
- t.Fatal("Error")
- }
- if resp.Resource != "/mybucket/myphoto.jpg" {
- t.Fatal("Error")
- }
- if resp.HostID != "GuWkjyviSiGHizehqpmsD1ndz5NClSP19DOT+s2mv7gXGQ8/X1lhbDGiIJEXpGFD" {
- t.Fatal("Error")
- }
- if resp.ToXML() == "" {
- t.Fatal("Error")
- }
- if resp.ToJSON() == "" {
- t.Fatal("Error")
- }
-}
diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/appveyor.yml b/Godeps/_workspace/src/github.com/minio/minio-go/appveyor.yml
index 1d140afd9..7f624a459 100644
--- a/Godeps/_workspace/src/github.com/minio/minio-go/appveyor.yml
+++ b/Godeps/_workspace/src/github.com/minio/minio-go/appveyor.yml
@@ -14,9 +14,6 @@ environment:
# scripts that run after cloning repository
install:
- set PATH=%GOPATH%\bin;c:\go\bin;%PATH%
- - rd C:\Go /s /q
- - appveyor DownloadFile https://storage.googleapis.com/golang/go1.5.1.windows-amd64.zip
- - 7z x go1.5.1.windows-amd64.zip -oC:\ >nul
- go version
- go env
- go get -u github.com/golang/lint/golint
diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/bucket-acl.go b/Godeps/_workspace/src/github.com/minio/minio-go/bucket-acl.go
index 5718dbbd3..89c386ca1 100644
--- a/Godeps/_workspace/src/github.com/minio/minio-go/bucket-acl.go
+++ b/Godeps/_workspace/src/github.com/minio/minio-go/bucket-acl.go
@@ -16,10 +16,10 @@
package minio
-// BucketACL - bucket level access control
+// BucketACL - bucket level access control.
type BucketACL string
-// different types of ACL's currently supported for buckets
+// Different types of ACLs currently supported for buckets.
const (
bucketPrivate = BucketACL("private")
bucketReadOnly = BucketACL("public-read")
@@ -27,7 +27,7 @@ const (
bucketAuthenticated = BucketACL("authenticated-read")
)
-// String printer helper
+// Stringify acl.
func (b BucketACL) String() string {
if string(b) == "" {
return "private"
@@ -35,7 +35,7 @@ func (b BucketACL) String() string {
return string(b)
}
-// isValidBucketACL - is provided acl string supported
+// isValidBucketACL - is provided acl string supported.
func (b BucketACL) isValidBucketACL() bool {
switch true {
case b.isPrivate():
@@ -54,22 +54,22 @@ func (b BucketACL) isValidBucketACL() bool {
}
}
-// IsPrivate - is acl Private
+// isPrivate - is acl Private.
func (b BucketACL) isPrivate() bool {
return b == bucketPrivate
}
-// IsPublicRead - is acl PublicRead
+// isPublicRead - is acl PublicRead.
func (b BucketACL) isReadOnly() bool {
return b == bucketReadOnly
}
-// IsPublicReadWrite - is acl PublicReadWrite
+// isPublicReadWrite - is acl PublicReadWrite.
func (b BucketACL) isPublic() bool {
return b == bucketPublic
}
-// IsAuthenticated - is acl AuthenticatedRead
+// isAuthenticated - is acl AuthenticatedRead.
func (b BucketACL) isAuthenticated() bool {
return b == bucketAuthenticated
}
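As a quick illustration of the `String` behavior above, an unset `BucketACL` stringifies to "private". A hypothetical standalone sketch, assuming the vendored import path:

```go
package main

import (
	"fmt"

	"github.com/minio/minio-go"
)

func main() {
	// The zero-value ACL prints as "private", per BucketACL.String above.
	var acl minio.BucketACL
	fmt.Println(acl.String())                            // private
	fmt.Println(minio.BucketACL("public-read").String()) // public-read
}
```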
diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/bucket-cache.go b/Godeps/_workspace/src/github.com/minio/minio-go/bucket-cache.go
new file mode 100644
index 000000000..29fb6aa36
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/minio/minio-go/bucket-cache.go
@@ -0,0 +1,153 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "encoding/hex"
+ "net/http"
+ "net/url"
+ "path/filepath"
+ "sync"
+)
+
+// bucketLocationCache provides a simple mechanism to hold bucket locations in memory.
+type bucketLocationCache struct {
+ // Mutex is used for handling the concurrent
+ // read/write requests for the cache.
+ sync.RWMutex
+
+ // items holds the cached bucket locations.
+ items map[string]string
+}
+
+// newBucketLocationCache provides a new bucket location cache to be used
+// internally with the client object.
+func newBucketLocationCache() *bucketLocationCache {
+ return &bucketLocationCache{
+ items: make(map[string]string),
+ }
+}
+
+// Get returns the value for a given key if it exists.
+func (r *bucketLocationCache) Get(bucketName string) (location string, ok bool) {
+ r.RLock()
+ defer r.RUnlock()
+ location, ok = r.items[bucketName]
+ return
+}
+
+// Set will persist a value to the cache
+func (r *bucketLocationCache) Set(bucketName string, location string) {
+ r.Lock()
+ defer r.Unlock()
+ r.items[bucketName] = location
+}
+
+// Delete deletes a bucket name.
+func (r *bucketLocationCache) Delete(bucketName string) {
+ r.Lock()
+ defer r.Unlock()
+ delete(r.items, bucketName)
+}
+
+// getBucketLocation - gets the location for the bucketName from the location map cache.
+func (c Client) getBucketLocation(bucketName string) (string, error) {
+ // For anonymous requests, default to "us-east-1" and let other calls
+ // move forward.
+ if c.anonymous {
+ return "us-east-1", nil
+ }
+ if location, ok := c.bucketLocCache.Get(bucketName); ok {
+ return location, nil
+ }
+
+ // Initialize a new request.
+ req, err := c.getBucketLocationRequest(bucketName)
+ if err != nil {
+ return "", err
+ }
+
+ // Initiate the request.
+ resp, err := c.httpClient.Do(req)
+ defer closeResponse(resp)
+ if err != nil {
+ return "", err
+ }
+ if resp != nil {
+ if resp.StatusCode != http.StatusOK {
+ return "", HTTPRespToErrorResponse(resp, bucketName, "")
+ }
+ }
+
+ // Extract location.
+ var locationConstraint string
+ err = xmlDecoder(resp.Body, &locationConstraint)
+ if err != nil {
+ return "", err
+ }
+
+ location := locationConstraint
+ // If location is empty, treat it as 'us-east-1'.
+ if location == "" {
+ location = "us-east-1"
+ }
+
+ // location can be 'EU'; convert it to the meaningful 'eu-west-1'.
+ if location == "EU" {
+ location = "eu-west-1"
+ }
+
+ // Save the location into cache.
+ c.bucketLocCache.Set(bucketName, location)
+
+ // Return.
+ return location, nil
+}
+
+// getBucketLocationRequest constructs a new request for getBucketLocation.
+func (c Client) getBucketLocationRequest(bucketName string) (*http.Request, error) {
+ // Set location query.
+ urlValues := make(url.Values)
+ urlValues.Set("location", "")
+
+ // Bucket location requests are always made in path style.
+ targetURL := c.endpointURL
+ targetURL.Path = filepath.Join(bucketName, "")
+ targetURL.RawQuery = urlValues.Encode()
+
+ // get a new HTTP request for the method.
+ req, err := http.NewRequest("GET", targetURL.String(), nil)
+ if err != nil {
+ return nil, err
+ }
+
+ // set UserAgent for the request.
+ c.setUserAgent(req)
+
+ // set sha256 sum for signature calculation only with signature version '4'.
+ if c.signature.isV4() {
+ req.Header.Set("X-Amz-Content-Sha256", hex.EncodeToString(sum256([]byte{})))
+ }
+
+ // Sign the request.
+ if c.signature.isV4() {
+ req = SignV4(*req, c.accessKeyID, c.secretAccessKey, "us-east-1")
+ } else if c.signature.isV2() {
+ req = SignV2(*req, c.accessKeyID, c.secretAccessKey)
+ }
+ return req, nil
+}
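The cache type itself is unexported, so here is a self-contained sketch of the same pattern it uses — an embedded `sync.RWMutex` guarding a `map[string]string`, as in `bucketLocationCache` above. Names and values are illustrative:

```go
package main

import (
	"fmt"
	"sync"
)

// locationCache mirrors the unexported bucketLocationCache:
// an RWMutex-guarded map from bucket name to region.
type locationCache struct {
	sync.RWMutex
	items map[string]string
}

// Get takes a read lock, so concurrent readers do not block each other.
func (c *locationCache) Get(bucket string) (string, bool) {
	c.RLock()
	defer c.RUnlock()
	loc, ok := c.items[bucket]
	return loc, ok
}

// Set takes the write lock to update the map exclusively.
func (c *locationCache) Set(bucket, location string) {
	c.Lock()
	defer c.Unlock()
	c.items[bucket] = location
}

func main() {
	cache := &locationCache{items: make(map[string]string)}
	cache.Set("my-bucket", "eu-west-1")
	if loc, ok := cache.Get("my-bucket"); ok {
		fmt.Println(loc) // eu-west-1
	}
}
```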
diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/chopper.go b/Godeps/_workspace/src/github.com/minio/minio-go/chopper.go
deleted file mode 100644
index 6b2ff9a19..000000000
--- a/Godeps/_workspace/src/github.com/minio/minio-go/chopper.go
+++ /dev/null
@@ -1,136 +0,0 @@
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package minio
-
-import (
- "bytes"
- "crypto/md5"
- "io"
-)
-
-// part - message structure for results from multipart operations
-type part struct {
- MD5Sum []byte
- ReadSeeker io.ReadSeeker
- Err error
- Len int64
- Num int // part number
-}
-
-// skipPart - skipping uploaded parts
-type skipPart struct {
- md5sum []byte
- partNumber int
-}
-
-// chopper reads from io.Reader, partitions the data into chunks of given chunksize, and sends
-// each chunk as io.ReadSeeker to the caller over a channel
-//
-// This method runs until an EOF or error occurs. If an error occurs,
-// the method sends the error over the channel and returns.
-// Before returning, the channel is always closed.
-//
-// Additionally, this function skips the given list of parts if provided.
-func chopper(reader io.Reader, chunkSize int64, skipParts []skipPart) <-chan part {
- ch := make(chan part, 3)
- go chopperInRoutine(reader, chunkSize, skipParts, ch)
- return ch
-}
-
-func chopperInRoutine(reader io.Reader, chunkSize int64, skipParts []skipPart, ch chan part) {
- defer close(ch)
- p := make([]byte, chunkSize)
- n, err := io.ReadFull(reader, p)
- if err == io.EOF || err == io.ErrUnexpectedEOF { // short read, only single part return
- m := md5.Sum(p[0:n])
- ch <- part{
- MD5Sum: m[:],
- ReadSeeker: bytes.NewReader(p[0:n]),
- Err: nil,
- Len: int64(n),
- Num: 1,
- }
- return
- }
- // catastrophic error send error and return
- if err != nil {
- ch <- part{
- ReadSeeker: nil,
- Err: err,
- Num: 0,
- }
- return
- }
- // send the first part
- var num = 1
- md5SumBytes := md5.Sum(p)
- sp := skipPart{
- partNumber: num,
- md5sum: md5SumBytes[:],
- }
- if !isPartNumberUploaded(sp, skipParts) {
- ch <- part{
- MD5Sum: md5SumBytes[:],
- ReadSeeker: bytes.NewReader(p),
- Err: nil,
- Len: int64(n),
- Num: num,
- }
- }
- for err == nil {
- var n int
- p := make([]byte, chunkSize)
- n, err = io.ReadFull(reader, p)
- if err != nil {
- if err != io.EOF && err != io.ErrUnexpectedEOF { // catastrophic error
- ch <- part{
- ReadSeeker: nil,
- Err: err,
- Num: 0,
- }
- return
- }
- }
- num++
- md5SumBytes := md5.Sum(p[0:n])
- sp := skipPart{
- partNumber: num,
- md5sum: md5SumBytes[:],
- }
- if isPartNumberUploaded(sp, skipParts) {
- continue
- }
- ch <- part{
- MD5Sum: md5SumBytes[:],
- ReadSeeker: bytes.NewReader(p[0:n]),
- Err: nil,
- Len: int64(n),
- Num: num,
- }
-
- }
-}
-
-// to verify if partNumber is part of the skip part list
-func isPartNumberUploaded(part skipPart, skipParts []skipPart) bool {
- for _, p := range skipParts {
- if p.partNumber == part.partNumber && bytes.Equal(p.md5sum, part.md5sum) {
- return true
- }
- }
- return false
-}
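The deleted chopper fed the skip-part upload loop in continueObjectUpload earlier in this diff. Its core chunking idiom — io.ReadFull into fixed-size buffers, with io.EOF/io.ErrUnexpectedEOF marking the short final part — can be shown standalone; the 2500-byte reader and 1024-byte chunk size are illustrative:

```go
package main

import (
	"bytes"
	"crypto/md5"
	"fmt"
	"io"
)

func main() {
	reader := bytes.NewReader(make([]byte, 2500))
	chunkSize := 1024
	for num := 1; ; num++ {
		p := make([]byte, chunkSize)
		n, err := io.ReadFull(reader, p)
		if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
			fmt.Println("read error:", err) // catastrophic error
			return
		}
		if n == 0 {
			return // nothing left to read
		}
		fmt.Printf("part %d: %d bytes, md5 %x\n", num, n, md5.Sum(p[0:n]))
		if err == io.EOF || err == io.ErrUnexpectedEOF {
			return // a short read marks the last part
		}
	}
}
```

This prints two full 1024-byte parts followed by a short 452-byte final part, matching how chopper numbered and hashed each chunk.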
diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/common-methods.go b/Godeps/_workspace/src/github.com/minio/minio-go/common-methods.go
new file mode 100644
index 000000000..636e06f6f
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/minio/minio-go/common-methods.go
@@ -0,0 +1,52 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "crypto/hmac"
+ "crypto/md5"
+ "crypto/sha256"
+ "encoding/xml"
+ "io"
+)
+
+// xmlDecoder provides a decoded value from xml.
+func xmlDecoder(body io.Reader, v interface{}) error {
+ d := xml.NewDecoder(body)
+ return d.Decode(v)
+}
+
+// sum256 calculates the sha256 sum for an input byte array.
+func sum256(data []byte) []byte {
+ hash := sha256.New()
+ hash.Write(data)
+ return hash.Sum(nil)
+}
+
+// sumMD5 calculates the md5 sum for an input byte array.
+func sumMD5(data []byte) []byte {
+ hash := md5.New()
+ hash.Write(data)
+ return hash.Sum(nil)
+}
+
+// sumHMAC calculates the hmac for the given key and data byte arrays.
+func sumHMAC(key []byte, data []byte) []byte {
+ hash := hmac.New(sha256.New, key)
+ hash.Write(data)
+ return hash.Sum(nil)
+}
diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/common.go b/Godeps/_workspace/src/github.com/minio/minio-go/common.go
deleted file mode 100644
index 8ac854681..000000000
--- a/Godeps/_workspace/src/github.com/minio/minio-go/common.go
+++ /dev/null
@@ -1,115 +0,0 @@
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package minio
-
-import (
- "crypto/hmac"
- "crypto/sha256"
- "encoding/hex"
- "encoding/json"
- "encoding/xml"
- "io"
- "strings"
- "time"
-)
-
-// decoder provides a unified decoding method interface
-type decoder interface {
- Decode(v interface{}) error
-}
-
-// acceptTypeDecoder provides a decoded value for the given acceptType
-func acceptTypeDecoder(body io.Reader, acceptType string, v interface{}) error {
- var d decoder
- switch {
- case acceptType == "application/xml":
- d = xml.NewDecoder(body)
- case acceptType == "application/json":
- d = json.NewDecoder(body)
- default:
- d = xml.NewDecoder(body)
- }
- return d.Decode(v)
-}
-
-// sum256Reader calculates the sha256 sum for an input read seeker
-func sum256Reader(reader io.ReadSeeker) ([]byte, error) {
- h := sha256.New()
- var err error
-
- start, _ := reader.Seek(0, 1)
- defer reader.Seek(start, 0)
-
- for err == nil {
- length := 0
- byteBuffer := make([]byte, 1024*1024)
- length, err = reader.Read(byteBuffer)
- byteBuffer = byteBuffer[0:length]
- h.Write(byteBuffer)
- }
-
- if err != io.EOF {
- return nil, err
- }
-
- return h.Sum(nil), nil
-}
-
-// sum256 calculates the sha256 sum for an input byte array
-func sum256(data []byte) []byte {
- hash := sha256.New()
- hash.Write(data)
- return hash.Sum(nil)
-}
-
-// sumHMAC calculates the hmac for the given key and data byte arrays
-func sumHMAC(key []byte, data []byte) []byte {
- hash := hmac.New(sha256.New, key)
- hash.Write(data)
- return hash.Sum(nil)
-}
-
-// getSigningKey derives the hmac seed used to calculate the final signature
-func getSigningKey(secret, region string, t time.Time) []byte {
- date := sumHMAC([]byte("AWS4"+secret), []byte(t.Format(yyyymmdd)))
- regionbytes := sumHMAC(date, []byte(region))
- service := sumHMAC(regionbytes, []byte("s3"))
- signingKey := sumHMAC(service, []byte("aws4_request"))
- return signingKey
-}
-
-// getSignature returns the final signature in hexadecimal form
-func getSignature(signingKey []byte, stringToSign string) string {
- return hex.EncodeToString(sumHMAC(signingKey, []byte(stringToSign)))
-}
-
-// getScope generates a string from a specific date, an AWS region, and a service
-func getScope(region string, t time.Time) string {
- scope := strings.Join([]string{
- t.Format(yyyymmdd),
- region,
- "s3",
- "aws4_request",
- }, "/")
- return scope
-}
-
-// getCredential generates a credential string
-func getCredential(accessKeyID, region string, t time.Time) string {
- scope := getScope(region, t)
- return accessKeyID + "/" + scope
-}
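The deleted getSigningKey/getSignature pair implement the AWS Signature V4 key-derivation chain: HMAC the date, region, service, and the literal "aws4_request" in sequence, then HMAC the string-to-sign with the result. A self-contained sketch, assuming yyyymmdd is the Go layout "20060102"; the secret and string-to-sign are placeholders:

```go
package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"time"
)

// sumHMAC matches the helper above: HMAC-SHA256 of data under key.
func sumHMAC(key, data []byte) []byte {
	h := hmac.New(sha256.New, key)
	h.Write(data)
	return h.Sum(nil)
}

func main() {
	secret := "SECRET-KEY" // placeholder
	t := time.Date(2015, 11, 10, 0, 0, 0, 0, time.UTC)
	// The four-step chain from getSigningKey.
	date := sumHMAC([]byte("AWS4"+secret), []byte(t.Format("20060102")))
	region := sumHMAC(date, []byte("us-east-1"))
	service := sumHMAC(region, []byte("s3"))
	signingKey := sumHMAC(service, []byte("aws4_request"))
	// getSignature: hex-encode the HMAC of the string-to-sign.
	fmt.Println(hex.EncodeToString(sumHMAC(signingKey, []byte("string-to-sign"))))
}
```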
diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/constants.go b/Godeps/_workspace/src/github.com/minio/minio-go/constants.go
new file mode 100644
index 000000000..617621298
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/minio/minio-go/constants.go
@@ -0,0 +1,38 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+/// Multipart upload defaults.
+
+// minimumPartSize - minimum part size 5MiB per object after which
+// putObject behaves internally as multipart.
+var minimumPartSize int64 = 1024 * 1024 * 5
+
+// maxParts - maximum parts for a single multipart session.
+var maxParts = int64(10000)
+
+// maxPartSize - maximum part size 5GiB for a single multipart upload operation.
+var maxPartSize int64 = 1024 * 1024 * 1024 * 5
+
+// maxSinglePutObjectSize - maximum size 5GiB of object per PUT operation.
+var maxSinglePutObjectSize = 1024 * 1024 * 1024 * 5
+
+// maxMultipartPutObjectSize - maximum size 5TiB of object for Multipart operation.
+var maxMultipartPutObjectSize = 1024 * 1024 * 1024 * 1024 * 5
+
+// optimalReadAtBufferSize - optimal buffer 5MiB used for reading through ReadAt operation.
+var optimalReadAtBufferSize = 1024 * 1024 * 5
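These limits interact: a multipart upload must fit within maxParts parts, so the part size has to grow with the object size. A quick arithmetic check of the smallest part size that fits the 5TiB multipart ceiling into 10000 parts (the ~524MiB figure is derived, not stated in the source):

```go
package main

import "fmt"

func main() {
	var maxParts int64 = 10000
	var minimumPartSize int64 = 1024 * 1024 * 5
	var maxMultipartPutObjectSize int64 = 1024 * 1024 * 1024 * 1024 * 5

	// Round up so maxParts parts cover the full object.
	partSize := maxMultipartPutObjectSize / maxParts
	if maxMultipartPutObjectSize%maxParts != 0 {
		partSize++
	}
	if partSize < minimumPartSize {
		partSize = minimumPartSize
	}
	fmt.Println(partSize) // 549755814 bytes, roughly 524 MiB per part
}
```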
diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/errors.go b/Godeps/_workspace/src/github.com/minio/minio-go/errors.go
deleted file mode 100644
index b85e36e51..000000000
--- a/Godeps/_workspace/src/github.com/minio/minio-go/errors.go
+++ /dev/null
@@ -1,168 +0,0 @@
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package minio
-
-import (
- "encoding/json"
- "encoding/xml"
- "io"
- "regexp"
- "strings"
- "unicode/utf8"
-)
-
-/* **** SAMPLE ERROR RESPONSE ****
-
-
- AccessDenied
- Access Denied
- /mybucket/myphoto.jpg
- F19772218238A85A
- GuWkjyviSiGHizehqpmsD1ndz5NClSP19DOT+s2mv7gXGQ8/X1lhbDGiIJEXpGFD
-
-*/
-
-// ErrorResponse is the type error returned by some API operations.
-type ErrorResponse struct {
- XMLName xml.Name `xml:"Error" json:"-"`
- Code string
- Message string
- Resource string
- RequestID string `xml:"RequestId"`
- HostID string `xml:"HostId"`
-}
-
-// ToErrorResponse returns parsed ErrorResponse struct, if input is nil or not ErrorResponse return value is nil
-// this fuction is useful when some one wants to dig deeper into the error structures over the network.
-//
-// for example:
-//
-// import s3 "github.com/minio/minio-go"
-// ...
-// ...
-// ..., err := s3.GetObject(...)
-// if err != nil {
-// resp := s3.ToErrorResponse(err)
-// fmt.Println(resp.ToXML())
-// }
-// ...
-// ...
-func ToErrorResponse(err error) *ErrorResponse {
- switch err := err.(type) {
- case ErrorResponse:
- return &err
- default:
- return nil
- }
-}
-
-// ToXML send raw xml marshalled as string
-func (e ErrorResponse) ToXML() string {
- b, err := xml.Marshal(&e)
- if err != nil {
- panic(err)
- }
- return string(b)
-}
-
-// ToJSON send raw json marshalled as string
-func (e ErrorResponse) ToJSON() string {
- b, err := json.Marshal(&e)
- if err != nil {
- panic(err)
- }
- return string(b)
-}
-
-// Error formats HTTP error string
-func (e ErrorResponse) Error() string {
- return e.Message
-}
-
-// BodyToErrorResponse returns a new encoded ErrorResponse structure
-func BodyToErrorResponse(errBody io.Reader, acceptType string) error {
- var errorResponse ErrorResponse
- err := acceptTypeDecoder(errBody, acceptType, &errorResponse)
- if err != nil {
- return err
- }
- return errorResponse
-}
-
-// invalidBucketToError - invalid bucket to errorResponse
-func invalidBucketError(bucket string) error {
- // verify bucket name in accordance with
- // - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html
- isValidBucket := func(bucket string) bool {
- if len(bucket) < 3 || len(bucket) > 63 {
- return false
- }
- if bucket[0] == '.' || bucket[len(bucket)-1] == '.' {
- return false
- }
- if match, _ := regexp.MatchString("\\.\\.", bucket); match == true {
- return false
- }
- // We don't support buckets with '.' in them
- match, _ := regexp.MatchString("^[a-zA-Z][a-zA-Z0-9\\-]+[a-zA-Z0-9]$", bucket)
- return match
- }
-
- if !isValidBucket(strings.TrimSpace(bucket)) {
- // no resource since bucket is empty string
- errorResponse := ErrorResponse{
- Code: "InvalidBucketName",
- Message: "The specified bucket is not valid.",
- RequestID: "minio",
- }
- return errorResponse
- }
- return nil
-}
-
-// invalidObjectError invalid object name to errorResponse
-func invalidObjectError(object string) error {
- if strings.TrimSpace(object) == "" || object == "" {
- // no resource since object name is empty
- errorResponse := ErrorResponse{
- Code: "NoSuchKey",
- Message: "The specified key does not exist.",
- RequestID: "minio",
- }
- return errorResponse
- }
- return nil
-}
-
-// invalidArgumentError invalid argument to errorResponse
-func invalidArgumentError(arg string) error {
- errorResponse := ErrorResponse{
- Code: "InvalidArgument",
- Message: "Invalid Argument.",
- RequestID: "minio",
- }
- if strings.TrimSpace(arg) == "" || arg == "" {
- // no resource since arg is empty string
- return errorResponse
- }
- if !utf8.ValidString(arg) {
- // add resource to reply back with invalid string
- errorResponse.Resource = arg
- return errorResponse
- }
- return nil
-}
diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/bucketexists.go b/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/bucketexists.go
index cb9e3e288..0629d0a2d 100644
--- a/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/bucketexists.go
+++ b/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/bucketexists.go
@@ -25,16 +25,22 @@ import (
)
func main() {
- config := minio.Config{
- Endpoint: "https://play.minio.io:9000",
- }
- s3Client, err := minio.New(config)
+ // Note: my-bucketname is a dummy value, please replace it with the original value.
+
+ // Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access.
+ // This boolean value is the last argument for New().
+
+ // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is
+ // automatically determined based on the Endpoint value.
+ s3Client, err := minio.New("play.minio.io:9002", "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", false)
if err != nil {
log.Fatalln(err)
}
- err = s3Client.BucketExists("mybucket")
+
+ err = s3Client.BucketExists("my-bucketname")
if err != nil {
log.Fatalln(err)
}
+
log.Println("Success")
}
diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/fgetobject.go b/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/fgetobject.go
new file mode 100644
index 000000000..57856a578
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/fgetobject.go
@@ -0,0 +1,44 @@
+// +build ignore
+
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package main
+
+import (
+ "log"
+
+ "github.com/minio/minio-go"
+)
+
+func main() {
+ // Note: my-bucketname, my-objectname and my-filename.csv are dummy values, please replace them with original values.
+
+ // Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access.
+ // This boolean value is the last argument for New().
+
+ // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is
+ // automatically determined based on the Endpoint value.
+ s3Client, err := minio.New("play.minio.io:9002", "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", false)
+ if err != nil {
+ log.Fatalln(err)
+ }
+
+ if err := s3Client.FGetObject("my-bucketname", "my-objectname", "my-filename.csv"); err != nil {
+ log.Fatalln(err)
+ }
+ log.Println("Successfully saved my-filename.csv")
+}
diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/fputobject.go b/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/fputobject.go
new file mode 100644
index 000000000..5f85b5c07
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/fputobject.go
@@ -0,0 +1,44 @@
+// +build ignore
+
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package main
+
+import (
+ "log"
+
+ "github.com/minio/minio-go"
+)
+
+func main() {
+ // Note: my-bucketname, my-objectname and my-filename.csv are dummy values, please replace them with original values.
+
+ // Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access.
+ // This boolean value is the last argument for New().
+
+ // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
+ // determined based on the Endpoint value.
+ s3Client, err := minio.New("play.minio.io:9002", "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", false)
+ if err != nil {
+ log.Fatalln(err)
+ }
+
+ if _, err := s3Client.FPutObject("my-bucketname", "my-objectname", "my-filename.csv", "application/csv"); err != nil {
+ log.Fatalln(err)
+ }
+ log.Println("Successfully uploaded my-filename.csv")
+}
diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/getbucketacl.go b/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/getbucketacl.go
index 5b0cec786..202baa3a3 100644
--- a/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/getbucketacl.go
+++ b/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/getbucketacl.go
@@ -25,14 +25,19 @@ import (
)
func main() {
- config := minio.Config{
- Endpoint: "https://play.minio.io:9000",
- }
- s3Client, err := minio.New(config)
+ // Note: my-bucketname is a dummy value, please replace it with the original value.
+
+ // Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access.
+ // This boolean value is the last argument for New().
+
+ // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is
+ // automatically determined based on the Endpoint value.
+ s3Client, err := minio.New("play.minio.io:9002", "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", false)
if err != nil {
log.Fatalln(err)
}
- acl, err := s3Client.GetBucketACL("mybucket")
+
+ acl, err := s3Client.GetBucketACL("my-bucketname")
if err != nil {
log.Fatalln(err)
}
diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/getobject.go b/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/getobject.go
index 71a6d92a0..041a136c1 100644
--- a/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/getobject.go
+++ b/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/getobject.go
@@ -27,25 +27,30 @@ import (
)
func main() {
- config := minio.Config{
- Endpoint: "https://play.minio.io:9000",
- }
- s3Client, err := minio.New(config)
- if err != nil {
- log.Fatalln(err)
- }
- reader, stat, err := s3Client.GetObject("mybucket", "myobject")
+ // Note: my-bucketname, my-objectname and my-testfile are dummy values, please replace them with original values.
+
+ // Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access.
+ // This boolean value is the last argument for New().
+
+ // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is
+ // automatically determined based on the Endpoint value.
+ s3Client, err := minio.New("play.minio.io:9002", "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", false)
if err != nil {
log.Fatalln(err)
}
- localfile, err := os.Create("testfile")
+ reader, _, err := s3Client.GetObject("my-bucketname", "my-objectname")
+ if err != nil {
+ log.Fatalln(err)
+ }
+
+ localfile, err := os.Create("my-testfile")
if err != nil {
log.Fatalln(err)
}
defer localfile.Close()
- if _, err = io.CopyN(localfile, reader, stat.Size); err != nil {
+ if _, err = io.Copy(localfile, reader); err != nil {
log.Fatalln(err)
}
}
diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/getobjectpartial.go b/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/getobjectpartial.go
new file mode 100644
index 000000000..db65359ca
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/getobjectpartial.go
@@ -0,0 +1,91 @@
+// +build ignore
+
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package main
+
+import (
+ "errors"
+ "io"
+ "log"
+ "os"
+
+ "github.com/minio/minio-go"
+)
+
+func main() {
+ // Note: my-bucketname, my-objectname and my-testfile are dummy values, please replace them with original values.
+
+ // Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access.
+ // This boolean value is the last argument for New().
+
+ // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
+ // determined based on the Endpoint value.
+ s3Client, err := minio.New("play.minio.io:9002", "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", false)
+ if err != nil {
+ log.Fatalln(err)
+ }
+
+ reader, stat, err := s3Client.GetObjectPartial("my-bucketname", "my-objectname")
+ if err != nil {
+ log.Fatalln(err)
+ }
+ defer reader.Close()
+
+ localFile, err := os.OpenFile("my-testfile", os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0600)
+ if err != nil {
+ log.Fatalln(err)
+ }
+ defer localFile.Close()
+
+ st, err := localFile.Stat()
+ if err != nil {
+ log.Fatalln(err)
+ }
+
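+ // Resume from the local file's current size; ReadAt fetches object bytes starting at this offset.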
+ readAtOffset := st.Size()
+ readAtBuffer := make([]byte, 5*1024*1024)
+
+ // Loop and write.
+ for {
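+ // Note: ReadAt may return io.EOF together with a short read, so write
+ // whatever was read before acting on EOF below.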
+ readAtSize, rerr := reader.ReadAt(readAtBuffer, readAtOffset)
+ if rerr != nil {
+ if rerr != io.EOF {
+ log.Fatalln(rerr)
+ }
+ }
+ writeSize, werr := localFile.Write(readAtBuffer[:readAtSize])
+ if werr != nil {
+ log.Fatalln(werr)
+ }
+ if readAtSize != writeSize {
+ log.Fatalln(errors.New("read size does not match write size"))
+ }
+ readAtOffset += int64(writeSize)
+ if rerr == io.EOF {
+ break
+ }
+ }
+
+ // totalWritten size.
+ totalWritten := readAtOffset
+
+ // If found mismatch error out.
+ if totalWritten != stat.Size {
+ log.Fatalln(errors.New("total bytes written does not match object size"))
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/listbuckets.go b/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/listbuckets.go
index 8148ba8a2..b5e505ccc 100644
--- a/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/listbuckets.go
+++ b/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/listbuckets.go
@@ -25,17 +25,21 @@ import (
)
func main() {
- config := minio.Config{
- Endpoint: "https://play.minio.io:9000",
- }
- s3Client, err := minio.New(config)
+ // Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access.
+ // This boolean value is the last argument for New().
+
+ // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is
+ // automatically determined based on the Endpoint value.
+ s3Client, err := minio.New("play.minio.io:9002", "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", false)
if err != nil {
log.Fatalln(err)
}
- for bucket := range s3Client.ListBuckets() {
- if bucket.Err != nil {
- log.Fatalln(bucket.Err)
- }
- log.Println(bucket.Stat)
+
+ buckets, err := s3Client.ListBuckets()
+ if err != nil {
+ log.Fatalln(err)
+ }
+ for _, bucket := range buckets {
+ log.Println(bucket)
}
}
diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/listincompleteuploads.go b/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/listincompleteuploads.go
index f73833aca..a4fcc95e8 100644
--- a/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/listincompleteuploads.go
+++ b/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/listincompleteuploads.go
@@ -19,26 +19,38 @@
package main
import (
+ "fmt"
"log"
"github.com/minio/minio-go"
)
func main() {
- config := minio.Config{
- AccessKeyID: "YOUR-ACCESS-KEY-HERE",
- SecretAccessKey: "YOUR-PASSWORD-HERE",
- Endpoint: "https://play.minio.io:9000",
- }
- s3Client, err := minio.New(config)
+ // Note: my-bucketname and my-prefixname are dummy values, please replace them with original values.
+
+ // Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access.
+ // This boolean value is the last argument for New().
+
+ // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is
+ // automatically determined based on the Endpoint value.
+ s3Client, err := minio.New("play.minio.io:9002", "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", false)
if err != nil {
log.Fatalln(err)
}
- // Recursive
- for multipartObject := range s3Client.ListIncompleteUploads("mybucket", "myobject", true) {
+
+ // Create a done channel to control 'ListIncompleteUploads' go routine.
+ doneCh := make(chan struct{})
+
+ // Indicate to our routine to exit cleanly upon return.
+ defer close(doneCh)
+
+ // List all multipart uploads from a bucket-name with a matching prefix.
+ for multipartObject := range s3Client.ListIncompleteUploads("my-bucketname", "my-prefixname", true, doneCh) {
if multipartObject.Err != nil {
- log.Fatalln(multipartObject.Err)
+ fmt.Println(multipartObject.Err)
+ return
}
- log.Println(multipartObject)
+ fmt.Println(multipartObject)
}
+ return
}
diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/listobjects.go b/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/listobjects.go
index 1908d7224..cd1ad6b7f 100644
--- a/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/listobjects.go
+++ b/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/listobjects.go
@@ -19,23 +19,38 @@
package main
import (
+ "fmt"
"log"
"github.com/minio/minio-go"
)
func main() {
- config := minio.Config{
- Endpoint: "https://play.minio.io:9000",
- }
- s3Client, err := minio.New(config)
+ // Note: my-bucketname and my-prefixname are dummy values, please replace them with original values.
+
+ // Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access.
+ // This boolean value is the last argument for New().
+
+ // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is
+ // automatically determined based on the Endpoint value.
+ s3Client, err := minio.New("play.minio.io:9002", "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", false)
if err != nil {
log.Fatalln(err)
}
- for object := range s3Client.ListObjects("mybucket", "", true) {
+
+ // Create a done channel to control 'ListObjects' go routine.
+ doneCh := make(chan struct{})
+
+ // Indicate to our routine to exit cleanly upon return.
+ defer close(doneCh)
+
+ // List all objects from a bucket-name with a matching prefix.
+ for object := range s3Client.ListObjects("my-bucketname", "my-prefixname", true, doneCh) {
if object.Err != nil {
- log.Fatalln(object.Err)
+ fmt.Println(object.Err)
+ return
}
- log.Println(object.Stat)
+ fmt.Println(object)
}
+ return
}
diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/makebucket.go b/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/makebucket.go
index 1fcfb7151..52bebf1a5 100644
--- a/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/makebucket.go
+++ b/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/makebucket.go
@@ -25,14 +25,19 @@ import (
)
func main() {
- config := minio.Config{
- Endpoint: "https://play.minio.io:9000",
- }
- s3Client, err := minio.New(config)
+ // Note: my-bucketname is a dummy value, please replace it with the original value.
+
+ // Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access.
+ // This boolean value is the last argument for New().
+
+ // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is
+ // automatically determined based on the Endpoint value.
+ s3Client, err := minio.New("play.minio.io:9002", "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", false)
if err != nil {
log.Fatalln(err)
}
- err = s3Client.MakeBucket("mybucket", "")
+
+ err = s3Client.MakeBucket("my-bucketname", minio.BucketACL("private"), "us-east-1")
if err != nil {
log.Fatalln(err)
}
diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/getpartialobject.go b/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/presignedgetobject.go
similarity index 56%
rename from Godeps/_workspace/src/github.com/minio/minio-go/examples/play/getpartialobject.go
rename to Godeps/_workspace/src/github.com/minio/minio-go/examples/play/presignedgetobject.go
index b4e2c54b4..2ba878a97 100644
--- a/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/getpartialobject.go
+++ b/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/presignedgetobject.go
@@ -19,33 +19,28 @@
package main
import (
- "io"
"log"
- "os"
+ "time"
"github.com/minio/minio-go"
)
func main() {
- config := minio.Config{
- Endpoint: "https://play.minio.io:9000",
- }
- s3Client, err := minio.New(config)
- if err != nil {
- log.Fatalln(err)
- }
- reader, stat, err := s3Client.GetPartialObject("mybucket", "myobject", 0, 10)
+ // Note: my-bucketname and my-objectname are dummy values, please replace them with original values.
+
+ // Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access.
+ // This boolean value is the last argument for New().
+
+ // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is
+ // automatically determined based on the Endpoint value.
+ s3Client, err := minio.New("play.minio.io:9002", "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", false)
if err != nil {
log.Fatalln(err)
}
- localfile, err := os.Create("testfile")
+ presignedURL, err := s3Client.PresignedGetObject("my-bucketname", "my-objectname", time.Duration(1000)*time.Second)
if err != nil {
log.Fatalln(err)
}
- defer localfile.Close()
-
- if _, err = io.CopyN(localfile, reader, stat.Size); err != nil {
- log.Fatalln(err)
- }
+ log.Println(presignedURL)
}
diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/presignedpostpolicy.go b/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/presignedpostpolicy.go
new file mode 100644
index 000000000..65fa66ddf
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/presignedpostpolicy.go
@@ -0,0 +1,56 @@
+// +build ignore
+
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package main
+
+import (
+ "fmt"
+ "log"
+ "time"
+
+ "github.com/minio/minio-go"
+)
+
+func main() {
+ // Note: my-bucketname and my-objectname are dummy values, please replace them with original values.
+
+ // Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access.
+ // This boolean value is the last argument for New().
+
+ // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is
+ // automatically determined based on the Endpoint value.
+ s3Client, err := minio.New("play.minio.io:9002", "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", false)
+ if err != nil {
+ log.Fatalln(err)
+ }
+
+ policy := minio.NewPostPolicy()
+ policy.SetBucket("my-bucketname")
+ policy.SetKey("my-objectname")
+ policy.SetExpires(time.Now().UTC().AddDate(0, 0, 10)) // expires in 10 days
+ m, err := s3Client.PresignedPostPolicy(policy)
+ if err != nil {
+ log.Fatalln(err)
+ }
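+ // Print a ready-to-run curl command that carries the signed POST policy form fields.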
+ fmt.Printf("curl ")
+ for k, v := range m {
+ fmt.Printf("-F %s=%s ", k, v)
+ }
+ fmt.Printf("-F file=@/etc/bashrc ")
+ fmt.Printf("https://play.minio.io:9002/my-bucketname\n")
+}
diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/getpartialobject.go b/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/presignedputobject.go
similarity index 56%
rename from Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/getpartialobject.go
rename to Godeps/_workspace/src/github.com/minio/minio-go/examples/play/presignedputobject.go
index 591b4be3a..b55f721f7 100644
--- a/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/getpartialobject.go
+++ b/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/presignedputobject.go
@@ -19,35 +19,28 @@
package main
import (
- "io"
"log"
- "os"
+ "time"
"github.com/minio/minio-go"
)
func main() {
- config := minio.Config{
- AccessKeyID: "YOUR-ACCESS-KEY-HERE",
- SecretAccessKey: "YOUR-PASSWORD-HERE",
- Endpoint: "https://s3.amazonaws.com",
- }
- s3Client, err := minio.New(config)
- if err != nil {
- log.Fatalln(err)
- }
- reader, stat, err := s3Client.GetPartialObject("mybucket", "myobject", 0, 10)
+ // Note: my-bucketname and my-objectname are dummy values, please replace them with original values.
+
+ // Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access.
+ // This boolean value is the last argument for New().
+
+ // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is
+ // automatically determined based on the Endpoint value.
+ s3Client, err := minio.New("play.minio.io:9002", "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", false)
if err != nil {
log.Fatalln(err)
}
- localfile, err := os.Create("testfile")
+ presignedURL, err := s3Client.PresignedPutObject("my-bucketname", "my-objectname", time.Duration(1000)*time.Second)
if err != nil {
log.Fatalln(err)
}
- defer localfile.Close()
-
- if _, err = io.CopyN(localfile, reader, stat.Size); err != nil {
- log.Fatalln(err)
- }
+ log.Println(presignedURL)
}
diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/putobject.go b/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/putobject.go
index 5cf057286..d7efb7b43 100644
--- a/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/putobject.go
+++ b/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/putobject.go
@@ -26,27 +26,28 @@ import (
)
func main() {
- config := minio.Config{
- Endpoint: "https://play.minio.io:9000",
- }
- s3Client, err := minio.New(config)
+ // Note: my-bucketname, my-objectname and my-testfile are dummy values, please replace them with original values.
+
+ // Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access.
+ // This boolean value is the last argument for New().
+
+ // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is
+ // automatically determined based on the Endpoint value.
+ s3Client, err := minio.New("play.minio.io:9002", "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", false)
if err != nil {
log.Fatalln(err)
}
- object, err := os.Open("testfile")
+
+ object, err := os.Open("my-testfile")
if err != nil {
log.Fatalln(err)
}
defer object.Close()
- objectInfo, err := object.Stat()
- if err != nil {
- object.Close()
- log.Fatalln(err)
- }
- err = s3Client.PutObject("mybucket", "myobject", "application/octet-stream", objectInfo.Size(), object)
+ st, err := object.Stat()
+ if err != nil {
+ log.Fatalln(err)
+ }
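+ // Note: uploads larger than 5MiB are handled internally as multipart (see constants.go).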
+ n, err := s3Client.PutObject("my-bucketname", "my-objectname", object, st.Size(), "application/octet-stream")
if err != nil {
log.Fatalln(err)
}
-
+ log.Println("Uploaded", "my-objectname", " of size: ", n, "Successfully.")
}
diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/putobjectpartial.go b/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/putobjectpartial.go
new file mode 100644
index 000000000..aff67f8e9
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/putobjectpartial.go
@@ -0,0 +1,56 @@
+// +build ignore
+
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package main
+
+import (
+ "log"
+ "os"
+
+ "github.com/minio/minio-go"
+)
+
+func main() {
+ // Note: my-bucketname, my-objectname and my-testfile are dummy values, please replace them with original values.
+
+ // Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access.
+ // This boolean value is the last argument for New().
+
+ // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
+ // determined based on the Endpoint value.
+ s3Client, err := minio.New("play.minio.io:9002", "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", false)
+ if err != nil {
+ log.Fatalln(err)
+ }
+
+ localFile, err := os.Open("testfile")
+ if err != nil {
+ log.Fatalln(err)
+ }
+
+ st, err := localFile.Stat()
+ if err != nil {
+ log.Fatalln(err)
+ }
+ defer localFile.Close()
+
+ _, err = s3Client.PutObjectPartial("my-bucketname", "my-objectname", localFile, st.Size(), "text/plain")
+ if err != nil {
+ log.Fatalln(err)
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/removebucket.go b/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/removebucket.go
index 6004c90e2..1d2d03ba3 100644
--- a/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/removebucket.go
+++ b/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/removebucket.go
@@ -25,16 +25,19 @@ import (
)
func main() {
- config := minio.Config{
- AccessKeyID: "YOUR-ACCESS-KEY-HERE",
- SecretAccessKey: "YOUR-PASSWORD-HERE",
- Endpoint: "https://play.minio.io:9000",
- }
- s3Client, err := minio.New(config)
+ // Note: my-bucketname is a dummy value, please replace it with the original value.
+
+ // Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access.
+ // This boolean value is the last argument for New().
+
+ // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is
+ // automatically determined based on the Endpoint value.
+ s3Client, err := minio.New("play.minio.io:9002", "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", false)
if err != nil {
log.Fatalln(err)
}
- err = s3Client.RemoveBucket("mybucket")
+ // This operation will only work if your bucket is empty.
+ err = s3Client.RemoveBucket("my-bucketname")
if err != nil {
log.Fatalln(err)
}
diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/removeincompleteupload.go b/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/removeincompleteupload.go
index 4d5b49c1c..458a4c450 100644
--- a/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/removeincompleteupload.go
+++ b/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/removeincompleteupload.go
@@ -25,14 +25,19 @@ import (
)
func main() {
- config := minio.Config{
- Endpoint: "https://play.minio.io:9000",
- }
- s3Client, err := minio.New(config)
+ // Note: my-bucketname and my-objectname are dummy values, please replace them with original values.
+
+ // Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access.
+ // This boolean value is the last argument for New().
+
+ // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is
+ // automatically determined based on the Endpoint value.
+ s3Client, err := minio.New("play.minio.io:9002", "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", false)
if err != nil {
log.Fatalln(err)
}
- for err := range s3Client.RemoveIncompleteUpload("mybucket", "myobject") {
+
+ for err := range s3Client.RemoveIncompleteUpload("my-bucketname", "my-objectname") {
if err != nil {
log.Fatalln(err)
}
diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/removeobject.go b/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/removeobject.go
index 4447b65ab..2301a77de 100644
--- a/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/removeobject.go
+++ b/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/removeobject.go
@@ -25,16 +25,18 @@ import (
)
func main() {
- config := minio.Config{
- AccessKeyID: "YOUR-ACCESS-KEY-HERE",
- SecretAccessKey: "YOUR-PASSWORD-HERE",
- Endpoint: "https://play.minio.io:9000",
- }
- s3Client, err := minio.New(config)
+ // Note: my-bucketname and my-objectname are dummy values, please replace them with original values.
+
+ // Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access.
+ // This boolean value is the last argument for New().
+
+ // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is
+ // automatically determined based on the Endpoint value.
+ s3Client, err := minio.New("play.minio.io:9002", "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", false)
if err != nil {
log.Fatalln(err)
}
- err = s3Client.RemoveObject("mybucket", "myobject")
+ err = s3Client.RemoveObject("my-bucketname", "my-objectname")
if err != nil {
log.Fatalln(err)
}
diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/setbucketacl.go b/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/setbucketacl.go
index f85d1256a..7893018f7 100644
--- a/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/setbucketacl.go
+++ b/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/setbucketacl.go
@@ -25,14 +25,19 @@ import (
)
func main() {
- config := minio.Config{
- Endpoint: "https://play.minio.io:9000",
- }
- s3Client, err := minio.New(config)
+ // Note: my-bucketname is a dummy value, please replace it with the original value.
+
+ // Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access.
+ // This boolean value is the last argument for New().
+
+ // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is
+ // automatically determined based on the Endpoint value.
+ s3Client, err := minio.New("play.minio.io:9002", "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", false)
if err != nil {
log.Fatalln(err)
}
- err = s3Client.SetBucketACL("mybucket", minio.BucketACL("public-read-write"))
+
+ err = s3Client.SetBucketACL("my-bucketname", minio.BucketACL("public-read-write"))
if err != nil {
log.Fatalln(err)
}
diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/statobject.go b/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/statobject.go
index bb3844900..8f24460ab 100644
--- a/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/statobject.go
+++ b/Godeps/_workspace/src/github.com/minio/minio-go/examples/play/statobject.go
@@ -25,14 +25,18 @@ import (
)
func main() {
- config := minio.Config{
- Endpoint: "https://play.minio.io:9000",
- }
- s3Client, err := minio.New(config)
+ // Note: my-bucketname and my-objectname are dummy values, please replace them with original values.
+
+ // Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access.
+ // This boolean value is the last argument for New().
+
+ // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is
+ // automatically determined based on the Endpoint value.
+ s3Client, err := minio.New("play.minio.io:9002", "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", false)
if err != nil {
log.Fatalln(err)
}
- stat, err := s3Client.StatObject("mybucket", "myobject")
+ stat, err := s3Client.StatObject("my-bucketname", "my-objectname")
if err != nil {
log.Fatalln(err)
}
diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/bucketexists.go b/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/bucketexists.go
index 7b0b17f82..59b205dab 100644
--- a/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/bucketexists.go
+++ b/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/bucketexists.go
@@ -25,18 +25,23 @@ import (
)
func main() {
- config := minio.Config{
- AccessKeyID: "YOUR-ACCESS-KEY-HERE",
- SecretAccessKey: "YOUR-PASSWORD-HERE",
- Endpoint: "https://s3.amazonaws.com",
- }
- s3Client, err := minio.New(config)
+ // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY and my-bucketname are
+ // dummy values, please replace them with original values.
+
+ // Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access.
+ // This boolean value is the last argument for New().
+
+ // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
+ // determined based on the Endpoint value.
+ s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", false)
if err != nil {
log.Fatalln(err)
}
- err = s3Client.BucketExists("mybucket")
+
+ err = s3Client.BucketExists("my-bucketname")
if err != nil {
log.Fatalln(err)
}
+
log.Println("Success")
}
diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/fgetobject.go b/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/fgetobject.go
new file mode 100644
index 000000000..a936d5a3a
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/fgetobject.go
@@ -0,0 +1,45 @@
+// +build ignore
+
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package main
+
+import (
+ "log"
+
+ "github.com/minio/minio-go"
+)
+
+func main() {
+ // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname, my-objectname
+ // and my-filename.csv are dummy values, please replace them with original values.
+
+ // Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access.
+ // This boolean value is the last argument for New().
+
+ // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
+ // determined based on the Endpoint value.
+ s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", false)
+ if err != nil {
+ log.Fatalln(err)
+ }
+
+ if err := s3Client.FGetObject("my-bucketname", "my-objectname", "my-filename.csv"); err != nil {
+ log.Fatalln(err)
+ }
+ log.Println("Successfully saved my-filename.csv")
+}
diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/fputobject.go b/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/fputobject.go
new file mode 100644
index 000000000..f295dd778
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/fputobject.go
@@ -0,0 +1,45 @@
+// +build ignore
+
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package main
+
+import (
+ "log"
+
+ "github.com/minio/minio-go"
+)
+
+func main() {
+ // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname, my-objectname
+ // and my-filename.csv are dummy values, please replace them with original values.
+
+ // Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access.
+ // This boolean value is the last argument for New().
+
+ // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
+ // determined based on the Endpoint value.
+ s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", false)
+ if err != nil {
+ log.Fatalln(err)
+ }
+
+ if _, err := s3Client.FPutObject("my-bucketname", "my-objectname", "my-filename.csv", "application/csv"); err != nil {
+ log.Fatalln(err)
+ }
+ log.Println("Successfully uploaded my-filename.csv")
+}
diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/getbucketacl.go b/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/getbucketacl.go
index c9fbe78c3..24991df0c 100644
--- a/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/getbucketacl.go
+++ b/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/getbucketacl.go
@@ -25,16 +25,20 @@ import (
)
func main() {
- config := minio.Config{
- AccessKeyID: "YOUR-ACCESS-KEY-HERE",
- SecretAccessKey: "YOUR-PASSWORD-HERE",
- Endpoint: "https://s3.amazonaws.com",
- }
- s3Client, err := minio.New(config)
+ // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY and my-bucketname are
+ // dummy values, please replace them with original values.
+
+ // Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access.
+ // This boolean value is the last argument for New().
+
+ // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
+ // determined based on the Endpoint value.
+ s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", false)
if err != nil {
log.Fatalln(err)
}
- acl, err := s3Client.GetBucketACL("mybucket")
+
+ acl, err := s3Client.GetBucketACL("my-bucketname")
if err != nil {
log.Fatalln(err)
}
diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/getobject.go b/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/getobject.go
index d0082d90a..0125491ab 100644
--- a/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/getobject.go
+++ b/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/getobject.go
@@ -27,27 +27,31 @@ import (
)
func main() {
- config := minio.Config{
- AccessKeyID: "YOUR-ACCESS-KEY-HERE",
- SecretAccessKey: "YOUR-PASSWORD-HERE",
- Endpoint: "https://s3.amazonaws.com",
- }
- s3Client, err := minio.New(config)
- if err != nil {
- log.Fatalln(err)
- }
- reader, stat, err := s3Client.GetObject("mybucket", "myobject")
+ // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname, my-objectname and
+ // my-testfile are dummy values, please replace them with original values.
+
+ // Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access.
+ // This boolean value is the last argument for New().
+
+ // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
+ // determined based on the Endpoint value.
+ s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", false)
if err != nil {
log.Fatalln(err)
}
- localfile, err := os.Create("testfile")
+ reader, _, err := s3Client.GetObject("my-bucketname", "my-objectname")
+ if err != nil {
+ log.Fatalln(err)
+ }
+
+ localfile, err := os.Create("my-testfile")
if err != nil {
log.Fatalln(err)
}
defer localfile.Close()
- if _, err = io.CopyN(localfile, reader, stat.Size); err != nil {
+ if _, err = io.Copy(localfile, reader); err != nil {
log.Fatalln(err)
}
}
diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/getobjectpartial.go b/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/getobjectpartial.go
new file mode 100644
index 000000000..2c32c8449
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/getobjectpartial.go
@@ -0,0 +1,92 @@
+// +build ignore
+
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package main
+
+import (
+ "errors"
+ "io"
+ "log"
+ "os"
+
+ "github.com/minio/minio-go"
+)
+
+func main() {
+ // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname, my-objectname and
+ // my-testfile are dummy values, please replace them with original values.
+
+ // Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access.
+ // This boolean value is the last argument for New().
+
+ // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
+ // determined based on the Endpoint value.
+ s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESS-KEY-HERE", "YOUR-SECRET-KEY-HERE", false)
+ if err != nil {
+ log.Fatalln(err)
+ }
+
+ reader, stat, err := s3Client.GetObjectPartial("my-bucketname", "my-objectname")
+ if err != nil {
+ log.Fatalln(err)
+ }
+ defer reader.Close()
+
+ localFile, err := os.OpenFile("my-testfile", os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0600)
+ if err != nil {
+ log.Fatalln(err)
+ }
+ defer localFile.Close()
+
+ st, err := localFile.Stat()
+ if err != nil {
+ log.Fatalln(err)
+ }
+
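+ // Resume from the local file's current size; ReadAt fetches object bytes starting at this offset.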
+ readAtOffset := st.Size()
+ readAtBuffer := make([]byte, 5*1024*1024)
+
+ // Loop and write.
+ for {
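+ // Note: ReadAt may return io.EOF together with a short read, so write
+ // whatever was read before acting on EOF below.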
+ readAtSize, rerr := reader.ReadAt(readAtBuffer, readAtOffset)
+ if rerr != nil {
+ if rerr != io.EOF {
+ log.Fatalln(rerr)
+ }
+ }
+ writeSize, werr := localFile.Write(readAtBuffer[:readAtSize])
+ if werr != nil {
+ log.Fatalln(werr)
+ }
+ if readAtSize != writeSize {
+ log.Fatalln(errors.New("read size does not match write size"))
+ }
+ readAtOffset += int64(writeSize)
+ if rerr == io.EOF {
+ break
+ }
+ }
+
+ // totalWritten size.
+ totalWritten := readAtOffset
+
+ // If found mismatch error out.
+ if totalWritten != stat.Size {
+ log.Fatalln(errors.New("total bytes written does not match object size"))
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/listbuckets.go b/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/listbuckets.go
index 5aff5a1a2..1b29ebbcf 100644
--- a/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/listbuckets.go
+++ b/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/listbuckets.go
@@ -25,19 +25,24 @@ import (
)
func main() {
- config := minio.Config{
- AccessKeyID: "YOUR-ACCESS-KEY-HERE",
- SecretAccessKey: "YOUR-PASSWORD-HERE",
- Endpoint: "https://s3.amazonaws.com",
- }
- s3Client, err := minio.New(config)
+ // Note: YOUR-ACCESSKEYID and YOUR-SECRETACCESSKEY are
+ // dummy values, please replace them with original values.
+
+ // Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access.
+ // This boolean value is the last argument for New().
+
+ // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
+ // determined based on the Endpoint value.
+ s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", false)
if err != nil {
log.Fatalln(err)
}
- for bucket := range s3Client.ListBuckets() {
- if bucket.Err != nil {
- log.Fatalln(bucket.Err)
- }
- log.Println(bucket.Stat)
+
+ buckets, err := s3Client.ListBuckets()
+ if err != nil {
+ log.Fatalln(err)
+ }
+ for _, bucket := range buckets {
+ log.Println(bucket)
}
}
diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/listincompleteuploads.go b/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/listincompleteuploads.go
index 0ceab2b28..93f91d581 100644
--- a/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/listincompleteuploads.go
+++ b/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/listincompleteuploads.go
@@ -19,26 +19,39 @@
package main
import (
+ "fmt"
"log"
"github.com/minio/minio-go"
)
func main() {
- config := minio.Config{
- AccessKeyID: "YOUR-ACCESS-KEY-HERE",
- SecretAccessKey: "YOUR-PASSWORD-HERE",
- Endpoint: "https://s3.amazonaws.com",
- }
- s3Client, err := minio.New(config)
+ // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname and my-prefixname
+ // are dummy values, please replace them with original values.
+
+ // Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access.
+ // This boolean value is the last argument for New().
+
+ // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
+ // determined based on the Endpoint value.
+ s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", false)
if err != nil {
log.Fatalln(err)
}
- // Recursive
- for multipartObject := range s3Client.ListIncompleteUploads("mybucket", "myobject", true) {
+
+ // Create a done channel to control 'ListIncompleteUploads' go routine.
+ doneCh := make(chan struct{})
+
+ // Indicate to our routine to exit cleanly upon return.
+ defer close(doneCh)
+
+ // List all multipart uploads from a bucket-name with a matching prefix.
+ for multipartObject := range s3Client.ListIncompleteUploads("my-bucketname", "my-prefixname", true, doneCh) {
if multipartObject.Err != nil {
- log.Fatalln(multipartObject.Err)
+ fmt.Println(multipartObject.Err)
+ return
}
- log.Println(multipartObject)
+ fmt.Println(multipartObject)
}
+ return
}
diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/listobjects.go b/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/listobjects.go
index a091fbbf4..29b61dc94 100644
--- a/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/listobjects.go
+++ b/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/listobjects.go
@@ -19,25 +19,39 @@
package main
import (
- "log"
+ "fmt"
"github.com/minio/minio-go"
)
func main() {
- config := minio.Config{
- AccessKeyID: "YOUR-ACCESS-KEY-HERE",
- SecretAccessKey: "YOUR-PASSWORD-HERE",
- Endpoint: "https://s3.amazonaws.com",
- }
- s3Client, err := minio.New(config)
+ // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname and my-prefixname
+ // are dummy values, please replace them with original values.
+
+ // Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access.
+ // This boolean value is the last argument for New().
+
+ // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
+ // determined based on the Endpoint value.
+ s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", false)
if err != nil {
- log.Fatalln(err)
+ fmt.Println(err)
+ return
}
- for object := range s3Client.ListObjects("mybucket", "", true) {
+
+ // Create a done channel to control 'ListObjects' go routine.
+ doneCh := make(chan struct{})
+
+ // Indicate to our routine to exit cleanly upon return.
+ defer close(doneCh)
+
+ // List all objects from a bucket-name with a matching prefix.
+ for object := range s3Client.ListObjects("my-bucketname", "my-prefixname", true, doneCh) {
if object.Err != nil {
- log.Fatalln(object.Err)
+ fmt.Println(object.Err)
+ return
}
- log.Println(object.Stat)
+ fmt.Println(object)
}
+ return
}
diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/makebucket.go b/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/makebucket.go
index 5b97ca128..22f9e18f2 100644
--- a/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/makebucket.go
+++ b/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/makebucket.go
@@ -25,16 +25,20 @@ import (
)
func main() {
- config := minio.Config{
- AccessKeyID: "YOUR-ACCESS-KEY-HERE",
- SecretAccessKey: "YOUR-PASSWORD-HERE",
- Endpoint: "https://s3.amazonaws.com",
- }
- s3Client, err := minio.New(config)
+ // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY and my-bucketname are
+ // dummy values, please replace them with original values.
+
+ // Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access.
+ // This boolean value is the last argument for New().
+
+ // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
+ // determined based on the Endpoint value.
+ s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", false)
if err != nil {
log.Fatalln(err)
}
- err = s3Client.MakeBucket("mybucket", "")
+
+ err = s3Client.MakeBucket("my-bucketname", minio.BucketACL("private"), "us-east-1")
if err != nil {
log.Fatalln(err)
}
diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/presignedgetobject.go b/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/presignedgetobject.go
index fc96bb002..08929cdc0 100644
--- a/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/presignedgetobject.go
+++ b/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/presignedgetobject.go
@@ -26,18 +26,22 @@ import (
)
func main() {
- config := minio.Config{
- AccessKeyID: "YOUR-ACCESS-KEY-HERE",
- SecretAccessKey: "YOUR-PASSWORD-HERE",
- Endpoint: "https://s3.amazonaws.com",
- }
- s3Client, err := minio.New(config)
+ // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname and my-objectname
+ // are dummy values, please replace them with original values.
+
+ // Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access.
+ // This boolean value is the last argument for New().
+
+ // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
+ // determined based on the Endpoint value.
+ s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", false)
if err != nil {
log.Fatalln(err)
}
- string, err := s3Client.PresignedGetObject("mybucket", "myobject", time.Duration(1000)*time.Second)
+
+ presignedURL, err := s3Client.PresignedGetObject("my-bucketname", "my-objectname", time.Duration(1000)*time.Second)
if err != nil {
log.Fatalln(err)
}
- log.Println(string)
+ log.Println(presignedURL)
}
diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/presignedpostpolicy.go b/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/presignedpostpolicy.go
index c41cae461..eb73e88e8 100644
--- a/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/presignedpostpolicy.go
+++ b/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/presignedpostpolicy.go
@@ -27,28 +27,31 @@ import (
)
func main() {
- config := minio.Config{
- AccessKeyID: "YOUR-ACCESS-KEY-HERE",
- SecretAccessKey: "YOUR-PASSWORD-HERE",
- Endpoint: "https://s3.amazonaws.com",
- }
- s3Client, err := minio.New(config)
+ // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname and my-objectname
+ // are dummy values, please replace them with original values.
+
+ // Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access.
+ // This boolean value is the last argument for New().
+
+ // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
+ // determined based on the Endpoint value.
+ s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", false)
if err != nil {
log.Fatalln(err)
}
+
policy := minio.NewPostPolicy()
- policy.SetKey("myobject")
- policy.SetBucket("mybucket")
+ policy.SetBucket("my-bucketname")
+ policy.SetKey("my-objectname")
policy.SetExpires(time.Now().UTC().AddDate(0, 0, 10)) // expires in 10 days
m, err := s3Client.PresignedPostPolicy(policy)
if err != nil {
- fmt.Println(err)
- return
+ log.Fatalln(err)
}
fmt.Printf("curl ")
for k, v := range m {
fmt.Printf("-F %s=%s ", k, v)
}
- fmt.Printf("-F file=@/etc/bashrc ")
- fmt.Printf(config.Endpoint + "/mybucket\n")
+ fmt.Printf("-F file=@/etc/bash.bashrc ")
+ fmt.Printf("https://my-bucketname.s3.amazonaws.com\n")
}
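The map returned by PresignedPostPolicy is rendered above as `curl -F` flags; the same upload can be done from Go by sending the fields as multipart/form-data with the file as the final part. A sketch under those assumptions (the URL, file path, and form map are dummy stand-ins for the example's actual output):

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"log"
	"mime/multipart"
	"net/http"
	"os"
)

// postFile mirrors the generated curl command: policy form fields first,
// file part last. postURL and formData come from the example above.
func postFile(postURL, localPath string, formData map[string]string) error {
	var body bytes.Buffer
	writer := multipart.NewWriter(&body)
	for k, v := range formData {
		if err := writer.WriteField(k, v); err != nil {
			return err
		}
	}
	part, err := writer.CreateFormFile("file", localPath)
	if err != nil {
		return err
	}
	f, err := os.Open(localPath)
	if err != nil {
		return err
	}
	defer f.Close()
	if _, err := io.Copy(part, f); err != nil {
		return err
	}
	if err := writer.Close(); err != nil {
		return err
	}
	resp, err := http.Post(postURL, writer.FormDataContentType(), &body)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode >= 300 {
		return fmt.Errorf("upload failed: %s", resp.Status)
	}
	return nil
}

func main() {
	// Dummy values standing in for the example's output.
	m := map[string]string{"key": "my-objectname"}
	if err := postFile("https://my-bucketname.s3.amazonaws.com", "/etc/bash.bashrc", m); err != nil {
		log.Fatalln(err)
	}
}
```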
diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/presignedputobject.go b/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/presignedputobject.go
index 7675cabb8..96d243c7e 100644
--- a/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/presignedputobject.go
+++ b/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/presignedputobject.go
@@ -26,18 +26,22 @@ import (
)
func main() {
- config := minio.Config{
- AccessKeyID: "YOUR-ACCESS-KEY-HERE",
- SecretAccessKey: "YOUR-PASSWORD-HERE",
- Endpoint: "https://s3.amazonaws.com",
- }
- s3Client, err := minio.New(config)
+ // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname and my-objectname
+ // are dummy values; please replace them with your actual values.
+
+ // Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access.
+ // This boolean value is the last argument for New().
+
+ // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
+ // determined based on the Endpoint value.
+ s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", false)
if err != nil {
log.Fatalln(err)
}
- string, err := s3Client.PresignedPutObject("mybucket", "myobject", time.Duration(1000)*time.Second)
+
+ presignedURL, err := s3Client.PresignedPutObject("my-bucketname", "my-objectname", time.Duration(1000)*time.Second)
if err != nil {
log.Fatalln(err)
}
- log.Println(string)
+ log.Println(presignedURL)
}
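A presigned PUT URL works the same way from the consumer's side: any HTTP client can upload to it without credentials. A minimal sketch, with the URL and local file name as placeholders:

```go
package main

import (
	"log"
	"net/http"
	"os"
)

func main() {
	// Placeholder; use the URL printed by the PresignedPutObject example.
	presignedURL := "https://s3.amazonaws.com/my-bucketname/my-objectname?X-Amz-Signature=..."

	f, err := os.Open("my-testfile") // dummy local file
	if err != nil {
		log.Fatalln(err)
	}
	defer f.Close()

	// No Authorization header needed; the signature is in the query string.
	req, err := http.NewRequest("PUT", presignedURL, f)
	if err != nil {
		log.Fatalln(err)
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		log.Fatalln(err)
	}
	defer resp.Body.Close()
	log.Println("upload status:", resp.Status)
}
```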
diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/putobject.go b/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/putobject.go
index b67832b7f..963060487 100644
--- a/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/putobject.go
+++ b/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/putobject.go
@@ -26,29 +26,29 @@ import (
)
func main() {
- config := minio.Config{
- AccessKeyID: "YOUR-ACCESS-KEY-HERE",
- SecretAccessKey: "YOUR-PASSWORD-HERE",
- Endpoint: "https://s3.amazonaws.com",
- }
- s3Client, err := minio.New(config)
+ // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-testfile, my-bucketname and
+ // my-objectname are dummy values; please replace them with your actual values.
+
+ // Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access.
+ // This boolean value is the last argument for New().
+
+ // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
+ // determined based on the Endpoint value.
+ s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", false)
if err != nil {
log.Fatalln(err)
}
- object, err := os.Open("testfile")
+
+ object, err := os.Open("my-testfile")
if err != nil {
log.Fatalln(err)
}
defer object.Close()
- objectInfo, err := object.Stat()
- if err != nil {
- object.Close()
- log.Fatalln(err)
- }
- err = s3Client.PutObject("mybucket", "myobject", "application/octet-stream", objectInfo.Size(), object)
+ st, err := object.Stat()
+ if err != nil {
+ log.Fatalln(err)
+ }
+ n, err := s3Client.PutObject("my-bucketname", "my-objectname", object, st.Size(), "application/octet-stream")
if err != nil {
log.Fatalln(err)
}
-
+ log.Println("Uploaded", "my-objectname", " of size: ", n, "Successfully.")
}
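The reworked PutObject signature shown above takes (bucket, object, reader, size, contentType) and, as the log line suggests, returns the number of bytes uploaded, so any io.Reader with a known size works, not just files. A sketch with an in-memory buffer (credentials and names are dummy values):

```go
package main

import (
	"bytes"
	"log"

	"github.com/minio/minio-go"
)

func main() {
	s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", false)
	if err != nil {
		log.Fatalln(err)
	}

	data := []byte("hello, world")
	// Any io.Reader is accepted as long as its size is known up front.
	n, err := s3Client.PutObject("my-bucketname", "my-objectname", bytes.NewReader(data), int64(len(data)), "text/plain")
	if err != nil {
		log.Fatalln(err)
	}
	log.Println("uploaded", n, "bytes")
}
```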
diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/putobjectpartial.go b/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/putobjectpartial.go
new file mode 100644
index 000000000..e59b2ad4d
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/putobjectpartial.go
@@ -0,0 +1,57 @@
+// +build ignore
+
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package main
+
+import (
+ "log"
+ "os"
+
+ "github.com/minio/minio-go"
+)
+
+func main() {
+ // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname, my-objectname and
+ // my-testfile are dummy values; please replace them with your actual values.
+
+ // Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access.
+ // This boolean value is the last argument for New().
+
+ // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
+ // determined based on the Endpoint value.
+ s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", false)
+ if err != nil {
+ log.Fatalln(err)
+ }
+
+ localFile, err := os.Open("my-testfile")
+ if err != nil {
+ log.Fatalln(err)
+ }
+
+ st, err := localFile.Stat()
+ if err != nil {
+ log.Fatalln(err)
+ }
+ defer localFile.Close()
+
+ _, err = s3Client.PutObjectPartial("my-bucketname", "my-objectname", localFile, st.Size(), "text/plain")
+ if err != nil {
+ log.Fatalln(err)
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/removebucket.go b/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/removebucket.go
index 65f9e16d9..d22d18bea 100644
--- a/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/removebucket.go
+++ b/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/removebucket.go
@@ -25,16 +25,21 @@ import (
)
func main() {
- config := minio.Config{
- AccessKeyID: "YOUR-ACCESS-KEY-HERE",
- SecretAccessKey: "YOUR-PASSWORD-HERE",
- Endpoint: "https://s3.amazonaws.com",
- }
- s3Client, err := minio.New(config)
+ // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY and my-bucketname are
+ // dummy values; please replace them with your actual values.
+
+ // Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access.
+ // This boolean value is the last argument for New().
+
+ // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
+ // determined based on the Endpoint value.
+ s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", false)
if err != nil {
log.Fatalln(err)
}
- err = s3Client.RemoveBucket("mybucket")
+
+ // This operation will only work if your bucket is empty.
+ err = s3Client.RemoveBucket("my-bucketname")
if err != nil {
log.Fatalln(err)
}
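Since RemoveBucket only succeeds on an empty bucket, a caller typically drains the bucket first. A hedged sketch of that pattern; it assumes this vintage's ListObjects(bucket, prefix, recursive) channel API, whose items carry Key and Err fields:

```go
package main

import (
	"log"

	"github.com/minio/minio-go"
)

func main() {
	s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", false)
	if err != nil {
		log.Fatalln(err)
	}

	// Drain the bucket first; RemoveBucket fails on non-empty buckets.
	// ListObjects signature and fields assumed per this release's API.
	for object := range s3Client.ListObjects("my-bucketname", "", true) {
		if object.Err != nil {
			log.Fatalln(object.Err)
		}
		if err := s3Client.RemoveObject("my-bucketname", object.Key); err != nil {
			log.Fatalln(err)
		}
	}
	if err := s3Client.RemoveBucket("my-bucketname"); err != nil {
		log.Fatalln(err)
	}
}
```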
diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/removeincompleteupload.go b/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/removeincompleteupload.go
index cb78304d3..8b7533472 100644
--- a/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/removeincompleteupload.go
+++ b/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/removeincompleteupload.go
@@ -25,16 +25,20 @@ import (
)
func main() {
- config := minio.Config{
- AccessKeyID: "YOUR-ACCESS-KEY-HERE",
- SecretAccessKey: "YOUR-PASSWORD-HERE",
- Endpoint: "https://s3.amazonaws.com",
- }
- s3Client, err := minio.New(config)
+ // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname and my-objectname
+ // are dummy values; please replace them with your actual values.
+
+ // Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access.
+ // This boolean value is the last argument for New().
+
+ // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
+ // determined based on the Endpoint value.
+ s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", false)
if err != nil {
log.Fatalln(err)
}
- for err := range s3Client.RemoveIncompleteUpload("mybucket", "myobject") {
+
+ for err := range s3Client.RemoveIncompleteUpload("my-bucketname", "my-objectname") {
if err != nil {
log.Fatalln(err)
}
diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/removeobject.go b/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/removeobject.go
index 07761ebd9..c1b08458f 100644
--- a/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/removeobject.go
+++ b/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/removeobject.go
@@ -25,16 +25,19 @@ import (
)
func main() {
- config := minio.Config{
- AccessKeyID: "YOUR-ACCESS-KEY-HERE",
- SecretAccessKey: "YOUR-PASSWORD-HERE",
- Endpoint: "https://s3.amazonaws.com",
- }
- s3Client, err := minio.New(config)
+ // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname and my-objectname
+ // are dummy values; please replace them with your actual values.
+
+ // Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access.
+ // This boolean value is the last argument for New().
+
+ // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
+ // determined based on the Endpoint value.
+ s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", false)
if err != nil {
log.Fatalln(err)
}
- err = s3Client.RemoveObject("mybucket", "myobject")
+ err = s3Client.RemoveObject("my-bucketname", "my-objectname")
if err != nil {
log.Fatalln(err)
}
diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/setbucketacl.go b/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/setbucketacl.go
index dfe3af630..59fb10ef7 100644
--- a/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/setbucketacl.go
+++ b/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/setbucketacl.go
@@ -25,16 +25,20 @@ import (
)
func main() {
- config := minio.Config{
- AccessKeyID: "YOUR-ACCESS-KEY-HERE",
- SecretAccessKey: "YOUR-PASSWORD-HERE",
- Endpoint: "https://s3.amazonaws.com",
- }
- s3Client, err := minio.New(config)
+ // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY and my-bucketname
+ // are dummy values; please replace them with your actual values.
+
+ // Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access.
+ // This boolean value is the last argument for New().
+
+ // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
+ // determined based on the Endpoint value.
+ s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", false)
if err != nil {
log.Fatalln(err)
}
- err = s3Client.SetBucketACL("mybucket", minio.BucketACL("public-read-write"))
+
+ err = s3Client.SetBucketACL("my-bucketname", minio.BucketACL("public-read-write"))
if err != nil {
log.Fatalln(err)
}
diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/statobject.go b/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/statobject.go
index 400670f19..1eb6c604f 100644
--- a/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/statobject.go
+++ b/Godeps/_workspace/src/github.com/minio/minio-go/examples/s3/statobject.go
@@ -25,16 +25,19 @@ import (
)
func main() {
- config := minio.Config{
- AccessKeyID: "YOUR-ACCESS-KEY-HERE",
- SecretAccessKey: "YOUR-PASSWORD-HERE",
- Endpoint: "https://s3.amazonaws.com",
- }
- s3Client, err := minio.New(config)
+ // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname and my-objectname
+ // are dummy values; please replace them with your actual values.
+
+ // Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access.
+ // This boolean value is the last argument for New().
+
+ // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
+ // determined based on the Endpoint value.
+ s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", false)
if err != nil {
log.Fatalln(err)
}
- stat, err := s3Client.StatObject("mybucket", "myobject")
+ stat, err := s3Client.StatObject("my-bucketname", "my-objectname")
if err != nil {
log.Fatalln(err)
}
diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/io.go b/Godeps/_workspace/src/github.com/minio/minio-go/io.go
deleted file mode 100644
index 71b4363a8..000000000
--- a/Godeps/_workspace/src/github.com/minio/minio-go/io.go
+++ /dev/null
@@ -1,67 +0,0 @@
-package minio
-
-import "io"
-
-// ReadSeekCloser wraps an io.Reader returning a ReaderSeekerCloser
-func ReadSeekCloser(r io.Reader) ReaderSeekerCloser {
- return ReaderSeekerCloser{r}
-}
-
-// ReaderSeekerCloser represents a reader that can also delegate io.Seeker and
-// io.Closer interfaces to the underlying object if available.
-type ReaderSeekerCloser struct {
- r io.Reader
-}
-
-// Read reads up to len(p) bytes into p. It returns the number of bytes
-// read (0 <= n <= len(p)) and any error encountered. Even if Read
-// returns n < len(p), it may use all of p as scratch space during the call.
-// If some data is available but not len(p) bytes, Read conventionally
-// returns what is available instead of waiting for more.
-//
-// When Read encounters an error or end-of-file condition after
-// successfully reading n > 0 bytes, it returns the number of
-// bytes read. It may return the (non-nil) error from the same call
-// or return the error (and n == 0) from a subsequent call.
-// An instance of this general case is that a Reader returning
-// a non-zero number of bytes at the end of the input stream may
-// return either err == EOF or err == nil. The next Read should
-// return 0, EOF.
-func (r ReaderSeekerCloser) Read(p []byte) (int, error) {
- switch t := r.r.(type) {
- case io.Reader:
- return t.Read(p)
- }
- return 0, nil
-}
-
-// Seek sets the offset for the next Read or Write to offset,
-// interpreted according to whence: 0 means relative to the start of
-// the file, 1 means relative to the current offset, and 2 means
-// relative to the end. Seek returns the new offset relative to the
-// start of the file and an error, if any.
-//
-// Seeking to an offset before the start of the file is an error.
-//
-// If the ReaderSeekerCloser is not an io.Seeker nothing will be done.
-func (r ReaderSeekerCloser) Seek(offset int64, whence int) (int64, error) {
- switch t := r.r.(type) {
- case io.Seeker:
- return t.Seek(offset, whence)
- }
- return int64(0), nil
-}
-
-// Close closes the ReaderSeekerCloser.
-//
-// The behavior of Close after the first call is undefined.
-// Specific implementations may document their own behavior.
-//
-// If the ReaderSeekerCloser is not an io.Closer nothing will be done.
-func (r ReaderSeekerCloser) Close() error {
- switch t := r.r.(type) {
- case io.Closer:
- return t.Close()
- }
- return nil
-}
diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/post-policy.go b/Godeps/_workspace/src/github.com/minio/minio-go/post-policy.go
index a1637545a..2d3082755 100644
--- a/Godeps/_workspace/src/github.com/minio/minio-go/post-policy.go
+++ b/Godeps/_workspace/src/github.com/minio/minio-go/post-policy.go
@@ -8,145 +8,177 @@ import (
"time"
)
-// expirationDateFormat date format for expiration key in json policy
+// expirationDateFormat date format for the expiration key in the JSON policy.
const expirationDateFormat = "2006-01-02T15:04:05.999Z"
-// Policy explanation: http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-HTTPPOSTConstructPolicy.html
-type policy struct {
+// policyCondition explanation: http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-HTTPPOSTConstructPolicy.html
+//
+// Example:
+//
+// policyCondition {
+// matchType: "$eq",
+// key: "$Content-Type",
+// value: "image/png",
+// }
+//
+type policyCondition struct {
matchType string
- key string
+ condition string
value string
}
// PostPolicy provides strict static type conversion and validation for Amazon S3's POST policy JSON string.
type PostPolicy struct {
- expiration time.Time // expiration date and time of the POST policy.
- policies []policy
+ expiration time.Time // expiration date and time of the POST policy.
+ conditions []policyCondition // collection of different policy conditions.
+ // contentLengthRange minimum and maximum allowable size for the uploaded content.
contentLengthRange struct {
- min int
- max int
+ min int64
+ max int64
}
- // Post form data
+ // Post form data.
formData map[string]string
}
-// NewPostPolicy instantiate new post policy
+// NewPostPolicy instantiates a new post policy.
func NewPostPolicy() *PostPolicy {
p := &PostPolicy{}
- p.policies = make([]policy, 0)
+ p.conditions = make([]policyCondition, 0)
p.formData = make(map[string]string)
return p
}
-// SetExpires expiration time
+// SetExpires sets the expiration time.
func (p *PostPolicy) SetExpires(t time.Time) error {
if t.IsZero() {
- return errors.New("time input invalid")
+ return errors.New("No expiry time set.")
}
p.expiration = t
return nil
}
-// SetKey Object name
+// SetKey sets the object name.
func (p *PostPolicy) SetKey(key string) error {
if strings.TrimSpace(key) == "" || key == "" {
- return errors.New("key invalid")
+ return errors.New("Object name is not specified.")
+ }
+ policyCond := policyCondition{
+ matchType: "eq",
+ condition: "$key",
+ value: key,
+ }
+ if err := p.addNewPolicy(policyCond); err != nil {
+ return err
}
- policy := policy{"eq", "$key", key}
- p.policies = append(p.policies, policy)
p.formData["key"] = key
return nil
}
-// SetKeyStartsWith Object name that can start with
+// SetKeyStartsWith sets the prefix that uploaded object names must start with.
func (p *PostPolicy) SetKeyStartsWith(keyStartsWith string) error {
if strings.TrimSpace(keyStartsWith) == "" || keyStartsWith == "" {
- return errors.New("key-starts-with invalid")
+ return errors.New("Object prefix is not specified.")
+ }
+ policyCond := policyCondition{
+ matchType: "starts-with",
+ condition: "$key",
+ value: keyStartsWith,
+ }
+ if err := p.addNewPolicy(policyCond); err != nil {
+ return err
}
- policy := policy{"starts-with", "$key", keyStartsWith}
- p.policies = append(p.policies, policy)
p.formData["key"] = keyStartsWith
return nil
}
-// SetBucket bucket name
-func (p *PostPolicy) SetBucket(bucket string) error {
- if strings.TrimSpace(bucket) == "" || bucket == "" {
- return errors.New("bucket invalid")
+// SetBucket sets the bucket name.
+func (p *PostPolicy) SetBucket(bucketName string) error {
+ if strings.TrimSpace(bucketName) == "" || bucketName == "" {
+ return errors.New("Bucket name is not specified.")
}
- policy := policy{"eq", "$bucket", bucket}
- p.policies = append(p.policies, policy)
- p.formData["bucket"] = bucket
+ policyCond := policyCondition{
+ matchType: "eq",
+ condition: "$bucket",
+ value: bucketName,
+ }
+ if err := p.addNewPolicy(policyCond); err != nil {
+ return err
+ }
+ p.formData["bucket"] = bucketName
return nil
}
-// SetContentType content-type
+// SetContentType sets the content-type.
func (p *PostPolicy) SetContentType(contentType string) error {
if strings.TrimSpace(contentType) == "" || contentType == "" {
- return errors.New("contentType invalid")
+ return errors.New("No content type specified.")
}
- policy := policy{"eq", "$Content-Type", contentType}
- if err := p.addNewPolicy(policy); err != nil {
+ policyCond := policyCondition{
+ matchType: "eq",
+ condition: "$Content-Type",
+ value: contentType,
+ }
+ if err := p.addNewPolicy(policyCond); err != nil {
return err
}
p.formData["Content-Type"] = contentType
return nil
}
-// SetContentLength - set new min and max content legnth condition
-func (p *PostPolicy) SetContentLength(min, max int) error {
+// SetContentLengthRange - set new min and max content length condition.
+func (p *PostPolicy) SetContentLengthRange(min, max int64) error {
if min > max {
- return errors.New("minimum cannot be bigger than maximum")
+ return errors.New("minimum limit is larger than maximum limit")
}
if min < 0 {
- return errors.New("minimum cannot be negative")
+ return errors.New("minimum limit cannot be negative")
}
if max < 0 {
- return errors.New("maximum cannot be negative")
+ return errors.New("maximum limit cannot be negative")
}
p.contentLengthRange.min = min
p.contentLengthRange.max = max
return nil
}
-// addNewPolicy - internal helper to validate adding new policies
-func (p *PostPolicy) addNewPolicy(po policy) error {
- if po.matchType == "" || po.key == "" || po.value == "" {
- return errors.New("policy invalid")
+// addNewPolicy - internal helper to validate adding new policies.
+func (p *PostPolicy) addNewPolicy(policyCond policyCondition) error {
+ if policyCond.matchType == "" || policyCond.condition == "" || policyCond.value == "" {
+ return errors.New("Policy fields empty.")
}
- p.policies = append(p.policies, po)
+ p.conditions = append(p.conditions, policyCond)
return nil
}
-// Stringer interface for printing in pretty manner
+// Stringer interface for printing in a pretty manner.
func (p PostPolicy) String() string {
return string(p.marshalJSON())
}
-// marshalJSON provides Marshalled JSON
+// marshalJSON provides Marshalled JSON.
func (p PostPolicy) marshalJSON() []byte {
expirationStr := `"expiration":"` + p.expiration.Format(expirationDateFormat) + `"`
- var policiesStr string
- policies := []string{}
- for _, po := range p.policies {
- policies = append(policies, fmt.Sprintf("[\"%s\",\"%s\",\"%s\"]", po.matchType, po.key, po.value))
+ var conditionsStr string
+ conditions := []string{}
+ for _, po := range p.conditions {
+ conditions = append(conditions, fmt.Sprintf("[\"%s\",\"%s\",\"%s\"]", po.matchType, po.condition, po.value))
}
if p.contentLengthRange.min != 0 || p.contentLengthRange.max != 0 {
- policies = append(policies, fmt.Sprintf("[\"content-length-range\", %d, %d]",
+ conditions = append(conditions, fmt.Sprintf("[\"content-length-range\", %d, %d]",
p.contentLengthRange.min, p.contentLengthRange.max))
}
- if len(policies) > 0 {
- policiesStr = `"conditions":[` + strings.Join(policies, ",") + "]"
+ if len(conditions) > 0 {
+ conditionsStr = `"conditions":[` + strings.Join(conditions, ",") + "]"
}
retStr := "{"
retStr = retStr + expirationStr + ","
- retStr = retStr + policiesStr
+ retStr = retStr + conditionsStr
retStr = retStr + "}"
return []byte(retStr)
}
-// base64 produces base64 of PostPolicy's Marshalled json
+// base64 produces the base64 encoding of PostPolicy's marshalled JSON.
func (p PostPolicy) base64() string {
return base64.StdEncoding.EncodeToString(p.marshalJSON())
}
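Taken together, the setters above accumulate [matchType, condition, value] triplets that marshalJSON serializes under a "conditions" array alongside the "expiration" key. A sketch that builds a policy and prints the resulting JSON via String(); all names are dummy values:

```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/minio/minio-go"
)

func main() {
	policy := minio.NewPostPolicy()
	if err := policy.SetBucket("my-bucketname"); err != nil {
		log.Fatalln(err)
	}
	if err := policy.SetKey("my-objectname"); err != nil {
		log.Fatalln(err)
	}
	if err := policy.SetExpires(time.Now().UTC().AddDate(0, 0, 10)); err != nil {
		log.Fatalln(err)
	}
	// Restrict uploads to 1 KiB - 1 MiB via the renamed SetContentLengthRange.
	if err := policy.SetContentLengthRange(1024, 1024*1024); err != nil {
		log.Fatalln(err)
	}

	// String() exposes marshalJSON: an "expiration" key plus a
	// "conditions" array of [matchType, condition, value] triplets.
	fmt.Println(policy)
}
```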
diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/request-common.go b/Godeps/_workspace/src/github.com/minio/minio-go/request-common.go
deleted file mode 100644
index c63c16a13..000000000
--- a/Godeps/_workspace/src/github.com/minio/minio-go/request-common.go
+++ /dev/null
@@ -1,283 +0,0 @@
-package minio
-
-import (
- "encoding/hex"
- "io"
- "io/ioutil"
- "net/http"
- "regexp"
- "strings"
- "unicode/utf8"
-)
-
-// operation - rest operation
-type operation struct {
- HTTPServer string
- HTTPMethod string
- HTTPPath string
-}
-
-// request - a http request
-type request struct {
- req *http.Request
- config *Config
- body io.ReadSeeker
- expires int64
-}
-
-// Do - start the request
-func (r *request) Do() (resp *http.Response, err error) {
- if r.config.AccessKeyID != "" && r.config.SecretAccessKey != "" {
- if r.config.Signature.isV2() {
- r.SignV2()
- }
- if r.config.Signature.isV4() || r.config.Signature.isLatest() {
- r.SignV4()
- }
- }
- transport := http.DefaultTransport
- if r.config.Transport != nil {
- transport = r.config.Transport
- }
- // do not use http.Client{}, while it may seem intuitive but the problem seems to be
- // that http.Client{} internally follows redirects and there is no easier way to disable
- // it from outside using a configuration parameter -
- // this auto redirect causes complications in verifying subsequent errors
- //
- // The best is to use RoundTrip() directly, so the request comes back to the caller where
- // we are going to handle such replies. And indeed that is the right thing to do here.
- //
- return transport.RoundTrip(r.req)
-}
-
-// Set - set additional headers if any
-func (r *request) Set(key, value string) {
- r.req.Header.Set(key, value)
-}
-
-// Get - get header values
-func (r *request) Get(key string) string {
- return r.req.Header.Get(key)
-}
-
-func path2BucketAndObject(path string) (bucketName, objectName string) {
- pathSplits := strings.SplitN(path, "?", 2)
- splits := strings.SplitN(pathSplits[0], separator, 3)
- switch len(splits) {
- case 0, 1:
- bucketName = ""
- objectName = ""
- case 2:
- bucketName = splits[1]
- objectName = ""
- case 3:
- bucketName = splits[1]
- objectName = splits[2]
- }
- return bucketName, objectName
-}
-
-// path2Object gives objectName from URL path
-func path2Object(path string) (objectName string) {
- _, objectName = path2BucketAndObject(path)
- return
-}
-
-// path2Bucket gives bucketName from URL path
-func path2Bucket(path string) (bucketName string) {
- bucketName, _ = path2BucketAndObject(path)
- return
-}
-
-// path2Query gives query part from URL path
-func path2Query(path string) (query string) {
- pathSplits := strings.SplitN(path, "?", 2)
- if len(pathSplits) > 1 {
- query = pathSplits[1]
- }
- return
-}
-
-// getURLEncodedPath encode the strings from UTF-8 byte representations to HTML hex escape sequences
-//
-// This is necessary since regular url.Parse() and url.Encode() functions do not support UTF-8
-// non english characters cannot be parsed due to the nature in which url.Encode() is written
-//
-// This function on the other hand is a direct replacement for url.Encode() technique to support
-// pretty much every UTF-8 character.
-func getURLEncodedPath(pathName string) string {
- // if object matches reserved string, no need to encode them
- reservedNames := regexp.MustCompile("^[a-zA-Z0-9-_.~/]+$")
- if reservedNames.MatchString(pathName) {
- return pathName
- }
- var encodedPathname string
- for _, s := range pathName {
- if 'A' <= s && s <= 'Z' || 'a' <= s && s <= 'z' || '0' <= s && s <= '9' { // §2.3 Unreserved characters (mark)
- encodedPathname = encodedPathname + string(s)
- continue
- }
- switch s {
- case '-', '_', '.', '~', '/': // §2.3 Unreserved characters (mark)
- encodedPathname = encodedPathname + string(s)
- continue
- default:
- len := utf8.RuneLen(s)
- if len < 0 {
- // if utf8 cannot convert return the same string as is
- return pathName
- }
- u := make([]byte, len)
- utf8.EncodeRune(u, s)
- for _, r := range u {
- hex := hex.EncodeToString([]byte{r})
- encodedPathname = encodedPathname + "%" + strings.ToUpper(hex)
- }
- }
- }
- return encodedPathname
-}
-
-func (op *operation) getRequestURL(config Config) (url string) {
- // parse URL for the combination of HTTPServer + HTTPPath
- url = op.HTTPServer + separator
- if !config.isVirtualStyle {
- url += path2Bucket(op.HTTPPath)
- }
- objectName := getURLEncodedPath(path2Object(op.HTTPPath))
- queryPath := path2Query(op.HTTPPath)
- if objectName == "" && queryPath != "" {
- url += "?" + queryPath
- return
- }
- if objectName != "" && queryPath == "" {
- if strings.HasSuffix(url, separator) {
- url += objectName
- } else {
- url += separator + objectName
- }
- return
- }
- if objectName != "" && queryPath != "" {
- if strings.HasSuffix(url, separator) {
- url += objectName + "?" + queryPath
- } else {
- url += separator + objectName + "?" + queryPath
- }
- }
- return
-}
-
-func newPresignedRequest(op *operation, config *Config, expires int64) (*request, error) {
- // if no method default to POST
- method := op.HTTPMethod
- if method == "" {
- method = "POST"
- }
-
- u := op.getRequestURL(*config)
-
- // get a new HTTP request, for the requested method
- req, err := http.NewRequest(method, u, nil)
- if err != nil {
- return nil, err
- }
-
- // set UserAgent
- req.Header.Set("User-Agent", config.userAgent)
-
- // set Accept header for response encoding style, if available
- if config.AcceptType != "" {
- req.Header.Set("Accept", config.AcceptType)
- }
-
- // save for subsequent use
- r := new(request)
- r.config = config
- r.expires = expires
- r.req = req
- r.body = nil
-
- return r, nil
-}
-
-// newUnauthenticatedRequest - instantiate a new unauthenticated request
-func newUnauthenticatedRequest(op *operation, config *Config, body io.Reader) (*request, error) {
- // if no method default to POST
- method := op.HTTPMethod
- if method == "" {
- method = "POST"
- }
-
- u := op.getRequestURL(*config)
-
- // get a new HTTP request, for the requested method
- req, err := http.NewRequest(method, u, nil)
- if err != nil {
- return nil, err
- }
-
- // set UserAgent
- req.Header.Set("User-Agent", config.userAgent)
-
- // set Accept header for response encoding style, if available
- if config.AcceptType != "" {
- req.Header.Set("Accept", config.AcceptType)
- }
-
- // add body
- switch {
- case body == nil:
- req.Body = nil
- default:
- req.Body = ioutil.NopCloser(body)
- }
-
- // save for subsequent use
- r := new(request)
- r.req = req
- r.config = config
-
- return r, nil
-}
-
-// newRequest - instantiate a new request
-func newRequest(op *operation, config *Config, body io.ReadSeeker) (*request, error) {
- // if no method default to POST
- method := op.HTTPMethod
- if method == "" {
- method = "POST"
- }
-
- u := op.getRequestURL(*config)
-
- // get a new HTTP request, for the requested method
- req, err := http.NewRequest(method, u, nil)
- if err != nil {
- return nil, err
- }
-
- // set UserAgent
- req.Header.Set("User-Agent", config.userAgent)
-
- // set Accept header for response encoding style, if available
- if config.AcceptType != "" {
- req.Header.Set("Accept", config.AcceptType)
- }
-
- // add body
- switch {
- case body == nil:
- req.Body = nil
- default:
- req.Body = ioutil.NopCloser(body)
- }
-
- // save for subsequent use
- r := new(request)
- r.config = config
- r.req = req
- r.body = body
-
- return r, nil
-}
diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/request-v2.go b/Godeps/_workspace/src/github.com/minio/minio-go/request-signature-v2.go
similarity index 52%
rename from Godeps/_workspace/src/github.com/minio/minio-go/request-v2.go
rename to Godeps/_workspace/src/github.com/minio/minio-go/request-signature-v2.go
index aac4066b6..956b04f23 100644
--- a/Godeps/_workspace/src/github.com/minio/minio-go/request-v2.go
+++ b/Godeps/_workspace/src/github.com/minio/minio-go/request-signature-v2.go
@@ -1,5 +1,5 @@
/*
- * Minio Go Library for Amazon S3 Legacy v2 Signature Compatible Cloud Storage (C) 2015 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -21,7 +21,6 @@ import (
"crypto/hmac"
"crypto/sha1"
"encoding/base64"
- "errors"
"fmt"
"net/http"
"net/url"
@@ -31,45 +30,77 @@ import (
"time"
)
-// https://${S3_BUCKET}.s3.amazonaws.com/${S3_OBJECT}?AWSAccessKeyId=${S3_ACCESS_KEY}&Expires=${TIMESTAMP}&Signature=${SIGNATURE}
-func (r *request) PreSignV2() (string, error) {
- if r.config.AccessKeyID == "" || r.config.SecretAccessKey == "" {
- return "", errors.New("presign requires accesskey and secretkey")
- }
- // Add date if not present
- d := time.Now().UTC()
- if date := r.Get("Date"); date == "" {
- r.Set("Date", d.Format(http.TimeFormat))
- }
- epochExpires := d.Unix() + r.expires
- var path string
- if r.config.isVirtualStyle {
- for k, v := range regions {
- if v == r.config.Region {
- path = "/" + strings.TrimSuffix(r.req.URL.Host, "."+k)
- path += r.req.URL.Path
- path = getURLEncodedPath(path)
- break
- }
- }
- } else {
- path = getURLEncodedPath(r.req.URL.Path)
- }
- signText := fmt.Sprintf("%s\n\n\n%d\n%s", r.req.Method, epochExpires, path)
- hm := hmac.New(sha1.New, []byte(r.config.SecretAccessKey))
- hm.Write([]byte(signText))
+// signature and API related constants.
+const (
+ signV2Algorithm = "AWS"
+)
- query := r.req.URL.Query()
- query.Set("AWSAccessKeyId", r.config.AccessKeyID)
- query.Set("Expires", strconv.FormatInt(epochExpires, 10))
- query.Set("Signature", base64.StdEncoding.EncodeToString(hm.Sum(nil)))
- r.req.URL.RawQuery = query.Encode()
-
- return r.req.URL.String(), nil
+// Encode input URL path to URL encoded path.
+func encodeURL2Path(u *url.URL) (path string) {
+ // Encode URL path.
+ if strings.HasSuffix(u.Host, ".s3.amazonaws.com") {
+ path = "/" + strings.TrimSuffix(u.Host, ".s3.amazonaws.com")
+ path += u.Path
+ path = urlEncodePath(path)
+ return
+ }
+ if strings.HasSuffix(u.Host, ".storage.googleapis.com") {
+ path = "/" + strings.TrimSuffix(u.Host, ".storage.googleapis.com")
+ path += u.Path
+ path = urlEncodePath(path)
+ return
+ }
+ path = urlEncodePath(u.Path)
+ return
}
-func (r *request) PostPresignSignatureV2(policyBase64 string) string {
- hm := hmac.New(sha1.New, []byte(r.config.SecretAccessKey))
+// PreSignV2 - presign the request in the following style.
+// https://${S3_BUCKET}.s3.amazonaws.com/${S3_OBJECT}?AWSAccessKeyId=${S3_ACCESS_KEY}&Expires=${TIMESTAMP}&Signature=${SIGNATURE}
+func PreSignV2(req http.Request, accessKeyID, secretAccessKey string, expires int64) *http.Request {
+ // presign is a noop for anonymous credentials.
+ if accessKeyID == "" || secretAccessKey == "" {
+ return nil
+ }
+ d := time.Now().UTC()
+ // Add date if not present
+ if date := req.Header.Get("Date"); date == "" {
+ req.Header.Set("Date", d.Format(http.TimeFormat))
+ }
+
+ // Get encoded URL path.
+ path := encodeURL2Path(req.URL)
+
+ // Find epoch expires when the request will expire.
+ epochExpires := d.Unix() + expires
+
+ // get string to sign.
+ stringToSign := fmt.Sprintf("%s\n\n\n%d\n%s", req.Method, epochExpires, path)
+ hm := hmac.New(sha1.New, []byte(secretAccessKey))
+ hm.Write([]byte(stringToSign))
+
+ // calculate signature.
+ signature := base64.StdEncoding.EncodeToString(hm.Sum(nil))
+
+ query := req.URL.Query()
+ // Handle specially for Google Cloud Storage.
+ if strings.Contains(req.URL.Host, ".storage.googleapis.com") {
+ query.Set("GoogleAccessId", accessKeyID)
+ } else {
+ query.Set("AWSAccessKeyId", accessKeyID)
+ }
+
+ // Fill in Expires and Signature for presigned query.
+ query.Set("Expires", strconv.FormatInt(epochExpires, 10))
+ query.Set("Signature", signature)
+
+ // Encode query and save.
+ req.URL.RawQuery = query.Encode()
+ return &req
+}
+
+// PostPresignSignatureV2 - presigned signature for PostPolicy requests.
+func PostPresignSignatureV2(policyBase64, secretAccessKey string) string {
+ hm := hmac.New(sha1.New, []byte(secretAccessKey))
hm.Write([]byte(policyBase64))
signature := base64.StdEncoding.EncodeToString(hm.Sum(nil))
return signature
@@ -91,25 +122,32 @@ func (r *request) PostPresignSignatureV2(policyBase64 string) string {
//
// CanonicalizedProtocolHeaders = <described below>
-// SignV2 the request before Do() (version 2.0)
-func (r *request) SignV2() {
- // Add date if not present
- if date := r.Get("Date"); date == "" {
- r.Set("Date", time.Now().UTC().Format(http.TimeFormat))
- }
- // Calculate HMAC for secretAccessKey
- hm := hmac.New(sha1.New, []byte(r.config.SecretAccessKey))
- hm.Write([]byte(r.getStringToSignV2()))
+// SignV2 signs the request before Do() (AWS Signature Version 2).
+func SignV2(req http.Request, accessKeyID, secretAccessKey string) *http.Request {
+ // Initial time.
+ d := time.Now().UTC()
- // prepare auth header
+ // Add date if not present.
+ if date := req.Header.Get("Date"); date == "" {
+ req.Header.Set("Date", d.Format(http.TimeFormat))
+ }
+
+ // Calculate HMAC for secretAccessKey.
+ stringToSign := getStringToSignV2(req)
+ hm := hmac.New(sha1.New, []byte(secretAccessKey))
+ hm.Write([]byte(stringToSign))
+
+ // Prepare auth header.
authHeader := new(bytes.Buffer)
- authHeader.WriteString(fmt.Sprintf("AWS %s:", r.config.AccessKeyID))
+ authHeader.WriteString(fmt.Sprintf("%s %s:", signV2Algorithm, accessKeyID))
encoder := base64.NewEncoder(base64.StdEncoding, authHeader)
encoder.Write(hm.Sum(nil))
encoder.Close()
- // Set Authorization header
- r.req.Header.Set("Authorization", authHeader.String())
+ // Set Authorization header.
+ req.Header.Set("Authorization", authHeader.String())
+
+ return &req
}
// From the Amazon docs:
@@ -120,32 +158,34 @@ func (r *request) SignV2() {
// Date + "\n" +
// CanonicalizedProtocolHeaders +
// CanonicalizedResource;
-func (r *request) getStringToSignV2() string {
+func getStringToSignV2(req http.Request) string {
buf := new(bytes.Buffer)
- // write standard headers
- r.writeDefaultHeaders(buf)
- // write canonicalized protocol headers if any
- r.writeCanonicalizedHeaders(buf)
- // write canonicalized Query resources if any
- r.writeCanonicalizedResource(buf)
+ // write standard headers.
+ writeDefaultHeaders(buf, req)
+ // write canonicalized protocol headers if any.
+ writeCanonicalizedHeaders(buf, req)
+ // write canonicalized Query resources if any.
+ writeCanonicalizedResource(buf, req)
return buf.String()
}
-func (r *request) writeDefaultHeaders(buf *bytes.Buffer) {
- buf.WriteString(r.req.Method)
+// writeDefaultHeaders - write all default necessary headers.
+func writeDefaultHeaders(buf *bytes.Buffer, req http.Request) {
+ buf.WriteString(req.Method)
buf.WriteByte('\n')
- buf.WriteString(r.req.Header.Get("Content-MD5"))
+ buf.WriteString(req.Header.Get("Content-MD5"))
buf.WriteByte('\n')
- buf.WriteString(r.req.Header.Get("Content-Type"))
+ buf.WriteString(req.Header.Get("Content-Type"))
buf.WriteByte('\n')
- buf.WriteString(r.req.Header.Get("Date"))
+ buf.WriteString(req.Header.Get("Date"))
buf.WriteByte('\n')
}
-func (r *request) writeCanonicalizedHeaders(buf *bytes.Buffer) {
+// writeCanonicalizedHeaders - write canonicalized headers.
+func writeCanonicalizedHeaders(buf *bytes.Buffer, req http.Request) {
var protoHeaders []string
vals := make(map[string][]string)
- for k, vv := range r.req.Header {
+ for k, vv := range req.Header {
// all the AMZ and GOOG headers should be lowercase
lk := strings.ToLower(k)
if strings.HasPrefix(lk, "x-amz") {
@@ -205,25 +245,18 @@ var resourceList = []string{
// CanonicalizedResource = [ "/" + Bucket ] +
// <HTTP-Request-URI, from the protocol name up to the query string> +
// [ sub-resource, if present. For example "?acl", "?location", "?logging", or "?torrent"];
-func (r *request) writeCanonicalizedResource(buf *bytes.Buffer) error {
- requestURL := r.req.URL
- if r.config.isVirtualStyle {
- for k, v := range regions {
- if v == r.config.Region {
- path := "/" + strings.TrimSuffix(requestURL.Host, "."+k)
- path += requestURL.Path
- buf.WriteString(getURLEncodedPath(path))
- break
- }
- }
- } else {
- buf.WriteString(getURLEncodedPath(requestURL.Path))
- }
+func writeCanonicalizedResource(buf *bytes.Buffer, req http.Request) error {
+ requestURL := req.URL
+
+ // Get encoded URL path.
+ path := encodeURL2Path(requestURL)
+ buf.WriteString(path)
+
sort.Strings(resourceList)
if requestURL.RawQuery != "" {
var n int
vals, _ := url.ParseQuery(requestURL.RawQuery)
- // loop through all the supported resourceList
+ // loop through all the supported resourceList.
for _, resource := range resourceList {
if vv, ok := vals[resource]; ok && len(vv) > 0 {
n++
diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/request-signature-v4.go b/Godeps/_workspace/src/github.com/minio/minio-go/request-signature-v4.go
new file mode 100644
index 000000000..515d8ab18
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/minio/minio-go/request-signature-v4.go
@@ -0,0 +1,282 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "bytes"
+ "encoding/hex"
+ "net/http"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+)
+
+// signature and API related constants.
+const (
+ signV4Algorithm = "AWS4-HMAC-SHA256"
+ iso8601DateFormat = "20060102T150405Z"
+ yyyymmdd = "20060102"
+)
+
+///
+/// Excerpts from @lsegal - https://github.com/aws/aws-sdk-js/issues/659#issuecomment-120477258.
+///
+/// User-Agent:
+///
+/// This is ignored from signing because signing this causes problems with generating pre-signed URLs
+/// (that are executed by other agents) or when customers pass requests through proxies, which may
+/// modify the user-agent.
+///
+/// Content-Length:
+///
+/// This is ignored from signing because generating a pre-signed URL should not provide a content-length
+/// constraint, specifically when vending a S3 pre-signed PUT URL. The corollary to this is that when
+/// sending regular requests (non-pre-signed), the signature contains a checksum of the body, which
+/// implicitly validates the payload length (since changing the number of bytes would change the checksum)
+/// and therefore this header is not valuable in the signature.
+///
+/// Content-Type:
+///
+/// Signing this header causes quite a number of problems in browser environments, where browsers
+/// like to modify and normalize the content-type header in different ways. There is more information
+/// on this in https://github.com/aws/aws-sdk-js/issues/244. Avoiding this field simplifies logic
+/// and reduces the possibility of future bugs
+///
+/// Authorization:
+///
+/// Is skipped for obvious reasons
+///
+var ignoredHeaders = map[string]bool{
+ "Authorization": true,
+ "Content-Type": true,
+ "Content-Length": true,
+ "User-Agent": true,
+}
+
+// getSigningKey derives the HMAC signing key used to calculate the final signature.
+func getSigningKey(secret, loc string, t time.Time) []byte {
+ date := sumHMAC([]byte("AWS4"+secret), []byte(t.Format(yyyymmdd)))
+ location := sumHMAC(date, []byte(loc))
+ service := sumHMAC(location, []byte("s3"))
+ signingKey := sumHMAC(service, []byte("aws4_request"))
+ return signingKey
+}
+
+// getSignature returns the final signature in hexadecimal form.
+func getSignature(signingKey []byte, stringToSign string) string {
+ return hex.EncodeToString(sumHMAC(signingKey, []byte(stringToSign)))
+}
+
+// getScope generates a scope string from the date, an AWS region, and the service.
+func getScope(location string, t time.Time) string {
+ scope := strings.Join([]string{
+ t.Format(yyyymmdd),
+ location,
+ "s3",
+ "aws4_request",
+ }, "/")
+ return scope
+}
+
+// getCredential generates a credential string.
+func getCredential(accessKeyID, location string, t time.Time) string {
+ scope := getScope(location, t)
+ return accessKeyID + "/" + scope
+}
+
+// getHashedPayload gets the hexadecimal value of the SHA256 hash of the request payload.
+func getHashedPayload(req http.Request) string {
+ hashedPayload := req.Header.Get("X-Amz-Content-Sha256")
+ if hashedPayload == "" {
+ // Presign does not have a payload, use S3 recommended value.
+ hashedPayload = "UNSIGNED-PAYLOAD"
+ }
+ return hashedPayload
+}
+
+// getCanonicalHeaders generates a list of request headers for the signature.
+func getCanonicalHeaders(req http.Request) string {
+ var headers []string
+ vals := make(map[string][]string)
+ for k, vv := range req.Header {
+ if _, ok := ignoredHeaders[http.CanonicalHeaderKey(k)]; ok {
+ continue // ignored header
+ }
+ headers = append(headers, strings.ToLower(k))
+ vals[strings.ToLower(k)] = vv
+ }
+ headers = append(headers, "host")
+ sort.Strings(headers)
+
+ var buf bytes.Buffer
+ for _, k := range headers {
+ buf.WriteString(k)
+ buf.WriteByte(':')
+ switch {
+ case k == "host":
+ buf.WriteString(req.URL.Host)
+ fallthrough
+ default:
+ for idx, v := range vals[k] {
+ if idx > 0 {
+ buf.WriteByte(',')
+ }
+ buf.WriteString(v)
+ }
+ buf.WriteByte('\n')
+ }
+ }
+ return buf.String()
+}
+
+// getSignedHeaders generates all signed request headers,
+// i.e. an alphabetically sorted, semicolon-separated list of lowercase request header names.
+func getSignedHeaders(req http.Request) string {
+ var headers []string
+ for k := range req.Header {
+ if _, ok := ignoredHeaders[http.CanonicalHeaderKey(k)]; ok {
+ continue // ignored header
+ }
+ headers = append(headers, strings.ToLower(k))
+ }
+ headers = append(headers, "host")
+ sort.Strings(headers)
+ return strings.Join(headers, ";")
+}
+
+// getCanonicalRequest generates a canonical request of the following style.
+//
+// canonicalRequest =
+// <HTTPMethod>\n
+// <CanonicalURI>\n
+// <CanonicalQueryString>\n
+// <CanonicalHeaders>\n
+// <SignedHeaders>\n
+// <HashedPayload>
+//
+func getCanonicalRequest(req http.Request) string {
+ req.URL.RawQuery = strings.Replace(req.URL.Query().Encode(), "+", "%20", -1)
+ canonicalRequest := strings.Join([]string{
+ req.Method,
+ urlEncodePath(req.URL.Path),
+ req.URL.RawQuery,
+ getCanonicalHeaders(req),
+ getSignedHeaders(req),
+ getHashedPayload(req),
+ }, "\n")
+ return canonicalRequest
+}
+
+// getStringToSignV4 generates the string to sign for a given date, location, and canonical request.
+func getStringToSignV4(t time.Time, location, canonicalRequest string) string {
+ stringToSign := signV4Algorithm + "\n" + t.Format(iso8601DateFormat) + "\n"
+ stringToSign = stringToSign + getScope(location, t) + "\n"
+ stringToSign = stringToSign + hex.EncodeToString(sum256([]byte(canonicalRequest)))
+ return stringToSign
+}
+
+// PreSignV4 presigns the request, in accordance with
+// http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html.
+func PreSignV4(req http.Request, accessKeyID, secretAccessKey, location string, expires int64) *http.Request {
+ // presign is a noop for anonymous credentials.
+ if accessKeyID == "" || secretAccessKey == "" {
+ return nil
+ }
+ // Initial time.
+ t := time.Now().UTC()
+
+ // get credential string.
+ credential := getCredential(accessKeyID, location, t)
+
+ // Get all signed headers.
+ signedHeaders := getSignedHeaders(req)
+
+ // set URL query.
+ query := req.URL.Query()
+ query.Set("X-Amz-Algorithm", signV4Algorithm)
+ query.Set("X-Amz-Date", t.Format(iso8601DateFormat))
+ query.Set("X-Amz-Expires", strconv.FormatInt(expires, 10))
+ query.Set("X-Amz-SignedHeaders", signedHeaders)
+ query.Set("X-Amz-Credential", credential)
+ req.URL.RawQuery = query.Encode()
+
+ // Get canonical request.
+ canonicalRequest := getCanonicalRequest(req)
+
+ // Get string to sign from canonical request.
+ stringToSign := getStringToSignV4(t, location, canonicalRequest)
+
+ // get hmac signing key.
+ signingKey := getSigningKey(secretAccessKey, location, t)
+
+ // calculate signature.
+ signature := getSignature(signingKey, stringToSign)
+
+ // Add signature header to RawQuery.
+ req.URL.RawQuery += "&X-Amz-Signature=" + signature
+
+ return &req
+}
+
+// PostPresignSignatureV4 - presigned signature for PostPolicy requests.
+func PostPresignSignatureV4(policyBase64 string, t time.Time, secretAccessKey, location string) string {
+ signingkey := getSigningKey(secretAccessKey, location, t)
+ signature := getSignature(signingkey, policyBase64)
+ return signature
+}
+
+// SignV4 signs the request before Do(), in accordance with
+// http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html.
+func SignV4(req http.Request, accessKeyID, secretAccessKey, location string) *http.Request {
+ // Initial time.
+ t := time.Now().UTC()
+
+ // Set x-amz-date.
+ req.Header.Set("X-Amz-Date", t.Format(iso8601DateFormat))
+
+ // Get canonical request.
+ canonicalRequest := getCanonicalRequest(req)
+
+ // Get string to sign from canonical request.
+ stringToSign := getStringToSignV4(t, location, canonicalRequest)
+
+ // get hmac signing key.
+ signingKey := getSigningKey(secretAccessKey, location, t)
+
+ // get credential string.
+ credential := getCredential(accessKeyID, location, t)
+
+ // Get all signed headers.
+ signedHeaders := getSignedHeaders(req)
+
+ // calculate signature.
+ signature := getSignature(signingKey, stringToSign)
+
+ // if regular request, construct the final authorization header.
+ parts := []string{
+ signV4Algorithm + " Credential=" + credential,
+ "SignedHeaders=" + signedHeaders,
+ "Signature=" + signature,
+ }
+
+ // Set authorization header.
+ auth := strings.Join(parts, ", ")
+ req.Header.Set("Authorization", auth)
+
+ return &req
+}
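The chained HMACs in getSigningKey implement the standard AWS4 key derivation: date, then region, then service, then the "aws4_request" terminator. A standalone sketch of the same chain, assuming the library's sumHMAC helper (defined elsewhere in the package) is plain HMAC-SHA256:

```go
package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"time"
)

// sumHMAC mirrors the helper the library defines elsewhere (assumed to
// be HMAC-SHA256 of data keyed by key).
func sumHMAC(key []byte, data []byte) []byte {
	h := hmac.New(sha256.New, key)
	h.Write(data)
	return h.Sum(nil)
}

func main() {
	secret := "YOUR-SECRETACCESSKEY" // dummy value
	t := time.Now().UTC()

	// Same chain as getSigningKey above: date -> region -> service -> terminator.
	date := sumHMAC([]byte("AWS4"+secret), []byte(t.Format("20060102")))
	region := sumHMAC(date, []byte("us-east-1"))
	service := sumHMAC(region, []byte("s3"))
	signingKey := sumHMAC(service, []byte("aws4_request"))

	// The final signature is hex(HMAC(signingKey, stringToSign)).
	stringToSign := "AWS4-HMAC-SHA256\n..." // placeholder
	fmt.Println(hex.EncodeToString(sumHMAC(signingKey, []byte(stringToSign))))
}
```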
diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/request-v4.go b/Godeps/_workspace/src/github.com/minio/minio-go/request-v4.go
deleted file mode 100644
index 09ef06a9a..000000000
--- a/Godeps/_workspace/src/github.com/minio/minio-go/request-v4.go
+++ /dev/null
@@ -1,228 +0,0 @@
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package minio
-
-import (
- "bytes"
- "encoding/hex"
- "errors"
- "net/http"
- "sort"
- "strconv"
- "strings"
- "time"
-)
-
-const (
- authHeader = "AWS4-HMAC-SHA256"
- iso8601DateFormat = "20060102T150405Z"
- yyyymmdd = "20060102"
-)
-
-///
-/// Excerpts from @lsegal - https://github.com/aws/aws-sdk-js/issues/659#issuecomment-120477258
-///
-/// User-Agent:
-///
-/// This is ignored from signing because signing this causes problems with generating pre-signed URLs
-/// (that are executed by other agents) or when customers pass requests through proxies, which may
-/// modify the user-agent.
-///
-/// Content-Length:
-///
-/// This is ignored from signing because generating a pre-signed URL should not provide a content-length
-/// constraint, specifically when vending a S3 pre-signed PUT URL. The corollary to this is that when
-/// sending regular requests (non-pre-signed), the signature contains a checksum of the body, which
-/// implicitly validates the payload length (since changing the number of bytes would change the checksum)
-/// and therefore this header is not valuable in the signature.
-///
-/// Content-Type:
-///
-/// Signing this header causes quite a number of problems in browser environments, where browsers
-/// like to modify and normalize the content-type header in different ways. There is more information
-/// on this in https://github.com/aws/aws-sdk-js/issues/244. Avoiding this field simplifies logic
-/// and reduces the possibility of future bugs
-///
-/// Authorization:
-///
-/// Is skipped for obvious reasons
-///
-var ignoredHeaders = map[string]bool{
- "Authorization": true,
- "Content-Type": true,
- "Content-Length": true,
- "User-Agent": true,
-}
-
-// getHashedPayload get the hexadecimal value of the SHA256 hash of the request payload
-func (r *request) getHashedPayload() string {
- hash := func() string {
- switch {
- case r.expires != 0:
- return "UNSIGNED-PAYLOAD"
- case r.body == nil:
- return hex.EncodeToString(sum256([]byte{}))
- default:
- sum256Bytes, _ := sum256Reader(r.body)
- return hex.EncodeToString(sum256Bytes)
- }
- }
- hashedPayload := hash()
- if hashedPayload != "UNSIGNED-PAYLOAD" {
- r.req.Header.Set("X-Amz-Content-Sha256", hashedPayload)
- }
- return hashedPayload
-}
-
-// getCanonicalHeaders generate a list of request headers with their values
-func (r *request) getCanonicalHeaders() string {
- var headers []string
- vals := make(map[string][]string)
- for k, vv := range r.req.Header {
- if _, ok := ignoredHeaders[http.CanonicalHeaderKey(k)]; ok {
- continue // ignored header
- }
- headers = append(headers, strings.ToLower(k))
- vals[strings.ToLower(k)] = vv
- }
- headers = append(headers, "host")
- sort.Strings(headers)
-
- var buf bytes.Buffer
- for _, k := range headers {
- buf.WriteString(k)
- buf.WriteByte(':')
- switch {
- case k == "host":
- buf.WriteString(r.req.URL.Host)
- fallthrough
- default:
- for idx, v := range vals[k] {
- if idx > 0 {
- buf.WriteByte(',')
- }
- buf.WriteString(v)
- }
- buf.WriteByte('\n')
- }
- }
- return buf.String()
-}
-
-// getSignedHeaders generate a string i.e alphabetically sorted, semicolon-separated list of lowercase request header names
-func (r *request) getSignedHeaders() string {
- var headers []string
- for k := range r.req.Header {
- if _, ok := ignoredHeaders[http.CanonicalHeaderKey(k)]; ok {
- continue // ignored header
- }
- headers = append(headers, strings.ToLower(k))
- }
- headers = append(headers, "host")
- sort.Strings(headers)
- return strings.Join(headers, ";")
-}
-
-// getCanonicalRequest generate a canonical request of style
-//
-// canonicalRequest =
-// <HTTPMethod>\n
-// <CanonicalURI>\n
-// <CanonicalQueryString>\n
-// <CanonicalHeaders>\n
-// <SignedHeaders>\n
-// <HashedPayload>
-//
-func (r *request) getCanonicalRequest(hashedPayload string) string {
- r.req.URL.RawQuery = strings.Replace(r.req.URL.Query().Encode(), "+", "%20", -1)
- canonicalRequest := strings.Join([]string{
- r.req.Method,
- getURLEncodedPath(r.req.URL.Path),
- r.req.URL.RawQuery,
- r.getCanonicalHeaders(),
- r.getSignedHeaders(),
- hashedPayload,
- }, "\n")
- return canonicalRequest
-}
-
-// getStringToSign a string based on selected query values
-func (r *request) getStringToSignV4(canonicalRequest string, t time.Time) string {
- stringToSign := authHeader + "\n" + t.Format(iso8601DateFormat) + "\n"
- stringToSign = stringToSign + getScope(r.config.Region, t) + "\n"
- stringToSign = stringToSign + hex.EncodeToString(sum256([]byte(canonicalRequest)))
- return stringToSign
-}
-
-// Presign the request, in accordance with - http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html
-func (r *request) PreSignV4() (string, error) {
- if r.config.AccessKeyID == "" && r.config.SecretAccessKey == "" {
- return "", errors.New("presign requires accesskey and secretkey")
- }
- r.SignV4()
- return r.req.URL.String(), nil
-}
-
-func (r *request) PostPresignSignatureV4(policyBase64 string, t time.Time) string {
- signingkey := getSigningKey(r.config.SecretAccessKey, r.config.Region, t)
- signature := getSignature(signingkey, policyBase64)
- return signature
-}
-
-// SignV4 the request before Do(), in accordance with - http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html
-func (r *request) SignV4() {
- query := r.req.URL.Query()
- if r.expires != 0 {
- query.Set("X-Amz-Algorithm", authHeader)
- }
- t := time.Now().UTC()
- // Add date if not present
- if r.expires != 0 {
- query.Set("X-Amz-Date", t.Format(iso8601DateFormat))
- query.Set("X-Amz-Expires", strconv.FormatInt(r.expires, 10))
- } else {
- r.Set("X-Amz-Date", t.Format(iso8601DateFormat))
- }
-
- hashedPayload := r.getHashedPayload()
- signedHeaders := r.getSignedHeaders()
- if r.expires != 0 {
- query.Set("X-Amz-SignedHeaders", signedHeaders)
- }
- credential := getCredential(r.config.AccessKeyID, r.config.Region, t)
- if r.expires != 0 {
- query.Set("X-Amz-Credential", credential)
- r.req.URL.RawQuery = query.Encode()
- }
- canonicalRequest := r.getCanonicalRequest(hashedPayload)
- stringToSign := r.getStringToSignV4(canonicalRequest, t)
- signingKey := getSigningKey(r.config.SecretAccessKey, r.config.Region, t)
- signature := getSignature(signingKey, stringToSign)
-
- if r.expires != 0 {
- r.req.URL.RawQuery += "&X-Amz-Signature=" + signature
- } else {
- // final Authorization header
- parts := []string{
- authHeader + " Credential=" + credential,
- "SignedHeaders=" + signedHeaders,
- "Signature=" + signature,
- }
- auth := strings.Join(parts, ", ")
- r.Set("Authorization", auth)
- }
-}
diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/signature-type.go b/Godeps/_workspace/src/github.com/minio/minio-go/signature-type.go
new file mode 100644
index 000000000..8eec3f0eb
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/minio/minio-go/signature-type.go
@@ -0,0 +1,21 @@
+package minio
+
+// SignatureType is the type of Authorization requested for a given HTTP request.
+type SignatureType int
+
+// Different types of supported signatures - default is Latest, i.e. SignatureV4.
+const (
+ Latest SignatureType = iota
+ SignatureV4
+ SignatureV2
+)
+
+// isV2 - is signature SignatureV2?
+func (s SignatureType) isV2() bool {
+ return s == SignatureV2
+}
+
+// isV4 - is signature SignatureV4?
+func (s SignatureType) isV4() bool {
+ return s == SignatureV4 || s == Latest
+}
diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/tempfile.go b/Godeps/_workspace/src/github.com/minio/minio-go/tempfile.go
new file mode 100644
index 000000000..34508569f
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/minio/minio-go/tempfile.go
@@ -0,0 +1,76 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "sync"
+)
+
+// tempFile - temporary file container.
+type tempFile struct {
+ *os.File
+ mutex *sync.Mutex
+}
+
+// newTempFile returns a new temporary file; once closed, it automatically deletes itself.
+func newTempFile(prefix string) (*tempFile, error) {
+ // use platform specific temp directory.
+ file, err := ioutil.TempFile(os.TempDir(), prefix)
+ if err != nil {
+ return nil, err
+ }
+ return &tempFile{
+ File: file,
+ mutex: new(sync.Mutex),
+ }, nil
+}
+
+// cleanupStaleTempfiles - cleanup any stale files present in the temp directory at the given prefix.
+func cleanupStaleTempfiles(prefix string) error {
+ globPath := filepath.Join(os.TempDir(), prefix) + "*"
+ staleFiles, err := filepath.Glob(globPath)
+ if err != nil {
+ return err
+ }
+ for _, staleFile := range staleFiles {
+ if err := os.Remove(staleFile); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// Close - closer wrapper to close and remove temporary file.
+func (t *tempFile) Close() error {
+ t.mutex.Lock()
+ defer t.mutex.Unlock()
+ if t.File != nil {
+ // Close the file.
+ if err := t.File.Close(); err != nil {
+ return err
+ }
+ // Remove file.
+ if err := os.Remove(t.File.Name()); err != nil {
+ return err
+ }
+ t.File = nil
+ }
+ return nil
+}
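
A usage sketch of the wrapper, duplicated here so it runs standalone (the "example-" prefix is arbitrary). Close both closes and removes the file, and a second Close is a harmless no-op since t.File is cleared:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"os"
	"sync"
)

// Standalone copy of the tempFile wrapper above.
type tempFile struct {
	*os.File
	mutex *sync.Mutex
}

func newTempFile(prefix string) (*tempFile, error) {
	file, err := ioutil.TempFile(os.TempDir(), prefix)
	if err != nil {
		return nil, err
	}
	return &tempFile{File: file, mutex: new(sync.Mutex)}, nil
}

func (t *tempFile) Close() error {
	t.mutex.Lock()
	defer t.mutex.Unlock()
	if t.File == nil {
		return nil
	}
	if err := t.File.Close(); err != nil {
		return err
	}
	if err := os.Remove(t.File.Name()); err != nil {
		return err
	}
	t.File = nil
	return nil
}

func main() {
	tmp, err := newTempFile("example-")
	if err != nil {
		log.Fatal(err)
	}
	name := tmp.Name()
	fmt.Fprint(tmp, "staging data") // embeds *os.File, so it is an io.Writer
	tmp.Close()                     // closes and deletes
	tmp.Close()                     // no-op on second call
	_, err = os.Stat(name)
	fmt.Println(os.IsNotExist(err)) // true: the file removed itself
}
```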
diff --git a/Godeps/_workspace/src/github.com/minio/minio-go/utils.go b/Godeps/_workspace/src/github.com/minio/minio-go/utils.go
new file mode 100644
index 000000000..2e2532b6c
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/minio/minio-go/utils.go
@@ -0,0 +1,319 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "encoding/hex"
+ "io"
+ "io/ioutil"
+ "net"
+ "net/http"
+ "net/url"
+ "regexp"
+ "strings"
+ "time"
+ "unicode/utf8"
+)
+
+// isPartUploaded - true if part is already uploaded.
+func isPartUploaded(objPart objectPart, objectParts map[int]objectPart) (isUploaded bool) {
+ _, isUploaded = objectParts[objPart.PartNumber]
+ if isUploaded {
+ isUploaded = (objPart.ETag == objectParts[objPart.PartNumber].ETag)
+ }
+ return
+}
+
+// getEndpointURL - construct a new endpoint.
+func getEndpointURL(endpoint string, inSecure bool) (*url.URL, error) {
+ if strings.Contains(endpoint, ":") {
+ host, _, err := net.SplitHostPort(endpoint)
+ if err != nil {
+ return nil, err
+ }
+ if !isValidIP(host) && !isValidDomain(host) {
+ msg := "Endpoint: " + endpoint + " does not follow ip address or domain name standards."
+ return nil, ErrInvalidArgument(msg)
+ }
+ } else {
+ if !isValidIP(endpoint) && !isValidDomain(endpoint) {
+ msg := "Endpoint: " + endpoint + " does not follow ip address or domain name standards."
+ return nil, ErrInvalidArgument(msg)
+ }
+ }
+ // if inSecure is true, use 'http' scheme.
+ scheme := "https"
+ if inSecure {
+ scheme = "http"
+ }
+
+ // Construct the endpoint URL with the chosen scheme.
+ endpointURLStr := scheme + "://" + endpoint
+ endpointURL, err := url.Parse(endpointURLStr)
+ if err != nil {
+ return nil, err
+ }
+
+ // Validate incoming endpoint URL.
+ if err := isValidEndpointURL(endpointURL); err != nil {
+ return nil, err
+ }
+ return endpointURL, nil
+}
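
A trimmed-down sketch of the flow above, keeping the scheme selection and host:port handling but delegating validation to the standard library (the package's unexported helpers are not available standalone):

```go
package main

import (
	"fmt"
	"net"
	"net/url"
	"strings"
)

// endpointToURL is an illustrative reduction of getEndpointURL.
func endpointToURL(endpoint string, inSecure bool) (*url.URL, error) {
	// Split off the port, if any, so only the host is validated.
	if strings.Contains(endpoint, ":") {
		if _, _, err := net.SplitHostPort(endpoint); err != nil {
			return nil, err
		}
	}
	// Default to https; inSecure flips to plain http.
	scheme := "https"
	if inSecure {
		scheme = "http"
	}
	return url.Parse(scheme + "://" + endpoint)
}

func main() {
	u, _ := endpointToURL("localhost:9000", true)
	fmt.Println(u.String()) // http://localhost:9000

	u, _ = endpointToURL("s3.amazonaws.com", false)
	fmt.Println(u.String()) // https://s3.amazonaws.com
}
```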
+
+// isValidDomain validates if input string is a valid domain name.
+func isValidDomain(host string) bool {
+ // See RFC 1035, RFC 3696.
+ host = strings.TrimSpace(host)
+ if len(host) == 0 || len(host) > 255 {
+ return false
+ }
+ // host cannot start or end with "-"
+ if host[len(host)-1:] == "-" || host[:1] == "-" {
+ return false
+ }
+ // host cannot start or end with "_"
+ if host[len(host)-1:] == "_" || host[:1] == "_" {
+ return false
+ }
+ // host cannot start or end with a "."
+ if host[len(host)-1:] == "." || host[:1] == "." {
+ return false
+ }
+ // These non-alphanumeric characters are invalid in a host name.
+ if strings.ContainsAny(host, "`~!@#$%^&*()+={}[]|\\\"';:>/") {
+ return false
+ }
+ // No need for a full regexp match, since the list is non-exhaustive.
+ // We treat the name as valid here and let the request fail later.
+ return true
+}
+
+// isValidIP parses input string for ip address validity.
+func isValidIP(ip string) bool {
+ return net.ParseIP(ip) != nil
+}
+
+// closeResponse closes a non-nil response and drains any remaining data in the
+// response Body - a convenient wrapper for callers done reading from it.
+//
+// Subsequently this allows golang http RoundTripper
+// to re-use the same connection for future requests.
+func closeResponse(resp *http.Response) {
+ // Callers should close resp.Body when done reading from it.
+ // If resp.Body is not closed, the Client's underlying RoundTripper
+ // (typically Transport) may not be able to re-use a persistent TCP
+ // connection to the server for a subsequent "keep-alive" request.
+ if resp != nil && resp.Body != nil {
+ // Drain any remaining Body and then close the connection.
+ // Without draining first, closing would prevent the
+ // connection from being re-used for future requests.
+ //  - http://stackoverflow.com/a/17961593/4465767
+ io.Copy(ioutil.Discard, resp.Body)
+ resp.Body.Close()
+ }
+}
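
The drain-before-close pattern in isolation; drainAndClose below is an illustrative standalone equivalent of closeResponse:

```go
package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"log"
	"net/http"
)

// drainAndClose drains the body before closing so the Transport can
// return the underlying TCP connection to its idle pool.
func drainAndClose(resp *http.Response) {
	if resp != nil && resp.Body != nil {
		io.Copy(ioutil.Discard, resp.Body)
		resp.Body.Close()
	}
}

func main() {
	resp, err := http.Get("https://example.com/")
	if err != nil {
		log.Fatal(err)
	}
	defer drainAndClose(resp)
	fmt.Println(resp.Status)
}
```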
+
+// isVirtualHostSupported - verify if host supports virtual hosted style.
+// Currently only Amazon S3 and Google Cloud Storage support it.
+func isVirtualHostSupported(endpointURL *url.URL) bool {
+ return isAmazonEndpoint(endpointURL) || isGoogleEndpoint(endpointURL)
+}
+
+// Match if it is exactly the Amazon S3 endpoint.
+func isAmazonEndpoint(endpointURL *url.URL) bool {
+ if endpointURL == nil {
+ return false
+ }
+ if endpointURL.Host == "s3.amazonaws.com" {
+ return true
+ }
+ return false
+}
+
+// Match if it is exactly the Google Cloud Storage endpoint.
+func isGoogleEndpoint(endpointURL *url.URL) bool {
+ if endpointURL == nil {
+ return false
+ }
+ if endpointURL.Host == "storage.googleapis.com" {
+ return true
+ }
+ return false
+}
+
+// Verify if input endpoint URL is valid.
+func isValidEndpointURL(endpointURL *url.URL) error {
+ if endpointURL == nil {
+ return ErrInvalidArgument("Endpoint url cannot be empty.")
+ }
+ if endpointURL.Path != "/" && endpointURL.Path != "" {
+ return ErrInvalidArgument("Endpoint url cannot have fully qualified paths.")
+ }
+ if strings.Contains(endpointURL.Host, ".amazonaws.com") {
+ if !isAmazonEndpoint(endpointURL) {
+ return ErrInvalidArgument("Amazon S3 endpoint should be 's3.amazonaws.com'.")
+ }
+ }
+ if strings.Contains(endpointURL.Host, ".googleapis.com") {
+ if !isGoogleEndpoint(endpointURL) {
+ return ErrInvalidArgument("Google Cloud Storage endpoint should be 'storage.googleapis.com'.")
+ }
+ }
+ return nil
+}
+
+// Verify if input expires value is valid.
+func isValidExpiry(expires time.Duration) error {
+ expireSeconds := int64(expires / time.Second)
+ if expireSeconds < 1 {
+ return ErrInvalidArgument("Expires cannot be less than 1 second.")
+ }
+ if expireSeconds > 604800 {
+ return ErrInvalidArgument("Expires cannot be greater than 7 days.")
+ }
+ return nil
+}
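
In concrete terms: 7 days is 7 × 24 × 3600 = 604800 seconds, the maximum lifetime S3 allows for a SigV4 presigned URL. A standalone mirror of the check:

```go
package main

import (
	"fmt"
	"time"
)

// checkExpiry is an illustrative copy of isValidExpiry: presigned URLs
// must live between 1 second and 7 days (604800 seconds).
func checkExpiry(expires time.Duration) error {
	secs := int64(expires / time.Second)
	if secs < 1 {
		return fmt.Errorf("Expires cannot be less than 1 second.")
	}
	if secs > 604800 {
		return fmt.Errorf("Expires cannot be greater than 7 days.")
	}
	return nil
}

func main() {
	fmt.Println(checkExpiry(15 * time.Minute))       // <nil>
	fmt.Println(checkExpiry(8 * 24 * time.Hour))     // error: greater than 7 days
	fmt.Println(checkExpiry(500 * time.Millisecond)) // error: less than 1 second
}
```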
+
+/// Excerpts from - http://docs.aws.amazon.com/AmazonS3/latest/dev/BucketRestrictions.html
+/// When using virtual hosted–style buckets with SSL, the SSL wild card
+/// certificate only matches buckets that do not contain periods.
+/// To work around this, use HTTP or write your own certificate verification logic.
+
+// We decided not to support bucket names with '.' in them.
+var validBucketName = regexp.MustCompile(`^[a-z0-9][a-z0-9\-]{1,61}[a-z0-9]$`)
+
+// isValidBucketName - verify bucket name in accordance with
+// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html
+func isValidBucketName(bucketName string) error {
+ if strings.TrimSpace(bucketName) == "" {
+ return ErrInvalidBucketName("Bucket name cannot be empty.")
+ }
+ if len(bucketName) < 3 {
+ return ErrInvalidBucketName("Bucket name cannot be smaller than 3 characters.")
+ }
+ if len(bucketName) > 63 {
+ return ErrInvalidBucketName("Bucket name cannot be greater than 63 characters.")
+ }
+ if bucketName[0] == '.' || bucketName[len(bucketName)-1] == '.' {
+ return ErrInvalidBucketName("Bucket name cannot start or end with a '.' dot.")
+ }
+ if !validBucketName.MatchString(bucketName) {
+ return ErrInvalidBucketName("Bucket name contains invalid characters.")
+ }
+ return nil
+}
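
A few concrete names run through the same pattern (the regexp is verbatim from above; the sample names are illustrative):

```go
package main

import (
	"fmt"
	"regexp"
)

// Same pattern as validBucketName above.
var validBucketName = regexp.MustCompile(`^[a-z0-9][a-z0-9\-]{1,61}[a-z0-9]$`)

func main() {
	for _, name := range []string{
		"my-bucket",  // true: lowercase letters and hyphens
		"ab",         // false: shorter than 3 characters
		"My-Bucket",  // false: uppercase not allowed
		"my.bucket",  // false: '.' rejected (breaks SSL wildcard match)
		"-my-bucket", // false: cannot start with '-'
	} {
		fmt.Printf("%-12s %v\n", name, validBucketName.MatchString(name))
	}
}
```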
+
+// isValidObjectName - verify object name in accordance with
+// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html
+func isValidObjectName(objectName string) error {
+ if strings.TrimSpace(objectName) == "" {
+ return ErrInvalidObjectName("Object name cannot be empty.")
+ }
+ if len(objectName) > 1024 {
+ return ErrInvalidObjectName("Object name cannot be greater than 1024 characters.")
+ }
+ if !utf8.ValidString(objectName) {
+ return ErrInvalidObjectName("Object names with non UTF-8 encoding are not supported.")
+ }
+ return nil
+}
+
+// isValidObjectPrefix - verify if object prefix is valid.
+func isValidObjectPrefix(objectPrefix string) error {
+ if len(objectPrefix) > 1024 {
+ return ErrInvalidObjectPrefix("Object prefix cannot be greater than 1024 characters.")
+ }
+ if !utf8.ValidString(objectPrefix) {
+ return ErrInvalidObjectPrefix("Object prefixes with non UTF-8 encoding are not supported.")
+ }
+ return nil
+}
+
+// optimalPartSize - calculate the optimal part size for the given objectSize.
+//
+// NOTE: Assumption here is that for any object to be uploaded to any S3 compatible
+// object storage it will have the following parameters as constants.
+//
+// maxParts - 10000
+// minimumPartSize - 5MiB
+//  maxPartSize - 5GiB
+//
+// If the part size after division by (maxParts - 1) is greater than minimumPartSize,
+// choose it as the new part size (capped at maxPartSize); otherwise return minimumPartSize.
+//
+// Special cases
+//
+// - if input object size is -1 then return maxPartSize.
+// - if the computed partSize happens to be bigger
+//   than the maximum part size, just return maxPartSize.
+//
+func optimalPartSize(objectSize int64) int64 {
+ // if object size is -1 choose part size as 5GiB.
+ if objectSize == -1 {
+ return maxPartSize
+ }
+ // Divide by (maxParts - 1) so the last part has enough buffer and is handled properly.
+ partSize := (objectSize / (maxParts - 1))
+ if partSize > minimumPartSize {
+ if partSize > maxPartSize {
+ return maxPartSize
+ }
+ return partSize
+ }
+ return minimumPartSize
+}
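
Worked through with the documented constants (copied here, since the real ones are unexported): a 1GiB upload divides into parts below the 5MiB floor, so the floor wins; a 100GiB upload needs larger parts to stay under 10000 of them:

```go
package main

import "fmt"

// Constants as documented above.
const (
	maxParts        = 10000
	minimumPartSize = 1024 * 1024 * 5        // 5MiB
	maxPartSize     = 1024 * 1024 * 1024 * 5 // 5GiB
)

func optimalPartSize(objectSize int64) int64 {
	if objectSize == -1 {
		return maxPartSize
	}
	partSize := objectSize / (maxParts - 1)
	if partSize > minimumPartSize {
		if partSize > maxPartSize {
			return maxPartSize
		}
		return partSize
	}
	return minimumPartSize
}

func main() {
	// 1GiB / 9999 ≈ 105KiB < 5MiB, so the minimum wins.
	fmt.Println(optimalPartSize(1024 * 1024 * 1024)) // 5242880
	// 100GiB / 9999 ≈ 10.2MiB > 5MiB, so the computed size wins.
	fmt.Println(optimalPartSize(100 * 1024 * 1024 * 1024)) // 10738492
	// Unknown size (-1): assume the worst case, 5GiB parts.
	fmt.Println(optimalPartSize(-1)) // 5368709120
}
```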
+
+// urlEncodePath encodes a path from its UTF-8 byte representation into percent-encoded escape sequences.
+//
+// This is necessary since the regular url.Parse() and url.Encode() functions do not support UTF-8 -
+// non-English characters cannot be parsed due to the way url.Encode() is written.
+//
+// This function, on the other hand, is a direct replacement for the url.Encode() technique
+// and supports pretty much every UTF-8 character.
+func urlEncodePath(pathName string) string {
+ // If the path consists entirely of unreserved characters, no encoding is needed.
+ reservedNames := regexp.MustCompile("^[a-zA-Z0-9-_.~/]+$")
+ if reservedNames.MatchString(pathName) {
+ return pathName
+ }
+ var encodedPathname string
+ for _, s := range pathName {
+ if 'A' <= s && s <= 'Z' || 'a' <= s && s <= 'z' || '0' <= s && s <= '9' { // §2.3 Unreserved characters (mark)
+ encodedPathname = encodedPathname + string(s)
+ continue
+ }
+ switch s {
+ case '-', '_', '.', '~', '/': // §2.3 Unreserved characters (mark)
+ encodedPathname = encodedPathname + string(s)
+ continue
+ default:
+ n := utf8.RuneLen(s)
+ if n < 0 {
+ // if utf8 cannot encode the rune, return the string as-is
+ return pathName
+ }
+ u := make([]byte, n)
+ utf8.EncodeRune(u, s)
+ for _, r := range u {
+ hex := hex.EncodeToString([]byte{r})
+ encodedPathname = encodedPathname + "%" + strings.ToUpper(hex)
+ }
+ }
+ }
+ return encodedPathname
+}
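
A condensed standalone version of the same loop. When ranging over a string, invalid bytes surface as U+FFFD (whose RuneLen is 3), so the negative-length guard can be omitted here; unreserved characters pass through untouched, everything else is percent-encoded one UTF-8 byte at a time:

```go
package main

import (
	"encoding/hex"
	"fmt"
	"strings"
	"unicode/utf8"
)

// encodePath is an illustrative reduction of urlEncodePath above.
func encodePath(pathName string) string {
	var encoded string
	for _, r := range pathName {
		switch {
		case 'A' <= r && r <= 'Z', 'a' <= r && r <= 'z', '0' <= r && r <= '9',
			r == '-', r == '_', r == '.', r == '~', r == '/':
			encoded += string(r) // unreserved: keep as-is
		default:
			buf := make([]byte, utf8.RuneLen(r))
			utf8.EncodeRune(buf, r)
			for _, b := range buf {
				encoded += "%" + strings.ToUpper(hex.EncodeToString([]byte{b}))
			}
		}
	}
	return encoded
}

func main() {
	// '/' stays literal and 'é' (U+00E9) becomes its two UTF-8 bytes %C3%A9.
	fmt.Println(encodePath("photos/été 2015.jpg"))
	// Output: photos/%C3%A9t%C3%A9%202015.jpg
}
```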