
Merge pull request #366 from restic/howeyc-s3-minio

rebase: Switch s3 library to allow for s3 compatible backends
Alexander Neumann 2016-01-17 19:21:13 +01:00
commit 5df9bdec9a
110 changed files with 10986 additions and 4786 deletions

Dockerfile (new file)

@@ -0,0 +1,55 @@
# This Dockerfile configures a container that is similar to the Travis CI
# environment and can be used to run tests locally.
#
# build the image:
# docker build -t restic/test .
#
# run tests:
# docker run --rm -v $PWD:/home/travis/gopath/src/github.com/restic/restic restic/test go run run_integration_tests.go
#
# run interactively with:
# docker run --interactive --tty --rm -v $PWD:/home/travis/gopath/src/github.com/restic/restic restic/test /bin/bash
#
# run tests for a single package:
# docker run --rm -v $PWD:/home/travis/gopath/src/github.com/restic/restic restic/test go test -v ./backend
FROM ubuntu:14.04
ARG GOVERSION=1.5.3
ARG GOARCH=amd64
# install dependencies
RUN apt-get update
RUN apt-get install -y --no-install-recommends ca-certificates wget git build-essential openssh-server
# add and configure user
ENV HOME /home/travis
RUN useradd -m -d $HOME -s /bin/bash travis
# run everything below as user travis
USER travis
WORKDIR $HOME
# download and install Go
RUN wget -q -O /tmp/go.tar.gz https://storage.googleapis.com/golang/go${GOVERSION}.linux-${GOARCH}.tar.gz
RUN tar xf /tmp/go.tar.gz && rm -f /tmp/go.tar.gz
ENV GOROOT $HOME/go
ENV PATH $PATH:$GOROOT/bin
ENV GOPATH $HOME/gopath
ENV PATH $PATH:$GOPATH/bin
RUN mkdir -p $GOPATH/src/github.com/restic/restic
# install tools
RUN go get golang.org/x/tools/cmd/cover
RUN go get github.com/mattn/goveralls
RUN go get github.com/mitchellh/gox
RUN go get github.com/pierrre/gotestcover
RUN GO15VENDOREXPERIMENT=1 go get github.com/minio/minio
# set TRAVIS_BUILD_DIR for integration script
ENV TRAVIS_BUILD_DIR $GOPATH/src/github.com/restic/restic
ENV GOPATH $GOPATH:${TRAVIS_BUILD_DIR}/Godeps/_workspace
WORKDIR $TRAVIS_BUILD_DIR

Godeps/Godeps.json (generated)

@@ -22,6 +22,11 @@
"ImportPath": "github.com/kr/fs",
"Rev": "2788f0dbd16903de03cb8186e5c7d97b69ad387b"
},
{
"ImportPath": "github.com/minio/minio-go",
"Comment": "v0.2.5-209-g77f35ea",
"Rev": "77f35ea56099f50b0425d0e2f3949773dae723c0"
},
{
"ImportPath": "github.com/pkg/sftp",
"Rev": "518aed2757a65cfa64d4b1b2baf08410f8b7a6bc"
@@ -49,14 +54,6 @@
{
"ImportPath": "golang.org/x/net/context",
"Rev": "7654728e381988afd88e58cabfd6363a5ea91810"
},
{
"ImportPath": "gopkg.in/amz.v3/aws",
"Rev": "bff3a097c4108da57bb8cbe3aad2990d74d23676"
},
{
"ImportPath": "gopkg.in/amz.v3/s3",
"Rev": "bff3a097c4108da57bb8cbe3aad2990d74d23676"
}
]
}

@@ -0,0 +1,2 @@
*~
*.test

@@ -0,0 +1,22 @@
sudo: false
language: go
os:
- linux
- osx
env:
- ARCH=x86_64
- ARCH=i686
go:
- 1.5.1
- 1.5.2
script:
- go vet ./...
- go test -short -race -v ./...
notifications:
slack:
secure: HrOX2k6F/sEl6Rr4m5vHOdJCIwV42be0kz1Jy/WSMvrl/fQ8YkldKviLeWh4aWt1kclsYhNQ4FqGML+RIZYsdOqej4fAw9Vi5pZkI1MzPJq0UjrtMqkqzvD90eDGQYCKwaXjEIN8cohwJeb6X0B0HKAd9sqJW5GH5SwnhH5WWP8=

@@ -0,0 +1,21 @@
### Developer Guidelines
``minio-go`` welcomes your contribution. To make the process as seamless as possible, we ask for the following:
* Go ahead and fork the project and make your changes. We encourage pull requests to discuss code changes.
- Fork it
- Create your feature branch (git checkout -b my-new-feature)
- Commit your changes (git commit -am 'Add some feature')
- Push to the branch (git push origin my-new-feature)
- Create new Pull Request
* When you're ready to create a pull request, be sure to:
- Have test cases for the new code. If you have questions about how to do it, please ask in your pull request.
- Run `go fmt`
- Squash your commits into a single commit. `git rebase -i`. It's okay to force update your pull request.
- Make sure `go test -race ./...` and `go build` complete successfully.
* Read the [Effective Go](https://github.com/golang/go/wiki/CodeReviewComments) article from the Golang project
- the `minio-go` project strictly conforms to Golang style
- if you happen to observe offending code, please feel free to send a pull request

@@ -0,0 +1,83 @@
## Ubuntu (Kylin) 14.04
### Build Dependencies
This installation guide is based on Ubuntu 14.04+ on x86-64 platform.
##### Install Git, GCC
```sh
$ sudo apt-get install git build-essential
```
##### Install Go 1.5+
Download Go 1.5+ from [https://golang.org/dl/](https://golang.org/dl/).
```sh
$ wget https://storage.googleapis.com/golang/go1.5.1.linux-amd64.tar.gz
$ mkdir -p ${HOME}/bin/
$ mkdir -p ${HOME}/go/
$ tar -C ${HOME}/bin/ -xzf go1.5.1.linux-amd64.tar.gz
```
##### Setup GOROOT and GOPATH
Add the following exports to your ``~/.bashrc``. The GOROOT environment variable specifies the location of your Go installation,
and GOPATH specifies the location of your project workspace.
```sh
export GOROOT=${HOME}/bin/go
export GOPATH=${HOME}/go
export PATH=$PATH:${HOME}/bin/go/bin:${GOPATH}/bin
```
```sh
$ source ~/.bashrc
```
##### Testing it all
```sh
$ go env
```
## OS X (Yosemite) 10.10
### Build Dependencies
This installation document assumes OS X Yosemite 10.10+ on x86-64 platform.
##### Install brew
```sh
$ ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
```
##### Install Git, Python
```sh
$ brew install git python
```
##### Install Go 1.5+
Install golang binaries using `brew`
```sh
$ brew install go
$ mkdir -p $HOME/go
```
##### Setup GOROOT and GOPATH
Add the following exports to your ``~/.bash_profile``. The GOROOT environment variable specifies the location of your Go installation,
and GOPATH specifies the location of your project workspace.
```sh
export GOPATH=${HOME}/go
export GOVERSION=$(brew list go | head -n 1 | cut -d '/' -f 6)
export GOROOT=$(brew --prefix)/Cellar/go/${GOVERSION}/libexec
export PATH=$PATH:${GOPATH}/bin
```
##### Source the new environment
```sh
$ source ~/.bash_profile
```
##### Testing it all
```sh
$ go env
```

Godeps/_workspace/src/github.com/minio/minio-go/LICENSE (generated, vendored, new file)

@@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

@@ -0,0 +1,19 @@
# For maintainers only
## Responsibilities
Please read [Maintainer Responsibility](https://gist.github.com/abperiasamy/f4d9b31d3186bbd26522).
### Making new releases
Edit `libraryVersion` constant in `api.go`.
```sh
$ grep libraryVersion api.go
libraryVersion = "0.3.0"
```
```sh
$ git tag 0.3.0
$ git push --tags
```

@@ -0,0 +1,98 @@
# Minio Go Library for Amazon S3 Compatible Cloud Storage [![Gitter](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/minio/minio?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
## Description
Minio Go library is a simple client library for S3 compatible cloud storage servers. It supports AWS Signature Version 4 and 2; Signature Version 4 is the default.
List of supported cloud storage providers.
- AWS Signature Version 4
- Amazon S3
- Minio
- AWS Signature Version 2
- Google Cloud Storage (Compatibility Mode)
- Openstack Swift + Swift3 middleware
- Ceph Object Gateway
- Riak CS
## Install
If you do not have a working Golang environment, please follow [Install Golang](./INSTALLGO.md).
```sh
$ go get github.com/minio/minio-go
```
## Example
### ListBuckets()
This example shows how to List your buckets.
```go
package main
import (
"log"
"github.com/minio/minio-go"
)
func main() {
// Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access.
// This boolean value is the last argument for New().
// New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
// determined based on the Endpoint value.
s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESS-KEY-HERE", "YOUR-SECRET-KEY-HERE", false)
if err != nil {
log.Fatalln(err)
}
buckets, err := s3Client.ListBuckets()
if err != nil {
log.Fatalln(err)
}
for _, bucket := range buckets {
log.Println(bucket)
}
}
```
## Documentation
### Bucket Operations.
* [MakeBucket(bucketName, BucketACL, location) error](examples/s3/makebucket.go)
* [BucketExists(bucketName) error](examples/s3/bucketexists.go)
* [RemoveBucket(bucketName) error](examples/s3/removebucket.go)
* [GetBucketACL(bucketName) (BucketACL, error)](examples/s3/getbucketacl.go)
* [SetBucketACL(bucketName, BucketACL) error](examples/s3/setbucketacl.go)
* [ListBuckets() ([]BucketInfo, error)](examples/s3/listbuckets.go)
* [ListObjects(bucketName, objectPrefix, recursive, chan<- struct{}) <-chan ObjectInfo](examples/s3/listobjects.go)
* [ListIncompleteUploads(bucketName, prefix, recursive, chan<- struct{}) <-chan ObjectMultipartInfo](examples/s3/listincompleteuploads.go)
### Object Operations.
* [PutObject(bucketName, objectName, io.Reader, size, contentType) error](examples/s3/putobject.go)
* [GetObject(bucketName, objectName) (io.ReadCloser, ObjectInfo, error)](examples/s3/getobject.go)
* [StatObject(bucketName, objectName) (ObjectInfo, error)](examples/s3/statobject.go)
* [RemoveObject(bucketName, objectName) error](examples/s3/removeobject.go)
* [RemoveIncompleteUpload(bucketName, objectName) <-chan error](examples/s3/removeincompleteupload.go)
### File Object Operations.
* [FPutObject(bucketName, objectName, filePath, contentType) (size, error)](examples/s3/fputobject.go)
* [FGetObject(bucketName, objectName, filePath) error](examples/s3/fgetobject.go)
### Presigned Operations.
* [PresignedGetObject(bucketName, objectName, time.Duration) (string, error)](examples/s3/presignedgetobject.go)
* [PresignedPutObject(bucketName, objectName, time.Duration) (string, error)](examples/s3/presignedputobject.go)
* [PresignedPostPolicy(NewPostPolicy()) (map[string]string, error)](examples/s3/presignedpostpolicy.go)
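A minimal sketch of an upload using the `PutObject()` signature listed above; the endpoint, credentials, bucket, and file names are placeholders.
```go
package main

import (
	"log"
	"os"

	"github.com/minio/minio-go"
)

func main() {
	// New(endpoint, accessKey, secretKey, insecure) as in the ListBuckets example above.
	s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESS-KEY-HERE", "YOUR-SECRET-KEY-HERE", false)
	if err != nil {
		log.Fatalln(err)
	}
	// Open the local file to upload; the name is a placeholder.
	file, err := os.Open("my-file.zip")
	if err != nil {
		log.Fatalln(err)
	}
	defer file.Close()
	st, err := file.Stat()
	if err != nil {
		log.Fatalln(err)
	}
	// PutObject(bucketName, objectName, io.Reader, size, contentType) error
	if err := s3Client.PutObject("mybucket", "my-file.zip", file, st.Size(), "application/zip"); err != nil {
		log.Fatalln(err)
	}
}
```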
### API Reference
[![GoDoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](http://godoc.org/github.com/minio/minio-go)
## Contribute
[Contributors Guide](./CONTRIBUTING.md)
[![Build Status](https://travis-ci.org/minio/minio-go.svg)](https://travis-ci.org/minio/minio-go) [![Build status](https://ci.appveyor.com/api/projects/status/1ep7n2resn6fk1w6?svg=true)](https://ci.appveyor.com/project/harshavardhana/minio-go)

@@ -0,0 +1,76 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package minio
import "time"
// BucketInfo container for bucket metadata.
type BucketInfo struct {
// The name of the bucket.
Name string
// Date the bucket was created.
CreationDate time.Time
}
// ObjectInfo container for object metadata.
type ObjectInfo struct {
// An ETag is optionally set to md5sum of an object. In case of multipart objects,
// ETag is of the form MD5SUM-N where MD5SUM is the md5sum of all individual md5sums of
// each part concatenated into one string.
ETag string
Key string // Name of the object
LastModified time.Time // Date and time the object was last modified.
Size int64 // Size in bytes of the object.
ContentType string // A standard MIME type describing the format of the object data.
// Owner name.
Owner struct {
DisplayName string
ID string
}
// The class of storage used to store the object.
StorageClass string
// Error
Err error
}
// ObjectMultipartInfo container for multipart object metadata.
type ObjectMultipartInfo struct {
// Date and time at which the multipart upload was initiated.
Initiated time.Time `type:"timestamp" timestampFormat:"iso8601"`
Initiator initiator
Owner owner
// The type of storage to use for the object. Defaults to 'STANDARD'.
StorageClass string
// Key of the object for which the multipart upload was initiated.
Key string
// Size in bytes of the object.
Size int64
// Upload ID that identifies the multipart upload.
UploadID string `xml:"UploadId"`
// Error
Err error
}
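// Both structs above carry an Err field so that listing errors can be
// streamed over the result channels. A minimal sketch of consuming
// ObjectInfo values from ListObjects() (assumes an s3Client built with
// minio.New and an existing bucket; names are placeholders):
//
//	doneCh := make(chan struct{})
//	defer close(doneCh) // stops the internal listing goroutine early
//	for object := range s3Client.ListObjects("mybucket", "", true, doneCh) {
//		if object.Err != nil {
//			log.Fatalln(object.Err) // errors arrive as ObjectInfo with Err set
//		}
//		log.Println(object.Key, object.Size)
//	}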

@@ -0,0 +1,249 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package minio
import (
"encoding/xml"
"fmt"
"net/http"
"strconv"
)
/* **** SAMPLE ERROR RESPONSE ****
<?xml version="1.0" encoding="UTF-8"?>
<Error>
<Code>AccessDenied</Code>
<Message>Access Denied</Message>
<BucketName>bucketName</BucketName>
<Key>objectName</Key>
<RequestId>F19772218238A85A</RequestId>
<HostId>GuWkjyviSiGHizehqpmsD1ndz5NClSP19DOT+s2mv7gXGQ8/X1lhbDGiIJEXpGFD</HostId>
</Error>
*/
// ErrorResponse - Is the typed error returned by all API operations.
type ErrorResponse struct {
XMLName xml.Name `xml:"Error" json:"-"`
Code string
Message string
BucketName string
Key string
RequestID string `xml:"RequestId"`
HostID string `xml:"HostId"`
// Region where the bucket is located. This header is returned
// only in HEAD bucket and ListObjects response.
AmzBucketRegion string
}
// ToErrorResponse - Returns parsed ErrorResponse struct from body and
// http headers.
//
// For example:
//
// import s3 "github.com/minio/minio-go"
// ...
// ...
// reader, stat, err := s3.GetObject(...)
// if err != nil {
// resp := s3.ToErrorResponse(err)
// }
// ...
func ToErrorResponse(err error) ErrorResponse {
switch err := err.(type) {
case ErrorResponse:
return err
default:
return ErrorResponse{}
}
}
// Error - Returns HTTP error string
func (e ErrorResponse) Error() string {
return e.Message
}
// Common string for errors to report issue location in unexpected
// cases.
const (
reportIssue = "Please report this issue at https://github.com/minio/minio-go/issues."
)
// HTTPRespToErrorResponse returns a new encoded ErrorResponse
// structure as error.
func HTTPRespToErrorResponse(resp *http.Response, bucketName, objectName string) error {
if resp == nil {
msg := "Response is empty. " + reportIssue
return ErrInvalidArgument(msg)
}
var errResp ErrorResponse
err := xmlDecoder(resp.Body, &errResp)
// Xml decoding failed with no body, fall back to HTTP headers.
if err != nil {
switch resp.StatusCode {
case http.StatusNotFound:
if objectName == "" {
errResp = ErrorResponse{
Code: "NoSuchBucket",
Message: "The specified bucket does not exist.",
BucketName: bucketName,
RequestID: resp.Header.Get("x-amz-request-id"),
HostID: resp.Header.Get("x-amz-id-2"),
AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"),
}
} else {
errResp = ErrorResponse{
Code: "NoSuchKey",
Message: "The specified key does not exist.",
BucketName: bucketName,
Key: objectName,
RequestID: resp.Header.Get("x-amz-request-id"),
HostID: resp.Header.Get("x-amz-id-2"),
AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"),
}
}
case http.StatusForbidden:
errResp = ErrorResponse{
Code: "AccessDenied",
Message: "Access Denied.",
BucketName: bucketName,
Key: objectName,
RequestID: resp.Header.Get("x-amz-request-id"),
HostID: resp.Header.Get("x-amz-id-2"),
AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"),
}
case http.StatusConflict:
errResp = ErrorResponse{
Code: "Conflict",
Message: "Bucket not empty.",
BucketName: bucketName,
RequestID: resp.Header.Get("x-amz-request-id"),
HostID: resp.Header.Get("x-amz-id-2"),
AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"),
}
default:
errResp = ErrorResponse{
Code: resp.Status,
Message: resp.Status,
BucketName: bucketName,
RequestID: resp.Header.Get("x-amz-request-id"),
HostID: resp.Header.Get("x-amz-id-2"),
AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"),
}
}
}
// AccessDenied without a signature mismatch code, usually means
// that the bucket policy has certain restrictions where some API
// operations are not allowed. Handle this case so that top level
// callers can interpret this easily and fall back if needed to a
// lower functionality call. Read each individual API specific
// code for such fallbacks.
if errResp.Code == "AccessDenied" && errResp.Message == "Access Denied" {
errResp.Code = "NotImplemented"
errResp.Message = "Operation is not allowed according to your bucket policy."
}
return errResp
}
// ErrEntityTooLarge - Input size is larger than supported maximum.
func ErrEntityTooLarge(totalSize int64, bucketName, objectName string) error {
msg := fmt.Sprintf("Your proposed upload size %d exceeds the maximum allowed object size '5GiB' for single PUT operation.", totalSize)
return ErrorResponse{
Code: "EntityTooLarge",
Message: msg,
BucketName: bucketName,
Key: objectName,
}
}
// ErrEntityTooSmall - Input size is smaller than supported minimum.
func ErrEntityTooSmall(totalSize int64, bucketName, objectName string) error {
msg := fmt.Sprintf("Your proposed upload size %d is below the minimum allowed object size '0B' for single PUT operation.", totalSize)
return ErrorResponse{
Code: "EntityTooSmall",
Message: msg,
BucketName: bucketName,
Key: objectName,
}
}
// ErrUnexpectedShortRead - Unexpected shorter read of input buffer from
// target.
func ErrUnexpectedShortRead(totalRead, totalSize int64, bucketName, objectName string) error {
msg := fmt.Sprintf("Data read %s is shorter than the size %s of input buffer.",
strconv.FormatInt(totalRead, 10), strconv.FormatInt(totalSize, 10))
return ErrorResponse{
Code: "UnexpectedShortRead",
Message: msg,
BucketName: bucketName,
Key: objectName,
}
}
// ErrUnexpectedEOF - Unexpected end of file reached.
func ErrUnexpectedEOF(totalRead, totalSize int64, bucketName, objectName string) error {
msg := fmt.Sprintf("Data read %s is not equal to the size %s of the input Reader.",
strconv.FormatInt(totalRead, 10), strconv.FormatInt(totalSize, 10))
return ErrorResponse{
Code: "UnexpectedEOF",
Message: msg,
BucketName: bucketName,
Key: objectName,
}
}
// ErrInvalidBucketName - Invalid bucket name response.
func ErrInvalidBucketName(message string) error {
return ErrorResponse{
Code: "InvalidBucketName",
Message: message,
RequestID: "minio",
}
}
// ErrInvalidObjectName - Invalid object name response.
func ErrInvalidObjectName(message string) error {
return ErrorResponse{
Code: "NoSuchKey",
Message: message,
RequestID: "minio",
}
}
// ErrInvalidParts - Invalid number of parts.
func ErrInvalidParts(expectedParts, uploadedParts int) error {
msg := fmt.Sprintf("Unexpected number of parts found Want %d, Got %d", expectedParts, uploadedParts)
return ErrorResponse{
Code: "InvalidParts",
Message: msg,
RequestID: "minio",
}
}
// ErrInvalidObjectPrefix - Invalid object prefix response is
// similar to object name response.
var ErrInvalidObjectPrefix = ErrInvalidObjectName
// ErrInvalidArgument - Invalid argument response.
func ErrInvalidArgument(message string) error {
return ErrorResponse{
Code: "InvalidArgument",
Message: message,
RequestID: "minio",
}
}
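// Since every helper above returns ErrorResponse as a plain error, callers
// can recover the typed form with ToErrorResponse() and branch on Code.
// A minimal sketch (client construction and names are placeholders):
//
//	_, err := s3Client.StatObject("mybucket", "missing-key")
//	if err != nil {
//		errResp := minio.ToErrorResponse(err)
//		switch errResp.Code {
//		case "NoSuchKey":
//			log.Println("object does not exist:", errResp.Key)
//		case "NoSuchBucket":
//			log.Println("bucket does not exist:", errResp.BucketName)
//		default:
//			log.Println("request failed:", errResp.Message)
//		}
//	}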

@@ -0,0 +1,102 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package minio
import (
"io"
"os"
"path/filepath"
)
// FGetObject - download contents of an object to a local file.
func (c Client) FGetObject(bucketName, objectName, filePath string) error {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
return err
}
if err := isValidObjectName(objectName); err != nil {
return err
}
// Verify if destination already exists.
st, err := os.Stat(filePath)
if err == nil {
// If the destination exists and is a directory.
if st.IsDir() {
return ErrInvalidArgument("fileName is a directory.")
}
}
// Proceed if file does not exist. return for all other errors.
if err != nil {
if !os.IsNotExist(err) {
return err
}
}
// Extract top level directory.
objectDir, _ := filepath.Split(filePath)
if objectDir != "" {
// Create any missing top level directories.
if err := os.MkdirAll(objectDir, 0700); err != nil {
return err
}
}
// Gather md5sum.
objectStat, err := c.StatObject(bucketName, objectName)
if err != nil {
return err
}
// Write to a temporary part file "<filePath><ETag>.part.minio" before saving.
filePartPath := filePath + objectStat.ETag + ".part.minio"
// If exists, open in append mode. If not create it as a part file.
filePart, err := os.OpenFile(filePartPath, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0600)
if err != nil {
return err
}
// Issue Stat to get the current offset.
st, err = filePart.Stat()
if err != nil {
return err
}
// Seek to current position for incoming reader.
objectReader, objectStat, err := c.getObject(bucketName, objectName, st.Size(), 0)
if err != nil {
return err
}
// Write to the part file.
if _, err = io.CopyN(filePart, objectReader, objectStat.Size); err != nil {
return err
}
// Close the file before rename, this is specifically needed for Windows users.
filePart.Close()
// Safely completed. Now commit by renaming to actual filename.
if err = os.Rename(filePartPath, filePath); err != nil {
return err
}
// Return.
return nil
}
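// Because the part file is keyed on the object's ETag and opened in append
// mode, an interrupted FGetObject call can simply be re-run and resumes from
// the saved offset. A minimal usage sketch (client and names are placeholders):
//
//	if err := s3Client.FGetObject("mybucket", "my-file.zip", "/tmp/my-file.zip"); err != nil {
//		log.Fatalln(err)
//	}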

@@ -0,0 +1,537 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package minio
import (
"errors"
"fmt"
"io"
"math"
"net/http"
"net/url"
"strings"
"sync"
"time"
)
// GetBucketACL - Get the permissions on an existing bucket.
//
// Returned values are:
//
// private - Owner gets full access.
// public-read - Owner gets full access, others get read access.
// public-read-write - Owner gets full access, others get full access
// too.
// authenticated-read - Owner gets full access, authenticated users
// get read access.
func (c Client) GetBucketACL(bucketName string) (BucketACL, error) {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
return "", err
}
// Set acl query.
urlValues := make(url.Values)
urlValues.Set("acl", "")
// Instantiate a new request.
req, err := c.newRequest("GET", requestMetadata{
bucketName: bucketName,
queryValues: urlValues,
})
if err != nil {
return "", err
}
// Initiate the request.
resp, err := c.do(req)
defer closeResponse(resp)
if err != nil {
return "", err
}
if resp != nil {
if resp.StatusCode != http.StatusOK {
return "", HTTPRespToErrorResponse(resp, bucketName, "")
}
}
// Decode access control policy.
policy := accessControlPolicy{}
err = xmlDecoder(resp.Body, &policy)
if err != nil {
return "", err
}
// We need to avoid the following de-serialization check for Google
// Cloud Storage. On Google Cloud Storage, the policy for "private"
// canned ACLs has no grant list. Treat it as a valid case and check
// for all other vendors.
if !isGoogleEndpoint(c.endpointURL) {
if policy.AccessControlList.Grant == nil {
errorResponse := ErrorResponse{
Code: "InternalError",
Message: "Access control Grant list is empty. " + reportIssue,
BucketName: bucketName,
RequestID: resp.Header.Get("x-amz-request-id"),
HostID: resp.Header.Get("x-amz-id-2"),
AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"),
}
return "", errorResponse
}
}
// Boolean cues to identify the right canned acls.
var publicRead, publicWrite, authenticatedRead bool
// Handle grants.
grants := policy.AccessControlList.Grant
for _, g := range grants {
if g.Grantee.URI == "" && g.Permission == "FULL_CONTROL" {
continue
}
if g.Grantee.URI == "http://acs.amazonaws.com/groups/global/AuthenticatedUsers" && g.Permission == "READ" {
authenticatedRead = true
break
} else if g.Grantee.URI == "http://acs.amazonaws.com/groups/global/AllUsers" && g.Permission == "WRITE" {
publicWrite = true
} else if g.Grantee.URI == "http://acs.amazonaws.com/groups/global/AllUsers" && g.Permission == "READ" {
publicRead = true
}
}
// Verify if acl is authenticated read.
if authenticatedRead {
return BucketACL("authenticated-read"), nil
}
// Verify if acl is private.
if !publicWrite && !publicRead {
return BucketACL("private"), nil
}
// Verify if acl is public-read.
if !publicWrite && publicRead {
return BucketACL("public-read"), nil
}
// Verify if acl is public-read-write.
if publicRead && publicWrite {
return BucketACL("public-read-write"), nil
}
return "", ErrorResponse{
Code: "NoSuchBucketPolicy",
Message: "The specified bucket does not have a bucket policy.",
BucketName: bucketName,
RequestID: "minio",
}
}
// GetObject - returns a seekable, readable object.
func (c Client) GetObject(bucketName, objectName string) (*Object, error) {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
return nil, err
}
if err := isValidObjectName(objectName); err != nil {
return nil, err
}
// Issue an explicit Stat to get the actual object size.
objectInfo, err := c.StatObject(bucketName, objectName)
if err != nil {
return nil, err
}
// Create request channel.
reqCh := make(chan readRequest)
// Create response channel.
resCh := make(chan readResponse)
// Create done channel.
doneCh := make(chan struct{})
// This routine feeds partial object data as and when the caller
// reads.
go func() {
defer close(reqCh)
defer close(resCh)
// Loop through the incoming control messages and read data.
for {
select {
// When the done channel is closed exit our routine.
case <-doneCh:
return
// Request message.
case req := <-reqCh:
// Get shortest length.
// NOTE: Last remaining bytes are usually smaller than
// req.Buffer size. Use that as the final length.
length := math.Min(float64(len(req.Buffer)), float64(objectInfo.Size-req.Offset))
httpReader, _, err := c.getObject(bucketName, objectName, req.Offset, int64(length))
if err != nil {
resCh <- readResponse{
Error: err,
}
return
}
size, err := io.ReadFull(httpReader, req.Buffer)
if err == io.ErrUnexpectedEOF {
// If an EOF happens after reading some but not
// all the bytes ReadFull returns ErrUnexpectedEOF
err = io.EOF
}
resCh <- readResponse{
Size: int(size),
Error: err,
}
}
}
}()
// Return the readerAt backed by routine.
return newObject(reqCh, resCh, doneCh, objectInfo), nil
}
// Read response message container to reply back for the request.
type readResponse struct {
Size int
Error error
}
// Read request message container to communicate with internal
// go-routine.
type readRequest struct {
Buffer []byte
Offset int64 // readAt offset.
}
// Object represents an open object. It implements Read, ReadAt,
// Seeker, Close for a HTTP stream.
type Object struct {
// Mutex.
mutex *sync.Mutex
// User allocated and defined.
reqCh chan<- readRequest
resCh <-chan readResponse
doneCh chan<- struct{}
currOffset int64
objectInfo ObjectInfo
// Keeps track of closed call.
isClosed bool
// Previous error saved for future calls.
prevErr error
}
// Read reads up to len(p) bytes into p. It returns the number of
// bytes read (0 <= n <= len(p)) and any error encountered. Returns
// io.EOF upon end of file.
func (o *Object) Read(b []byte) (n int, err error) {
if o == nil {
return 0, ErrInvalidArgument("Object is nil")
}
// Locking.
o.mutex.Lock()
defer o.mutex.Unlock()
// prevErr is the error which was saved in a previous operation.
if o.prevErr != nil || o.isClosed {
return 0, o.prevErr
}
// If current offset has reached Size limit, return EOF.
if o.currOffset >= o.objectInfo.Size {
return 0, io.EOF
}
// Send current information over control channel to indicate we
// are ready.
reqMsg := readRequest{}
// Send the offset and pointer to the buffer over the channel.
reqMsg.Buffer = b
reqMsg.Offset = o.currOffset
// Send read request over the control channel.
o.reqCh <- reqMsg
// Get data over the response channel.
dataMsg := <-o.resCh
// Bytes read.
bytesRead := int64(dataMsg.Size)
// Update current offset.
o.currOffset += bytesRead
if dataMsg.Error == nil {
// If currOffset read is equal to objectSize
// We have reached end of file, we return io.EOF.
if o.currOffset >= o.objectInfo.Size {
return dataMsg.Size, io.EOF
}
return dataMsg.Size, nil
}
// Save any error.
o.prevErr = dataMsg.Error
return dataMsg.Size, dataMsg.Error
}
// Stat returns the ObjectInfo structure describing object.
func (o *Object) Stat() (ObjectInfo, error) {
if o == nil {
return ObjectInfo{}, ErrInvalidArgument("Object is nil")
}
// Locking.
o.mutex.Lock()
defer o.mutex.Unlock()
if o.prevErr != nil || o.isClosed {
return ObjectInfo{}, o.prevErr
}
return o.objectInfo, nil
}
// ReadAt reads len(b) bytes from the File starting at byte offset
// off. It returns the number of bytes read and the error, if any.
// ReadAt always returns a non-nil error when n < len(b). At end of
// file, that error is io.EOF.
func (o *Object) ReadAt(b []byte, offset int64) (n int, err error) {
if o == nil {
return 0, ErrInvalidArgument("Object is nil")
}
// Locking.
o.mutex.Lock()
defer o.mutex.Unlock()
// prevErr is the error which was saved in a previous operation.
if o.prevErr != nil || o.isClosed {
return 0, o.prevErr
}
// If offset is negative or greater than or equal to the
// object size, we return EOF.
if offset < 0 || offset >= o.objectInfo.Size {
return 0, io.EOF
}
// Send current information over control channel to indicate we
// are ready.
reqMsg := readRequest{}
// Send the offset and pointer to the buffer over the channel.
reqMsg.Buffer = b
reqMsg.Offset = offset
// Send read request over the control channel.
o.reqCh <- reqMsg
// Get data over the response channel.
dataMsg := <-o.resCh
// Bytes read.
bytesRead := int64(dataMsg.Size)
if dataMsg.Error == nil {
// If offset+bytes read is equal to objectSize
// we have reached end of file, we return io.EOF.
if offset+bytesRead == o.objectInfo.Size {
return dataMsg.Size, io.EOF
}
return dataMsg.Size, nil
}
// Save any error.
o.prevErr = dataMsg.Error
return dataMsg.Size, dataMsg.Error
}
// Seek sets the offset for the next Read or Write to offset,
// interpreted according to whence: 0 means relative to the
// origin of the file, 1 means relative to the current offset,
// and 2 means relative to the end.
// Seek returns the new offset and an error, if any.
//
// Seeking to a negative offset is an error. Seeking to any positive
// offset is legal; subsequent io operations succeed as long as the
// underlying object is not closed.
func (o *Object) Seek(offset int64, whence int) (n int64, err error) {
if o == nil {
return 0, ErrInvalidArgument("Object is nil")
}
// Locking.
o.mutex.Lock()
defer o.mutex.Unlock()
if o.prevErr != nil {
// At EOF seeking is legal, for any other errors we return.
if o.prevErr != io.EOF {
return 0, o.prevErr
}
}
// Negative offset is valid for whence of '2'.
if offset < 0 && whence != 2 {
return 0, ErrInvalidArgument(fmt.Sprintf("Negative position not allowed for %d.", whence))
}
switch whence {
default:
return 0, ErrInvalidArgument(fmt.Sprintf("Invalid whence %d", whence))
case 0:
if offset > o.objectInfo.Size {
return 0, io.EOF
}
o.currOffset = offset
case 1:
if o.currOffset+offset > o.objectInfo.Size {
return 0, io.EOF
}
o.currOffset += offset
case 2:
// Seeking to positive offset is valid for whence '2', but
// since we are backing a Reader we have reached 'EOF' if
// offset is positive.
if offset > 0 {
return 0, io.EOF
}
// Seeking to negative position not allowed for whence.
if o.objectInfo.Size+offset < 0 {
return 0, ErrInvalidArgument(fmt.Sprintf("Seeking at negative offset not allowed for %d", whence))
}
o.currOffset += offset
}
// Return the effective offset.
return o.currOffset, nil
}
// Close - Closes the object; any Close() call after the first
// returns an error.
func (o *Object) Close() (err error) {
if o == nil {
return ErrInvalidArgument("Object is nil")
}
// Locking.
o.mutex.Lock()
defer o.mutex.Unlock()
// if already closed return an error.
if o.isClosed {
return o.prevErr
}
// Close successfully.
close(o.doneCh)
// Save for future operations.
errMsg := "Object is already closed. Bad file descriptor."
o.prevErr = errors.New(errMsg)
// Save here that we closed done channel successfully.
o.isClosed = true
return nil
}
// newObject instantiates a new *minio.Object*
func newObject(reqCh chan<- readRequest, resCh <-chan readResponse, doneCh chan<- struct{}, objectInfo ObjectInfo) *Object {
return &Object{
mutex: &sync.Mutex{},
reqCh: reqCh,
resCh: resCh,
doneCh: doneCh,
objectInfo: objectInfo,
}
}
// getObject - retrieve object from Object Storage.
//
// Additionally this function also takes range arguments to download the specified
// range bytes of an object. Setting offset and length = 0 will download the full object.
//
// For more information about the HTTP Range header.
// go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35.
func (c Client) getObject(bucketName, objectName string, offset, length int64) (io.ReadCloser, ObjectInfo, error) {
// Validate input arguments.
if err := isValidBucketName(bucketName); err != nil {
return nil, ObjectInfo{}, err
}
if err := isValidObjectName(objectName); err != nil {
return nil, ObjectInfo{}, err
}
customHeader := make(http.Header)
// Set ranges if length and offset are valid.
if length > 0 && offset >= 0 {
customHeader.Set("Range", fmt.Sprintf("bytes=%d-%d", offset, offset+length-1))
} else if offset > 0 && length == 0 {
customHeader.Set("Range", fmt.Sprintf("bytes=%d-", offset))
} else if length < 0 && offset == 0 {
customHeader.Set("Range", fmt.Sprintf("bytes=%d", length))
}
// Instantiate a new request.
req, err := c.newRequest("GET", requestMetadata{
bucketName: bucketName,
objectName: objectName,
customHeader: customHeader,
})
if err != nil {
return nil, ObjectInfo{}, err
}
// Execute the request.
resp, err := c.do(req)
if err != nil {
return nil, ObjectInfo{}, err
}
if resp != nil {
if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusPartialContent {
return nil, ObjectInfo{}, HTTPRespToErrorResponse(resp, bucketName, objectName)
}
}
// Trim off the odd double quotes from ETag in the beginning and end.
md5sum := strings.TrimPrefix(resp.Header.Get("ETag"), "\"")
md5sum = strings.TrimSuffix(md5sum, "\"")
// Parse the date.
date, err := time.Parse(http.TimeFormat, resp.Header.Get("Last-Modified"))
if err != nil {
msg := "Last-Modified time format not recognized. " + reportIssue
return nil, ObjectInfo{}, ErrorResponse{
Code: "InternalError",
Message: msg,
RequestID: resp.Header.Get("x-amz-request-id"),
HostID: resp.Header.Get("x-amz-id-2"),
AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"),
}
}
// Get content-type.
contentType := strings.TrimSpace(resp.Header.Get("Content-Type"))
if contentType == "" {
contentType = "application/octet-stream"
}
var objectStat ObjectInfo
objectStat.ETag = md5sum
objectStat.Key = objectName
objectStat.Size = resp.ContentLength
objectStat.LastModified = date
objectStat.ContentType = contentType
// do not close body here, caller will close
return resp.Body, objectStat, nil
}
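// GetObject returns an *Object whose Read, ReadAt and Seek calls are
// translated into ranged getObject requests by the feeder goroutine above,
// so random access never buffers the whole object. A minimal sketch
// (client and names are placeholders):
//
//	object, err := s3Client.GetObject("mybucket", "my-file.bin")
//	if err != nil {
//		log.Fatalln(err)
//	}
//	defer object.Close()
//	// Skip a 1 KiB header; whence 0 seeks relative to the origin.
//	if _, err := object.Seek(1024, 0); err != nil {
//		log.Fatalln(err)
//	}
//	localFile, err := os.Create("/tmp/my-file.bin")
//	if err != nil {
//		log.Fatalln(err)
//	}
//	defer localFile.Close()
//	if _, err := io.Copy(localFile, object); err != nil {
//		log.Fatalln(err)
//	}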

@@ -0,0 +1,533 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package minio
import (
"fmt"
"net/http"
"net/url"
"strings"
)
// ListBuckets lists all buckets owned by this authenticated user.
//
// This call requires explicit authentication, no anonymous requests are
// allowed for listing buckets.
//
// api := client.New(....)
// for message := range api.ListBuckets() {
// fmt.Println(message)
// }
//
func (c Client) ListBuckets() ([]BucketInfo, error) {
// Instantiate a new request.
req, err := c.newRequest("GET", requestMetadata{})
if err != nil {
return nil, err
}
// Initiate the request.
resp, err := c.do(req)
defer closeResponse(resp)
if err != nil {
return nil, err
}
if resp != nil {
if resp.StatusCode != http.StatusOK {
return nil, HTTPRespToErrorResponse(resp, "", "")
}
}
listAllMyBucketsResult := listAllMyBucketsResult{}
err = xmlDecoder(resp.Body, &listAllMyBucketsResult)
if err != nil {
return nil, err
}
return listAllMyBucketsResult.Buckets.Bucket, nil
}
// ListObjects - (List Objects) - List some objects or all recursively.
//
// ListObjects lists all objects matching the objectPrefix from
// the specified bucket. If recursion is enabled it would list
// all subdirectories and all its contents.
//
// Your input parameters are just bucketName, objectPrefix, recursive
// and a done channel for pro-actively closing the internal go
// routine. If you enable recursive as 'true' this function will
// return back all the objects in a given bucket name and object
// prefix.
//
// api := client.New(....)
// // Create a done channel.
// doneCh := make(chan struct{})
// defer close(doneCh)
// // Recursively list all objects in 'mytestbucket'
// recursive := true
// for message := range api.ListObjects("mytestbucket", "starthere", recursive, doneCh) {
// fmt.Println(message)
// }
//
func (c Client) ListObjects(bucketName, objectPrefix string, recursive bool, doneCh <-chan struct{}) <-chan ObjectInfo {
// Allocate new list objects channel.
objectStatCh := make(chan ObjectInfo, 1000)
// Default listing is delimited at "/"
delimiter := "/"
if recursive {
// If recursive we do not delimit.
delimiter = ""
}
// Validate bucket name.
if err := isValidBucketName(bucketName); err != nil {
defer close(objectStatCh)
objectStatCh <- ObjectInfo{
Err: err,
}
return objectStatCh
}
// Validate incoming object prefix.
if err := isValidObjectPrefix(objectPrefix); err != nil {
defer close(objectStatCh)
objectStatCh <- ObjectInfo{
Err: err,
}
return objectStatCh
}
// Initiate list objects goroutine here.
go func(objectStatCh chan<- ObjectInfo) {
defer close(objectStatCh)
// Save marker for next request.
var marker string
for {
// Get list of objects a maximum of 1000 per request.
result, err := c.listObjectsQuery(bucketName, objectPrefix, marker, delimiter, 1000)
if err != nil {
objectStatCh <- ObjectInfo{
Err: err,
}
return
}
// If contents are available loop through and send over channel.
for _, object := range result.Contents {
// Save the marker.
marker = object.Key
select {
// Send object content.
case objectStatCh <- object:
// If receives done from the caller, return here.
case <-doneCh:
return
}
}
// Send all common prefixes if any.
// NOTE: prefixes are only present if the request is delimited.
for _, obj := range result.CommonPrefixes {
object := ObjectInfo{}
object.Key = obj.Prefix
object.Size = 0
select {
// Send object prefixes.
case objectStatCh <- object:
// If receives done from the caller, return here.
case <-doneCh:
return
}
}
// If next marker present, save it for next request.
if result.NextMarker != "" {
marker = result.NextMarker
}
// Listing ends when result is not truncated; return right here.
if !result.IsTruncated {
return
}
}
}(objectStatCh)
return objectStatCh
}
/// Bucket Read Operations.
// listObjectsQuery - (List Objects) - List some or all (up to 1000) of the objects in a bucket.
//
// You can use the request parameters as selection criteria to return a subset of the objects in a bucket.
// request parameters :-
// ---------
// ?marker - Specifies the key to start with when listing objects in a bucket.
// ?delimiter - A delimiter is a character you use to group keys.
// ?prefix - Limits the response to keys that begin with the specified prefix.
// ?max-keys - Sets the maximum number of keys returned in the response body.
func (c Client) listObjectsQuery(bucketName, objectPrefix, objectMarker, delimiter string, maxkeys int) (listBucketResult, error) {
// Validate bucket name.
if err := isValidBucketName(bucketName); err != nil {
return listBucketResult{}, err
}
// Validate object prefix.
if err := isValidObjectPrefix(objectPrefix); err != nil {
return listBucketResult{}, err
}
// Get resources properly escaped and lined up before
// using them in http request.
urlValues := make(url.Values)
// Set object prefix.
if objectPrefix != "" {
urlValues.Set("prefix", urlEncodePath(objectPrefix))
}
// Set object marker.
if objectMarker != "" {
urlValues.Set("marker", urlEncodePath(objectMarker))
}
// Set delimiter.
if delimiter != "" {
urlValues.Set("delimiter", delimiter)
}
// maxkeys should default to 1000 or less.
if maxkeys == 0 || maxkeys > 1000 {
maxkeys = 1000
}
// Set max keys.
urlValues.Set("max-keys", fmt.Sprintf("%d", maxkeys))
// Initialize a new request.
req, err := c.newRequest("GET", requestMetadata{
bucketName: bucketName,
queryValues: urlValues,
})
if err != nil {
return listBucketResult{}, err
}
// Execute list buckets.
resp, err := c.do(req)
defer closeResponse(resp)
if err != nil {
return listBucketResult{}, err
}
if resp != nil {
if resp.StatusCode != http.StatusOK {
return listBucketResult{}, HTTPRespToErrorResponse(resp, bucketName, "")
}
}
// Decode listBuckets XML.
listBucketResult := listBucketResult{}
err = xmlDecoder(resp.Body, &listBucketResult)
if err != nil {
return listBucketResult, err
}
return listBucketResult, nil
}
// ListIncompleteUploads - List incompletely uploaded multipart objects.
//
// ListIncompleteUploads lists all incomplete objects matching the
// objectPrefix from the specified bucket. If recursion is enabled
// it would list all subdirectories and all its contents.
//
// Your input parameters are just bucketName, objectPrefix, recursive
// and a done channel to proactively close the internal go routine.
// If you enable recursive as 'true' this function will return back all
// the multipart objects in a given bucket name.
//
// api := client.New(....)
// // Create a done channel.
// doneCh := make(chan struct{})
// defer close(doneCh)
// // Recursively list all incomplete uploads in 'mytestbucket'
// recursive := true
// for message := range api.ListIncompleteUploads("mytestbucket", "starthere", recursive, doneCh) {
// fmt.Println(message)
// }
//
func (c Client) ListIncompleteUploads(bucketName, objectPrefix string, recursive bool, doneCh <-chan struct{}) <-chan ObjectMultipartInfo {
// Turn on size aggregation of individual parts.
isAggregateSize := true
return c.listIncompleteUploads(bucketName, objectPrefix, recursive, isAggregateSize, doneCh)
}
// listIncompleteUploads lists all incomplete uploads.
func (c Client) listIncompleteUploads(bucketName, objectPrefix string, recursive, aggregateSize bool, doneCh <-chan struct{}) <-chan ObjectMultipartInfo {
// Allocate channel for multipart uploads.
objectMultipartStatCh := make(chan ObjectMultipartInfo, 1000)
// Delimiter is set to "/" by default.
delimiter := "/"
if recursive {
// If recursive do not delimit.
delimiter = ""
}
// Validate bucket name.
if err := isValidBucketName(bucketName); err != nil {
defer close(objectMultipartStatCh)
objectMultipartStatCh <- ObjectMultipartInfo{
Err: err,
}
return objectMultipartStatCh
}
// Validate incoming object prefix.
if err := isValidObjectPrefix(objectPrefix); err != nil {
defer close(objectMultipartStatCh)
objectMultipartStatCh <- ObjectMultipartInfo{
Err: err,
}
return objectMultipartStatCh
}
go func(objectMultipartStatCh chan<- ObjectMultipartInfo) {
defer close(objectMultipartStatCh)
// object and upload ID marker for future requests.
var objectMarker string
var uploadIDMarker string
for {
// list all multipart uploads.
result, err := c.listMultipartUploadsQuery(bucketName, objectMarker, uploadIDMarker, objectPrefix, delimiter, 1000)
if err != nil {
objectMultipartStatCh <- ObjectMultipartInfo{
Err: err,
}
return
}
// Save objectMarker and uploadIDMarker for next request.
objectMarker = result.NextKeyMarker
uploadIDMarker = result.NextUploadIDMarker
// Send all multipart uploads.
for _, obj := range result.Uploads {
// Calculate total size of the uploaded parts if 'aggregateSize' is enabled.
if aggregateSize {
// Get total multipart size.
obj.Size, err = c.getTotalMultipartSize(bucketName, obj.Key, obj.UploadID)
if err != nil {
objectMultipartStatCh <- ObjectMultipartInfo{
Err: err,
}
}
}
select {
// Send individual uploads here.
case objectMultipartStatCh <- obj:
// If done channel return here.
case <-doneCh:
return
}
}
// Send all common prefixes if any.
// NOTE: prefixes are only present if the request is delimited.
for _, obj := range result.CommonPrefixes {
object := ObjectMultipartInfo{}
object.Key = obj.Prefix
object.Size = 0
select {
// Send delimited prefixes here.
case objectMultipartStatCh <- object:
// If done channel return here.
case <-doneCh:
return
}
}
// Listing ends if result is not truncated; return right here.
if !result.IsTruncated {
return
}
}
}(objectMultipartStatCh)
// return.
return objectMultipartStatCh
}
// listMultipartUploadsQuery - (List Multipart Uploads).
// - Lists some or all (up to 1000) in-progress multipart uploads in a bucket.
//
// You can use the request parameters as selection criteria to return a subset of the uploads in a bucket.
// request parameters :-
// ---------
// ?key-marker - Specifies the multipart upload after which listing should begin.
// ?upload-id-marker - Together with key-marker specifies the multipart upload after which listing should begin.
// ?delimiter - A delimiter is a character you use to group keys.
// ?prefix - Limits the response to keys that begin with the specified prefix.
// ?max-uploads - Sets the maximum number of multipart uploads returned in the response body.
func (c Client) listMultipartUploadsQuery(bucketName, keyMarker, uploadIDMarker, prefix, delimiter string, maxUploads int) (listMultipartUploadsResult, error) {
// Get resources properly escaped and lined up before using them in http request.
urlValues := make(url.Values)
// Set uploads.
urlValues.Set("uploads", "")
// Set object key marker.
if keyMarker != "" {
urlValues.Set("key-marker", urlEncodePath(keyMarker))
}
// Set upload id marker.
if uploadIDMarker != "" {
urlValues.Set("upload-id-marker", uploadIDMarker)
}
// Set prefix marker.
if prefix != "" {
urlValues.Set("prefix", urlEncodePath(prefix))
}
// Set delimiter.
if delimiter != "" {
urlValues.Set("delimiter", delimiter)
}
// maxUploads should be 1000 or less.
if maxUploads == 0 || maxUploads > 1000 {
maxUploads = 1000
}
// Set max-uploads.
urlValues.Set("max-uploads", fmt.Sprintf("%d", maxUploads))
// Instantiate a new request.
req, err := c.newRequest("GET", requestMetadata{
bucketName: bucketName,
queryValues: urlValues,
})
if err != nil {
return listMultipartUploadsResult{}, err
}
// Execute list multipart uploads request.
resp, err := c.do(req)
defer closeResponse(resp)
if err != nil {
return listMultipartUploadsResult{}, err
}
if resp != nil {
if resp.StatusCode != http.StatusOK {
return listMultipartUploadsResult{}, HTTPRespToErrorResponse(resp, bucketName, "")
}
}
// Decode response body.
listMultipartUploadsResult := listMultipartUploadsResult{}
err = xmlDecoder(resp.Body, &listMultipartUploadsResult)
if err != nil {
return listMultipartUploadsResult, err
}
return listMultipartUploadsResult, nil
}
// listObjectParts lists all object parts, paging through the results.
func (c Client) listObjectParts(bucketName, objectName, uploadID string) (partsInfo map[int]objectPart, err error) {
// Part number marker for the next batch of request.
var nextPartNumberMarker int
partsInfo = make(map[int]objectPart)
for {
// Get list of uploaded parts a maximum of 1000 per request.
listObjPartsResult, err := c.listObjectPartsQuery(bucketName, objectName, uploadID, nextPartNumberMarker, 1000)
if err != nil {
return nil, err
}
// Append to parts info.
for _, part := range listObjPartsResult.ObjectParts {
// Trim off the odd double quotes from ETag in the beginning and end.
part.ETag = strings.TrimPrefix(part.ETag, "\"")
part.ETag = strings.TrimSuffix(part.ETag, "\"")
partsInfo[part.PartNumber] = part
}
// Keep part number marker, for the next iteration.
nextPartNumberMarker = listObjPartsResult.NextPartNumberMarker
// Listing ends when the result is not truncated, break right here.
if !listObjPartsResult.IsTruncated {
break
}
}
// Return all the parts.
return partsInfo, nil
}
// findUploadID lists all incomplete uploads and finds the uploadID of the matching object name.
func (c Client) findUploadID(bucketName, objectName string) (string, error) {
// Make list incomplete uploads recursive.
isRecursive := true
// Turn off size aggregation of individual parts, in this request.
isAggregateSize := false
// NOTE: doneCh is set to nil, so the listing goroutine runs until exhaustion.
for mpUpload := range c.listIncompleteUploads(bucketName, objectName, isRecursive, isAggregateSize, nil) {
if mpUpload.Err != nil {
return "", mpUpload.Err
}
// if object name found, return the upload id.
if objectName == mpUpload.Key {
return mpUpload.UploadID, nil
}
}
// No upload id was found, return success and empty upload id.
return "", nil
}
// getTotalMultipartSize - calculates the total uploaded size for a given multipart object.
func (c Client) getTotalMultipartSize(bucketName, objectName, uploadID string) (size int64, err error) {
// Iterate over all parts and aggregate the size.
partsInfo, err := c.listObjectParts(bucketName, objectName, uploadID)
if err != nil {
return 0, err
}
for _, partInfo := range partsInfo {
size += partInfo.Size
}
return size, nil
}
// listObjectPartsQuery (List Parts query)
// - lists some or all (up to 1000) parts that have been uploaded
// for a specific multipart upload
//
// You can use the request parameters as selection criteria to return
// a subset of the uploads in a bucket, request parameters :-
// ---------
// ?part-number-marker - Specifies the part after which listing should
// begin.
// ?max-parts - Maximum parts to be listed per request.
func (c Client) listObjectPartsQuery(bucketName, objectName, uploadID string, partNumberMarker, maxParts int) (listObjectPartsResult, error) {
// Get resources properly escaped and lined up before using them in http request.
urlValues := make(url.Values)
// Set part number marker.
urlValues.Set("part-number-marker", fmt.Sprintf("%d", partNumberMarker))
// Set upload id.
urlValues.Set("uploadId", uploadID)
// maxParts should be 1000 or less.
if maxParts == 0 || maxParts > 1000 {
maxParts = 1000
}
// Set max parts.
urlValues.Set("max-parts", fmt.Sprintf("%d", maxParts))
req, err := c.newRequest("GET", requestMetadata{
bucketName: bucketName,
objectName: objectName,
queryValues: urlValues,
})
if err != nil {
return listObjectPartsResult{}, err
}
// Execute list object parts request.
resp, err := c.do(req)
defer closeResponse(resp)
if err != nil {
return listObjectPartsResult{}, err
}
if resp != nil {
if resp.StatusCode != http.StatusOK {
return listObjectPartsResult{}, HTTPRespToErrorResponse(resp, bucketName, objectName)
}
}
// Decode list object parts XML.
listObjectPartsResult := listObjectPartsResult{}
err = xmlDecoder(resp.Body, &listObjectPartsResult)
if err != nil {
return listObjectPartsResult, err
}
return listObjectPartsResult, nil
}

View File

@ -0,0 +1,148 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package minio
import (
"errors"
"time"
)
// PresignedGetObject - Returns a presigned URL to access an object without credentials.
// Expires maximum is 7 days - i.e. 604800 seconds - and minimum is 1 second.
func (c Client) PresignedGetObject(bucketName, objectName string, expires time.Duration) (string, error) {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
return "", err
}
if err := isValidObjectName(objectName); err != nil {
return "", err
}
if err := isValidExpiry(expires); err != nil {
return "", err
}
expireSeconds := int64(expires / time.Second)
// Instantiate a new request.
// Since expires is set newRequest will presign the request.
req, err := c.newRequest("GET", requestMetadata{
presignURL: true,
bucketName: bucketName,
objectName: objectName,
expires: expireSeconds,
})
if err != nil {
return "", err
}
return req.URL.String(), nil
}
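// A minimal usage sketch for PresignedGetObject; 'client.New(....)' follows the
// placeholder convention used above, and the bucket/object names are illustrative:
//
//  api := client.New(....)
//  // Generate a presigned GET URL valid for one hour.
//  presignedURL, err := api.PresignedGetObject("mytestbucket", "myobject", time.Hour)
//  if err != nil {
//      fmt.Println(err)
//      return
//  }
//  fmt.Println(presignedURL)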
// PresignedPutObject - Returns a presigned URL to upload an object without credentials.
// Expires maximum is 7 days - i.e. 604800 seconds - and minimum is 1 second.
func (c Client) PresignedPutObject(bucketName, objectName string, expires time.Duration) (string, error) {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
return "", err
}
if err := isValidObjectName(objectName); err != nil {
return "", err
}
if err := isValidExpiry(expires); err != nil {
return "", err
}
expireSeconds := int64(expires / time.Second)
// Instantiate a new request.
// Since expires is set newRequest will presign the request.
req, err := c.newRequest("PUT", requestMetadata{
presignURL: true,
bucketName: bucketName,
objectName: objectName,
expires: expireSeconds,
})
if err != nil {
return "", err
}
return req.URL.String(), nil
}
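// A minimal usage sketch for PresignedPutObject; client setup and names are
// illustrative placeholders. Any HTTP client may PUT to the returned URL
// without credentials until it expires:
//
//  api := client.New(....)
//  presignedURL, err := api.PresignedPutObject("mytestbucket", "myobject", 24*time.Hour)
//  if err != nil {
//      fmt.Println(err)
//      return
//  }
//  fmt.Println(presignedURL)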
// PresignedPostPolicy - Returns POST form data to upload an object at a location.
func (c Client) PresignedPostPolicy(p *PostPolicy) (map[string]string, error) {
// Validate input arguments.
if p.expiration.IsZero() {
return nil, errors.New("Expiration time must be specified")
}
if _, ok := p.formData["key"]; !ok {
return nil, errors.New("object key must be specified")
}
if _, ok := p.formData["bucket"]; !ok {
return nil, errors.New("bucket name must be specified")
}
bucketName := p.formData["bucket"]
// Fetch the bucket location.
location, err := c.getBucketLocation(bucketName)
if err != nil {
return nil, err
}
// Keep time.
t := time.Now().UTC()
// For signature version '2' handle here.
if c.signature.isV2() {
policyBase64 := p.base64()
p.formData["policy"] = policyBase64
// For Google endpoint set this value to be 'GoogleAccessId'.
if isGoogleEndpoint(c.endpointURL) {
p.formData["GoogleAccessId"] = c.accessKeyID
} else {
// For all other endpoints set this value to be 'AWSAccessKeyId'.
p.formData["AWSAccessKeyId"] = c.accessKeyID
}
// Sign the policy.
p.formData["signature"] = PostPresignSignatureV2(policyBase64, c.secretAccessKey)
return p.formData, nil
}
// Add date policy.
p.addNewPolicy(policyCondition{
matchType: "eq",
condition: "$x-amz-date",
value: t.Format(iso8601DateFormat),
})
// Add algorithm policy.
p.addNewPolicy(policyCondition{
matchType: "eq",
condition: "$x-amz-algorithm",
value: signV4Algorithm,
})
// Add a credential policy.
credential := getCredential(c.accessKeyID, location, t)
p.addNewPolicy(policyCondition{
matchType: "eq",
condition: "$x-amz-credential",
value: credential,
})
// Get base64 encoded policy.
policyBase64 := p.base64()
// Fill in the form data.
p.formData["policy"] = policyBase64
p.formData["x-amz-algorithm"] = signV4Algorithm
p.formData["x-amz-credential"] = credential
p.formData["x-amz-date"] = t.Format(iso8601DateFormat)
p.formData["x-amz-signature"] = PostPresignSignatureV4(policyBase64, t, c.secretAccessKey, location)
return p.formData, nil
}
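// A minimal usage sketch for PresignedPostPolicy, assuming the package's
// NewPostPolicy constructor and its Set* helpers; names are illustrative:
//
//  policy := minio.NewPostPolicy()
//  policy.SetBucket("mytestbucket")
//  policy.SetKey("myobject")
//  policy.SetExpires(time.Now().UTC().AddDate(0, 0, 7)) // expires in 7 days
//  formData, err := api.PresignedPostPolicy(policy)
//  if err != nil {
//      fmt.Println(err)
//      return
//  }
//  // formData now carries the fields for a browser POST form upload.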

View File

@ -0,0 +1,219 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package minio
import (
"bytes"
"encoding/hex"
"encoding/xml"
"io/ioutil"
"net/http"
"net/url"
)
/// Bucket operations
// MakeBucket makes a new bucket.
//
// Optional arguments are acl and location - by default all buckets are created
// with ``private`` acl and in US Standard region.
//
// ACL valid values - http://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html
//
// private - owner gets full access [default].
// public-read - owner gets full access, all others get read access.
// public-read-write - owner gets full access, all others get full access too.
// authenticated-read - owner gets full access, authenticated users get read access.
//
// For more supported regions on Amazon S3 - http://docs.aws.amazon.com/general/latest/gr/rande.html
// For more supported regions on Google Cloud Storage - https://cloud.google.com/storage/docs/bucket-locations
func (c Client) MakeBucket(bucketName string, acl BucketACL, location string) error {
// Validate if request is made on anonymous requests.
if c.anonymous {
return ErrInvalidArgument("Make bucket cannot be issued with anonymous credentials.")
}
// Validate the input arguments.
if err := isValidBucketName(bucketName); err != nil {
return err
}
if !acl.isValidBucketACL() {
return ErrInvalidArgument("Unrecognized ACL " + acl.String())
}
// If location is empty, treat it as the default region 'us-east-1'.
if location == "" {
location = "us-east-1"
}
// Instantiate the request.
req, err := c.makeBucketRequest(bucketName, acl, location)
if err != nil {
return err
}
// Execute the request.
resp, err := c.do(req)
defer closeResponse(resp)
if err != nil {
return err
}
if resp != nil {
if resp.StatusCode != http.StatusOK {
return HTTPRespToErrorResponse(resp, bucketName, "")
}
}
// Save the location into cache on a successful makeBucket response.
c.bucketLocCache.Set(bucketName, location)
// Return.
return nil
}
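// A minimal usage sketch for MakeBucket; client setup is a placeholder and
// the ACL/region values are illustrative:
//
//  api := client.New(....)
//  // Create a private bucket in the default region.
//  err := api.MakeBucket("mytestbucket", minio.BucketACL("private"), "us-east-1")
//  if err != nil {
//      fmt.Println(err)
//      return
//  }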
// makeBucketRequest constructs request for makeBucket.
func (c Client) makeBucketRequest(bucketName string, acl BucketACL, location string) (*http.Request, error) {
// Validate input arguments.
if err := isValidBucketName(bucketName); err != nil {
return nil, err
}
if !acl.isValidBucketACL() {
return nil, ErrInvalidArgument("Unrecognized ACL " + acl.String())
}
// Construct the target URL; use virtual host style when the endpoint supports it, otherwise fall back to path style.
targetURL := *c.endpointURL
if bucketName != "" {
// If endpoint supports virtual host style use that always.
// Currently only S3 and Google Cloud Storage would support this.
if isVirtualHostSupported(c.endpointURL) {
targetURL.Host = bucketName + "." + c.endpointURL.Host
targetURL.Path = "/"
} else {
// If not fall back to using path style.
targetURL.Path = "/" + bucketName
}
}
// get a new HTTP request for the method.
req, err := http.NewRequest("PUT", targetURL.String(), nil)
if err != nil {
return nil, err
}
// by default bucket acl is set to private.
req.Header.Set("x-amz-acl", "private")
if acl != "" {
req.Header.Set("x-amz-acl", string(acl))
}
// set UserAgent for the request.
c.setUserAgent(req)
// set sha256 sum for signature calculation only with signature version '4'.
if c.signature.isV4() {
req.Header.Set("X-Amz-Content-Sha256", hex.EncodeToString(sum256([]byte{})))
}
// If location is not 'us-east-1' create bucket location config.
if location != "us-east-1" && location != "" {
createBucketConfig := createBucketConfiguration{}
createBucketConfig.Location = location
var createBucketConfigBytes []byte
createBucketConfigBytes, err = xml.Marshal(createBucketConfig)
if err != nil {
return nil, err
}
createBucketConfigBuffer := bytes.NewBuffer(createBucketConfigBytes)
req.Body = ioutil.NopCloser(createBucketConfigBuffer)
req.ContentLength = int64(createBucketConfigBuffer.Len())
if c.signature.isV4() {
req.Header.Set("X-Amz-Content-Sha256", hex.EncodeToString(sum256(createBucketConfigBuffer.Bytes())))
}
}
// Sign the request.
if c.signature.isV4() {
// Signature calculated for MakeBucket request should be for 'us-east-1',
// regardless of the bucket's location constraint.
req = SignV4(*req, c.accessKeyID, c.secretAccessKey, "us-east-1")
} else if c.signature.isV2() {
req = SignV2(*req, c.accessKeyID, c.secretAccessKey)
}
// Return signed request.
return req, nil
}
// SetBucketACL sets the permissions on an existing bucket using access control lists (ACL).
//
// For example
//
// private - owner gets full access [default].
// public-read - owner gets full access, all others get read access.
// public-read-write - owner gets full access, all others get full access too.
// authenticated-read - owner gets full access, authenticated users get read access.
func (c Client) SetBucketACL(bucketName string, acl BucketACL) error {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
return err
}
if !acl.isValidBucketACL() {
return ErrInvalidArgument("Unrecognized ACL " + acl.String())
}
// Set acl query.
urlValues := make(url.Values)
urlValues.Set("acl", "")
// Add misc headers.
customHeader := make(http.Header)
if acl != "" {
customHeader.Set("x-amz-acl", acl.String())
} else {
customHeader.Set("x-amz-acl", "private")
}
// Instantiate a new request.
req, err := c.newRequest("PUT", requestMetadata{
bucketName: bucketName,
queryValues: urlValues,
customHeader: customHeader,
})
if err != nil {
return err
}
// Initiate the request.
resp, err := c.do(req)
defer closeResponse(resp)
if err != nil {
return err
}
if resp != nil {
// if error return.
if resp.StatusCode != http.StatusOK {
return HTTPRespToErrorResponse(resp, bucketName, "")
}
}
// return
return nil
}
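// A minimal usage sketch for SetBucketACL; client setup is a placeholder:
//
//  api := client.New(....)
//  // Grant read access to everyone on 'mytestbucket'.
//  err := api.SetBucketACL("mytestbucket", minio.BucketACL("public-read"))
//  if err != nil {
//      fmt.Println(err)
//      return
//  }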

View File

@ -0,0 +1,167 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package minio
import (
"crypto/md5"
"crypto/sha256"
"hash"
"io"
"os"
)
// Verify if reader is *os.File
func isFile(reader io.Reader) (ok bool) {
_, ok = reader.(*os.File)
return
}
// Verify if reader is *minio.Object
func isObject(reader io.Reader) (ok bool) {
_, ok = reader.(*Object)
return
}
// Verify if reader is a generic ReaderAt
func isReadAt(reader io.Reader) (ok bool) {
_, ok = reader.(io.ReaderAt)
return
}
// hashCopyN - Calculates MD5 and SHA256 sums for up to partSize bytes.
func (c Client) hashCopyN(writer io.ReadWriteSeeker, reader io.Reader, partSize int64) (md5Sum, sha256Sum []byte, size int64, err error) {
// MD5 and SHA256 hasher.
var hashMD5, hashSHA256 hash.Hash
hashMD5 = md5.New()
hashWriter := io.MultiWriter(writer, hashMD5)
if c.signature.isV4() {
hashSHA256 = sha256.New()
hashWriter = io.MultiWriter(writer, hashMD5, hashSHA256)
}
// Copy up to partSize bytes from reader into the hashing writer.
size, err = io.CopyN(hashWriter, reader, partSize)
if err != nil {
// If not EOF return error right here.
if err != io.EOF {
return nil, nil, 0, err
}
}
// Seek back to beginning of input, any error fail right here.
if _, err := writer.Seek(0, 0); err != nil {
return nil, nil, 0, err
}
// Finalize md5sum and sha256 sum.
md5Sum = hashMD5.Sum(nil)
if c.signature.isV4() {
sha256Sum = hashSHA256.Sum(nil)
}
return md5Sum, sha256Sum, size, err
}
// getUploadID - fetch the upload id if one is already present for an object name,
// or initiate a new multipart request to fetch a new upload id.
func (c Client) getUploadID(bucketName, objectName, contentType string) (uploadID string, isNew bool, err error) {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
return "", false, err
}
if err := isValidObjectName(objectName); err != nil {
return "", false, err
}
// Set content Type to default if empty string.
if contentType == "" {
contentType = "application/octet-stream"
}
// Find upload id for previous upload for an object.
uploadID, err = c.findUploadID(bucketName, objectName)
if err != nil {
return "", false, err
}
if uploadID == "" {
// Initiate multipart upload for an object.
initMultipartUploadResult, err := c.initiateMultipartUpload(bucketName, objectName, contentType)
if err != nil {
return "", false, err
}
// Save the new upload id.
uploadID = initMultipartUploadResult.UploadID
// Indicate that this is a new upload id.
isNew = true
}
return uploadID, isNew, nil
}
// computeHash - Calculates MD5 and SHA256 sums for an input ReadSeeker.
func (c Client) computeHash(reader io.ReadSeeker) (md5Sum, sha256Sum []byte, size int64, err error) {
// MD5 and SHA256 hasher.
var hashMD5, hashSHA256 hash.Hash
hashMD5 = md5.New()
hashWriter := io.MultiWriter(hashMD5)
if c.signature.isV4() {
hashSHA256 = sha256.New()
hashWriter = io.MultiWriter(hashMD5, hashSHA256)
}
size, err = io.Copy(hashWriter, reader)
if err != nil {
return nil, nil, 0, err
}
// Seek back reader to the beginning location.
if _, err := reader.Seek(0, 0); err != nil {
return nil, nil, 0, err
}
// Finalize md5sum and sha256 sum.
md5Sum = hashMD5.Sum(nil)
if c.signature.isV4() {
sha256Sum = hashSHA256.Sum(nil)
}
return md5Sum, sha256Sum, size, nil
}
// Fetch all parts info, including total uploaded size, maximum part
// size and max part number.
func (c Client) getPartsInfo(bucketName, objectName, uploadID string) (prtsInfo map[int]objectPart, totalSize int64, maxPrtSize int64, maxPrtNumber int, err error) {
// Fetch previously upload parts.
prtsInfo, err = c.listObjectParts(bucketName, objectName, uploadID)
if err != nil {
return nil, 0, 0, 0, err
}
// Peek through all the parts and calculate totalSize, maximum
// part size and last part number.
for _, prtInfo := range prtsInfo {
// Save previously uploaded size.
totalSize += prtInfo.Size
// Choose the maximum part size.
if prtInfo.Size >= maxPrtSize {
maxPrtSize = prtInfo.Size
}
// Choose the maximum part number.
if maxPrtNumber < prtInfo.PartNumber {
maxPrtNumber = prtInfo.PartNumber
}
}
return prtsInfo, totalSize, maxPrtSize, maxPrtNumber, nil
}

View File

@ -0,0 +1,227 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package minio
import (
"encoding/hex"
"fmt"
"io"
"io/ioutil"
"os"
"sort"
)
// FPutObject - Create an object in a bucket, with contents from file at filePath.
func (c Client) FPutObject(bucketName, objectName, filePath, contentType string) (n int64, err error) {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
return 0, err
}
if err := isValidObjectName(objectName); err != nil {
return 0, err
}
// Open the referenced file.
fileReader, err := os.Open(filePath)
// If any error fail quickly here.
if err != nil {
return 0, err
}
defer fileReader.Close()
// Save the file stat.
fileStat, err := fileReader.Stat()
if err != nil {
return 0, err
}
// Save the file size.
fileSize := fileStat.Size()
// Check for largest object size allowed.
if fileSize > int64(maxMultipartPutObjectSize) {
return 0, ErrEntityTooLarge(fileSize, bucketName, objectName)
}
// NOTE: Google Cloud Storage multipart Put is not compatible with Amazon S3 APIs.
// Current implementation will only upload a maximum of 5GiB to Google Cloud Storage servers.
if isGoogleEndpoint(c.endpointURL) {
if fileSize > int64(maxSinglePutObjectSize) {
return 0, ErrorResponse{
Code: "NotImplemented",
Message: fmt.Sprintf("Invalid Content-Length %d for file uploads to Google Cloud Storage.", fileSize),
Key: objectName,
BucketName: bucketName,
}
}
// Do not compute MD5 for Google Cloud Storage. Uploads up to 5GiB in size.
return c.putObjectNoChecksum(bucketName, objectName, fileReader, fileSize, contentType)
}
// NOTE: S3 doesn't allow anonymous multipart requests.
if isAmazonEndpoint(c.endpointURL) && c.anonymous {
if fileSize > int64(maxSinglePutObjectSize) {
return 0, ErrorResponse{
Code: "NotImplemented",
Message: fmt.Sprintf("For anonymous requests Content-Length cannot be %d.", fileSize),
Key: objectName,
BucketName: bucketName,
}
}
// Do not compute MD5 for anonymous requests to Amazon S3. Uploads up to 5GiB in size.
return c.putObjectNoChecksum(bucketName, objectName, fileReader, fileSize, contentType)
}
// Small object upload is used when the input data size is smaller than 5MiB.
if fileSize < minimumPartSize {
return c.putObjectSingle(bucketName, objectName, fileReader, fileSize, contentType)
}
// Upload all large objects as multipart.
n, err = c.putObjectMultipartFromFile(bucketName, objectName, fileReader, fileSize, contentType)
if err != nil {
errResp := ToErrorResponse(err)
// Verify if multipart functionality is not available, if not
// fall back to single PutObject operation.
if errResp.Code == "NotImplemented" {
// If size of file is greater than '5GiB' fail.
if fileSize > maxSinglePutObjectSize {
return 0, ErrEntityTooLarge(fileSize, bucketName, objectName)
}
// Fall back to uploading as single PutObject operation.
return c.putObjectSingle(bucketName, objectName, fileReader, fileSize, contentType)
}
return n, err
}
return n, nil
}
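// A minimal usage sketch for FPutObject; client setup and the file path are
// illustrative placeholders:
//
//  api := client.New(....)
//  n, err := api.FPutObject("mytestbucket", "myobject", "/tmp/myfile", "application/octet-stream")
//  if err != nil {
//      fmt.Println(err)
//      return
//  }
//  fmt.Println("uploaded", n, "bytes")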
// putObjectMultipartFromFile - Creates object from contents of *os.File
//
// NOTE: This function is meant to be used for readers backed by a local
// file, as in *os.File. It resumes an interrupted upload by skipping
// parts which were already uploaded, verifying them against the
// MD5 sum of each individual part. It also effectively utilizes the
// file system's capability of reading from specific sections, avoiding
// the need to create temporary files.
func (c Client) putObjectMultipartFromFile(bucketName, objectName string, fileReader *os.File, fileSize int64, contentType string) (int64, error) {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
return 0, err
}
if err := isValidObjectName(objectName); err != nil {
return 0, err
}
// Get upload id for an object, initiates a new multipart request
// if it cannot find any previously partially uploaded object.
uploadID, isNew, err := c.getUploadID(bucketName, objectName, contentType)
if err != nil {
return 0, err
}
// Total data read and written to the server; should be equal to 'size' at the end of the call.
var totalUploadedSize int64
// Complete multipart upload.
var completeMultipartUpload completeMultipartUpload
// Previous maximum part size
var prevMaxPartSize int64
// A map of all uploaded parts.
var partsInfo = make(map[int]objectPart)
// If this session is a continuation of a previous session fetch all
// previously uploaded parts info.
if !isNew {
// Fetch previously uploaded parts and maximum part size.
partsInfo, _, prevMaxPartSize, _, err = c.getPartsInfo(bucketName, objectName, uploadID)
if err != nil {
return 0, err
}
}
// Calculate the optimal part size for a given file size.
partSize := optimalPartSize(fileSize)
// Use prevMaxPartSize if available.
if prevMaxPartSize != 0 {
partSize = prevMaxPartSize
}
// Part numbers are 1-based; the counter starts at '0' and is incremented before each upload.
partNumber := 0
// Upload each part until fileSize.
for totalUploadedSize < fileSize {
// Increment part number.
partNumber++
// Get a section reader on a particular offset.
sectionReader := io.NewSectionReader(fileReader, totalUploadedSize, partSize)
// Calculates MD5 and SHA256 sum for a section reader.
md5Sum, sha256Sum, size, err := c.computeHash(sectionReader)
if err != nil {
return 0, err
}
// Verify if part was not uploaded.
if !isPartUploaded(objectPart{
ETag: hex.EncodeToString(md5Sum),
PartNumber: partNumber,
}, partsInfo) {
// Proceed to upload the part.
objPart, err := c.uploadPart(bucketName, objectName, uploadID, ioutil.NopCloser(sectionReader), partNumber, md5Sum, sha256Sum, size)
if err != nil {
return totalUploadedSize, err
}
// Save successfully uploaded part metadata.
partsInfo[partNumber] = objPart
}
// Save successfully uploaded size.
totalUploadedSize += size
}
// Verify if we uploaded all data.
if totalUploadedSize != fileSize {
return totalUploadedSize, ErrUnexpectedEOF(totalUploadedSize, fileSize, bucketName, objectName)
}
// Loop over uploaded parts to save them in a Parts array before completing the multipart request.
for _, part := range partsInfo {
var complPart completePart
complPart.ETag = part.ETag
complPart.PartNumber = part.PartNumber
completeMultipartUpload.Parts = append(completeMultipartUpload.Parts, complPart)
}
// Verify if partNumber is different than total list of parts.
if partNumber != len(completeMultipartUpload.Parts) {
return totalUploadedSize, ErrInvalidParts(partNumber, len(completeMultipartUpload.Parts))
}
// Sort all completed parts.
sort.Sort(completedParts(completeMultipartUpload.Parts))
_, err = c.completeMultipartUpload(bucketName, objectName, uploadID, completeMultipartUpload)
if err != nil {
return totalUploadedSize, err
}
// Return final size.
return totalUploadedSize, nil
}

View File

@ -0,0 +1,368 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package minio
import (
"bytes"
"encoding/hex"
"encoding/xml"
"io"
"io/ioutil"
"net/http"
"net/url"
"os"
"sort"
"strconv"
"strings"
)
// Comprehensive put object operation involving multipart resumable uploads.
//
// Following code handles these types of readers.
//
// - *os.File
// - *minio.Object
// - Any reader which has a method 'ReadAt()'
//
// If we exhaust all the known types, the code proceeds to use the stream
// as-is, where each part is staged locally, checksummed and verified
// before upload.
func (c Client) putObjectMultipart(bucketName, objectName string, reader io.Reader, size int64, contentType string) (n int64, err error) {
if size > 0 && size >= minimumPartSize {
// Verify if reader is *os.File, then use file system functionalities.
if isFile(reader) {
return c.putObjectMultipartFromFile(bucketName, objectName, reader.(*os.File), size, contentType)
}
// Verify if reader is *minio.Object or io.ReaderAt.
// NOTE: Verification of object is kept for a specific purpose
// while it is going to be duck typed similar to io.ReaderAt.
// It is to indicate that *minio.Object implements io.ReaderAt.
// and such a functionality is used in the subsequent code
// path.
if isObject(reader) || isReadAt(reader) {
return c.putObjectMultipartFromReadAt(bucketName, objectName, reader.(io.ReaderAt), size, contentType)
}
}
// For any other data size and reader type we do generic multipart
// approach by staging data in temporary files and uploading them.
return c.putObjectMultipartStream(bucketName, objectName, reader, size, contentType)
}
// putObjectMultipartStream uploads files bigger than 5MiB, and also supports
// the special case where the size is unknown, i.e. '-1'.
func (c Client) putObjectMultipartStream(bucketName, objectName string, reader io.Reader, size int64, contentType string) (n int64, err error) {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
return 0, err
}
if err := isValidObjectName(objectName); err != nil {
return 0, err
}
// getUploadID for an object, initiates a new multipart request
// if it cannot find any previously partially uploaded object.
uploadID, isNew, err := c.getUploadID(bucketName, objectName, contentType)
if err != nil {
return 0, err
}
// Total data read and written to the server; should be equal to 'size' at the end of the call.
var totalUploadedSize int64
// Complete multipart upload.
var completeMultipartUpload completeMultipartUpload
// Previous maximum part size
var prevMaxPartSize int64
// A map of all previously uploaded parts.
var partsInfo = make(map[int]objectPart)
// If this session is a continuation of a previous session, fetch all
// previously uploaded parts info.
if !isNew {
// Fetch previously uploaded parts and maximum part size.
partsInfo, _, prevMaxPartSize, _, err = c.getPartsInfo(bucketName, objectName, uploadID)
if err != nil {
return 0, err
}
}
// Calculate the optimal part size for a given size.
partSize := optimalPartSize(size)
// Use prevMaxPartSize if available.
if prevMaxPartSize != 0 {
partSize = prevMaxPartSize
}
// Part numbers are 1-based; the counter starts at '0' and is incremented before each upload.
partNumber := 0
// Upload each part until EOF.
for {
// Increment part number.
partNumber++
// Initialize a new temporary file.
tmpFile, err := newTempFile("multiparts$-putobject-stream")
if err != nil {
return 0, err
}
// Calculates MD5 and SHA256 sum while copying partSize bytes into tmpFile.
md5Sum, sha256Sum, size, rErr := c.hashCopyN(tmpFile, reader, partSize)
if rErr != nil {
if rErr != io.EOF {
return 0, rErr
}
}
// Verify if part was not uploaded.
if !isPartUploaded(objectPart{
ETag: hex.EncodeToString(md5Sum),
PartNumber: partNumber,
}, partsInfo) {
// Proceed to upload the part.
objPart, err := c.uploadPart(bucketName, objectName, uploadID, tmpFile, partNumber, md5Sum, sha256Sum, size)
if err != nil {
// Close the temporary file upon any error.
tmpFile.Close()
return 0, err
}
// Save successfully uploaded part metadata.
partsInfo[partNumber] = objPart
}
// Close the temporary file.
tmpFile.Close()
// Save successfully uploaded size.
totalUploadedSize += size
// If read error was an EOF, break out of the loop.
if rErr == io.EOF {
break
}
}
// Verify if we uploaded all the data.
if size > 0 {
if totalUploadedSize != size {
return totalUploadedSize, ErrUnexpectedEOF(totalUploadedSize, size, bucketName, objectName)
}
}
// Loop over uploaded parts to save them in a Parts array before completing the multipart request.
for _, part := range partsInfo {
var complPart completePart
complPart.ETag = part.ETag
complPart.PartNumber = part.PartNumber
completeMultipartUpload.Parts = append(completeMultipartUpload.Parts, complPart)
}
// Verify if partNumber is different than total list of parts.
if partNumber != len(completeMultipartUpload.Parts) {
return totalUploadedSize, ErrInvalidParts(partNumber, len(completeMultipartUpload.Parts))
}
// Sort all completed parts.
sort.Sort(completedParts(completeMultipartUpload.Parts))
_, err = c.completeMultipartUpload(bucketName, objectName, uploadID, completeMultipartUpload)
if err != nil {
return totalUploadedSize, err
}
// Return final size.
return totalUploadedSize, nil
}
// initiateMultipartUpload - Initiates a multipart upload and returns an upload ID.
func (c Client) initiateMultipartUpload(bucketName, objectName, contentType string) (initiateMultipartUploadResult, error) {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
return initiateMultipartUploadResult{}, err
}
if err := isValidObjectName(objectName); err != nil {
return initiateMultipartUploadResult{}, err
}
// Initialize url queries.
urlValues := make(url.Values)
urlValues.Set("uploads", "")
if contentType == "" {
contentType = "application/octet-stream"
}
// Set ContentType header.
customHeader := make(http.Header)
customHeader.Set("Content-Type", contentType)
reqMetadata := requestMetadata{
bucketName: bucketName,
objectName: objectName,
queryValues: urlValues,
customHeader: customHeader,
}
// Instantiate the request.
req, err := c.newRequest("POST", reqMetadata)
if err != nil {
return initiateMultipartUploadResult{}, err
}
// Execute the request.
resp, err := c.do(req)
defer closeResponse(resp)
if err != nil {
return initiateMultipartUploadResult{}, err
}
if resp != nil {
if resp.StatusCode != http.StatusOK {
return initiateMultipartUploadResult{}, HTTPRespToErrorResponse(resp, bucketName, objectName)
}
}
// Decode xml for new multipart upload.
initiateMultipartUploadResult := initiateMultipartUploadResult{}
err = xmlDecoder(resp.Body, &initiateMultipartUploadResult)
if err != nil {
return initiateMultipartUploadResult, err
}
return initiateMultipartUploadResult, nil
}
// uploadPart - Uploads a part in a multipart upload.
func (c Client) uploadPart(bucketName, objectName, uploadID string, reader io.ReadCloser, partNumber int, md5Sum, sha256Sum []byte, size int64) (objectPart, error) {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
return objectPart{}, err
}
if err := isValidObjectName(objectName); err != nil {
return objectPart{}, err
}
if size > maxPartSize {
return objectPart{}, ErrEntityTooLarge(size, bucketName, objectName)
}
if size <= -1 {
return objectPart{}, ErrEntityTooSmall(size, bucketName, objectName)
}
if partNumber <= 0 {
return objectPart{}, ErrInvalidArgument("Part number cannot be negative or equal to zero.")
}
if uploadID == "" {
return objectPart{}, ErrInvalidArgument("UploadID cannot be empty.")
}
// Get resources properly escaped and lined up before using them in http request.
urlValues := make(url.Values)
// Set part number.
urlValues.Set("partNumber", strconv.Itoa(partNumber))
// Set upload id.
urlValues.Set("uploadId", uploadID)
reqMetadata := requestMetadata{
bucketName: bucketName,
objectName: objectName,
queryValues: urlValues,
contentBody: reader,
contentLength: size,
contentMD5Bytes: md5Sum,
contentSHA256Bytes: sha256Sum,
}
// Instantiate a request.
req, err := c.newRequest("PUT", reqMetadata)
if err != nil {
return objectPart{}, err
}
// Execute the request.
resp, err := c.do(req)
defer closeResponse(resp)
if err != nil {
return objectPart{}, err
}
if resp != nil {
if resp.StatusCode != http.StatusOK {
return objectPart{}, HTTPRespToErrorResponse(resp, bucketName, objectName)
}
}
// Once successfully uploaded, return completed part.
objPart := objectPart{}
objPart.Size = size
objPart.PartNumber = partNumber
// Trim off the odd double quotes from ETag in the beginning and end.
objPart.ETag = strings.TrimPrefix(resp.Header.Get("ETag"), "\"")
objPart.ETag = strings.TrimSuffix(objPart.ETag, "\"")
return objPart, nil
}
// completeMultipartUpload - Completes a multipart upload by assembling previously uploaded parts.
func (c Client) completeMultipartUpload(bucketName, objectName, uploadID string, complete completeMultipartUpload) (completeMultipartUploadResult, error) {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
return completeMultipartUploadResult{}, err
}
if err := isValidObjectName(objectName); err != nil {
return completeMultipartUploadResult{}, err
}
// Initialize url queries.
urlValues := make(url.Values)
urlValues.Set("uploadId", uploadID)
// Marshal complete multipart body.
completeMultipartUploadBytes, err := xml.Marshal(complete)
if err != nil {
return completeMultipartUploadResult{}, err
}
// Instantiate all the complete multipart buffer.
completeMultipartUploadBuffer := bytes.NewBuffer(completeMultipartUploadBytes)
reqMetadata := requestMetadata{
bucketName: bucketName,
objectName: objectName,
queryValues: urlValues,
contentBody: ioutil.NopCloser(completeMultipartUploadBuffer),
contentLength: int64(completeMultipartUploadBuffer.Len()),
contentSHA256Bytes: sum256(completeMultipartUploadBuffer.Bytes()),
}
// Instantiate the request.
req, err := c.newRequest("POST", reqMetadata)
if err != nil {
return completeMultipartUploadResult{}, err
}
// Execute the request.
resp, err := c.do(req)
defer closeResponse(resp)
if err != nil {
return completeMultipartUploadResult{}, err
}
if resp != nil {
if resp.StatusCode != http.StatusOK {
return completeMultipartUploadResult{}, HTTPRespToErrorResponse(resp, bucketName, objectName)
}
}
// Decode completed multipart upload response on success.
completeMultipartUploadResult := completeMultipartUploadResult{}
err = xmlDecoder(resp.Body, &completeMultipartUploadResult)
if err != nil {
return completeMultipartUploadResult, err
}
return completeMultipartUploadResult, nil
}

View File

@ -0,0 +1,192 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package minio
import (
"crypto/md5"
"crypto/sha256"
"errors"
"hash"
"io"
"sort"
)
// putObjectMultipartFromReadAt - Uploads files bigger than 5MiB. Supports reader
// of type which implements io.ReaderAt interface (ReadAt method).
//
// NOTE: This function is meant to be used for all readers which
// implement io.ReaderAt, which allows resuming multipart
// uploads by reading at an offset, avoiding re-reading the
// data which was already uploaded. Internally this function uses
// temporary files for staging all the data; these temporary files are
// cleaned up automatically when the caller, i.e. the http client, closes the
// stream after uploading all the contents successfully.
func (c Client) putObjectMultipartFromReadAt(bucketName, objectName string, reader io.ReaderAt, size int64, contentType string) (n int64, err error) {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
return 0, err
}
if err := isValidObjectName(objectName); err != nil {
return 0, err
}
// Get upload id for an object, initiates a new multipart request
// if it cannot find any previously partially uploaded object.
uploadID, isNew, err := c.getUploadID(bucketName, objectName, contentType)
if err != nil {
return 0, err
}
// Total data read and written to the server; should be equal to 'size' at the end of the call.
var totalUploadedSize int64
// Complete multipart upload.
var completeMultipartUpload completeMultipartUpload
// Previous maximum part size
var prevMaxPartSize int64
// Previous part number.
var prevPartNumber int
// A map of all uploaded parts.
var partsInfo = make(map[int]objectPart)
// Fetch all parts info previously uploaded.
if !isNew {
partsInfo, totalUploadedSize, prevMaxPartSize, prevPartNumber, err = c.getPartsInfo(bucketName, objectName, uploadID)
if err != nil {
return 0, err
}
}
// Calculate the optimal part size for a given file size.
partSize := optimalPartSize(size)
// If prevMaxPartSize is set use that.
if prevMaxPartSize != 0 {
partSize = prevMaxPartSize
}
// MD5 and SHA256 hasher.
var hashMD5, hashSHA256 hash.Hash
// Part number always starts at prevPartNumber + 1, i.e. the next part number.
partNumber := prevPartNumber + 1
// Upload each part until totalUploadedSize reaches input reader size.
for totalUploadedSize < size {
// Initialize a new temporary file.
tmpFile, err := newTempFile("multiparts$-putobject-partial")
if err != nil {
return 0, err
}
// Create a hash multiwriter.
hashMD5 = md5.New()
hashWriter := io.MultiWriter(hashMD5)
if c.signature.isV4() {
hashSHA256 = sha256.New()
hashWriter = io.MultiWriter(hashMD5, hashSHA256)
}
writer := io.MultiWriter(tmpFile, hashWriter)
// Choose totalUploadedSize as the current readAtOffset.
readAtOffset := totalUploadedSize
// Read until partSize.
var totalReadPartSize int64
// ReadAt defaults to reading with a 5MiB buffer.
readAtBuffer := make([]byte, optimalReadAtBufferSize)
// The following block reads data at an offset from the input
// reader and copies it into a local temporary file.
// Temporary file data is limited to partSize.
for totalReadPartSize < partSize {
readAtSize, rerr := reader.ReadAt(readAtBuffer, readAtOffset)
if rerr != nil {
if rerr != io.EOF {
return 0, rerr
}
}
writeSize, werr := writer.Write(readAtBuffer[:readAtSize])
if werr != nil {
return 0, werr
}
if readAtSize != writeSize {
return 0, errors.New("Something really bad happened here. " + reportIssue)
}
readAtOffset += int64(writeSize)
totalReadPartSize += int64(writeSize)
if rerr == io.EOF {
break
}
}
// Seek back to beginning of the temporary file.
if _, err := tmpFile.Seek(0, 0); err != nil {
return 0, err
}
var md5Sum, sha256Sum []byte
md5Sum = hashMD5.Sum(nil)
// Signature version '4'.
if c.signature.isV4() {
sha256Sum = hashSHA256.Sum(nil)
}
// Proceed to upload the part.
objPart, err := c.uploadPart(bucketName, objectName, uploadID, tmpFile, partNumber, md5Sum, sha256Sum, totalReadPartSize)
if err != nil {
// Close the read closer.
tmpFile.Close()
return totalUploadedSize, err
}
// Save successfully uploaded size.
totalUploadedSize += totalReadPartSize
// Save successfully uploaded part metadata.
partsInfo[partNumber] = objPart
// Move to next part.
partNumber++
}
// Verify if we uploaded all the data.
if totalUploadedSize != size {
return totalUploadedSize, ErrUnexpectedEOF(totalUploadedSize, size, bucketName, objectName)
}
// Loop over uploaded parts to save them in a Parts array before completing the multipart request.
for _, part := range partsInfo {
var complPart completePart
complPart.ETag = part.ETag
complPart.PartNumber = part.PartNumber
completeMultipartUpload.Parts = append(completeMultipartUpload.Parts, complPart)
}
// Sort all completed parts.
sort.Sort(completedParts(completeMultipartUpload.Parts))
_, err = c.completeMultipartUpload(bucketName, objectName, uploadID, completeMultipartUpload)
if err != nil {
return totalUploadedSize, err
}
// Return final size.
return totalUploadedSize, nil
}

View File

@ -0,0 +1,284 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package minio
import (
"bytes"
"io"
"io/ioutil"
"net/http"
"os"
"strings"
)
// getReaderSize gets the size of the underlying reader, if possible.
func getReaderSize(reader io.Reader) (size int64, err error) {
size = -1
if reader != nil {
switch v := reader.(type) {
case *bytes.Buffer:
size = int64(v.Len())
case *bytes.Reader:
size = int64(v.Len())
case *strings.Reader:
size = int64(v.Len())
case *os.File:
var st os.FileInfo
st, err = v.Stat()
if err != nil {
return 0, err
}
size = st.Size()
case *Object:
var st ObjectInfo
st, err = v.Stat()
if err != nil {
return 0, err
}
size = st.Size
}
}
return size, nil
}
// completedParts is a collection of parts sortable by their part numbers,
// used for sorting the uploaded parts before completing the multipart request.
type completedParts []completePart
func (a completedParts) Len() int { return len(a) }
func (a completedParts) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a completedParts) Less(i, j int) bool { return a[i].PartNumber < a[j].PartNumber }
// PutObject creates an object in a bucket.
//
// You must have WRITE permissions on a bucket to create an object.
//
// - For size smaller than 5MiB PutObject automatically does a single atomic Put operation.
// - For size larger than 5MiB PutObject automatically does a resumable multipart Put operation.
// - For size input as -1 PutObject does a multipart Put operation until input stream reaches EOF.
// Maximum object size that can be uploaded through this operation will be 5TiB.
//
// NOTE: Google Cloud Storage does not implement Amazon S3 Compatible multipart PUT.
// So we fall back to single PUT operation with the maximum limit of 5GiB.
//
// NOTE: For anonymous requests Amazon S3 doesn't allow multipart upload. So we fall back to single PUT operation.
func (c Client) PutObject(bucketName, objectName string, reader io.Reader, contentType string) (n int64, err error) {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
return 0, err
}
if err := isValidObjectName(objectName); err != nil {
return 0, err
}
// get reader size.
size, err := getReaderSize(reader)
if err != nil {
return 0, err
}
// Check for largest object size allowed.
if size > int64(maxMultipartPutObjectSize) {
return 0, ErrEntityTooLarge(size, bucketName, objectName)
}
// NOTE: Google Cloud Storage does not implement Amazon S3 Compatible multipart PUT.
// So we fall back to single PUT operation with the maximum limit of 5GiB.
if isGoogleEndpoint(c.endpointURL) {
if size <= -1 {
return 0, ErrorResponse{
Code: "NotImplemented",
Message: "Content-Length cannot be negative for file uploads to Google Cloud Storage.",
Key: objectName,
BucketName: bucketName,
}
}
if size > maxSinglePutObjectSize {
return 0, ErrEntityTooLarge(size, bucketName, objectName)
}
// Do not compute MD5 for Google Cloud Storage. Uploads up to 5GiB in size.
return c.putObjectNoChecksum(bucketName, objectName, reader, size, contentType)
}
// NOTE: S3 doesn't allow anonymous multipart requests.
if isAmazonEndpoint(c.endpointURL) && c.anonymous {
if size <= -1 {
return 0, ErrorResponse{
Code: "NotImplemented",
Message: "Content-Length cannot be negative for anonymous requests.",
Key: objectName,
BucketName: bucketName,
}
}
if size > maxSinglePutObjectSize {
return 0, ErrEntityTooLarge(size, bucketName, objectName)
}
// Do not compute MD5 for anonymous requests to Amazon S3. Uploads up to 5GiB in size.
return c.putObjectNoChecksum(bucketName, objectName, reader, size, contentType)
}
// Put small objects using a single PUT operation.
if size < minimumPartSize && size > 0 {
return c.putObjectSingle(bucketName, objectName, reader, size, contentType)
}
// For all sizes greater than 5MiB do multipart.
n, err = c.putObjectMultipart(bucketName, objectName, reader, size, contentType)
if err != nil {
errResp := ToErrorResponse(err)
// Verify if multipart functionality is not available, if not
// fall back to single PutObject operation.
if errResp.Code == "NotImplemented" {
// Verify if size of reader is greater than '5GiB'.
if size > maxSinglePutObjectSize {
return 0, ErrEntityTooLarge(size, bucketName, objectName)
}
// Fall back to uploading as single PutObject operation.
return c.putObjectSingle(bucketName, objectName, reader, size, contentType)
}
return n, err
}
return n, nil
}
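// A minimal usage sketch for PutObject; client setup and the file path are
// illustrative placeholders:
//
//  api := client.New(....)
//  file, err := os.Open("/tmp/myfile")
//  if err != nil {
//      fmt.Println(err)
//      return
//  }
//  defer file.Close()
//  n, err := api.PutObject("mytestbucket", "myobject", file, "application/octet-stream")
//  if err != nil {
//      fmt.Println(err)
//      return
//  }
//  fmt.Println("uploaded", n, "bytes")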
// putObjectNoChecksum - special function used for Google Cloud Storage,
// since Google's multipart API is not S3 compatible.
func (c Client) putObjectNoChecksum(bucketName, objectName string, reader io.Reader, size int64, contentType string) (n int64, err error) {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
return 0, err
}
if err := isValidObjectName(objectName); err != nil {
return 0, err
}
if size > maxSinglePutObjectSize {
return 0, ErrEntityTooLarge(size, bucketName, objectName)
}
// This function does not calculate sha256 and md5sum for payload.
// Execute put object.
st, err := c.putObjectDo(bucketName, objectName, ioutil.NopCloser(reader), nil, nil, size, contentType)
if err != nil {
return 0, err
}
if st.Size != size {
return 0, ErrUnexpectedEOF(st.Size, size, bucketName, objectName)
}
return size, nil
}
// putObjectSingle is a special function for uploading a single PutObject request.
// This special function is used as a fallback when multipart upload fails.
func (c Client) putObjectSingle(bucketName, objectName string, reader io.Reader, size int64, contentType string) (n int64, err error) {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
return 0, err
}
if err := isValidObjectName(objectName); err != nil {
return 0, err
}
if size > maxSinglePutObjectSize {
return 0, ErrEntityTooLarge(size, bucketName, objectName)
}
// If the size is unknown, i.e. a stream, upload up to 5GiB.
if size <= -1 {
size = maxSinglePutObjectSize
}
// Initialize a new temporary file.
tmpFile, err := newTempFile("single$-putobject-single")
if err != nil {
return 0, err
}
md5Sum, sha256Sum, size, err := c.hashCopyN(tmpFile, reader, size)
if err != nil {
if err != io.EOF {
return 0, err
}
}
// Execute put object.
st, err := c.putObjectDo(bucketName, objectName, tmpFile, md5Sum, sha256Sum, size, contentType)
if err != nil {
return 0, err
}
if st.Size != size {
return 0, ErrUnexpectedEOF(st.Size, size, bucketName, objectName)
}
return size, nil
}
// putObjectDo - executes the put object http operation.
// NOTE: You must have WRITE permissions on a bucket to add an object to it.
func (c Client) putObjectDo(bucketName, objectName string, reader io.ReadCloser, md5Sum []byte, sha256Sum []byte, size int64, contentType string) (ObjectInfo, error) {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
return ObjectInfo{}, err
}
if err := isValidObjectName(objectName); err != nil {
return ObjectInfo{}, err
}
if size <= -1 {
return ObjectInfo{}, ErrEntityTooSmall(size, bucketName, objectName)
}
if size > maxSinglePutObjectSize {
return ObjectInfo{}, ErrEntityTooLarge(size, bucketName, objectName)
}
if strings.TrimSpace(contentType) == "" {
contentType = "application/octet-stream"
}
// Set headers.
customHeader := make(http.Header)
customHeader.Set("Content-Type", contentType)
// Populate request metadata.
reqMetadata := requestMetadata{
bucketName: bucketName,
objectName: objectName,
customHeader: customHeader,
contentBody: reader,
contentLength: size,
contentMD5Bytes: md5Sum,
contentSHA256Bytes: sha256Sum,
}
// Initiate new request.
req, err := c.newRequest("PUT", reqMetadata)
if err != nil {
return ObjectInfo{}, err
}
// Execute the request.
resp, err := c.do(req)
defer closeResponse(resp)
if err != nil {
return ObjectInfo{}, err
}
if resp != nil {
if resp.StatusCode != http.StatusOK {
return ObjectInfo{}, HTTPRespToErrorResponse(resp, bucketName, objectName)
}
}
var metadata ObjectInfo
// Trim off the odd double quotes from ETag in the beginning and end.
metadata.ETag = strings.TrimPrefix(resp.Header.Get("ETag"), "\"")
metadata.ETag = strings.TrimSuffix(metadata.ETag, "\"")
// A success here means data was written to server successfully.
metadata.Size = size
// Return here.
return metadata, nil
}

View File

@ -0,0 +1,167 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package minio
import (
"net/http"
"net/url"
)
// RemoveBucket deletes the named bucket.
//
// All objects (including all object versions and delete markers)
// in the bucket must be deleted before successfully attempting this request.
func (c Client) RemoveBucket(bucketName string) error {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
return err
}
// Instantiate a new request.
req, err := c.newRequest("DELETE", requestMetadata{
bucketName: bucketName,
})
if err != nil {
return err
}
// Initiate the request.
resp, err := c.do(req)
defer closeResponse(resp)
if err != nil {
return err
}
if resp != nil {
if resp.StatusCode != http.StatusNoContent {
return HTTPRespToErrorResponse(resp, bucketName, "")
}
}
// Remove the location from cache on a successful delete.
c.bucketLocCache.Delete(bucketName)
return nil
}
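// A minimal usage sketch for RemoveBucket; the bucket must already be empty,
// and client setup is a placeholder:
//
//  api := client.New(....)
//  if err := api.RemoveBucket("mytestbucket"); err != nil {
//      fmt.Println(err)
//  }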
// RemoveObject removes an object from a bucket.
func (c Client) RemoveObject(bucketName, objectName string) error {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
return err
}
if err := isValidObjectName(objectName); err != nil {
return err
}
// Instantiate the request.
req, err := c.newRequest("DELETE", requestMetadata{
bucketName: bucketName,
objectName: objectName,
})
if err != nil {
return err
}
// Initiate the request.
resp, err := c.do(req)
defer closeResponse(resp)
if err != nil {
return err
}
// DeleteObject always responds with http '204' even for
// objects which do not exist. So no need to handle them
// specifically.
return nil
}
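// A minimal usage sketch for RemoveObject; client setup is a placeholder:
//
//  api := client.New(....)
//  if err := api.RemoveObject("mytestbucket", "myobject"); err != nil {
//      fmt.Println(err)
//  }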
// RemoveIncompleteUpload aborts a partially uploaded object.
// Requires explicit authentication, no anonymous requests are allowed for multipart API.
func (c Client) RemoveIncompleteUpload(bucketName, objectName string) error {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
return err
}
if err := isValidObjectName(objectName); err != nil {
return err
}
// Find multipart upload id of the object to be aborted.
uploadID, err := c.findUploadID(bucketName, objectName)
if err != nil {
return err
}
if uploadID != "" {
// Upload id found, abort the incomplete multipart upload.
err := c.abortMultipartUpload(bucketName, objectName, uploadID)
if err != nil {
return err
}
}
return nil
}
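// A minimal usage sketch for RemoveIncompleteUpload; client setup is a placeholder:
//
//  api := client.New(....)
//  if err := api.RemoveIncompleteUpload("mytestbucket", "myobject"); err != nil {
//      fmt.Println(err)
//  }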
// abortMultipartUpload aborts a multipart upload for the given
// uploadID, all previously uploaded parts are deleted.
func (c Client) abortMultipartUpload(bucketName, objectName, uploadID string) error {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
return err
}
if err := isValidObjectName(objectName); err != nil {
return err
}
// Initialize url queries.
urlValues := make(url.Values)
urlValues.Set("uploadId", uploadID)
// Instantiate a new DELETE request.
req, err := c.newRequest("DELETE", requestMetadata{
bucketName: bucketName,
objectName: objectName,
queryValues: urlValues,
})
if err != nil {
return err
}
// Initiate the request.
resp, err := c.do(req)
defer closeResponse(resp)
if err != nil {
return err
}
if resp != nil {
if resp.StatusCode != http.StatusNoContent {
// Abort has no response body, handle it for any errors.
var errorResponse ErrorResponse
switch resp.StatusCode {
case http.StatusNotFound:
// This is needed specifically for abort, and it cannot
// be collapsed into the default case.
errorResponse = ErrorResponse{
Code: "NoSuchUpload",
Message: "The specified multipart upload does not exist.",
BucketName: bucketName,
Key: objectName,
RequestID: resp.Header.Get("x-amz-request-id"),
HostID: resp.Header.Get("x-amz-id-2"),
AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"),
}
default:
return HTTPRespToErrorResponse(resp, bucketName, objectName)
}
return errorResponse
}
}
return nil
}
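A minimal usage sketch for the removal APIs above (endpoint, keys, bucket and object names are placeholders, not part of the vendored code):
package main
import (
	"log"
	"github.com/minio/minio-go"
)
func main() {
	c, err := minio.New("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", false)
	if err != nil {
		log.Fatalln(err)
	}
	// Remove the object itself.
	if err := c.RemoveObject("mybucket", "myobject"); err != nil {
		log.Fatalln(err)
	}
	// Abort any partially uploaded multipart data left under the same name.
	if err := c.RemoveIncompleteUpload("mybucket", "myobject"); err != nil {
		log.Fatalln(err)
	}
	// The bucket must be empty before RemoveBucket can succeed.
	if err := c.RemoveBucket("mybucket"); err != nil {
		log.Fatalln(err)
	}
}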

View File

@ -0,0 +1,197 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package minio
import (
"encoding/xml"
"time"
)
// listAllMyBucketsResult container for listBuckets response.
type listAllMyBucketsResult struct {
// Container for one or more buckets.
Buckets struct {
Bucket []BucketInfo
}
Owner owner
}
// owner container for bucket owner information.
type owner struct {
DisplayName string
ID string
}
// commonPrefix container for prefix response.
type commonPrefix struct {
Prefix string
}
// listBucketResult container for listObjects response.
type listBucketResult struct {
// A response can contain CommonPrefixes only if you have
// specified a delimiter.
CommonPrefixes []commonPrefix
// Metadata about each object returned.
Contents []ObjectInfo
Delimiter string
// Encoding type used to encode object keys in the response.
EncodingType string
// A flag that indicates whether or not ListObjects returned all of the results
// that satisfied the search criteria.
IsTruncated bool
Marker string
MaxKeys int64
Name string
// When the response is truncated (the IsTruncated element value
// in the response is true), you can use the key name in this
// field as a marker in the subsequent request to get the next set
// of objects. Object storage lists objects in alphabetical order.
// Note: this element is returned only if you have the delimiter
// request parameter specified. If the response does not include
// NextMarker and it is truncated, you can use the value of the
// last Key in the response as the marker in the subsequent
// request to get the next set of object keys.
NextMarker string
Prefix string
}
// listMultipartUploadsResult container for ListMultipartUploads response
type listMultipartUploadsResult struct {
Bucket string
KeyMarker string
UploadIDMarker string `xml:"UploadIdMarker"`
NextKeyMarker string
NextUploadIDMarker string `xml:"NextUploadIdMarker"`
EncodingType string
MaxUploads int64
IsTruncated bool
Uploads []ObjectMultipartInfo `xml:"Upload"`
Prefix string
Delimiter string
// A response can contain CommonPrefixes only if you specify a delimiter.
CommonPrefixes []commonPrefix
}
// initiator container for who initiated multipart upload.
type initiator struct {
ID string
DisplayName string
}
// objectPart container for particular part of an object.
type objectPart struct {
// Part number identifies the part.
PartNumber int
// Date and time the part was uploaded.
LastModified time.Time
// Entity tag returned when the part was uploaded, usually md5sum
// of the part.
ETag string
// Size of the uploaded part data.
Size int64
}
// listObjectPartsResult container for ListObjectParts response.
type listObjectPartsResult struct {
Bucket string
Key string
UploadID string `xml:"UploadId"`
Initiator initiator
Owner owner
StorageClass string
PartNumberMarker int
NextPartNumberMarker int
MaxParts int
// Indicates whether the returned list of parts is truncated.
IsTruncated bool
ObjectParts []objectPart `xml:"Part"`
EncodingType string
}
// initiateMultipartUploadResult container for InitiateMultiPartUpload
// response.
type initiateMultipartUploadResult struct {
Bucket string
Key string
UploadID string `xml:"UploadId"`
}
// completeMultipartUploadResult container for completed multipart
// upload response.
type completeMultipartUploadResult struct {
Location string
Bucket string
Key string
ETag string
}
// completePart sub-container listing individual part numbers and their
// md5sums; part of completeMultipartUpload.
type completePart struct {
XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Part" json:"-"`
// Part number identifies the part.
PartNumber int
ETag string
}
// completeMultipartUpload container for completing multipart upload.
type completeMultipartUpload struct {
XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CompleteMultipartUpload" json:"-"`
Parts []completePart `xml:"Part"`
}
// createBucketConfiguration container for bucket configuration.
type createBucketConfiguration struct {
XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CreateBucketConfiguration" json:"-"`
Location string `xml:"LocationConstraint"`
}
// grant container for the grantee and his or her permissions.
type grant struct {
// grantee container for DisplayName and ID of the person being
// granted permissions.
Grantee struct {
ID string
DisplayName string
EmailAddress string
Type string
URI string
}
Permission string
}
// accessControlPolicy contains the elements providing ACL permissions
// for a bucket.
type accessControlPolicy struct {
// accessControlList container for ACL information.
AccessControlList struct {
Grant []grant
}
Owner owner
}
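These containers are filled by decoding the service's XML responses with encoding/xml. An illustrative sketch (the XML body is a hand-written sample, and the struct is redeclared locally only so the snippet compiles outside this package):
package main
import (
	"encoding/xml"
	"fmt"
	"strings"
)
// Local mirror of the unexported listAllMyBucketsResult above.
type listAllMyBucketsResult struct {
	Buckets struct {
		Bucket []struct{ Name string }
	}
}
func main() {
	body := `<ListAllMyBucketsResult>
  <Buckets><Bucket><Name>mybucket</Name></Bucket></Buckets>
</ListAllMyBucketsResult>`
	var res listAllMyBucketsResult
	if err := xml.NewDecoder(strings.NewReader(body)).Decode(&res); err != nil {
		panic(err)
	}
	fmt.Println(res.Buckets.Bucket[0].Name) // prints: mybucket
}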

View File

@ -0,0 +1,125 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package minio
import (
"net/http"
"strconv"
"strings"
"time"
)
// BucketExists verifies that the bucket exists and you have permission to access it.
func (c Client) BucketExists(bucketName string) error {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
return err
}
// Instantiate a new request.
req, err := c.newRequest("HEAD", requestMetadata{
bucketName: bucketName,
})
if err != nil {
return err
}
// Initiate the request.
resp, err := c.do(req)
defer closeResponse(resp)
if err != nil {
return err
}
if resp != nil {
if resp.StatusCode != http.StatusOK {
return HTTPRespToErrorResponse(resp, bucketName, "")
}
}
return nil
}
// StatObject verifies that the object exists and you have permission to access it, and returns its metadata.
func (c Client) StatObject(bucketName, objectName string) (ObjectInfo, error) {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
return ObjectInfo{}, err
}
if err := isValidObjectName(objectName); err != nil {
return ObjectInfo{}, err
}
// Instantiate a new request.
req, err := c.newRequest("HEAD", requestMetadata{
bucketName: bucketName,
objectName: objectName,
})
if err != nil {
return ObjectInfo{}, err
}
// Initiate the request.
resp, err := c.do(req)
defer closeResponse(resp)
if err != nil {
return ObjectInfo{}, err
}
if resp != nil {
if resp.StatusCode != http.StatusOK {
return ObjectInfo{}, HTTPRespToErrorResponse(resp, bucketName, objectName)
}
}
// Trim off the odd double quotes from ETag in the beginning and end.
md5sum := strings.TrimPrefix(resp.Header.Get("ETag"), "\"")
md5sum = strings.TrimSuffix(md5sum, "\"")
// Parse content length.
size, err := strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64)
if err != nil {
return ObjectInfo{}, ErrorResponse{
Code: "InternalError",
Message: "Content-Length is invalid. " + reportIssue,
BucketName: bucketName,
Key: objectName,
RequestID: resp.Header.Get("x-amz-request-id"),
HostID: resp.Header.Get("x-amz-id-2"),
AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"),
}
}
// Parse Last-Modified, which is in HTTP time format.
date, err := time.Parse(http.TimeFormat, resp.Header.Get("Last-Modified"))
if err != nil {
return ObjectInfo{}, ErrorResponse{
Code: "InternalError",
Message: "Last-Modified time format is invalid. " + reportIssue,
BucketName: bucketName,
Key: objectName,
RequestID: resp.Header.Get("x-amz-request-id"),
HostID: resp.Header.Get("x-amz-id-2"),
AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"),
}
}
// Fetch content type if any present.
contentType := strings.TrimSpace(resp.Header.Get("Content-Type"))
if contentType == "" {
contentType = "application/octet-stream"
}
// Save object metadata info.
var objectStat ObjectInfo
objectStat.ETag = md5sum
objectStat.Key = objectName
objectStat.Size = size
objectStat.LastModified = date
objectStat.ContentType = contentType
return objectStat, nil
}
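A short usage sketch for the stat APIs above (endpoint, keys and names are placeholders):
package main
import (
	"log"
	"github.com/minio/minio-go"
)
func main() {
	c, err := minio.New("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", false)
	if err != nil {
		log.Fatalln(err)
	}
	// Fails with an ErrorResponse if the bucket is missing or inaccessible.
	if err := c.BucketExists("mybucket"); err != nil {
		log.Fatalln(err)
	}
	info, err := c.StatObject("mybucket", "myobject")
	if err != nil {
		log.Fatalln(err)
	}
	log.Printf("%s: %d bytes, %s, modified %s", info.Key, info.Size, info.ContentType, info.LastModified)
}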

498
Godeps/_workspace/src/github.com/minio/minio-go/api.go generated vendored Normal file
View File

@ -0,0 +1,498 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package minio
import (
"encoding/base64"
"encoding/hex"
"fmt"
"io"
"net/http"
"net/http/httputil"
"net/url"
"os"
"regexp"
"runtime"
"strings"
"time"
)
// Client implements Amazon S3 compatible methods.
type Client struct {
/// Standard options.
// AccessKeyID required for authorized requests.
accessKeyID string
// SecretAccessKey required for authorized requests.
secretAccessKey string
// Choose a signature type if necessary.
signature SignatureType
// Set to 'true' if Client has no access and secret keys.
anonymous bool
// User supplied.
appInfo struct {
appName string
appVersion string
}
endpointURL *url.URL
// Needs allocation.
httpClient *http.Client
bucketLocCache *bucketLocationCache
// Advanced functionality
isTraceEnabled bool
traceOutput io.Writer
}
// Global constants.
const (
libraryName = "minio-go"
libraryVersion = "0.2.5"
)
// The User Agent should always follow the style below.
// Please open an issue to discuss any new changes here.
//
// Minio (OS; ARCH) LIB/VER APP/VER
const (
libraryUserAgentPrefix = "Minio (" + runtime.GOOS + "; " + runtime.GOARCH + ") "
libraryUserAgent = libraryUserAgentPrefix + libraryName + "/" + libraryVersion
)
// NewV2 - instantiate minio client with Amazon S3 signature version
// '2' compatibility.
func NewV2(endpoint string, accessKeyID, secretAccessKey string, insecure bool) (CloudStorageClient, error) {
clnt, err := privateNew(endpoint, accessKeyID, secretAccessKey, insecure)
if err != nil {
return nil, err
}
// Set to use signature version '2'.
clnt.signature = SignatureV2
return clnt, nil
}
// NewV4 - instantiate minio client with Amazon S3 signature version
// '4' compatibility.
func NewV4(endpoint string, accessKeyID, secretAccessKey string, insecure bool) (CloudStorageClient, error) {
clnt, err := privateNew(endpoint, accessKeyID, secretAccessKey, insecure)
if err != nil {
return nil, err
}
// Set to use signature version '4'.
clnt.signature = SignatureV4
return clnt, nil
}
// New - instantiate a minio Client, automatically selecting the
// appropriate signature version for known endpoints.
func New(endpoint string, accessKeyID, secretAccessKey string, insecure bool) (CloudStorageClient, error) {
clnt, err := privateNew(endpoint, accessKeyID, secretAccessKey, insecure)
if err != nil {
return nil, err
}
// Google cloud storage should be set to signature V2, force it if
// not.
if isGoogleEndpoint(clnt.endpointURL) {
clnt.signature = SignatureV2
}
// If Amazon S3, set to signature v4.
if isAmazonEndpoint(clnt.endpointURL) {
clnt.signature = SignatureV4
}
return clnt, nil
}
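// A short usage note (illustrative; endpoint and keys are placeholders):
//
//	// Amazon S3, Google Cloud Storage and most compatible services:
//	c, err := minio.New("s3.amazonaws.com", "ACCESS-KEY", "SECRET-KEY", false)
//
//	// Services that only implement signature version '2':
//	c, err := minio.NewV2("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", false)
//
// NewV4 can be used to force signature version '4' for endpoints that
// New does not recognize.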
func privateNew(endpoint, accessKeyID, secretAccessKey string, insecure bool) (*Client, error) {
// construct endpoint.
endpointURL, err := getEndpointURL(endpoint, insecure)
if err != nil {
return nil, err
}
// instantiate new Client.
clnt := new(Client)
clnt.accessKeyID = accessKeyID
clnt.secretAccessKey = secretAccessKey
if clnt.accessKeyID == "" || clnt.secretAccessKey == "" {
clnt.anonymous = true
}
// Save endpoint URL, user agent for future uses.
clnt.endpointURL = endpointURL
// Instantiate http client and bucket location cache.
clnt.httpClient = &http.Client{}
clnt.bucketLocCache = newBucketLocationCache()
// Return.
return clnt, nil
}
// SetAppInfo - add application details to user agent.
func (c *Client) SetAppInfo(appName string, appVersion string) {
// if app name and version are not set, we do not set a new
// user agent.
if appName != "" && appVersion != "" {
c.appInfo = struct {
appName string
appVersion string
}{}
c.appInfo.appName = appName
c.appInfo.appVersion = appVersion
}
}
// SetCustomTransport - set new custom transport.
func (c *Client) SetCustomTransport(customHTTPTransport http.RoundTripper) {
// Set this to override default transport
// ``http.DefaultTransport``.
//
// This transport is usually needed for debugging, or to add your
// own custom TLS certificates on the client transport, e.g. for
// custom CAs and certs which are not part of the standard
// certificate authorities. Follow this example:
//
// tr := &http.Transport{
// TLSClientConfig: &tls.Config{RootCAs: pool},
// DisableCompression: true,
// }
// api.SetCustomTransport(tr)
//
if c.httpClient != nil {
c.httpClient.Transport = customHTTPTransport
}
}
// TraceOn - enable HTTP tracing.
func (c *Client) TraceOn(outputStream io.Writer) error {
// if outputStream is nil then default to os.Stdout.
if outputStream == nil {
outputStream = os.Stdout
}
// Sets a new output stream.
c.traceOutput = outputStream
// Enable tracing.
c.isTraceEnabled = true
return nil
}
// TraceOff - disable HTTP tracing.
func (c *Client) TraceOff() {
// Disable tracing.
c.isTraceEnabled = false
}
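// Tracing sketch (assumes an already constructed client c): wrap a
// single call so its request and response headers are dumped to stderr.
//
//	c.TraceOn(os.Stderr)
//	err := c.BucketExists("mybucket") // headers written to os.Stderr
//	c.TraceOff()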
// requestMetadata - container for all the values needed to make a
// request.
type requestMetadata struct {
// If set newRequest presigns the URL.
presignURL bool
// User supplied.
bucketName string
objectName string
queryValues url.Values
customHeader http.Header
expires int64
// Generated by our internal code.
contentBody io.ReadCloser
contentLength int64
contentSHA256Bytes []byte
contentMD5Bytes []byte
}
// Filter out signature value from Authorization header.
func (c Client) filterSignature(req *http.Request) {
// For anonymous requests return here.
if c.anonymous {
return
}
// Handle if Signature V2.
if c.signature.isV2() {
// Set a temporary redacted auth
req.Header.Set("Authorization", "AWS **REDACTED**:**REDACTED**")
return
}
/// Signature V4 authorization header.
// Save the original auth.
origAuth := req.Header.Get("Authorization")
// Strip out accessKeyID from:
// Credential=<access-key-id>/<date>/<aws-region>/<aws-service>/aws4_request
regCred := regexp.MustCompile("Credential=([A-Z0-9]+)/")
newAuth := regCred.ReplaceAllString(origAuth, "Credential=**REDACTED**/")
// Strip out 256-bit signature from: Signature=<256-bit signature>
regSign := regexp.MustCompile("Signature=([0-9a-f]+)")
newAuth = regSign.ReplaceAllString(newAuth, "Signature=**REDACTED**")
// Set a temporary redacted auth
req.Header.Set("Authorization", newAuth)
return
}
// dumpHTTP - dump HTTP request and response.
func (c Client) dumpHTTP(req *http.Request, resp *http.Response) error {
// Starts http dump.
_, err := fmt.Fprintln(c.traceOutput, "---------START-HTTP---------")
if err != nil {
return err
}
// Filter out Signature field from Authorization header.
c.filterSignature(req)
// Only display request header.
reqTrace, err := httputil.DumpRequestOut(req, false)
if err != nil {
return err
}
// Write request to trace output.
_, err = fmt.Fprint(c.traceOutput, string(reqTrace))
if err != nil {
return err
}
// Only display response header.
var respTrace []byte
// For errors we make sure to dump response body as well.
if resp.StatusCode != http.StatusOK &&
resp.StatusCode != http.StatusPartialContent &&
resp.StatusCode != http.StatusNoContent {
respTrace, err = httputil.DumpResponse(resp, true)
if err != nil {
return err
}
} else {
respTrace, err = httputil.DumpResponse(resp, false)
if err != nil {
return err
}
}
// Write response to trace output.
_, err = fmt.Fprint(c.traceOutput, strings.TrimSuffix(string(respTrace), "\r\n"))
if err != nil {
return err
}
// Ends the http dump.
_, err = fmt.Fprintln(c.traceOutput, "---------END-HTTP---------")
if err != nil {
return err
}
// Returns success.
return nil
}
// do - execute http request.
func (c Client) do(req *http.Request) (*http.Response, error) {
// execute the request.
resp, err := c.httpClient.Do(req)
if err != nil {
return resp, err
}
// If trace is enabled, dump http request and response.
if c.isTraceEnabled {
err = c.dumpHTTP(req, resp)
if err != nil {
return nil, err
}
}
return resp, nil
}
// newRequest - instantiate a new HTTP request for a given method.
func (c Client) newRequest(method string, metadata requestMetadata) (*http.Request, error) {
// If no method is supplied default to 'POST'.
if method == "" {
method = "POST"
}
// construct a new target URL.
targetURL, err := c.makeTargetURL(metadata.bucketName, metadata.objectName, metadata.queryValues)
if err != nil {
return nil, err
}
// get a new HTTP request for the method.
req, err := http.NewRequest(method, targetURL.String(), nil)
if err != nil {
return nil, err
}
// Gather location only if bucketName is present.
location := "us-east-1" // Default all other requests to "us-east-1".
if metadata.bucketName != "" {
location, err = c.getBucketLocation(metadata.bucketName)
if err != nil {
return nil, err
}
}
// If presigned request, return quickly.
if metadata.expires != 0 {
if c.anonymous {
return nil, ErrInvalidArgument("Requests cannot be presigned with anonymous credentials.")
}
if c.signature.isV2() {
// Presign URL with signature v2.
req = PreSignV2(*req, c.accessKeyID, c.secretAccessKey, metadata.expires)
} else {
// Presign URL with signature v4.
req = PreSignV4(*req, c.accessKeyID, c.secretAccessKey, location, metadata.expires)
}
return req, nil
}
// Set content body if available.
if metadata.contentBody != nil {
req.Body = metadata.contentBody
}
// set UserAgent for the request.
c.setUserAgent(req)
// Set all headers.
for k, v := range metadata.customHeader {
req.Header.Set(k, v[0])
}
// set incoming content-length.
if metadata.contentLength > 0 {
req.ContentLength = metadata.contentLength
}
// Set sha256 sum only for non-anonymous credentials.
if !c.anonymous {
// set sha256 sum for signature calculation only with
// signature version '4'.
if c.signature.isV4() {
req.Header.Set("X-Amz-Content-Sha256", hex.EncodeToString(sum256([]byte{})))
if metadata.contentSHA256Bytes != nil {
req.Header.Set("X-Amz-Content-Sha256", hex.EncodeToString(metadata.contentSHA256Bytes))
}
}
}
// set md5Sum for content protection.
if metadata.contentMD5Bytes != nil {
req.Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(metadata.contentMD5Bytes))
}
// Sign the request if not anonymous.
if !c.anonymous {
if c.signature.isV2() {
// Add signature version '2' authorization header.
req = SignV2(*req, c.accessKeyID, c.secretAccessKey)
} else if c.signature.isV4() {
// Add signature version '4' authorization header.
req = SignV4(*req, c.accessKeyID, c.secretAccessKey, location)
}
}
// return request.
return req, nil
}
// set User agent.
func (c Client) setUserAgent(req *http.Request) {
req.Header.Set("User-Agent", libraryUserAgent)
if c.appInfo.appName != "" && c.appInfo.appVersion != "" {
req.Header.Set("User-Agent", libraryUserAgent+" "+c.appInfo.appName+"/"+c.appInfo.appVersion)
}
}
// makeTargetURL makes a new target URL.
func (c Client) makeTargetURL(bucketName, objectName string, queryValues url.Values) (*url.URL, error) {
urlStr := c.endpointURL.Scheme + "://" + c.endpointURL.Host + "/"
// Make URL only if bucketName is available, otherwise use the
// endpoint URL.
if bucketName != "" {
// If endpoint supports virtual host style use that always.
// Currently only S3 and Google Cloud Storage would support
// this.
if isVirtualHostSupported(c.endpointURL) {
urlStr = c.endpointURL.Scheme + "://" + bucketName + "." + c.endpointURL.Host + "/"
if objectName != "" {
urlStr = urlStr + urlEncodePath(objectName)
}
} else {
// If not fall back to using path style.
urlStr = urlStr + bucketName
if objectName != "" {
urlStr = urlStr + "/" + urlEncodePath(objectName)
}
}
}
// If there are any query values, add them to the end.
if len(queryValues) > 0 {
urlStr = urlStr + "?" + queryValues.Encode()
}
u, err := url.Parse(urlStr)
if err != nil {
return nil, err
}
return u, nil
}
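// For illustration, with endpoint https://s3.amazonaws.com (virtual host
// style supported) and bucket "mybucket", object "a/b.txt", the two
// branches above produce:
//
//	virtual-host style: https://mybucket.s3.amazonaws.com/a/b.txt
//	path style:         https://s3.amazonaws.com/mybucket/a/b.txt
//
// Plain endpoints such as a local Minio server fall back to path style.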
// CloudStorageClient - Cloud Storage Client interface.
type CloudStorageClient interface {
// Bucket Read/Write/Stat operations.
MakeBucket(bucketName string, cannedACL BucketACL, location string) error
BucketExists(bucketName string) error
RemoveBucket(bucketName string) error
SetBucketACL(bucketName string, cannedACL BucketACL) error
GetBucketACL(bucketName string) (BucketACL, error)
ListBuckets() ([]BucketInfo, error)
ListObjects(bucket, prefix string, recursive bool, doneCh <-chan struct{}) <-chan ObjectInfo
ListIncompleteUploads(bucket, prefix string, recursive bool, doneCh <-chan struct{}) <-chan ObjectMultipartInfo
// Object Read/Write/Stat operations.
GetObject(bucketName, objectName string) (reader *Object, err error)
PutObject(bucketName, objectName string, reader io.Reader, contentType string) (n int64, err error)
StatObject(bucketName, objectName string) (ObjectInfo, error)
RemoveObject(bucketName, objectName string) error
RemoveIncompleteUpload(bucketName, objectName string) error
// File to Object API.
FPutObject(bucketName, objectName, filePath, contentType string) (n int64, err error)
FGetObject(bucketName, objectName, filePath string) error
// Presigned operations.
PresignedGetObject(bucketName, objectName string, expires time.Duration) (presignedURL string, err error)
PresignedPutObject(bucketName, objectName string, expires time.Duration) (presignedURL string, err error)
PresignedPostPolicy(*PostPolicy) (formData map[string]string, err error)
// Application info.
SetAppInfo(appName, appVersion string)
// Set custom transport.
SetCustomTransport(customTransport http.RoundTripper)
// HTTP tracing methods.
TraceOn(traceOutput io.Writer) error
TraceOff()
}
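A compact end-to-end sketch against the CloudStorageClient interface (endpoint, keys and names are placeholders; error handling abbreviated):
package main
import (
	"log"
	"strings"
	"github.com/minio/minio-go"
)
func main() {
	c, err := minio.New("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", false)
	if err != nil {
		log.Fatalln(err)
	}
	if err := c.MakeBucket("mybucket", "private", "us-east-1"); err != nil {
		log.Fatalln(err)
	}
	// Upload from any io.Reader; the content type is optional.
	n, err := c.PutObject("mybucket", "hello.txt", strings.NewReader("hello world"), "text/plain")
	if err != nil {
		log.Fatalln(err)
	}
	log.Println("uploaded", n, "bytes")
	// Stream object listings until doneCh is closed.
	doneCh := make(chan struct{})
	defer close(doneCh)
	for obj := range c.ListObjects("mybucket", "", true, doneCh) {
		log.Println(obj.Key, obj.Size)
	}
}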

View File

@ -0,0 +1,835 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package minio_test
import (
"bytes"
crand "crypto/rand"
"errors"
"io"
"io/ioutil"
"math/rand"
"net/http"
"os"
"testing"
"time"
"github.com/minio/minio-go"
)
func TestGetObjectClosedTwiceV2(t *testing.T) {
if testing.Short() {
t.Skip("skipping functional tests for short runs")
}
// Seed random based on current time.
rand.Seed(time.Now().Unix())
// Connect and make sure bucket exists.
c, err := minio.New(
"s3.amazonaws.com",
os.Getenv("ACCESS_KEY"),
os.Getenv("SECRET_KEY"),
false,
)
if err != nil {
t.Fatal("Error:", err)
}
// Enable tracing, write to stderr.
// c.TraceOn(os.Stderr)
// Set user agent.
c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
// Generate a new random bucket name.
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()))
// Make a new bucket.
err = c.MakeBucket(bucketName, "private", "us-east-1")
if err != nil {
t.Fatal("Error:", err, bucketName)
}
// Generate data more than 32K
buf := make([]byte, rand.Intn(1<<20)+32*1024)
_, err = io.ReadFull(crand.Reader, buf)
if err != nil {
t.Fatal("Error:", err)
}
// Save the data
objectName := randString(60, rand.NewSource(time.Now().UnixNano()))
n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), "binary/octet-stream")
if err != nil {
t.Fatal("Error:", err, bucketName, objectName)
}
if n != int64(len(buf)) {
t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", len(buf), n)
}
// Read the data back
r, err := c.GetObject(bucketName, objectName)
if err != nil {
t.Fatal("Error:", err, bucketName, objectName)
}
st, err := r.Stat()
if err != nil {
t.Fatal("Error:", err, bucketName, objectName)
}
if st.Size != int64(len(buf)) {
t.Fatalf("Error: number of bytes in stat does not match, want %v, got %v\n",
len(buf), st.Size)
}
if err := r.Close(); err != nil {
t.Fatal("Error:", err)
}
if err := r.Close(); err == nil {
t.Fatal("Error: object is already closed, should return error")
}
err = c.RemoveObject(bucketName, objectName)
if err != nil {
t.Fatal("Error: ", err)
}
err = c.RemoveBucket(bucketName)
if err != nil {
t.Fatal("Error:", err)
}
}
// Tests removing partially uploaded objects.
func TestRemovePartiallyUploadedV2(t *testing.T) {
if testing.Short() {
t.Skip("skipping function tests for short runs")
}
// Seed random based on current time.
rand.Seed(time.Now().Unix())
// Connect and make sure bucket exists.
c, err := minio.NewV2(
"s3.amazonaws.com",
os.Getenv("ACCESS_KEY"),
os.Getenv("SECRET_KEY"),
false,
)
if err != nil {
t.Fatal("Error:", err)
}
// Set user agent.
c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
// Enable tracing, write to stdout.
// c.TraceOn(os.Stderr)
// Generate a new random bucket name.
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()))
// make a new bucket.
err = c.MakeBucket(bucketName, "private", "us-east-1")
if err != nil {
t.Fatal("Error:", err, bucketName)
}
reader, writer := io.Pipe()
go func() {
i := 0
for i < 25 {
_, err = io.CopyN(writer, crand.Reader, 128*1024)
if err != nil {
t.Fatal("Error:", err, bucketName)
}
i++
}
writer.CloseWithError(errors.New("Proactively closed to be verified later."))
}()
objectName := bucketName + "-resumable"
_, err = c.PutObject(bucketName, objectName, reader, "application/octet-stream")
if err == nil {
t.Fatal("Error: PutObject should fail.")
}
if err.Error() != "Proactively closed to be verified later." {
t.Fatal("Error:", err)
}
err = c.RemoveIncompleteUpload(bucketName, objectName)
if err != nil {
t.Fatal("Error:", err)
}
err = c.RemoveBucket(bucketName)
if err != nil {
t.Fatal("Error:", err)
}
}
// Tests resumable file based put object multipart upload.
func TestResumableFPutObjectV2(t *testing.T) {
if testing.Short() {
t.Skip("skipping functional tests for the short runs")
}
// Seed random based on current time.
rand.Seed(time.Now().Unix())
// Connect and make sure bucket exists.
c, err := minio.New(
"s3.amazonaws.com",
os.Getenv("ACCESS_KEY"),
os.Getenv("SECRET_KEY"),
false,
)
if err != nil {
t.Fatal("Error:", err)
}
// Set user agent.
c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
// Enable tracing, write to stdout.
// c.TraceOn(os.Stderr)
// Generate a new random bucket name.
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()))
// make a new bucket.
err = c.MakeBucket(bucketName, "private", "us-east-1")
if err != nil {
t.Fatal("Error:", err, bucketName)
}
file, err := ioutil.TempFile(os.TempDir(), "resumable")
if err != nil {
t.Fatal("Error:", err)
}
n, err := io.CopyN(file, crand.Reader, 11*1024*1024)
if err != nil {
t.Fatal("Error:", err)
}
if n != int64(11*1024*1024) {
t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", 11*1024*1024, n)
}
objectName := bucketName + "-resumable"
n, err = c.FPutObject(bucketName, objectName, file.Name(), "application/octet-stream")
if err != nil {
t.Fatal("Error:", err)
}
if n != int64(11*1024*1024) {
t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", 11*1024*1024, n)
}
// Close the file proactively for Windows.
file.Close()
err = c.RemoveObject(bucketName, objectName)
if err != nil {
t.Fatal("Error: ", err)
}
err = c.RemoveBucket(bucketName)
if err != nil {
t.Fatal("Error:", err)
}
err = os.Remove(file.Name())
if err != nil {
t.Fatal("Error:", err)
}
}
// Tests resumable put object multipart upload.
func TestResumablePutObjectV2(t *testing.T) {
if testing.Short() {
t.Skip("skipping functional tests for the short runs")
}
// Seed random based on current time.
rand.Seed(time.Now().Unix())
// Connect and make sure bucket exists.
c, err := minio.New(
"s3.amazonaws.com",
os.Getenv("ACCESS_KEY"),
os.Getenv("SECRET_KEY"),
false,
)
if err != nil {
t.Fatal("Error:", err)
}
// Enable tracing, write to stderr.
// c.TraceOn(os.Stderr)
// Set user agent.
c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
// Generate a new random bucket name.
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()))
// make a new bucket.
err = c.MakeBucket(bucketName, "private", "us-east-1")
if err != nil {
t.Fatal("Error:", err, bucketName)
}
// generate 11MB
buf := make([]byte, 11*1024*1024)
_, err = io.ReadFull(crand.Reader, buf)
if err != nil {
t.Fatal("Error:", err)
}
objectName := bucketName + "-resumable"
reader := bytes.NewReader(buf)
n, err := c.PutObject(bucketName, objectName, reader, "application/octet-stream")
if err != nil {
t.Fatal("Error:", err, bucketName, objectName)
}
if n != int64(len(buf)) {
t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", len(buf), n)
}
err = c.RemoveObject(bucketName, objectName)
if err != nil {
t.Fatal("Error: ", err)
}
err = c.RemoveBucket(bucketName)
if err != nil {
t.Fatal("Error:", err)
}
}
// Tests get object ReaderSeeker interface methods.
func TestGetObjectReadSeekFunctionalV2(t *testing.T) {
if testing.Short() {
t.Skip("skipping functional tests for short runs")
}
// Seed random based on current time.
rand.Seed(time.Now().Unix())
// Connect and make sure bucket exists.
c, err := minio.New(
"s3.amazonaws.com",
os.Getenv("ACCESS_KEY"),
os.Getenv("SECRET_KEY"),
false,
)
if err != nil {
t.Fatal("Error:", err)
}
// Enable tracing, write to stderr.
// c.TraceOn(os.Stderr)
// Set user agent.
c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
// Generate a new random bucket name.
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()))
// Make a new bucket.
err = c.MakeBucket(bucketName, "private", "us-east-1")
if err != nil {
t.Fatal("Error:", err, bucketName)
}
// Generate data more than 32K
buf := make([]byte, rand.Intn(1<<20)+32*1024)
_, err = io.ReadFull(crand.Reader, buf)
if err != nil {
t.Fatal("Error:", err)
}
// Save the data
objectName := randString(60, rand.NewSource(time.Now().UnixNano()))
n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), "binary/octet-stream")
if err != nil {
t.Fatal("Error:", err, bucketName, objectName)
}
if n != int64(len(buf)) {
t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", len(buf), n)
}
// Read the data back
r, err := c.GetObject(bucketName, objectName)
if err != nil {
t.Fatal("Error:", err, bucketName, objectName)
}
st, err := r.Stat()
if err != nil {
t.Fatal("Error:", err, bucketName, objectName)
}
if st.Size != int64(len(buf)) {
t.Fatalf("Error: number of bytes in stat does not match, want %v, got %v\n",
len(buf), st.Size)
}
offset := int64(2048)
n, err = r.Seek(offset, 0)
if err != nil {
t.Fatal("Error:", err, offset)
}
if n != offset {
t.Fatalf("Error: number of bytes seeked does not match, want %v, got %v\n",
offset, n)
}
n, err = r.Seek(0, 1)
if err != nil {
t.Fatal("Error:", err)
}
if n != offset {
t.Fatalf("Error: number of current seek does not match, want %v, got %v\n",
offset, n)
}
_, err = r.Seek(offset, 2)
if err == nil {
t.Fatal("Error: seek on positive offset for whence '2' should error out")
}
n, err = r.Seek(-offset, 2)
if err != nil {
t.Fatal("Error:", err)
}
if n != 0 {
t.Fatalf("Error: number of bytes seeked back does not match, want 0, got %v\n", n)
}
var buffer bytes.Buffer
if _, err = io.CopyN(&buffer, r, st.Size); err != nil {
t.Fatal("Error:", err)
}
if !bytes.Equal(buf, buffer.Bytes()) {
t.Fatal("Error: Incorrect read bytes v/s original buffer.")
}
err = c.RemoveObject(bucketName, objectName)
if err != nil {
t.Fatal("Error: ", err)
}
err = c.RemoveBucket(bucketName)
if err != nil {
t.Fatal("Error:", err)
}
}
// Tests get object ReaderAt interface methods.
func TestGetObjectReadAtFunctionalV2(t *testing.T) {
if testing.Short() {
t.Skip("skipping functional tests for the short runs")
}
// Seed random based on current time.
rand.Seed(time.Now().Unix())
// Connect and make sure bucket exists.
c, err := minio.New(
"s3.amazonaws.com",
os.Getenv("ACCESS_KEY"),
os.Getenv("SECRET_KEY"),
false,
)
if err != nil {
t.Fatal("Error:", err)
}
// Enable tracing, write to stderr.
// c.TraceOn(os.Stderr)
// Set user agent.
c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
// Generate a new random bucket name.
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()))
// Make a new bucket.
err = c.MakeBucket(bucketName, "private", "us-east-1")
if err != nil {
t.Fatal("Error:", err, bucketName)
}
// Generate data more than 32K
buf := make([]byte, rand.Intn(1<<20)+32*1024)
_, err = io.ReadFull(crand.Reader, buf)
if err != nil {
t.Fatal("Error:", err)
}
// Save the data
objectName := randString(60, rand.NewSource(time.Now().UnixNano()))
n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), "binary/octet-stream")
if err != nil {
t.Fatal("Error:", err, bucketName, objectName)
}
if n != int64(len(buf)) {
t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", len(buf), n)
}
// Read the data back
r, err := c.GetObject(bucketName, objectName)
if err != nil {
t.Fatal("Error:", err, bucketName, objectName)
}
st, err := r.Stat()
if err != nil {
t.Fatal("Error:", err, bucketName, objectName)
}
if st.Size != int64(len(buf)) {
t.Fatalf("Error: number of bytes in stat does not match, want %v, got %v\n",
len(buf), st.Size)
}
offset := int64(2048)
// Read directly
buf2 := make([]byte, 512)
buf3 := make([]byte, 512)
buf4 := make([]byte, 512)
m, err := r.ReadAt(buf2, offset)
if err != nil {
t.Fatal("Error:", err, st.Size, len(buf2), offset)
}
if m != len(buf2) {
t.Fatalf("Error: ReadAt read shorter bytes before reaching EOF, want %v, got %v\n", m, len(buf2))
}
if !bytes.Equal(buf2, buf[offset:offset+512]) {
t.Fatal("Error: Incorrect read between two ReadAt from same offset.")
}
offset += 512
m, err = r.ReadAt(buf3, offset)
if err != nil {
t.Fatal("Error:", err, st.Size, len(buf3), offset)
}
if m != len(buf3) {
t.Fatalf("Error: ReadAt read shorter bytes before reaching EOF, want %v, got %v\n", m, len(buf3))
}
if !bytes.Equal(buf3, buf[offset:offset+512]) {
t.Fatal("Error: Incorrect read between two ReadAt from same offset.")
}
offset += 512
m, err = r.ReadAt(buf4, offset)
if err != nil {
t.Fatal("Error:", err, st.Size, len(buf4), offset)
}
if m != len(buf4) {
t.Fatalf("Error: ReadAt read shorter bytes before reaching EOF, want %v, got %v\n", m, len(buf4))
}
if !bytes.Equal(buf4, buf[offset:offset+512]) {
t.Fatal("Error: Incorrect read between two ReadAt from same offset.")
}
buf5 := make([]byte, n)
// Read the whole object.
m, err = r.ReadAt(buf5, 0)
if err != nil {
if err != io.EOF {
t.Fatal("Error:", err, len(buf5))
}
}
if m != len(buf5) {
t.Fatalf("Error: ReadAt read shorter bytes before reaching EOF, want %v, got %v\n", m, len(buf5))
}
if !bytes.Equal(buf, buf5) {
t.Fatal("Error: Incorrect data read in GetObject, than what was previously upoaded.")
}
buf6 := make([]byte, n+1)
// Read the whole object and beyond.
_, err = r.ReadAt(buf6, 0)
if err != nil {
if err != io.EOF {
t.Fatal("Error:", err, len(buf6))
}
}
err = c.RemoveObject(bucketName, objectName)
if err != nil {
t.Fatal("Error: ", err)
}
err = c.RemoveBucket(bucketName)
if err != nil {
t.Fatal("Error:", err)
}
}
// Tests comprehensive list of all methods.
func TestFunctionalV2(t *testing.T) {
if testing.Short() {
t.Skip("skipping functional tests for the short runs")
}
// Seed random based on current time.
rand.Seed(time.Now().Unix())
c, err := minio.New(
"s3.amazonaws.com",
os.Getenv("ACCESS_KEY"),
os.Getenv("SECRET_KEY"),
false,
)
if err != nil {
t.Fatal("Error:", err)
}
// Enable to debug
// c.TraceOn(os.Stderr)
// Set user agent.
c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
// Generate a new random bucket name.
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()))
// Make a new bucket.
err = c.MakeBucket(bucketName, "private", "us-east-1")
if err != nil {
t.Fatal("Error:", err, bucketName)
}
// Generate a random file name.
fileName := randString(60, rand.NewSource(time.Now().UnixNano()))
file, err := os.Create(fileName)
if err != nil {
t.Fatal("Error:", err)
}
var totalSize int64
for i := 0; i < 3; i++ {
buf := make([]byte, rand.Intn(1<<19))
n, err := file.Write(buf)
if err != nil {
t.Fatal("Error:", err)
}
totalSize += int64(n)
}
file.Close()
// Verify the bucket exists and you have access.
err = c.BucketExists(bucketName)
if err != nil {
t.Fatal("Error:", err, bucketName)
}
// Make the bucket 'public read/write'.
err = c.SetBucketACL(bucketName, "public-read-write")
if err != nil {
t.Fatal("Error:", err)
}
// Get the previously set acl.
acl, err := c.GetBucketACL(bucketName)
if err != nil {
t.Fatal("Error:", err)
}
// ACL must be 'public read/write'.
if acl != minio.BucketACL("public-read-write") {
t.Fatal("Error:", acl)
}
// List all buckets.
buckets, err := c.ListBuckets()
if len(buckets) == 0 {
t.Fatal("Error: list buckets cannot be empty", buckets)
}
if err != nil {
t.Fatal("Error:", err)
}
// Verify if previously created bucket is listed in list buckets.
bucketFound := false
for _, bucket := range buckets {
if bucket.Name == bucketName {
bucketFound = true
}
}
// If bucket not found error out.
if !bucketFound {
t.Fatal("Error: bucket ", bucketName, "not found")
}
objectName := bucketName + "unique"
// Generate data
buf := make([]byte, rand.Intn(1<<19))
_, err = io.ReadFull(crand.Reader, buf)
if err != nil {
t.Fatal("Error: ", err)
}
n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), "")
if err != nil {
t.Fatal("Error: ", err)
}
if n != int64(len(buf)) {
t.Fatal("Error: bad length ", n, len(buf))
}
n, err = c.PutObject(bucketName, objectName+"-nolength", bytes.NewReader(buf), "binary/octet-stream")
if err != nil {
t.Fatal("Error:", err, bucketName, objectName+"-nolength")
}
if n != int64(len(buf)) {
t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", len(buf), n)
}
// Instantiate a done channel to close all listing.
doneCh := make(chan struct{})
defer close(doneCh)
objFound := false
isRecursive := true // Recursive is true.
for obj := range c.ListObjects(bucketName, objectName, isRecursive, doneCh) {
if obj.Key == objectName {
objFound = true
break
}
}
if !objFound {
t.Fatal("Error: object " + objectName + " not found.")
}
incompObjNotFound := true
for objIncompl := range c.ListIncompleteUploads(bucketName, objectName, isRecursive, doneCh) {
if objIncompl.Key != "" {
incompObjNotFound = false
break
}
}
if !incompObjNotFound {
t.Fatal("Error: unexpected dangling incomplete upload found.")
}
newReader, err := c.GetObject(bucketName, objectName)
if err != nil {
t.Fatal("Error: ", err)
}
newReadBytes, err := ioutil.ReadAll(newReader)
if err != nil {
t.Fatal("Error: ", err)
}
if !bytes.Equal(newReadBytes, buf) {
t.Fatal("Error: bytes mismatch.")
}
err = c.FGetObject(bucketName, objectName, fileName+"-f")
if err != nil {
t.Fatal("Error: ", err)
}
presignedGetURL, err := c.PresignedGetObject(bucketName, objectName, 3600*time.Second)
if err != nil {
t.Fatal("Error: ", err)
}
resp, err := http.Get(presignedGetURL)
if err != nil {
t.Fatal("Error: ", err)
}
if resp.StatusCode != http.StatusOK {
t.Fatal("Error: ", resp.Status)
}
newPresignedBytes, err := ioutil.ReadAll(resp.Body)
if err != nil {
t.Fatal("Error: ", err)
}
if !bytes.Equal(newPresignedBytes, buf) {
t.Fatal("Error: bytes mismatch.")
}
presignedPutURL, err := c.PresignedPutObject(bucketName, objectName+"-presigned", 3600*time.Second)
if err != nil {
t.Fatal("Error: ", err)
}
buf = make([]byte, rand.Intn(1<<20))
_, err = io.ReadFull(crand.Reader, buf)
if err != nil {
t.Fatal("Error: ", err)
}
req, err := http.NewRequest("PUT", presignedPutURL, bytes.NewReader(buf))
if err != nil {
t.Fatal("Error: ", err)
}
httpClient := &http.Client{}
resp, err = httpClient.Do(req)
if err != nil {
t.Fatal("Error: ", err)
}
newReader, err = c.GetObject(bucketName, objectName+"-presigned")
if err != nil {
t.Fatal("Error: ", err)
}
newReadBytes, err = ioutil.ReadAll(newReader)
if err != nil {
t.Fatal("Error: ", err)
}
if !bytes.Equal(newReadBytes, buf) {
t.Fatal("Error: bytes mismatch.")
}
err = c.RemoveObject(bucketName, objectName)
if err != nil {
t.Fatal("Error: ", err)
}
err = c.RemoveObject(bucketName, objectName+"-f")
if err != nil {
t.Fatal("Error: ", err)
}
err = c.RemoveObject(bucketName, objectName+"-nolength")
if err != nil {
t.Fatal("Error: ", err)
}
err = c.RemoveObject(bucketName, objectName+"-presigned")
if err != nil {
t.Fatal("Error: ", err)
}
err = c.RemoveBucket(bucketName)
if err != nil {
t.Fatal("Error:", err)
}
err = c.RemoveBucket(bucketName)
if err == nil {
t.Fatal("Error:")
}
if err.Error() != "The specified bucket does not exist" {
t.Fatal("Error: ", err)
}
if err = os.Remove(fileName); err != nil {
t.Fatal("Error: ", err)
}
if err = os.Remove(fileName + "-f"); err != nil {
t.Fatal("Error: ", err)
}
}

View File

@ -0,0 +1,859 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package minio_test
import (
"bytes"
crand "crypto/rand"
"errors"
"io"
"io/ioutil"
"math/rand"
"net/http"
"os"
"testing"
"time"
"github.com/minio/minio-go"
)
const letterBytes = "abcdefghijklmnopqrstuvwxyz0123456789"
const (
letterIdxBits = 6 // 6 bits to represent a letter index
letterIdxMask = 1<<letterIdxBits - 1 // All 1-bits, as many as letterIdxBits
letterIdxMax = 63 / letterIdxBits // # of letter indices fitting in 63 bits
)
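// Each src.Int63() call yields 63 random bits, i.e. letterIdxMax (10)
// usable chunks of letterIdxBits (6) bits. randString below consumes one
// 6-bit chunk per output letter and only draws a fresh Int63 when the
// cached bits run out.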
func randString(n int, src rand.Source) string {
b := make([]byte, n)
// A rand.Int63() generates 63 random bits, enough for letterIdxMax letters!
for i, cache, remain := n-1, src.Int63(), letterIdxMax; i >= 0; {
if remain == 0 {
cache, remain = src.Int63(), letterIdxMax
}
if idx := int(cache & letterIdxMask); idx < len(letterBytes) {
b[i] = letterBytes[idx]
i--
}
cache >>= letterIdxBits
remain--
}
return string(b[0:n])
}
func TestGetObjectClosedTwice(t *testing.T) {
if testing.Short() {
t.Skip("skipping functional tests for short runs")
}
// Seed random based on current time.
rand.Seed(time.Now().Unix())
// Connect and make sure bucket exists.
c, err := minio.New(
"s3.amazonaws.com",
os.Getenv("ACCESS_KEY"),
os.Getenv("SECRET_KEY"),
false,
)
if err != nil {
t.Fatal("Error:", err)
}
// Enable tracing, write to stderr.
// c.TraceOn(os.Stderr)
// Set user agent.
c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
// Generate a new random bucket name.
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()))
// Make a new bucket.
err = c.MakeBucket(bucketName, "private", "us-east-1")
if err != nil {
t.Fatal("Error:", err, bucketName)
}
// Generate data more than 32K
buf := make([]byte, rand.Intn(1<<20)+32*1024)
_, err = io.ReadFull(crand.Reader, buf)
if err != nil {
t.Fatal("Error:", err)
}
// Save the data
objectName := randString(60, rand.NewSource(time.Now().UnixNano()))
n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), "binary/octet-stream")
if err != nil {
t.Fatal("Error:", err, bucketName, objectName)
}
if n != int64(len(buf)) {
t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", len(buf), n)
}
// Read the data back
r, err := c.GetObject(bucketName, objectName)
if err != nil {
t.Fatal("Error:", err, bucketName, objectName)
}
st, err := r.Stat()
if err != nil {
t.Fatal("Error:", err, bucketName, objectName)
}
if st.Size != int64(len(buf)) {
t.Fatalf("Error: number of bytes in stat does not match, want %v, got %v\n",
len(buf), st.Size)
}
if err := r.Close(); err != nil {
t.Fatal("Error:", err)
}
if err := r.Close(); err == nil {
t.Fatal("Error: object is already closed, should return error")
}
err = c.RemoveObject(bucketName, objectName)
if err != nil {
t.Fatal("Error: ", err)
}
err = c.RemoveBucket(bucketName)
if err != nil {
t.Fatal("Error:", err)
}
}
// Tests removing partially uploaded objects.
func TestRemovePartiallyUploaded(t *testing.T) {
if testing.Short() {
t.Skip("skipping function tests for short runs")
}
// Seed random based on current time.
rand.Seed(time.Now().Unix())
// Connect and make sure bucket exists.
c, err := minio.New(
"s3.amazonaws.com",
os.Getenv("ACCESS_KEY"),
os.Getenv("SECRET_KEY"),
false,
)
if err != nil {
t.Fatal("Error:", err)
}
// Set user agent.
c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
// Enable tracing, write to stdout.
// c.TraceOn(os.Stderr)
// Generate a new random bucket name.
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()))
// Make a new bucket.
err = c.MakeBucket(bucketName, "private", "us-east-1")
if err != nil {
t.Fatal("Error:", err, bucketName)
}
reader, writer := io.Pipe()
go func() {
i := 0
for i < 25 {
_, err = io.CopyN(writer, crand.Reader, 128*1024)
if err != nil {
t.Fatal("Error:", err, bucketName)
}
i++
}
writer.CloseWithError(errors.New("Proactively closed to be verified later."))
}()
objectName := bucketName + "-resumable"
_, err = c.PutObject(bucketName, objectName, reader, "application/octet-stream")
if err == nil {
t.Fatal("Error: PutObject should fail.")
}
if err.Error() != "Proactively closed to be verified later." {
t.Fatal("Error:", err)
}
err = c.RemoveIncompleteUpload(bucketName, objectName)
if err != nil {
t.Fatal("Error:", err)
}
err = c.RemoveBucket(bucketName)
if err != nil {
t.Fatal("Error:", err)
}
}
// Tests resumable file based put object multipart upload.
func TestResumableFPutObject(t *testing.T) {
if testing.Short() {
t.Skip("skipping functional tests for the short runs")
}
// Seed random based on current time.
rand.Seed(time.Now().Unix())
// Connect and make sure bucket exists.
c, err := minio.New(
"s3.amazonaws.com",
os.Getenv("ACCESS_KEY"),
os.Getenv("SECRET_KEY"),
false,
)
if err != nil {
t.Fatal("Error:", err)
}
// Set user agent.
c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
// Enable tracing, write to stdout.
// c.TraceOn(os.Stderr)
// Generate a new random bucket name.
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()))
// Make a new bucket.
err = c.MakeBucket(bucketName, "private", "us-east-1")
if err != nil {
t.Fatal("Error:", err, bucketName)
}
file, err := ioutil.TempFile(os.TempDir(), "resumable")
if err != nil {
t.Fatal("Error:", err)
}
n, err := io.CopyN(file, crand.Reader, 11*1024*1024)
if err != nil {
t.Fatal("Error:", err)
}
if n != int64(11*1024*1024) {
t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", 11*1024*1024, n)
}
objectName := bucketName + "-resumable"
n, err = c.FPutObject(bucketName, objectName, file.Name(), "application/octet-stream")
if err != nil {
t.Fatal("Error:", err)
}
if n != int64(11*1024*1024) {
t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", 11*1024*1024, n)
}
// Close the file proactively for Windows.
file.Close()
err = c.RemoveObject(bucketName, objectName)
if err != nil {
t.Fatal("Error: ", err)
}
err = c.RemoveBucket(bucketName)
if err != nil {
t.Fatal("Error:", err)
}
err = os.Remove(file.Name())
if err != nil {
t.Fatal("Error:", err)
}
}
// Tests resumable put object multipart upload.
func TestResumablePutObject(t *testing.T) {
if testing.Short() {
t.Skip("skipping functional tests for the short runs")
}
// Seed random based on current time.
rand.Seed(time.Now().Unix())
// Connect and make sure bucket exists.
c, err := minio.New(
"s3.amazonaws.com",
os.Getenv("ACCESS_KEY"),
os.Getenv("SECRET_KEY"),
false,
)
if err != nil {
t.Fatal("Error:", err)
}
// Enable tracing, write to stderr.
// c.TraceOn(os.Stderr)
// Set user agent.
c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
// Generate a new random bucket name.
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()))
// Make a new bucket.
err = c.MakeBucket(bucketName, "private", "us-east-1")
if err != nil {
t.Fatal("Error:", err, bucketName)
}
// Generate 11MB
buf := make([]byte, 11*1024*1024)
_, err = io.ReadFull(crand.Reader, buf)
if err != nil {
t.Fatal("Error:", err)
}
objectName := bucketName + "-resumable"
reader := bytes.NewReader(buf)
n, err := c.PutObject(bucketName, objectName, reader, "application/octet-stream")
if err != nil {
t.Fatal("Error:", err, bucketName, objectName)
}
if n != int64(len(buf)) {
t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", len(buf), n)
}
err = c.RemoveObject(bucketName, objectName)
if err != nil {
t.Fatal("Error: ", err)
}
err = c.RemoveBucket(bucketName)
if err != nil {
t.Fatal("Error:", err)
}
}
// Tests get object ReaderSeeker interface methods.
func TestGetObjectReadSeekFunctional(t *testing.T) {
if testing.Short() {
t.Skip("skipping functional tests for short runs")
}
// Seed random based on current time.
rand.Seed(time.Now().Unix())
// Connect and make sure bucket exists.
c, err := minio.New(
"s3.amazonaws.com",
os.Getenv("ACCESS_KEY"),
os.Getenv("SECRET_KEY"),
false,
)
if err != nil {
t.Fatal("Error:", err)
}
// Enable tracing, write to stderr.
// c.TraceOn(os.Stderr)
// Set user agent.
c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
// Generate a new random bucket name.
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()))
// Make a new bucket.
err = c.MakeBucket(bucketName, "private", "us-east-1")
if err != nil {
t.Fatal("Error:", err, bucketName)
}
// Generate data more than 32K
buf := make([]byte, rand.Intn(1<<20)+32*1024)
_, err = io.ReadFull(crand.Reader, buf)
if err != nil {
t.Fatal("Error:", err)
}
// Save the data
objectName := randString(60, rand.NewSource(time.Now().UnixNano()))
n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), "binary/octet-stream")
if err != nil {
t.Fatal("Error:", err, bucketName, objectName)
}
if n != int64(len(buf)) {
t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", len(buf), n)
}
// Read the data back
r, err := c.GetObject(bucketName, objectName)
if err != nil {
t.Fatal("Error:", err, bucketName, objectName)
}
st, err := r.Stat()
if err != nil {
t.Fatal("Error:", err, bucketName, objectName)
}
if st.Size != int64(len(buf)) {
t.Fatalf("Error: number of bytes in stat does not match, want %v, got %v\n",
len(buf), st.Size)
}
offset := int64(2048)
n, err = r.Seek(offset, 0)
if err != nil {
t.Fatal("Error:", err, offset)
}
if n != offset {
t.Fatalf("Error: number of bytes seeked does not match, want %v, got %v\n",
offset, n)
}
n, err = r.Seek(0, 1)
if err != nil {
t.Fatal("Error:", err)
}
if n != offset {
t.Fatalf("Error: number of current seek does not match, want %v, got %v\n",
offset, n)
}
_, err = r.Seek(offset, 2)
if err == nil {
t.Fatal("Error: seek on positive offset for whence '2' should error out")
}
n, err = r.Seek(-offset, 2)
if err != nil {
t.Fatal("Error:", err)
}
if n != 0 {
t.Fatalf("Error: number of bytes seeked back does not match, want 0, got %v\n", n)
}
var buffer bytes.Buffer
if _, err = io.CopyN(&buffer, r, st.Size); err != nil {
t.Fatal("Error:", err)
}
if !bytes.Equal(buf, buffer.Bytes()) {
t.Fatal("Error: Incorrect read bytes v/s original buffer.")
}
err = c.RemoveObject(bucketName, objectName)
if err != nil {
t.Fatal("Error: ", err)
}
err = c.RemoveBucket(bucketName)
if err != nil {
t.Fatal("Error:", err)
}
}
// Tests get object ReaderAt interface methods.
func TestGetObjectReadAtFunctional(t *testing.T) {
if testing.Short() {
t.Skip("skipping functional tests for the short runs")
}
// Seed random based on current time.
rand.Seed(time.Now().Unix())
// Connect and make sure bucket exists.
c, err := minio.New(
"s3.amazonaws.com",
os.Getenv("ACCESS_KEY"),
os.Getenv("SECRET_KEY"),
false,
)
if err != nil {
t.Fatal("Error:", err)
}
// Enable tracing, write to stderr.
// c.TraceOn(os.Stderr)
// Set user agent.
c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
// Generate a new random bucket name.
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()))
// Make a new bucket.
err = c.MakeBucket(bucketName, "private", "us-east-1")
if err != nil {
t.Fatal("Error:", err, bucketName)
}
// Generate data more than 32K
buf := make([]byte, rand.Intn(1<<20)+32*1024)
_, err = io.ReadFull(crand.Reader, buf)
if err != nil {
t.Fatal("Error:", err)
}
// Save the data
objectName := randString(60, rand.NewSource(time.Now().UnixNano()))
n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), "binary/octet-stream")
if err != nil {
t.Fatal("Error:", err, bucketName, objectName)
}
if n != int64(len(buf)) {
t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", len(buf), n)
}
// read the data back
r, err := c.GetObject(bucketName, objectName)
if err != nil {
t.Fatal("Error:", err, bucketName, objectName)
}
st, err := r.Stat()
if err != nil {
t.Fatal("Error:", err, bucketName, objectName)
}
if st.Size != int64(len(buf)) {
t.Fatalf("Error: number of bytes in stat does not match, want %v, got %v\n",
len(buf), st.Size)
}
offset := int64(2048)
// read directly
buf2 := make([]byte, 512)
buf3 := make([]byte, 512)
buf4 := make([]byte, 512)
m, err := r.ReadAt(buf2, offset)
if err != nil {
t.Fatal("Error:", err, st.Size, len(buf2), offset)
}
if m != len(buf2) {
t.Fatalf("Error: ReadAt read shorter bytes before reaching EOF, want %v, got %v\n", m, len(buf2))
}
if !bytes.Equal(buf2, buf[offset:offset+512]) {
t.Fatal("Error: Incorrect read between two ReadAt from same offset.")
}
offset += 512
m, err = r.ReadAt(buf3, offset)
if err != nil {
t.Fatal("Error:", err, st.Size, len(buf3), offset)
}
if m != len(buf3) {
t.Fatalf("Error: ReadAt read shorter bytes before reaching EOF, want %v, got %v\n", m, len(buf3))
}
if !bytes.Equal(buf3, buf[offset:offset+512]) {
t.Fatal("Error: Incorrect read between two ReadAt from same offset.")
}
offset += 512
m, err = r.ReadAt(buf4, offset)
if err != nil {
t.Fatal("Error:", err, st.Size, len(buf4), offset)
}
if m != len(buf4) {
t.Fatalf("Error: ReadAt read shorter bytes before reaching EOF, want %v, got %v\n", m, len(buf4))
}
if !bytes.Equal(buf4, buf[offset:offset+512]) {
t.Fatal("Error: Incorrect read between two ReadAt from same offset.")
}
buf5 := make([]byte, n)
// Read the whole object.
m, err = r.ReadAt(buf5, 0)
if err != nil {
if err != io.EOF {
t.Fatal("Error:", err, len(buf5))
}
}
if m != len(buf5) {
t.Fatalf("Error: ReadAt read shorter bytes before reaching EOF, want %v, got %v\n", m, len(buf5))
}
if !bytes.Equal(buf, buf5) {
t.Fatal("Error: Incorrect data read in GetObject, than what was previously upoaded.")
}
buf6 := make([]byte, n+1)
// Read the whole object and beyond.
_, err = r.ReadAt(buf6, 0)
if err != nil {
if err != io.EOF {
t.Fatal("Error:", err, len(buf6))
}
}
err = c.RemoveObject(bucketName, objectName)
if err != nil {
t.Fatal("Error: ", err)
}
err = c.RemoveBucket(bucketName)
if err != nil {
t.Fatal("Error:", err)
}
}
// Tests comprehensive list of all methods.
func TestFunctional(t *testing.T) {
if testing.Short() {
t.Skip("skipping functional tests for the short runs")
}
// Seed random based on current time.
rand.Seed(time.Now().Unix())
c, err := minio.New(
"s3.amazonaws.com",
os.Getenv("ACCESS_KEY"),
os.Getenv("SECRET_KEY"),
false,
)
if err != nil {
t.Fatal("Error:", err)
}
// Enable tracing, write to stderr.
// c.TraceOn(os.Stderr)
// Set user agent.
c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
// Generate a new random bucket name.
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()))
// Make a new bucket.
err = c.MakeBucket(bucketName, "private", "us-east-1")
if err != nil {
t.Fatal("Error:", err, bucketName)
}
// Generate a random file name.
fileName := randString(60, rand.NewSource(time.Now().UnixNano()))
file, err := os.Create(fileName)
if err != nil {
t.Fatal("Error:", err)
}
var totalSize int64
for i := 0; i < 3; i++ {
buf := make([]byte, rand.Intn(1<<19))
n, err := file.Write(buf)
if err != nil {
t.Fatal("Error:", err)
}
totalSize += int64(n)
}
file.Close()
// Verify the bucket exists and you have access.
err = c.BucketExists(bucketName)
if err != nil {
t.Fatal("Error:", err, bucketName)
}
// Make the bucket 'public read/write'.
err = c.SetBucketACL(bucketName, "public-read-write")
if err != nil {
t.Fatal("Error:", err)
}
// Get the previously set acl.
acl, err := c.GetBucketACL(bucketName)
if err != nil {
t.Fatal("Error:", err)
}
// ACL must be 'public read/write'.
if acl != minio.BucketACL("public-read-write") {
t.Fatal("Error:", acl)
}
// List all buckets.
buckets, err := c.ListBuckets()
if err != nil {
t.Fatal("Error:", err)
}
if len(buckets) == 0 {
t.Fatal("Error: list buckets cannot be empty", buckets)
}
// Verify if previously created bucket is listed in list buckets.
bucketFound := false
for _, bucket := range buckets {
if bucket.Name == bucketName {
bucketFound = true
}
}
// If the bucket was not found, error out.
if !bucketFound {
t.Fatal("Error: bucket ", bucketName, "not found")
}
objectName := bucketName + "unique"
// Generate data
buf := make([]byte, rand.Intn(1<<19))
_, err = io.ReadFull(crand.Reader, buf)
if err != nil {
t.Fatal("Error: ", err)
}
n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), "")
if err != nil {
t.Fatal("Error: ", err)
}
if n != int64(len(buf)) {
t.Fatal("Error: bad length ", n, len(buf))
}
n, err = c.PutObject(bucketName, objectName+"-nolength", bytes.NewReader(buf), "binary/octet-stream")
if err != nil {
t.Fatal("Error:", err, bucketName, objectName+"-nolength")
}
if n != int64(len(buf)) {
t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", len(buf), n)
}
// Instantiate a done channel to close all listing.
doneCh := make(chan struct{})
defer close(doneCh)
objFound := false
isRecursive := true // Recursive is true.
for obj := range c.ListObjects(bucketName, objectName, isRecursive, doneCh) {
if obj.Key == objectName {
objFound = true
break
}
}
if !objFound {
t.Fatal("Error: object " + objectName + " not found.")
}
incompObjNotFound := true
for objIncompl := range c.ListIncompleteUploads(bucketName, objectName, isRecursive, doneCh) {
if objIncompl.Key != "" {
incompObjNotFound = false
break
}
}
if !incompObjNotFound {
t.Fatal("Error: unexpected dangling incomplete upload found.")
}
newReader, err := c.GetObject(bucketName, objectName)
if err != nil {
t.Fatal("Error: ", err)
}
newReadBytes, err := ioutil.ReadAll(newReader)
if err != nil {
t.Fatal("Error: ", err)
}
if !bytes.Equal(newReadBytes, buf) {
t.Fatal("Error: bytes mismatch.")
}
err = c.FGetObject(bucketName, objectName, fileName+"-f")
if err != nil {
t.Fatal("Error: ", err)
}
presignedGetURL, err := c.PresignedGetObject(bucketName, objectName, 3600*time.Second)
if err != nil {
t.Fatal("Error: ", err)
}
resp, err := http.Get(presignedGetURL)
if err != nil {
t.Fatal("Error: ", err)
}
if resp.StatusCode != http.StatusOK {
t.Fatal("Error: ", resp.Status)
}
newPresignedBytes, err := ioutil.ReadAll(resp.Body)
if err != nil {
t.Fatal("Error: ", err)
}
resp.Body.Close()
if !bytes.Equal(newPresignedBytes, buf) {
t.Fatal("Error: bytes mismatch.")
}
presignedPutURL, err := c.PresignedPutObject(bucketName, objectName+"-presigned", 3600*time.Second)
if err != nil {
t.Fatal("Error: ", err)
}
buf = make([]byte, rand.Intn(1<<20))
_, err = io.ReadFull(crand.Reader, buf)
if err != nil {
t.Fatal("Error: ", err)
}
req, err := http.NewRequest("PUT", presignedPutURL, bytes.NewReader(buf))
if err != nil {
t.Fatal("Error: ", err)
}
httpClient := &http.Client{}
resp, err = httpClient.Do(req)
if err != nil {
t.Fatal("Error: ", err)
}
if resp.StatusCode != http.StatusOK {
t.Fatal("Error: ", resp.Status)
}
resp.Body.Close()
newReader, err = c.GetObject(bucketName, objectName+"-presigned")
if err != nil {
t.Fatal("Error: ", err)
}
newReadBytes, err = ioutil.ReadAll(newReader)
if err != nil {
t.Fatal("Error: ", err)
}
if !bytes.Equal(newReadBytes, buf) {
t.Fatal("Error: bytes mismatch.")
}
err = c.RemoveObject(bucketName, objectName)
if err != nil {
t.Fatal("Error: ", err)
}
err = c.RemoveObject(bucketName, objectName+"-f")
if err != nil {
t.Fatal("Error: ", err)
}
err = c.RemoveObject(bucketName, objectName+"-nolength")
if err != nil {
t.Fatal("Error: ", err)
}
err = c.RemoveObject(bucketName, objectName+"-presigned")
if err != nil {
t.Fatal("Error: ", err)
}
err = c.RemoveBucket(bucketName)
if err != nil {
t.Fatal("Error:", err)
}
err = c.RemoveBucket(bucketName)
if err == nil {
t.Fatal("Error:")
}
if err.Error() != "The specified bucket does not exist" {
t.Fatal("Error: ", err)
}
if err = os.Remove(fileName); err != nil {
t.Fatal("Error: ", err)
}
if err = os.Remove(fileName + "-f"); err != nil {
t.Fatal("Error: ", err)
}
}

View File

@ -0,0 +1,364 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package minio
import (
"fmt"
"net/http"
"net/url"
"strings"
"testing"
)
func TestEncodeURL2Path(t *testing.T) {
type urlStrings struct {
objName string
encodedObjName string
}
bucketName := "bucketName"
want := []urlStrings{
{
objName: "本語",
encodedObjName: "%E6%9C%AC%E8%AA%9E",
},
{
objName: "本語.1",
encodedObjName: "%E6%9C%AC%E8%AA%9E.1",
},
{
objName: ">123>3123123",
encodedObjName: "%3E123%3E3123123",
},
{
objName: "test 1 2.txt",
encodedObjName: "test%201%202.txt",
},
{
objName: "test++ 1.txt",
encodedObjName: "test%2B%2B%201.txt",
},
}
for _, o := range want {
u, err := url.Parse(fmt.Sprintf("https://%s.s3.amazonaws.com/%s", bucketName, o.objName))
if err != nil {
t.Fatal("Error:", err)
}
urlPath := "/" + bucketName + "/" + o.encodedObjName
if urlPath != encodeURL2Path(u) {
t.Fatal("Error")
}
}
}
func TestErrorResponse(t *testing.T) {
var err error
err = ErrorResponse{
Code: "Testing",
}
errResp := ToErrorResponse(err)
if errResp.Code != "Testing" {
t.Fatal("Type conversion failed, we have an empty struct.")
}
// Test http response decoding.
var httpResponse *http.Response
// Set empty variables
httpResponse = nil
var bucketName, objectName string
// Should fail with invalid argument.
err = HTTPRespToErrorResponse(httpResponse, bucketName, objectName)
errResp = ToErrorResponse(err)
if errResp.Code != "InvalidArgument" {
t.Fatal("Empty response input should return invalid argument.")
}
}
func TestSignatureCalculation(t *testing.T) {
req, err := http.NewRequest("GET", "https://s3.amazonaws.com", nil)
if err != nil {
t.Fatal("Error:", err)
}
req = SignV4(*req, "", "", "us-east-1")
if req.Header.Get("Authorization") != "" {
t.Fatal("Error: anonymous credentials should not have Authorization header.")
}
req = PreSignV4(*req, "", "", "us-east-1", 0)
if strings.Contains(req.URL.RawQuery, "X-Amz-Signature") {
t.Fatal("Error: anonymous credentials should not have Signature query resource.")
}
req = SignV2(*req, "", "")
if req.Header.Get("Authorization") != "" {
t.Fatal("Error: anonymous credentials should not have Authorization header.")
}
req = PreSignV2(*req, "", "", 0)
if strings.Contains(req.URL.RawQuery, "Signature") {
t.Fatal("Error: anonymous credentials should not have Signature query resource.")
}
req = SignV4(*req, "ACCESS-KEY", "SECRET-KEY", "us-east-1")
if req.Header.Get("Authorization") == "" {
t.Fatal("Error: normal credentials should have Authorization header.")
}
req = PreSignV4(*req, "ACCESS-KEY", "SECRET-KEY", "us-east-1", 0)
if !strings.Contains(req.URL.RawQuery, "X-Amz-Signature") {
t.Fatal("Error: normal credentials should have Signature query resource.")
}
req = SignV2(*req, "ACCESS-KEY", "SECRET-KEY")
if req.Header.Get("Authorization") == "" {
t.Fatal("Error: normal credentials should have Authorization header.")
}
req = PreSignV2(*req, "ACCESS-KEY", "SECRET-KEY", 0)
if !strings.Contains(req.URL.RawQuery, "Signature") {
t.Fatal("Error: normal credentials should not have Signature query resource.")
}
}
func TestSignatureType(t *testing.T) {
clnt := Client{}
if !clnt.signature.isV4() {
t.Fatal("Error")
}
clnt.signature = SignatureV2
if !clnt.signature.isV2() {
t.Fatal("Error")
}
if clnt.signature.isV4() {
t.Fatal("Error")
}
clnt.signature = SignatureV4
if !clnt.signature.isV4() {
t.Fatal("Error")
}
}
func TestACLTypes(t *testing.T) {
want := map[string]bool{
"private": true,
"public-read": true,
"public-read-write": true,
"authenticated-read": true,
"invalid": false,
}
for acl, ok := range want {
if BucketACL(acl).isValidBucketACL() != ok {
t.Fatal("Error")
}
}
}
func TestPartSize(t *testing.T) {
var maxPartSize int64 = 1024 * 1024 * 1024 * 5
partSize := optimalPartSize(5000000000000000000)
if partSize > minimumPartSize {
if partSize > maxPartSize {
t.Fatal("invalid result, cannot be bigger than maxPartSize 5GiB")
}
}
partSize = optimalPartSize(50000000000)
if partSize > minimumPartSize {
t.Fatal("invalid result, cannot be bigger than minimumPartSize 5MiB")
}
}
func TestURLEncoding(t *testing.T) {
type urlStrings struct {
name string
encodedName string
}
want := []urlStrings{
{
name: "bigfile-1._%",
encodedName: "bigfile-1._%25",
},
{
name: "本語",
encodedName: "%E6%9C%AC%E8%AA%9E",
},
{
name: "本語.1",
encodedName: "%E6%9C%AC%E8%AA%9E.1",
},
{
name: ">123>3123123",
encodedName: "%3E123%3E3123123",
},
{
name: "test 1 2.txt",
encodedName: "test%201%202.txt",
},
{
name: "test++ 1.txt",
encodedName: "test%2B%2B%201.txt",
},
}
for _, u := range want {
if u.encodedName != urlEncodePath(u.name) {
t.Fatal("Error")
}
}
}
func TestGetEndpointURL(t *testing.T) {
if _, err := getEndpointURL("s3.amazonaws.com", false); err != nil {
t.Fatal("Error:", err)
}
if _, err := getEndpointURL("192.168.1.1", false); err != nil {
t.Fatal("Error:", err)
}
if _, err := getEndpointURL("13333.123123.-", false); err == nil {
t.Fatal("Error")
}
if _, err := getEndpointURL("s3.aamzza.-", false); err == nil {
t.Fatal("Error")
}
if _, err := getEndpointURL("s3.amazonaws.com:443", false); err == nil {
t.Fatal("Error")
}
}
func TestValidIP(t *testing.T) {
type validIP struct {
ip string
valid bool
}
want := []validIP{
{
ip: "192.168.1.1",
valid: true,
},
{
ip: "192.1.8",
valid: false,
},
{
ip: "..192.",
valid: false,
},
{
ip: "192.168.1.1.1",
valid: false,
},
}
for _, w := range want {
valid := isValidIP(w.ip)
if valid != w.valid {
t.Fatal("Error")
}
}
}
func TestValidEndpointDomain(t *testing.T) {
type validEndpoint struct {
endpointDomain string
valid bool
}
want := []validEndpoint{
{
endpointDomain: "s3.amazonaws.com",
valid: true,
},
{
endpointDomain: "s3.amazonaws.com_",
valid: false,
},
{
endpointDomain: "%$$$",
valid: false,
},
{
endpointDomain: "s3.amz.test.com",
valid: true,
},
{
endpointDomain: "s3.%%",
valid: false,
},
{
endpointDomain: "localhost",
valid: true,
},
{
endpointDomain: "-localhost",
valid: false,
},
{
endpointDomain: "",
valid: false,
},
{
endpointDomain: "\n \t",
valid: false,
},
{
endpointDomain: " ",
valid: false,
},
}
for _, w := range want {
valid := isValidDomain(w.endpointDomain)
if valid != w.valid {
t.Fatal("Error:", w.endpointDomain)
}
}
}
func TestValidEndpointURL(t *testing.T) {
type validURL struct {
url string
valid bool
}
want := []validURL{
{
url: "https://s3.amazonaws.com",
valid: true,
},
{
url: "https://s3.amazonaws.com/bucket/object",
valid: false,
},
{
url: "192.168.1.1",
valid: false,
},
}
for _, w := range want {
u, err := url.Parse(w.url)
if err != nil {
t.Fatal("Error:", err)
}
valid := false
if err := isValidEndpointURL(u); err == nil {
valid = true
}
if valid != w.valid {
t.Fatal("Error")
}
}
}

View File

@ -0,0 +1,36 @@
# version format
version: "{build}"
# Operating system (build VM template)
os: Windows Server 2012 R2
clone_folder: c:\gopath\src\github.com\minio\minio-go
# environment variables
environment:
GOPATH: c:\gopath
GO15VENDOREXPERIMENT: 1
# scripts that run after cloning repository
install:
- set PATH=%GOPATH%\bin;c:\go\bin;%PATH%
- go version
- go env
- go get -u github.com/golang/lint/golint
- go get -u golang.org/x/tools/cmd/vet
- go get -u github.com/remyoudompheng/go-misc/deadcode
# to run your custom scripts instead of automatic MSBuild
build_script:
- go vet ./...
- gofmt -s -l .
- golint github.com/minio/minio-go...
- deadcode
- go test -short -v
- go test -short -race -v
# to disable automatic tests
test: off
# to disable deployment
deploy: off

View File

@ -0,0 +1,75 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package minio
// BucketACL - Bucket level access control.
type BucketACL string
// Different types of ACL's currently supported for buckets.
const (
bucketPrivate = BucketACL("private")
bucketReadOnly = BucketACL("public-read")
bucketPublic = BucketACL("public-read-write")
bucketAuthenticated = BucketACL("authenticated-read")
)
// Stringify acl.
func (b BucketACL) String() string {
if string(b) == "" {
return "private"
}
return string(b)
}
// isValidBucketACL - Is provided acl string supported.
func (b BucketACL) isValidBucketACL() bool {
switch true {
case b.isPrivate():
fallthrough
case b.isReadOnly():
fallthrough
case b.isPublic():
fallthrough
case b.isAuthenticated():
return true
case b.String() == "private":
// By default its "private"
return true
default:
return false
}
}
// isPrivate - Is acl Private.
func (b BucketACL) isPrivate() bool {
return b == bucketPrivate
}
// isReadOnly - Is acl ReadOnly (public-read).
func (b BucketACL) isReadOnly() bool {
return b == bucketReadOnly
}
// isPublic - Is acl Public (public-read-write).
func (b BucketACL) isPublic() bool {
return b == bucketPublic
}
// isAuthenticated - Is acl AuthenticatedRead.
func (b BucketACL) isAuthenticated() bool {
return b == bucketAuthenticated
}

View File

@ -0,0 +1,154 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package minio
import (
"encoding/hex"
"net/http"
"net/url"
"path/filepath"
"sync"
)
// bucketLocationCache - Provides a simple mechanism to hold bucket
// locations in memory.
type bucketLocationCache struct {
// mutex is used for handling the concurrent
// read/write requests for cache.
sync.RWMutex
// items holds the cached bucket locations.
items map[string]string
}
// newBucketLocationCache - Provides a new bucket location cache to be
// used internally with the client object.
func newBucketLocationCache() *bucketLocationCache {
return &bucketLocationCache{
items: make(map[string]string),
}
}
// Get - Returns a value of a given key if it exists.
func (r *bucketLocationCache) Get(bucketName string) (location string, ok bool) {
r.RLock()
defer r.RUnlock()
location, ok = r.items[bucketName]
return
}
// Set - Will persist a value into cache.
func (r *bucketLocationCache) Set(bucketName string, location string) {
r.Lock()
defer r.Unlock()
r.items[bucketName] = location
}
// Delete - Deletes a bucket name from cache.
func (r *bucketLocationCache) Delete(bucketName string) {
r.Lock()
defer r.Unlock()
delete(r.items, bucketName)
}
// getBucketLocation - Get location for the bucketName from location map cache.
func (c Client) getBucketLocation(bucketName string) (string, error) {
// For anonymous requests, default to "us-east-1" and let other calls
// move forward.
if c.anonymous {
return "us-east-1", nil
}
if location, ok := c.bucketLocCache.Get(bucketName); ok {
return location, nil
}
// Initialize a new request.
req, err := c.getBucketLocationRequest(bucketName)
if err != nil {
return "", err
}
// Initiate the request.
resp, err := c.do(req)
defer closeResponse(resp)
if err != nil {
return "", err
}
if resp != nil {
if resp.StatusCode != http.StatusOK {
return "", HTTPRespToErrorResponse(resp, bucketName, "")
}
}
// Extract location.
var locationConstraint string
err = xmlDecoder(resp.Body, &locationConstraint)
if err != nil {
return "", err
}
location := locationConstraint
// An empty location defaults to 'us-east-1'.
if location == "" {
location = "us-east-1"
}
// Location 'EU' is a legacy alias; convert it to 'eu-west-1'.
if location == "EU" {
location = "eu-west-1"
}
// Save the location into cache.
c.bucketLocCache.Set(bucketName, location)
// Return.
return location, nil
}
// getBucketLocationRequest - Wrapper creates a new getBucketLocation request.
func (c Client) getBucketLocationRequest(bucketName string) (*http.Request, error) {
// Set location query.
urlValues := make(url.Values)
urlValues.Set("location", "")
// Set get bucket location always as path style.
targetURL := c.endpointURL
targetURL.Path = filepath.Join(bucketName, "")
targetURL.RawQuery = urlValues.Encode()
// Get a new HTTP request for the method.
req, err := http.NewRequest("GET", targetURL.String(), nil)
if err != nil {
return nil, err
}
// Set UserAgent for the request.
c.setUserAgent(req)
// Set sha256 sum for signature calculation only with signature version '4'.
if c.signature.isV4() {
req.Header.Set("X-Amz-Content-Sha256", hex.EncodeToString(sum256([]byte{})))
}
// Sign the request.
if c.signature.isV4() {
req = SignV4(*req, c.accessKeyID, c.secretAccessKey, "us-east-1")
} else if c.signature.isV2() {
req = SignV2(*req, c.accessKeyID, c.secretAccessKey)
}
return req, nil
}
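For illustration, here is a minimal sketch of how the cache's RWMutex-guarded accessors behave under concurrent use; the function name and bucket names are hypothetical and exist only for this sketch (it assumes fmt is imported alongside the file's existing sync import).

// Sketch (hypothetical, in-package): concurrent Set/Get on the location cache.
func exampleLocationCacheUsage() {
cache := newBucketLocationCache()
var wg sync.WaitGroup
for i := 0; i < 4; i++ {
wg.Add(1)
go func(n int) {
defer wg.Done()
bucket := fmt.Sprintf("bucket-%d", n)
cache.Set(bucket, "us-east-1") // takes the write lock
if loc, ok := cache.Get(bucket); ok { // takes a read lock
_ = loc
}
}(i)
}
wg.Wait()
}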

View File

@ -0,0 +1,42 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package minio
/// Multipart upload defaults.
// minimumPartSize - minimum part size 5MiB per object after which
// putObject behaves internally as multipart.
const minimumPartSize = 1024 * 1024 * 5
// maxParts - maximum parts for a single multipart session.
const maxParts = 10000
// maxPartSize - maximum part size 5GiB for a single multipart upload
// operation.
const maxPartSize = 1024 * 1024 * 1024 * 5
// maxSinglePutObjectSize - maximum size 5GiB of object per PUT
// operation.
const maxSinglePutObjectSize = 1024 * 1024 * 1024 * 5
// maxMultipartPutObjectSize - maximum size 5TiB of object for
// Multipart operation.
const maxMultipartPutObjectSize = 1024 * 1024 * 1024 * 1024 * 5
// optimalReadAtBufferSize - optimal buffer 5MiB used for reading
// through ReadAt operation.
const optimalReadAtBufferSize = 1024 * 1024 * 5
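To see how these limits interact, here is a hedged sketch of one way a part size could be derived from an object size; the exact rounding inside optimalPartSize may differ, so treat this as an assumption rather than the library's algorithm.

// Sketch (assumption): smallest part size that fits the object into maxParts
// parts, rounded up to a multiple of minimumPartSize, capped at maxPartSize.
func sketchOptimalPartSize(objectSize int64) int64 {
partSize := objectSize / maxParts
// Round up to the next multiple of the 5MiB minimum.
partSize = ((partSize + minimumPartSize - 1) / minimumPartSize) * minimumPartSize
if partSize < minimumPartSize {
partSize = minimumPartSize
}
if partSize > maxPartSize {
partSize = maxPartSize
}
return partSize
}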

View File

@ -0,0 +1,46 @@
// +build ignore
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"log"
"github.com/minio/minio-go"
)
func main() {
// Note: my-bucketname is a dummy value, please replace it with the original value.
// Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access.
// This boolean value is the last argument for New().
// New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
// determined based on the Endpoint value.
s3Client, err := minio.New("play.minio.io:9002", "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", false)
if err != nil {
log.Fatalln(err)
}
err = s3Client.BucketExists("my-bucketname")
if err != nil {
log.Fatalln(err)
}
log.Println("Success")
}

View File

@ -0,0 +1,44 @@
// +build ignore
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"log"
"github.com/minio/minio-go"
)
func main() {
// Note: my-bucketname, my-objectname and my-filename.csv are dummy values, please replace them with original values.
// Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access.
// This boolean value is the last argument for New().
// New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
// determined based on the Endpoint value.
s3Client, err := minio.New("play.minio.io:9002", "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", false)
if err != nil {
log.Fatalln(err)
}
if err := s3Client.FGetObject("bucket-name", "objectName", "fileName.csv"); err != nil {
log.Fatalln(err)
}
log.Println("Successfully saved fileName.csv")
}

View File

@ -0,0 +1,44 @@
// +build ignore
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"log"
"github.com/minio/minio-go"
)
func main() {
// Note: my-bucketname, my-objectname and my-filename.csv are dummy values, please replace them with original values.
// Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access.
// This boolean value is the last argument for New().
// New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
// determined based on the Endpoint value.
s3Client, err := minio.New("play.minio.io:9002", "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", false)
if err != nil {
log.Fatalln(err)
}
if _, err := s3Client.FPutObject("my-bucketname", "my-objectname", "my-filename.csv", "application/csv"); err != nil {
log.Fatalln(err)
}
log.Println("Successfully uploaded my-filename.csv")
}

View File

@ -0,0 +1,46 @@
// +build ignore
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"log"
"github.com/minio/minio-go"
)
func main() {
// Note: my-bucketname is a dummy value, please replace it with the original value.
// Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access.
// This boolean value is the last argument for New().
// New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
// determined based on the Endpoint value.
s3Client, err := minio.New("play.minio.io:9002", "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", false)
if err != nil {
log.Fatalln(err)
}
acl, err := s3Client.GetBucketACL("my-bucketname")
if err != nil {
log.Fatalln(err)
}
log.Println(acl)
}

View File

@ -0,0 +1,68 @@
// +build ignore
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"io"
"log"
"os"
"github.com/minio/minio-go"
)
func main() {
// Note: my-bucketname, my-objectname and my-testfile are dummy values, please replace them with original values.
// Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access.
// This boolean value is the last argument for New().
// New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
// determined based on the Endpoint value.
s3Client, err := minio.New("play.minio.io:9002", "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", false)
if err != nil {
log.Fatalln(err)
}
reader, err := s3Client.GetObject("my-bucketname", "my-objectname")
if err != nil {
log.Fatalln(err)
}
defer reader.Close()
localFile, err := os.Create("my-testfile")
if err != nil {
log.Fatalln(err)
}
defer localFile.Close()
stat, err := reader.Stat()
if err != nil {
log.Fatalln(err)
}
if _, err := io.CopyN(localFile, reader, stat.Size); err != nil {
log.Fatalln(err)
}
}

View File

@ -0,0 +1,45 @@
// +build ignore
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"log"
"github.com/minio/minio-go"
)
func main() {
// Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access.
// This boolean value is the last argument for New().
// New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
// determined based on the Endpoint value.
s3Client, err := minio.New("play.minio.io:9002", "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", false)
if err != nil {
log.Fatalln(err)
}
buckets, err := s3Client.ListBuckets()
if err != nil {
log.Fatalln(err)
}
for _, bucket := range buckets {
log.Println(bucket)
}
}

View File

@ -0,0 +1,56 @@
// +build ignore
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"fmt"
"log"
"github.com/minio/minio-go"
)
func main() {
// Note: my-bucketname and my-prefixname are dummy values, please replace them with original values.
// Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access.
// This boolean value is the last argument for New().
// New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
// determined based on the Endpoint value.
s3Client, err := minio.New("play.minio.io:9002", "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", false)
if err != nil {
log.Fatalln(err)
}
// Create a done channel to control the 'ListIncompleteUploads' goroutine.
doneCh := make(chan struct{})
// Indicate to our routine to exit cleanly upon return.
defer close(doneCh)
// List all multipart uploads from a bucket-name with a matching prefix.
for multipartObject := range s3Client.ListIncompleteUploads("my-bucketname", "my-prefixname", true, doneCh) {
if multipartObject.Err != nil {
fmt.Println(multipartObject.Err)
return
}
fmt.Println(multipartObject)
}
return
}

View File

@ -0,0 +1,56 @@
// +build ignore
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"fmt"
"log"
"github.com/minio/minio-go"
)
func main() {
// Note: my-bucketname and my-prefixname are dummy values, please replace them with original values.
// Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access.
// This boolean value is the last argument for New().
// New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
// determined based on the Endpoint value.
s3Client, err := minio.New("play.minio.io:9002", "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", false)
if err != nil {
log.Fatalln(err)
}
// Create a done channel to control the 'ListObjects' goroutine.
doneCh := make(chan struct{})
// Indicate to our routine to exit cleanly upon return.
defer close(doneCh)
// List all objects from a bucket-name with a matching prefix.
for object := range s3Client.ListObjects("my-bucketname", "my-prefixname", true, doneCh) {
if object.Err != nil {
fmt.Println(object.Err)
return
}
fmt.Println(object)
}
return
}

View File

@ -0,0 +1,45 @@
// +build ignore
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"log"
"github.com/minio/minio-go"
)
func main() {
// Note: my-bucketname is a dummy value, please replace it with the original value.
// Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access.
// This boolean value is the last argument for New().
// New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
// determined based on the Endpoint value.
s3Client, err := minio.New("play.minio.io:9002", "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", false)
if err != nil {
log.Fatalln(err)
}
err = s3Client.MakeBucket("my-bucketname", minio.BucketACL("private"), "us-east-1")
if err != nil {
log.Fatalln(err)
}
log.Println("Success")
}

View File

@ -0,0 +1,46 @@
// +build ignore
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"log"
"time"
"github.com/minio/minio-go"
)
func main() {
// Note: my-bucketname and my-objectname are dummy values, please replace them with original values.
// Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access.
// This boolean value is the last argument for New().
// New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
// determined based on the Endpoint value.
s3Client, err := minio.New("play.minio.io:9002", "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", false)
if err != nil {
log.Fatalln(err)
}
presignedURL, err := s3Client.PresignedGetObject("my-bucketname", "my-objectname", time.Duration(1000)*time.Second)
if err != nil {
log.Fatalln(err)
}
log.Println(presignedURL)
}
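The URL printed above can be fetched by any plain HTTP client without credentials until it expires; a minimal hedged sketch continuing the example (it assumes net/http and io/ioutil are added to the imports):

// Download the object through the presigned URL; no signing needed here.
resp, err := http.Get(presignedURL)
if err != nil {
log.Fatalln(err)
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
log.Fatalln("unexpected status:", resp.Status)
}
data, err := ioutil.ReadAll(resp.Body)
if err != nil {
log.Fatalln(err)
}
log.Println("downloaded", len(data), "bytes via presigned URL")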

View File

@ -0,0 +1,56 @@
// +build ignore
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"fmt"
"log"
"time"
"github.com/minio/minio-go"
)
func main() {
// Note: my-bucketname and my-objectname are dummy values, please replace them with original values.
// Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access.
// This boolean value is the last argument for New().
// New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
// determined based on the Endpoint value.
s3Client, err := minio.New("play.minio.io:9002", "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", false)
if err != nil {
log.Fatalln(err)
}
policy := minio.NewPostPolicy()
policy.SetBucket("my-bucketname")
policy.SetKey("my-objectname")
policy.SetExpires(time.Now().UTC().AddDate(0, 0, 10)) // expires in 10 days
m, err := s3Client.PresignedPostPolicy(policy)
if err != nil {
log.Fatalln(err)
}
fmt.Printf("curl ")
for k, v := range m {
fmt.Printf("-F %s=%s ", k, v)
}
fmt.Printf("-F file=@/etc/bashrc ")
fmt.Printf("https://play.minio.io:9002/my-bucketname\n")
}
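The same form fields can also be posted directly from Go instead of through curl; a hedged sketch reusing the policy map m from above (it assumes bytes, io, mime/multipart, net/http and os are added to the imports; S3 expects the file field to come last in the form):

// Build a multipart form carrying the policy fields, then the file.
var body bytes.Buffer
writer := multipart.NewWriter(&body)
for k, v := range m {
writer.WriteField(k, v) // policy fields first
}
part, err := writer.CreateFormFile("file", "my-objectname")
if err != nil {
log.Fatalln(err)
}
f, err := os.Open("/etc/bashrc") // the file to upload
if err != nil {
log.Fatalln(err)
}
defer f.Close()
if _, err := io.Copy(part, f); err != nil {
log.Fatalln(err)
}
writer.Close()
resp, err := http.Post("https://play.minio.io:9002/my-bucketname", writer.FormDataContentType(), &body)
if err != nil {
log.Fatalln(err)
}
defer resp.Body.Close()
log.Println("post status:", resp.Status)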

View File

@ -0,0 +1,46 @@
// +build ignore
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"log"
"time"
"github.com/minio/minio-go"
)
func main() {
// Note: my-bucketname and my-objectname are dummy values, please replace them with original values.
// Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access.
// This boolean value is the last argument for New().
// New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
// determined based on the Endpoint value.
s3Client, err := minio.New("play.minio.io:9002", "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", false)
if err != nil {
log.Fatalln(err)
}
presignedURL, err := s3Client.PresignedPutObject("my-bucketname", "my-objectname", time.Duration(1000)*time.Second)
if err != nil {
log.Fatalln(err)
}
log.Println(presignedURL)
}
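Any HTTP client can then upload by issuing a plain PUT with the object bytes as the request body; a minimal hedged sketch continuing the example (it assumes bytes and net/http are added to the imports; the payload is illustrative):

// Upload through the presigned URL; the server validates the signature in the query.
payload := []byte("hello from a presigned PUT")
req, err := http.NewRequest("PUT", presignedURL, bytes.NewReader(payload))
if err != nil {
log.Fatalln(err)
}
resp, err := http.DefaultClient.Do(req)
if err != nil {
log.Fatalln(err)
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
log.Fatalln("unexpected status:", resp.Status)
}
log.Println("uploaded", len(payload), "bytes via presigned URL")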

View File

@ -0,0 +1,52 @@
// +build ignore
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"log"
"os"
"github.com/minio/minio-go"
)
func main() {
// Note: my-bucketname, my-objectname and my-testfile are dummy values, please replace them with original values.
// Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access.
// This boolean value is the last argument for New().
// New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
// determined based on the Endpoint value.
s3Client, err := minio.New("play.minio.io:9002", "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", false)
if err != nil {
log.Fatalln(err)
}
object, err := os.Open("my-testfile")
if err != nil {
log.Fatalln(err)
}
defer object.Close()
n, err := s3Client.PutObject("my-bucketname", "my-objectname", object, "application/octet-stream")
if err != nil {
log.Fatalln(err)
}
log.Println("Uploaded", "my-objectname", " of size: ", n, "Successfully.")
}

View File

@ -0,0 +1,46 @@
// +build ignore
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"log"
"github.com/minio/minio-go"
)
func main() {
// Note: my-bucketname is a dummy value, please replace it with the original value.
// Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access.
// This boolean value is the last argument for New().
// New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
// determined based on the Endpoint value.
s3Client, err := minio.New("play.minio.io:9002", "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", false)
if err != nil {
log.Fatalln(err)
}
// This operation will only work if your bucket is empty.
err = s3Client.RemoveBucket("my-bucketname")
if err != nil {
log.Fatalln(err)
}
log.Println("Success")
}

View File

@ -0,0 +1,46 @@
// +build ignore
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"log"
"github.com/minio/minio-go"
)
func main() {
// Note: my-bucketname and my-objectname are dummy values, please replace them with original values.
// Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access.
// This boolean value is the last argument for New().
// New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
// determined based on the Endpoint value.
s3Client, err := minio.New("play.minio.io:9002", "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", false)
if err != nil {
log.Fatalln(err)
}
for err := range s3Client.RemoveIncompleteUpload("my-bucketname", "my-objectname") {
if err != nil {
log.Fatalln(err)
}
}
log.Println("Success")
}

View File

@ -0,0 +1,44 @@
// +build ignore
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"log"
"github.com/minio/minio-go"
)
func main() {
// Note: my-bucketname and my-objectname are dummy values, please replace them with original values.
// Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access.
// This boolean value is the last argument for New().
// New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
// determined based on the Endpoint value.
s3Client, err := minio.New("play.minio.io:9002", "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", false)
if err != nil {
log.Fatalln(err)
}
err = s3Client.RemoveObject("my-bucketname", "my-objectname")
if err != nil {
log.Fatalln(err)
}
log.Println("Success")
}

View File

@ -0,0 +1,45 @@
// +build ignore
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"log"
"github.com/minio/minio-go"
)
func main() {
// Note: my-bucketname is a dummy value, please replace it with the original value.
// Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access.
// This boolean value is the last argument for New().
// New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
// determined based on the Endpoint value.
s3Client, err := minio.New("play.minio.io:9002", "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", false)
if err != nil {
log.Fatalln(err)
}
err = s3Client.SetBucketACL("my-bucketname", minio.BucketACL("public-read-write"))
if err != nil {
log.Fatalln(err)
}
}

View File

@ -0,0 +1,44 @@
// +build ignore
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"log"
"github.com/minio/minio-go"
)
func main() {
// Note: my-bucketname and my-objectname are dummy values, please replace them with original values.
// Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access.
// This boolean value is the last argument for New().
// New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
// determined based on the Endpoint value.
s3Client, err := minio.New("play.minio.io:9002", "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", false)
if err != nil {
log.Fatalln(err)
}
stat, err := s3Client.StatObject("my-bucketname", "my-objectname")
if err != nil {
log.Fatalln(err)
}
log.Println(stat)
}

View File

@ -0,0 +1,47 @@
// +build ignore
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"log"
"github.com/minio/minio-go"
)
func main() {
// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY and my-bucketname are
// dummy values, please replace them with original values.
// Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access.
// This boolean value is the last argument for New().
// New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
// determined based on the Endpoint value.
s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", false)
if err != nil {
log.Fatalln(err)
}
err = s3Client.BucketExists("my-bucketname")
if err != nil {
log.Fatalln(err)
}
log.Println("Success")
}

View File

@ -0,0 +1,45 @@
// +build ignore
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"log"
"github.com/minio/minio-go"
)
func main() {
// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname, my-objectname
// and my-filename.csv are dummy values, please replace them with original values.
// Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access.
// This boolean value is the last argument for New().
// New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
// determined based on the Endpoint value.
s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", false)
if err != nil {
log.Fatalln(err)
}
if err := s3Client.FGetObject("my-bucketname", "my-objectname", "my-filename.csv"); err != nil {
log.Fatalln(err)
}
log.Println("Successfully saved my-filename.csv")
}

View File

@ -0,0 +1,45 @@
// +build ignore
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"log"
"github.com/minio/minio-go"
)
func main() {
// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname, my-objectname
// and my-filename.csv are dummy values, please replace them with original values.
// Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access.
// This boolean value is the last argument for New().
// New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
// determined based on the Endpoint value.
s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", false)
if err != nil {
log.Fatalln(err)
}
if _, err := s3Client.FPutObject("my-bucketname", "my-objectname", "my-filename.csv", "application/csv"); err != nil {
log.Fatalln(err)
}
log.Println("Successfully uploaded my-filename.csv")
}

View File

@ -0,0 +1,47 @@
// +build ignore
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"log"
"github.com/minio/minio-go"
)
func main() {
// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY and my-bucketname are
// dummy values, please replace them with original values.
// Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access.
// This boolean value is the last argument for New().
// New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
// determined based on the Endpoint value.
s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", false)
if err != nil {
log.Fatalln(err)
}
acl, err := s3Client.GetBucketACL("my-bucketname")
if err != nil {
log.Fatalln(err)
}
log.Println(acl)
}

View File

@ -0,0 +1,63 @@
// +build ignore
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"io"
"log"
"os"
"github.com/minio/minio-go"
)
func main() {
// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname, my-objectname and
// my-testfile are dummy values, please replace them with original values.
// Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access.
// This boolean value is the last argument for New().
// New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
// determined based on the Endpoint value.
s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESS-KEY-HERE", "YOUR-SECRET-KEY-HERE", false)
if err != nil {
log.Fatalln(err)
}
reader, err := s3Client.GetObject("my-bucketname", "my-objectname")
if err != nil {
log.Fatalln(err)
}
defer reader.Close()
localFile, err := os.Create("my-testfile")
if err != nil {
log.Fatalln(err)
}
defer localFile.Close()
stat, err := reader.Stat()
if err != nil {
log.Fatalln(err)
}
if _, err := io.CopyN(localFile, reader, stat.Size); err != nil {
log.Fatalln(err)
}
}

View File

@ -0,0 +1,48 @@
// +build ignore
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"log"
"github.com/minio/minio-go"
)
func main() {
// Note: YOUR-ACCESSKEYID and YOUR-SECRETACCESSKEY are
// dummy values, please replace them with original values.
// Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access.
// This boolean value is the last argument for New().
// New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
// determined based on the Endpoint value.
s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", false)
if err != nil {
log.Fatalln(err)
}
buckets, err := s3Client.ListBuckets()
if err != nil {
log.Fatalln(err)
}
for _, bucket := range buckets {
log.Println(bucket)
}
}

View File

@ -0,0 +1,57 @@
// +build ignore
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"fmt"
"log"
"github.com/minio/minio-go"
)
func main() {
// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname and my-prefixname
// are dummy values; please replace them with actual values.
// Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access.
// This boolean value is the last argument for New().
// New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
// determined based on the Endpoint value.
s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", false)
if err != nil {
log.Fatalln(err)
}
// Create a done channel to control 'ListIncompleteUploads' goroutine.
doneCh := make(chan struct{})
// Indicate to our routine to exit cleanly upon return.
defer close(doneCh)
// List all multipart uploads from a bucket-name with a matching prefix.
for multipartObject := range s3Client.ListIncompleteUploads("my-bucketname", "my-prefixname", true, doneCh) {
if multipartObject.Err != nil {
fmt.Println(multipartObject.Err)
return
}
fmt.Println(multipartObject)
}
return
}

View File

@ -0,0 +1,57 @@
// +build ignore
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"fmt"
"github.com/minio/minio-go"
)
func main() {
// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname and my-prefixname
// are dummy values; please replace them with actual values.
// Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access.
// This boolean value is the last argument for New().
// New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
// determined based on the Endpoint value.
s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", false)
if err != nil {
fmt.Println(err)
return
}
// Create a done channel to control 'ListObjects' goroutine.
doneCh := make(chan struct{})
// Indicate to our routine to exit cleanly upon return.
defer close(doneCh)
// List all objects from a bucket-name with a matching prefix.
for object := range s3Client.ListObjects("my-bucketname", "my-prefixname", true, doneCh) {
if object.Err != nil {
fmt.Println(object.Err)
return
}
fmt.Println(object)
}
return
}

View File

@ -0,0 +1,46 @@
// +build ignore
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"log"
"github.com/minio/minio-go"
)
func main() {
// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY and my-bucketname are
// dummy values; please replace them with actual values.
// Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access.
// This boolean value is the last argument for New().
// New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
// determined based on the Endpoint value.
s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", false)
if err != nil {
log.Fatalln(err)
}
err = s3Client.MakeBucket("my-bucketname", minio.BucketACL("private"), "us-east-1")
if err != nil {
log.Fatalln(err)
}
log.Println("Success")
}

View File

@ -0,0 +1,47 @@
// +build ignore
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"log"
"time"
"github.com/minio/minio-go"
)
func main() {
// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname and my-objectname
// are dummy values; please replace them with actual values.
// Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access.
// This boolean value is the last argument for New().
// New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
// determined based on the Endpoint value.
s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", false)
if err != nil {
log.Fatalln(err)
}
presignedURL, err := s3Client.PresignedGetObject("my-bucketname", "my-objectname", time.Duration(1000)*time.Second)
if err != nil {
log.Fatalln(err)
}
log.Println(presignedURL)
}

View File

@ -0,0 +1,57 @@
// +build ignore
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"fmt"
"log"
"time"
"github.com/minio/minio-go"
)
func main() {
// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname and my-objectname
// are dummy values; please replace them with actual values.
// Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access.
// This boolean value is the last argument for New().
// New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
// determined based on the Endpoint value.
s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", false)
if err != nil {
log.Fatalln(err)
}
policy := minio.NewPostPolicy()
policy.SetBucket("my-bucketname")
policy.SetKey("my-objectname")
policy.SetExpires(time.Now().UTC().AddDate(0, 0, 10)) // expires in 10 days
m, err := s3Client.PresignedPostPolicy(policy)
if err != nil {
log.Fatalln(err)
}
fmt.Printf("curl ")
for k, v := range m {
fmt.Printf("-F %s=%s ", k, v)
}
fmt.Printf("-F file=@/etc/bash.bashrc ")
fmt.Printf("https://my-bucketname.s3.amazonaws.com\n")
}

View File

@ -0,0 +1,47 @@
// +build ignore
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"log"
"time"
"github.com/minio/minio-go"
)
func main() {
// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname and my-objectname
// are dummy values; please replace them with actual values.
// Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access.
// This boolean value is the last argument for New().
// New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
// determined based on the Endpoint value.
s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", false)
if err != nil {
log.Fatalln(err)
}
presignedURL, err := s3Client.PresignedPutObject("my-bucketname", "my-objectname", time.Duration(1000)*time.Second)
if err != nil {
log.Fatalln(err)
}
log.Println(presignedURL)
}

View File

@ -0,0 +1,53 @@
// +build ignore
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"log"
"os"
"github.com/minio/minio-go"
)
func main() {
// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-testfile, my-bucketname and
// my-objectname are dummy values; please replace them with actual values.
// Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access.
// This boolean value is the last argument for New().
// New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
// determined based on the Endpoint value.
s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", false)
if err != nil {
log.Fatalln(err)
}
object, err := os.Open("my-testfile")
if err != nil {
log.Fatalln(err)
}
defer object.Close()
n, err := s3Client.PutObject("my-bucketname", "my-objectname", object, "application/octet-stream")
if err != nil {
log.Fatalln(err)
}
log.Println("Uploaded", "my-objectname", " of size: ", n, "Successfully.")
}

View File

@ -0,0 +1,48 @@
// +build ignore
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"log"
"github.com/minio/minio-go"
)
func main() {
// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY and my-bucketname are
// dummy values; please replace them with actual values.
// Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access.
// This boolean value is the last argument for New().
// New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
// determined based on the Endpoint value.
s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", false)
if err != nil {
log.Fatalln(err)
}
// This operation will only work if your bucket is empty.
err = s3Client.RemoveBucket("my-bucketname")
if err != nil {
log.Fatalln(err)
}
log.Println("Success")
}

View File

@ -0,0 +1,47 @@
// +build ignore
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"log"
"github.com/minio/minio-go"
)
func main() {
// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname and my-objectname
// are dummy values; please replace them with actual values.
// Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access.
// This boolean value is the last argument for New().
// New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
// determined based on the Endpoint value.
s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", false)
if err != nil {
log.Fatalln(err)
}
for err := range s3Client.RemoveIncompleteUpload("my-bucketname", "my-objectname") {
if err != nil {
log.Fatalln(err)
}
}
log.Println("Success")
}

View File

@ -0,0 +1,45 @@
// +build ignore
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"log"
"github.com/minio/minio-go"
)
func main() {
// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname and my-objectname
// are dummy values; please replace them with actual values.
// Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access.
// This boolean value is the last argument for New().
// New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
// determined based on the Endpoint value.
s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", false)
if err != nil {
log.Fatalln(err)
}
err = s3Client.RemoveObject("my-bucketname", "my-objectname")
if err != nil {
log.Fatalln(err)
}
log.Println("Success")
}

View File

@ -0,0 +1,46 @@
// +build ignore
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"log"
"github.com/minio/minio-go"
)
func main() {
// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY and my-bucketname
// are dummy values; please replace them with actual values.
// Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access.
// This boolean value is the last argument for New().
// New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
// determined based on the Endpoint value.
s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", false)
if err != nil {
log.Fatalln(err)
}
err = s3Client.SetBucketACL("my-bucketname", minio.BucketACL("public-read-write"))
if err != nil {
log.Fatalln(err)
}
}

View File

@ -0,0 +1,45 @@
// +build ignore
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"log"
"github.com/minio/minio-go"
)
func main() {
// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname and my-objectname
// are dummy values; please replace them with actual values.
// Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access.
// This boolean value is the last argument for New().
// New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
// determined based on the Endpoint value.
s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", false)
if err != nil {
log.Fatalln(err)
}
stat, err := s3Client.StatObject("my-bucketname", "my-objectname")
if err != nil {
log.Fatalln(err)
}
log.Println(stat)
}

View File

@ -0,0 +1,191 @@
package minio
import (
"encoding/base64"
"fmt"
"strings"
"time"
)
// expirationDateFormat date format for expiration key in json policy.
const expirationDateFormat = "2006-01-02T15:04:05.999Z"
// policyCondition explanation:
// http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-HTTPPOSTConstructPolicy.html
//
// Example:
//
// policyCondition {
// matchType: "eq",
// condition: "$Content-Type",
// value: "image/png",
// }
//
type policyCondition struct {
matchType string
condition string
value string
}
// PostPolicy - Provides strict static type conversion and validation
// for Amazon S3's POST policy JSON string.
type PostPolicy struct {
// Expiration date and time of the POST policy.
expiration time.Time
// Collection of different policy conditions.
conditions []policyCondition
// ContentLengthRange minimum and maximum allowable size for the
// uploaded content.
contentLengthRange struct {
min int64
max int64
}
// Post form data.
formData map[string]string
}
// NewPostPolicy - Instantiate new post policy.
func NewPostPolicy() *PostPolicy {
p := &PostPolicy{}
p.conditions = make([]policyCondition, 0)
p.formData = make(map[string]string)
return p
}
// SetExpires - Sets expiration time for the new policy.
func (p *PostPolicy) SetExpires(t time.Time) error {
if t.IsZero() {
return ErrInvalidArgument("No expiry time set.")
}
p.expiration = t
return nil
}
// SetKey - Sets an object name for the policy based upload.
func (p *PostPolicy) SetKey(key string) error {
if strings.TrimSpace(key) == "" {
return ErrInvalidArgument("Object name is empty.")
}
policyCond := policyCondition{
matchType: "eq",
condition: "$key",
value: key,
}
if err := p.addNewPolicy(policyCond); err != nil {
return err
}
p.formData["key"] = key
return nil
}
// SetKeyStartsWith - Sets an object name prefix that a policy based upload
// can start with.
func (p *PostPolicy) SetKeyStartsWith(keyStartsWith string) error {
if strings.TrimSpace(keyStartsWith) == "" {
return ErrInvalidArgument("Object prefix is empty.")
}
policyCond := policyCondition{
matchType: "starts-with",
condition: "$key",
value: keyStartsWith,
}
if err := p.addNewPolicy(policyCond); err != nil {
return err
}
p.formData["key"] = keyStartsWith
return nil
}
// SetBucket - Sets the bucket into which objects will be uploaded.
func (p *PostPolicy) SetBucket(bucketName string) error {
if strings.TrimSpace(bucketName) == "" {
return ErrInvalidArgument("Bucket name is empty.")
}
policyCond := policyCondition{
matchType: "eq",
condition: "$bucket",
value: bucketName,
}
if err := p.addNewPolicy(policyCond); err != nil {
return err
}
p.formData["bucket"] = bucketName
return nil
}
// SetContentType - Sets content-type of the object for this policy
// based upload.
func (p *PostPolicy) SetContentType(contentType string) error {
if strings.TrimSpace(contentType) == "" {
return ErrInvalidArgument("No content type specified.")
}
policyCond := policyCondition{
matchType: "eq",
condition: "$Content-Type",
value: contentType,
}
if err := p.addNewPolicy(policyCond); err != nil {
return err
}
p.formData["Content-Type"] = contentType
return nil
}
// SetContentLengthRange - Set new min and max content length
// condition for all incoming uploads.
func (p *PostPolicy) SetContentLengthRange(min, max int64) error {
if min > max {
return ErrInvalidArgument("Minimum limit is larger than maximum limit.")
}
if min < 0 {
return ErrInvalidArgument("Minimum limit cannot be negative.")
}
if max < 0 {
return ErrInvalidArgument("Maximum limit cannot be negative.")
}
p.contentLengthRange.min = min
p.contentLengthRange.max = max
return nil
}
// addNewPolicy - internal helper to validate adding new policies.
func (p *PostPolicy) addNewPolicy(policyCond policyCondition) error {
if policyCond.matchType == "" || policyCond.condition == "" || policyCond.value == "" {
return ErrInvalidArgument("Policy fields are empty.")
}
p.conditions = append(p.conditions, policyCond)
return nil
}
// Stringer interface for printing policy in json formatted string.
func (p PostPolicy) String() string {
return string(p.marshalJSON())
}
// marshalJSON - Provides Marshalled JSON in bytes.
func (p PostPolicy) marshalJSON() []byte {
expirationStr := `"expiration":"` + p.expiration.Format(expirationDateFormat) + `"`
var conditionsStr string
conditions := []string{}
for _, po := range p.conditions {
conditions = append(conditions, fmt.Sprintf("[\"%s\",\"%s\",\"%s\"]", po.matchType, po.condition, po.value))
}
if p.contentLengthRange.min != 0 || p.contentLengthRange.max != 0 {
conditions = append(conditions, fmt.Sprintf("[\"content-length-range\", %d, %d]",
p.contentLengthRange.min, p.contentLengthRange.max))
}
if len(conditions) > 0 {
conditionsStr = `"conditions":[` + strings.Join(conditions, ",") + "]"
}
retStr := "{"
retStr = retStr + expirationStr + ","
retStr = retStr + conditionsStr
retStr = retStr + "}"
return []byte(retStr)
}
// base64 - Produces base64 of PostPolicy's Marshalled json.
func (p PostPolicy) base64() string {
return base64.StdEncoding.EncodeToString(p.marshalJSON())
}
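
For reference, a minimal sketch of driving the PostPolicy API above directly, in the style of the standalone examples earlier in this change; the bucket name, key prefix and content type are placeholder values, and the printed output is the JSON policy document rendered by the String() method shown above.

// +build ignore
package main

import (
	"log"
	"time"

	"github.com/minio/minio-go"
)

func main() {
	policy := minio.NewPostPolicy()
	// Each setter appends one condition, e.g. ["eq","$bucket","my-bucketname"].
	policy.SetBucket("my-bucketname")
	policy.SetKeyStartsWith("uploads/") // placeholder prefix
	policy.SetContentType("image/png")
	policy.SetExpires(time.Now().UTC().AddDate(0, 0, 10)) // expires in 10 days
	// String() renders the marshalled JSON policy.
	log.Println(policy)
}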

View File

@ -0,0 +1,289 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package minio
import (
"bytes"
"crypto/hmac"
"crypto/sha1"
"encoding/base64"
"fmt"
"net/http"
"net/url"
"sort"
"strconv"
"strings"
"time"
)
// Signature and API related constants.
const (
signV2Algorithm = "AWS"
)
// Encode input URL path to URL encoded path.
func encodeURL2Path(u *url.URL) (path string) {
// Encode URL path.
if strings.HasSuffix(u.Host, ".s3.amazonaws.com") {
path = "/" + strings.TrimSuffix(u.Host, ".s3.amazonaws.com")
path += u.Path
path = urlEncodePath(path)
return
}
if strings.HasSuffix(u.Host, ".storage.googleapis.com") {
path = "/" + strings.TrimSuffix(u.Host, ".storage.googleapis.com")
path += u.Path
path = urlEncodePath(path)
return
}
path = urlEncodePath(u.Path)
return
}
// PreSignV2 - presign the request in the following style.
// https://${S3_BUCKET}.s3.amazonaws.com/${S3_OBJECT}?AWSAccessKeyId=${S3_ACCESS_KEY}&Expires=${TIMESTAMP}&Signature=${SIGNATURE}.
func PreSignV2(req http.Request, accessKeyID, secretAccessKey string, expires int64) *http.Request {
// Presign is not needed for anonymous credentials.
if accessKeyID == "" || secretAccessKey == "" {
return &req
}
d := time.Now().UTC()
// Add date if not present.
if date := req.Header.Get("Date"); date == "" {
req.Header.Set("Date", d.Format(http.TimeFormat))
}
// Get encoded URL path.
path := encodeURL2Path(req.URL)
// Find epoch expires when the request will expire.
epochExpires := d.Unix() + expires
// Get string to sign.
stringToSign := fmt.Sprintf("%s\n\n\n%d\n%s", req.Method, epochExpires, path)
hm := hmac.New(sha1.New, []byte(secretAccessKey))
hm.Write([]byte(stringToSign))
// Calculate signature.
signature := base64.StdEncoding.EncodeToString(hm.Sum(nil))
query := req.URL.Query()
// Handle specially for Google Cloud Storage.
if strings.Contains(req.URL.Host, ".storage.googleapis.com") {
query.Set("GoogleAccessId", accessKeyID)
} else {
query.Set("AWSAccessKeyId", accessKeyID)
}
// Fill in Expires and Signature for presigned query.
query.Set("Expires", strconv.FormatInt(epochExpires, 10))
query.Set("Signature", signature)
// Encode query and save.
req.URL.RawQuery = query.Encode()
return &req
}
// PostPresignSignatureV2 - presigned signature for PostPolicy
// request.
func PostPresignSignatureV2(policyBase64, secretAccessKey string) string {
hm := hmac.New(sha1.New, []byte(secretAccessKey))
hm.Write([]byte(policyBase64))
signature := base64.StdEncoding.EncodeToString(hm.Sum(nil))
return signature
}
// Authorization = "AWS" + " " + AWSAccessKeyId + ":" + Signature;
// Signature = Base64( HMAC-SHA1( YourSecretAccessKeyID, UTF-8-Encoding-Of( StringToSign ) ) );
//
// StringToSign = HTTP-Verb + "\n" +
// Content-MD5 + "\n" +
// Content-Type + "\n" +
// Date + "\n" +
// CanonicalizedProtocolHeaders +
// CanonicalizedResource;
//
// CanonicalizedResource = [ "/" + Bucket ] +
// <HTTP-Request-URI, from the protocol name up to the query string> +
// [ subresource, if present. For example "?acl", "?location", "?logging", or "?torrent"];
//
// CanonicalizedProtocolHeaders = <described below>
// SignV2 sign the request before Do() (AWS Signature Version 2).
func SignV2(req http.Request, accessKeyID, secretAccessKey string) *http.Request {
// Signature calculation is not needed for anonymous credentials.
if accessKeyID == "" || secretAccessKey == "" {
return &req
}
// Initial time.
d := time.Now().UTC()
// Add date if not present.
if date := req.Header.Get("Date"); date == "" {
req.Header.Set("Date", d.Format(http.TimeFormat))
}
// Calculate HMAC for secretAccessKey.
stringToSign := getStringToSignV2(req)
hm := hmac.New(sha1.New, []byte(secretAccessKey))
hm.Write([]byte(stringToSign))
// Prepare auth header.
authHeader := new(bytes.Buffer)
authHeader.WriteString(fmt.Sprintf("%s %s:", signV2Algorithm, accessKeyID))
encoder := base64.NewEncoder(base64.StdEncoding, authHeader)
encoder.Write(hm.Sum(nil))
encoder.Close()
// Set Authorization header.
req.Header.Set("Authorization", authHeader.String())
return &req
}
// From the Amazon docs:
//
// StringToSign = HTTP-Verb + "\n" +
// Content-MD5 + "\n" +
// Content-Type + "\n" +
// Date + "\n" +
// CanonicalizedProtocolHeaders +
// CanonicalizedResource;
func getStringToSignV2(req http.Request) string {
buf := new(bytes.Buffer)
// Write standard headers.
writeDefaultHeaders(buf, req)
// Write canonicalized protocol headers if any.
writeCanonicalizedHeaders(buf, req)
// Write canonicalized Query resources if any.
writeCanonicalizedResource(buf, req)
return buf.String()
}
// writeDefaultHeaders - write all default necessary headers.
func writeDefaultHeaders(buf *bytes.Buffer, req http.Request) {
buf.WriteString(req.Method)
buf.WriteByte('\n')
buf.WriteString(req.Header.Get("Content-MD5"))
buf.WriteByte('\n')
buf.WriteString(req.Header.Get("Content-Type"))
buf.WriteByte('\n')
buf.WriteString(req.Header.Get("Date"))
buf.WriteByte('\n')
}
// writeCanonicalizedHeaders - write canonicalized headers.
func writeCanonicalizedHeaders(buf *bytes.Buffer, req http.Request) {
var protoHeaders []string
vals := make(map[string][]string)
for k, vv := range req.Header {
// All the AMZ headers should be lowercase
lk := strings.ToLower(k)
if strings.HasPrefix(lk, "x-amz") {
protoHeaders = append(protoHeaders, lk)
vals[lk] = vv
}
}
sort.Strings(protoHeaders)
for _, k := range protoHeaders {
buf.WriteString(k)
buf.WriteByte(':')
for idx, v := range vals[k] {
if idx > 0 {
buf.WriteByte(',')
}
if strings.Contains(v, "\n") {
// TODO: "Unfold" long headers that
// span multiple lines (as allowed by
// RFC 2616, section 4.2) by replacing
// the folding white-space (including
// new-line) by a single space.
buf.WriteString(v)
} else {
buf.WriteString(v)
}
}
buf.WriteByte('\n')
}
}
// Must be sorted:
var resourceList = []string{
"acl",
"location",
"logging",
"notification",
"partNumber",
"policy",
"response-content-type",
"response-content-language",
"response-expires",
"response-cache-control",
"response-content-disposition",
"response-content-encoding",
"requestPayment",
"torrent",
"uploadId",
"uploads",
"versionId",
"versioning",
"versions",
"website",
}
// From the Amazon docs:
//
// CanonicalizedResource = [ "/" + Bucket ] +
// <HTTP-Request-URI, from the protocol name up to the query string> +
// [ sub-resource, if present. For example "?acl", "?location", "?logging", or "?torrent"];
func writeCanonicalizedResource(buf *bytes.Buffer, req http.Request) error {
// Save request URL.
requestURL := req.URL
// Get encoded URL path.
path := encodeURL2Path(requestURL)
buf.WriteString(path)
sort.Strings(resourceList)
if requestURL.RawQuery != "" {
var n int
vals, _ := url.ParseQuery(requestURL.RawQuery)
// Verify if any sub resource queries are present, if yes
// canonicalize them.
for _, resource := range resourceList {
if vv, ok := vals[resource]; ok && len(vv) > 0 {
n++
// First element
switch n {
case 1:
buf.WriteByte('?')
// The rest
default:
buf.WriteByte('&')
}
buf.WriteString(resource)
// Request parameters
if len(vv[0]) > 0 {
buf.WriteByte('=')
buf.WriteString(url.QueryEscape(vv[0]))
}
}
}
}
return nil
}
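
A short sketch of exercising PreSignV2 above on a bare http.Request, using only the exported function declared in this file; the credentials and object names are dummy values.

// +build ignore
package main

import (
	"log"
	"net/http"

	"github.com/minio/minio-go"
)

func main() {
	req, err := http.NewRequest("GET", "https://my-bucketname.s3.amazonaws.com/my-objectname", nil)
	if err != nil {
		log.Fatalln(err)
	}
	// Expires in 1000 seconds; AWSAccessKeyId, Expires and Signature are
	// appended to the query string, as described in the comment above.
	presigned := minio.PreSignV2(*req, "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", 1000)
	log.Println(presigned.URL.String())
}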

View File

@ -0,0 +1,303 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package minio
import (
"bytes"
"encoding/hex"
"net/http"
"sort"
"strconv"
"strings"
"time"
)
// Signature and API related constants.
const (
signV4Algorithm = "AWS4-HMAC-SHA256"
iso8601DateFormat = "20060102T150405Z"
yyyymmdd = "20060102"
)
///
/// Excerpts from @lsegal -
/// https://github.com/aws/aws-sdk-js/issues/659#issuecomment-120477258.
///
/// User-Agent:
///
/// This is ignored from signing because signing this causes
/// problems with generating pre-signed URLs (that are executed
/// by other agents) or when customers pass requests through
/// proxies, which may modify the user-agent.
///
/// Content-Length:
///
/// This is ignored from signing because generating a pre-signed
/// URL should not provide a content-length constraint,
/// specifically when vending a S3 pre-signed PUT URL. The
/// corollary to this is that when sending regular requests
/// (non-pre-signed), the signature contains a checksum of the
/// body, which implicitly validates the payload length (since
/// changing the number of bytes would change the checksum)
/// and therefore this header is not valuable in the signature.
///
/// Content-Type:
///
/// Signing this header causes quite a number of problems in
/// browser environments, where browsers like to modify and
/// normalize the content-type header in different ways. There is
/// more information on this in https://goo.gl/2E9gyy. Avoiding
/// this field simplifies logic and reduces the possibility of
/// future bugs.
///
/// Authorization:
///
/// Is skipped for obvious reasons
///
var ignoredHeaders = map[string]bool{
"Authorization": true,
"Content-Type": true,
"Content-Length": true,
"User-Agent": true,
}
// getSigningKey hmac seed to calculate final signature.
func getSigningKey(secret, loc string, t time.Time) []byte {
date := sumHMAC([]byte("AWS4"+secret), []byte(t.Format(yyyymmdd)))
location := sumHMAC(date, []byte(loc))
service := sumHMAC(location, []byte("s3"))
signingKey := sumHMAC(service, []byte("aws4_request"))
return signingKey
}
// getSignature final signature in hexadecimal form.
func getSignature(signingKey []byte, stringToSign string) string {
return hex.EncodeToString(sumHMAC(signingKey, []byte(stringToSign)))
}
// getScope generate a string of a specific date, an AWS region, and a
// service.
func getScope(location string, t time.Time) string {
scope := strings.Join([]string{
t.Format(yyyymmdd),
location,
"s3",
"aws4_request",
}, "/")
return scope
}
// getCredential generate a credential string.
func getCredential(accessKeyID, location string, t time.Time) string {
scope := getScope(location, t)
return accessKeyID + "/" + scope
}
// getHashedPayload get the hexadecimal value of the SHA256 hash of
// the request payload.
func getHashedPayload(req http.Request) string {
hashedPayload := req.Header.Get("X-Amz-Content-Sha256")
if hashedPayload == "" {
// Presign does not have a payload, use S3 recommended value.
hashedPayload = "UNSIGNED-PAYLOAD"
}
return hashedPayload
}
// getCanonicalHeaders generate a list of request headers for
// signature.
func getCanonicalHeaders(req http.Request) string {
var headers []string
vals := make(map[string][]string)
for k, vv := range req.Header {
if _, ok := ignoredHeaders[http.CanonicalHeaderKey(k)]; ok {
continue // ignored header
}
headers = append(headers, strings.ToLower(k))
vals[strings.ToLower(k)] = vv
}
headers = append(headers, "host")
sort.Strings(headers)
var buf bytes.Buffer
// Save all the headers in canonical form <header>:<value> newline
// separated for each header.
for _, k := range headers {
buf.WriteString(k)
buf.WriteByte(':')
switch {
case k == "host":
buf.WriteString(req.URL.Host)
fallthrough
default:
for idx, v := range vals[k] {
if idx > 0 {
buf.WriteByte(',')
}
buf.WriteString(v)
}
buf.WriteByte('\n')
}
}
return buf.String()
}
// getSignedHeaders generate all signed request headers.
// i.e. lexically sorted, semicolon-separated list of lowercase
// request header names.
func getSignedHeaders(req http.Request) string {
var headers []string
for k := range req.Header {
if _, ok := ignoredHeaders[http.CanonicalHeaderKey(k)]; ok {
continue // Ignored header found continue.
}
headers = append(headers, strings.ToLower(k))
}
headers = append(headers, "host")
sort.Strings(headers)
return strings.Join(headers, ";")
}
// getCanonicalRequest generate a canonical request of style.
//
// canonicalRequest =
// <HTTPMethod>\n
// <CanonicalURI>\n
// <CanonicalQueryString>\n
// <CanonicalHeaders>\n
// <SignedHeaders>\n
// <HashedPayload>
func getCanonicalRequest(req http.Request) string {
req.URL.RawQuery = strings.Replace(req.URL.Query().Encode(), "+", "%20", -1)
canonicalRequest := strings.Join([]string{
req.Method,
urlEncodePath(req.URL.Path),
req.URL.RawQuery,
getCanonicalHeaders(req),
getSignedHeaders(req),
getHashedPayload(req),
}, "\n")
return canonicalRequest
}
// getStringToSignV4 - generate the string to sign based on selected query values.
func getStringToSignV4(t time.Time, location, canonicalRequest string) string {
stringToSign := signV4Algorithm + "\n" + t.Format(iso8601DateFormat) + "\n"
stringToSign = stringToSign + getScope(location, t) + "\n"
stringToSign = stringToSign + hex.EncodeToString(sum256([]byte(canonicalRequest)))
return stringToSign
}
// PreSignV4 presign the request, in accordance with
// http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html.
func PreSignV4(req http.Request, accessKeyID, secretAccessKey, location string, expires int64) *http.Request {
// Presign is not needed for anonymous credentials.
if accessKeyID == "" || secretAccessKey == "" {
return &req
}
// Initial time.
t := time.Now().UTC()
// Get credential string.
credential := getCredential(accessKeyID, location, t)
// Get all signed headers.
signedHeaders := getSignedHeaders(req)
// Set URL query.
query := req.URL.Query()
query.Set("X-Amz-Algorithm", signV4Algorithm)
query.Set("X-Amz-Date", t.Format(iso8601DateFormat))
query.Set("X-Amz-Expires", strconv.FormatInt(expires, 10))
query.Set("X-Amz-SignedHeaders", signedHeaders)
query.Set("X-Amz-Credential", credential)
req.URL.RawQuery = query.Encode()
// Get canonical request.
canonicalRequest := getCanonicalRequest(req)
// Get string to sign from canonical request.
stringToSign := getStringToSignV4(t, location, canonicalRequest)
// Get hmac signing key.
signingKey := getSigningKey(secretAccessKey, location, t)
// Calculate signature.
signature := getSignature(signingKey, stringToSign)
// Add signature header to RawQuery.
req.URL.RawQuery += "&X-Amz-Signature=" + signature
return &req
}
// PostPresignSignatureV4 - presigned signature for PostPolicy
// requests.
func PostPresignSignatureV4(policyBase64 string, t time.Time, secretAccessKey, location string) string {
// Get signing key.
signingkey := getSigningKey(secretAccessKey, location, t)
// Calculate signature.
signature := getSignature(signingkey, policyBase64)
return signature
}
// SignV4 sign the request before Do(), in accordance with
// http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html.
func SignV4(req http.Request, accessKeyID, secretAccessKey, location string) *http.Request {
// Signature calculation is not needed for anonymous credentials.
if accessKeyID == "" || secretAccessKey == "" {
return &req
}
// Initial time.
t := time.Now().UTC()
// Set x-amz-date.
req.Header.Set("X-Amz-Date", t.Format(iso8601DateFormat))
// Get canonical request.
canonicalRequest := getCanonicalRequest(req)
// Get string to sign from canonical request.
stringToSign := getStringToSignV4(t, location, canonicalRequest)
// Get hmac signing key.
signingKey := getSigningKey(secretAccessKey, location, t)
// Get credential string.
credential := getCredential(accessKeyID, location, t)
// Get all signed headers.
signedHeaders := getSignedHeaders(req)
// Calculate signature.
signature := getSignature(signingKey, stringToSign)
// If regular request, construct the final authorization header.
parts := []string{
signV4Algorithm + " Credential=" + credential,
"SignedHeaders=" + signedHeaders,
"Signature=" + signature,
}
// Set authorization header.
auth := strings.Join(parts, ", ")
req.Header.Set("Authorization", auth)
return &req
}
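
Similarly, a minimal sketch of signing a plain request with SignV4 above; the endpoint, region and credentials are dummy values, and the printed header follows the Credential/SignedHeaders/Signature layout constructed at the end of SignV4.

// +build ignore
package main

import (
	"log"
	"net/http"

	"github.com/minio/minio-go"
)

func main() {
	req, err := http.NewRequest("GET", "https://s3.amazonaws.com/my-bucketname/my-objectname", nil)
	if err != nil {
		log.Fatalln(err)
	}
	// SignV4 sets X-Amz-Date and builds the Authorization header from the
	// canonical request / string-to-sign chain shown above.
	signed := minio.SignV4(*req, "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", "us-east-1")
	log.Println(signed.Header.Get("Authorization"))
}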

View File

@ -0,0 +1,37 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package minio
// SignatureType is type of Authorization requested for a given HTTP request.
type SignatureType int
// Different types of supported signatures - default is Latest, i.e. SignatureV4.
const (
Latest SignatureType = iota
SignatureV4
SignatureV2
)
// isV2 - is signature SignatureV2?
func (s SignatureType) isV2() bool {
return s == SignatureV2
}
// isV4 - is signature SignatureV4?
func (s SignatureType) isV4() bool {
return s == SignatureV4 || s == Latest
}
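
A tiny in-package illustration of the defaults (isV2/isV4 are unexported, so this only compiles inside package minio); signatureTypeDemo is a hypothetical helper, not part of the library.

package minio

import "fmt"

// signatureTypeDemo shows that the zero value, Latest, behaves as V4.
func signatureTypeDemo() {
	var s SignatureType // zero value is Latest
	fmt.Println(s.isV4(), s.isV2()) // prints: true false
}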

View File

@ -0,0 +1,60 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package minio
import (
"io/ioutil"
"os"
"sync"
)
// tempFile - temporary file container.
type tempFile struct {
*os.File
mutex *sync.Mutex
}
// newTempFile returns a new temporary file; once closed, it automatically deletes itself.
func newTempFile(prefix string) (*tempFile, error) {
// use platform specific temp directory.
file, err := ioutil.TempFile(os.TempDir(), prefix)
if err != nil {
return nil, err
}
return &tempFile{
File: file,
mutex: &sync.Mutex{},
}, nil
}
// Close - closer wrapper to close and remove temporary file.
func (t *tempFile) Close() error {
t.mutex.Lock()
defer t.mutex.Unlock()
if t.File != nil {
// Close the file.
if err := t.File.Close(); err != nil {
return err
}
// Remove file.
if err := os.Remove(t.File.Name()); err != nil {
return err
}
t.File = nil
}
return nil
}
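
An in-package usage sketch for tempFile (the type is unexported); tempFileDemo and the prefix string are placeholders for illustration only.

package minio

import "log"

func tempFileDemo() {
	tmp, err := newTempFile("multiparts$")
	if err != nil {
		log.Fatalln(err)
	}
	// tempFile embeds *os.File, so it can be written to directly.
	if _, err := tmp.Write([]byte("scratch data")); err != nil {
		log.Fatalln(err)
	}
	// Close both closes the handle and removes the file from disk.
	if err := tmp.Close(); err != nil {
		log.Fatalln(err)
	}
}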

View File

@ -0,0 +1,341 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package minio
import (
"crypto/hmac"
"crypto/sha256"
"encoding/hex"
"encoding/xml"
"io"
"io/ioutil"
"net"
"net/http"
"net/url"
"regexp"
"strings"
"time"
"unicode/utf8"
)
// xmlDecoder provide decoded value in xml.
func xmlDecoder(body io.Reader, v interface{}) error {
d := xml.NewDecoder(body)
return d.Decode(v)
}
// sum256 calculate sha256 sum for an input byte array.
func sum256(data []byte) []byte {
hash := sha256.New()
hash.Write(data)
return hash.Sum(nil)
}
// sumHMAC calculate hmac between two input byte array.
func sumHMAC(key []byte, data []byte) []byte {
hash := hmac.New(sha256.New, key)
hash.Write(data)
return hash.Sum(nil)
}
// isPartUploaded - true if part is already uploaded.
func isPartUploaded(objPart objectPart, objectParts map[int]objectPart) (isUploaded bool) {
_, isUploaded = objectParts[objPart.PartNumber]
if isUploaded {
isUploaded = (objPart.ETag == objectParts[objPart.PartNumber].ETag)
}
return
}
// getEndpointURL - construct a new endpoint.
func getEndpointURL(endpoint string, inSecure bool) (*url.URL, error) {
if strings.Contains(endpoint, ":") {
host, _, err := net.SplitHostPort(endpoint)
if err != nil {
return nil, err
}
if !isValidIP(host) && !isValidDomain(host) {
msg := "Endpoint: " + endpoint + " does not follow ip address or domain name standards."
return nil, ErrInvalidArgument(msg)
}
} else {
if !isValidIP(endpoint) && !isValidDomain(endpoint) {
msg := "Endpoint: " + endpoint + " does not follow ip address or domain name standards."
return nil, ErrInvalidArgument(msg)
}
}
// if inSecure is true, use 'http' scheme.
scheme := "https"
if inSecure {
scheme = "http"
}
// Construct a secured endpoint URL.
endpointURLStr := scheme + "://" + endpoint
endpointURL, err := url.Parse(endpointURLStr)
if err != nil {
return nil, err
}
// Validate incoming endpoint URL.
if err := isValidEndpointURL(endpointURL); err != nil {
return nil, err
}
return endpointURL, nil
}
// isValidDomain validates if input string is a valid domain name.
func isValidDomain(host string) bool {
// See RFC 1035, RFC 3696.
host = strings.TrimSpace(host)
if len(host) == 0 || len(host) > 255 {
return false
}
// host cannot start or end with "-"
if host[len(host)-1:] == "-" || host[:1] == "-" {
return false
}
// host cannot start or end with "_"
if host[len(host)-1:] == "_" || host[:1] == "_" {
return false
}
// host cannot start or end with a "."
if host[len(host)-1:] == "." || host[:1] == "." {
return false
}
// All non alphanumeric characters are invalid.
if strings.ContainsAny(host, "`~!@#$%^&*()+={}[]|\\\"';:><?/") {
return false
}
// No need to regexp match, since the list is non-exhaustive.
// We let it pass and fail later.
return true
}
// isValidIP parses input string for ip address validity.
func isValidIP(ip string) bool {
return net.ParseIP(ip) != nil
}
// closeResponse close non nil response with any response Body.
// convenient wrapper to drain any remaining data on response body.
//
// Subsequently this allows golang http RoundTripper
// to re-use the same connection for future requests.
func closeResponse(resp *http.Response) {
// Callers should close resp.Body when done reading from it.
// If resp.Body is not closed, the Client's underlying RoundTripper
// (typically Transport) may not be able to re-use a persistent TCP
// connection to the server for a subsequent "keep-alive" request.
if resp != nil && resp.Body != nil {
// Drain any remaining Body and then close the connection.
// Without this closing connection would disallow re-using
// the same connection for future uses.
// - http://stackoverflow.com/a/17961593/4465767
io.Copy(ioutil.Discard, resp.Body)
resp.Body.Close()
}
}
// isVirtualHostSupported - verify if host supports virtual hosted style.
// Currently only Amazon S3 and Google Cloud Storage would support this.
func isVirtualHostSupported(endpointURL *url.URL) bool {
return isAmazonEndpoint(endpointURL) || isGoogleEndpoint(endpointURL)
}
// Match if it is exactly Amazon S3 endpoint.
func isAmazonEndpoint(endpointURL *url.URL) bool {
if endpointURL == nil {
return false
}
if endpointURL.Host == "s3.amazonaws.com" {
return true
}
return false
}
// Match if it is exactly Google cloud storage endpoint.
func isGoogleEndpoint(endpointURL *url.URL) bool {
if endpointURL == nil {
return false
}
if endpointURL.Host == "storage.googleapis.com" {
return true
}
return false
}
// Verify if input endpoint URL is valid.
func isValidEndpointURL(endpointURL *url.URL) error {
if endpointURL == nil {
return ErrInvalidArgument("Endpoint url cannot be empty.")
}
if endpointURL.Path != "/" && endpointURL.Path != "" {
return ErrInvalidArgument("Endpoing url cannot have fully qualified paths.")
}
if strings.Contains(endpointURL.Host, ".amazonaws.com") {
if !isAmazonEndpoint(endpointURL) {
return ErrInvalidArgument("Amazon S3 endpoint should be 's3.amazonaws.com'.")
}
}
if strings.Contains(endpointURL.Host, ".googleapis.com") {
if !isGoogleEndpoint(endpointURL) {
return ErrInvalidArgument("Google Cloud Storage endpoint should be 'storage.googleapis.com'.")
}
}
return nil
}
// Verify if input expires value is valid.
func isValidExpiry(expires time.Duration) error {
expireSeconds := int64(expires / time.Second)
if expireSeconds < 1 {
return ErrInvalidArgument("Expires cannot be lesser than 1 second.")
}
if expireSeconds > 604800 {
return ErrInvalidArgument("Expires cannot be greater than 7 days.")
}
return nil
}
/// Excerpts from - http://docs.aws.amazon.com/AmazonS3/latest/dev/BucketRestrictions.html
/// When using virtual hosted-style buckets with SSL, the SSL wildcard
/// certificate only matches buckets that do not contain periods.
/// To work around this, use HTTP or write your own certificate verification logic.
// We decided to not support bucketNames with '.' in them.
var validBucketName = regexp.MustCompile(`^[a-z0-9][a-z0-9\-]{1,61}[a-z0-9]$`)
// isValidBucketName - verify bucket name in accordance with
// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html
func isValidBucketName(bucketName string) error {
if strings.TrimSpace(bucketName) == "" {
return ErrInvalidBucketName("Bucket name cannot be empty.")
}
if len(bucketName) < 3 {
return ErrInvalidBucketName("Bucket name cannot be smaller than 3 characters.")
}
if len(bucketName) > 63 {
return ErrInvalidBucketName("Bucket name cannot be greater than 63 characters.")
}
if bucketName[0] == '.' || bucketName[len(bucketName)-1] == '.' {
return ErrInvalidBucketName("Bucket name cannot start or end with a '.' dot.")
}
if !validBucketName.MatchString(bucketName) {
return ErrInvalidBucketName("Bucket name contains invalid characters.")
}
return nil
}
// isValidObjectName - verify object name in accordance with
// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html
func isValidObjectName(objectName string) error {
if strings.TrimSpace(objectName) == "" {
return ErrInvalidObjectName("Object name cannot be empty.")
}
if len(objectName) > 1024 {
return ErrInvalidObjectName("Object name cannot be greater than 1024 characters.")
}
if !utf8.ValidString(objectName) {
return ErrInvalidBucketName("Object name with non UTF-8 strings are not supported.")
}
return nil
}
// isValidObjectPrefix - verify if object prefix is valid.
func isValidObjectPrefix(objectPrefix string) error {
if len(objectPrefix) > 1024 {
return ErrInvalidObjectPrefix("Object prefix cannot be greater than 1024 characters.")
}
if !utf8.ValidString(objectPrefix) {
return ErrInvalidObjectPrefix("Object prefix with non UTF-8 strings are not supported.")
}
return nil
}
// optimalPartSize - calculate the optimal part size for the given objectSize.
//
// NOTE: Assumption here is that for any object to be uploaded to any S3 compatible
// object storage it will have the following parameters as constants.
//
// maxParts - 10000
// minimumPartSize - 5MiB
// maximumPartSize - 5GiB
//
// if the partSize after division with maxParts is greater than minimumPartSize
// then use that partSize, otherwise return minimumPartSize.
//
// Special cases
//
// - if input object size is -1 then return maxPartSize.
// - if it happens to be that partSize is indeed bigger
// than the maximum part size just return maxPartSize.
func optimalPartSize(objectSize int64) int64 {
// if object size is -1 choose part size as 5GiB.
if objectSize == -1 {
return maxPartSize
}
// make sure last part has enough buffer and handle this properly.
partSize := (objectSize / (maxParts - 1))
if partSize > minimumPartSize {
if partSize > maxPartSize {
return maxPartSize
}
return partSize
}
return minimumPartSize
}
// urlEncodePath encodes a string from its UTF-8 byte representation into percent-encoded hex escape sequences.
//
// This is necessary since the regular url.Parse() and url.Encode() functions do not support all of UTF-8;
// non-English characters cannot be parsed reliably due to the way url.Encode() is written.
//
// This function, on the other hand, is a direct replacement for the url.Encode() technique and supports
// pretty much every UTF-8 character.
func urlEncodePath(pathName string) string {
// if object matches reserved string, no need to encode them
reservedNames := regexp.MustCompile("^[a-zA-Z0-9-_.~/]+$")
if reservedNames.MatchString(pathName) {
return pathName
}
var encodedPathname string
for _, s := range pathName {
if 'A' <= s && s <= 'Z' || 'a' <= s && s <= 'z' || '0' <= s && s <= '9' { // §2.3 Unreserved characters (mark)
encodedPathname = encodedPathname + string(s)
continue
}
switch s {
case '-', '_', '.', '~', '/': // §2.3 Unreserved characters (mark)
encodedPathname = encodedPathname + string(s)
continue
default:
n := utf8.RuneLen(s)
if n < 0 {
// if utf8 cannot convert, return the same string as-is
return pathName
}
u := make([]byte, n)
utf8.EncodeRune(u, s)
for _, r := range u {
hex := hex.EncodeToString([]byte{r})
encodedPathname = encodedPathname + "%" + strings.ToUpper(hex)
}
}
}
return encodedPathname
}
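
An in-package sketch of what urlEncodePath above produces (the helper is unexported, and urlEncodePathDemo is a hypothetical name); the expected outputs in the comments follow from the reserved-character regexp and the byte-wise percent-encoding.

package minio

import "fmt"

func urlEncodePathDemo() {
	// Matches the reserved-name regexp, so it passes through untouched.
	fmt.Println(urlEncodePath("bucket/object-1_~.txt")) // bucket/object-1_~.txt
	// Non-ASCII runes are encoded byte by byte: 'ö' (U+00F6) -> %C3%B6.
	fmt.Println(urlEncodePath("böcket")) // b%C3%B6cket
}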

View File

@ -1,74 +0,0 @@
package aws
import (
"time"
)
// AttemptStrategy represents a strategy for waiting for an action
// to complete successfully. This is an internal type used by the
// implementation of other goamz packages.
type AttemptStrategy struct {
Total time.Duration // total duration of attempt.
Delay time.Duration // interval between each try in the burst.
Min int // minimum number of retries; overrides Total
}
type Attempt struct {
strategy AttemptStrategy
last time.Time
end time.Time
force bool
count int
}
// Start begins a new sequence of attempts for the given strategy.
func (s AttemptStrategy) Start() *Attempt {
now := time.Now()
return &Attempt{
strategy: s,
last: now,
end: now.Add(s.Total),
force: true,
}
}
// Next waits until it is time to perform the next attempt or returns
// false if it is time to stop trying.
func (a *Attempt) Next() bool {
now := time.Now()
sleep := a.nextSleep(now)
if !a.force && !now.Add(sleep).Before(a.end) && a.strategy.Min <= a.count {
return false
}
a.force = false
if sleep > 0 && a.count > 0 {
time.Sleep(sleep)
now = time.Now()
}
a.count++
a.last = now
return true
}
func (a *Attempt) nextSleep(now time.Time) time.Duration {
sleep := a.strategy.Delay - now.Sub(a.last)
if sleep < 0 {
return 0
}
return sleep
}
// HasNext returns whether another attempt will be made if the current
// one fails. If it returns true, the following call to Next is
// guaranteed to return true.
func (a *Attempt) HasNext() bool {
if a.force || a.strategy.Min > a.count {
return true
}
now := time.Now()
if now.Add(a.nextSleep(now)).Before(a.end) {
a.force = true
return true
}
return false
}

View File

@ -1,59 +0,0 @@
package aws_test
import (
"time"
. "gopkg.in/check.v1"
"gopkg.in/amz.v3/aws"
)
func (S) TestAttemptTiming(c *C) {
testAttempt := aws.AttemptStrategy{
Total: 0.25e9,
Delay: 0.1e9,
}
want := []time.Duration{0, 0.1e9, 0.2e9, 0.2e9}
got := make([]time.Duration, 0, len(want)) // avoid allocation when testing timing
t0 := time.Now()
for a := testAttempt.Start(); a.Next(); {
got = append(got, time.Now().Sub(t0))
}
got = append(got, time.Now().Sub(t0))
c.Assert(got, HasLen, len(want))
const margin = 0.01e9
for i := range got {
lo := want[i] - margin
hi := want[i] + margin
if got[i] < lo || got[i] > hi {
c.Errorf("attempt %d want %g got %g", i, want[i].Seconds(), got[i].Seconds())
}
}
}
func (S) TestAttemptNextHasNext(c *C) {
a := aws.AttemptStrategy{}.Start()
c.Assert(a.Next(), Equals, true)
c.Assert(a.Next(), Equals, false)
a = aws.AttemptStrategy{}.Start()
c.Assert(a.Next(), Equals, true)
c.Assert(a.HasNext(), Equals, false)
c.Assert(a.Next(), Equals, false)
a = aws.AttemptStrategy{Total: 2e8}.Start()
c.Assert(a.Next(), Equals, true)
c.Assert(a.HasNext(), Equals, true)
time.Sleep(2e8)
c.Assert(a.HasNext(), Equals, true)
c.Assert(a.Next(), Equals, true)
c.Assert(a.Next(), Equals, false)
a = aws.AttemptStrategy{Total: 1e8, Min: 2}.Start()
time.Sleep(1e8)
c.Assert(a.Next(), Equals, true)
c.Assert(a.HasNext(), Equals, true)
c.Assert(a.Next(), Equals, true)
c.Assert(a.HasNext(), Equals, false)
c.Assert(a.Next(), Equals, false)
}

View File

@ -1,268 +0,0 @@
//
// goamz - Go packages to interact with the Amazon Web Services.
//
// https://wiki.ubuntu.com/goamz
//
// Copyright (c) 2011 Canonical Ltd.
//
package aws
import (
"errors"
"os"
"strings"
)
// Region defines the URLs where AWS services may be accessed.
//
// See http://goo.gl/d8BP1 for more details.
type Region struct {
Name string // the canonical name of this region.
EC2Endpoint string
S3Endpoint string
S3BucketEndpoint string // Not needed by AWS S3. Use ${bucket} for bucket name.
S3LocationConstraint bool // true if this region requires a LocationConstraint declaration.
S3LowercaseBucket bool // true if the region requires bucket names to be lower case.
SDBEndpoint string // not all regions have SimpleDB, e.g. Frankfurt (eu-central-1) does not
SNSEndpoint string
SQSEndpoint string
IAMEndpoint string
}
func (r Region) ResolveS3BucketEndpoint(bucketName string) string {
if r.S3BucketEndpoint != "" {
return strings.ToLower(strings.Replace(r.S3BucketEndpoint, "${bucket}", bucketName, -1))
}
return strings.ToLower(r.S3Endpoint + "/" + bucketName + "/")
}
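
A short illustration of the two branches, using the USEast region defined below; the second region value is hypothetical, and note the unconditional lowercasing:

// S3BucketEndpoint empty: path-style URL built from S3Endpoint.
aws.USEast.ResolveS3BucketEndpoint("MyBucket")
// -> "https://s3.amazonaws.com/mybucket/"

// S3BucketEndpoint set (hypothetical value): ${bucket} is substituted.
r := aws.Region{S3BucketEndpoint: "https://${bucket}.example.com"}
r.ResolveS3BucketEndpoint("MyBucket")
// -> "https://mybucket.example.com"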
var USEast = Region{
"us-east-1", // US East (N. Virginia)
"https://ec2.us-east-1.amazonaws.com",
"https://s3.amazonaws.com",
"",
false,
false,
"https://sdb.amazonaws.com",
"https://sns.us-east-1.amazonaws.com",
"https://sqs.us-east-1.amazonaws.com",
"https://iam.amazonaws.com",
}
var USWest = Region{
"us-west-1", //US West (N. California)
"https://ec2.us-west-1.amazonaws.com",
"https://s3-us-west-1.amazonaws.com",
"",
true,
true,
"https://sdb.us-west-1.amazonaws.com",
"https://sns.us-west-1.amazonaws.com",
"https://sqs.us-west-1.amazonaws.com",
"https://iam.amazonaws.com",
}
var USWest2 = Region{
"us-west-2", // US West (Oregon)
"https://ec2.us-west-2.amazonaws.com",
"https://s3-us-west-2.amazonaws.com",
"",
true,
true,
"https://sdb.us-west-2.amazonaws.com",
"https://sns.us-west-2.amazonaws.com",
"https://sqs.us-west-2.amazonaws.com",
"https://iam.amazonaws.com",
}
var USGovWest = Region{
"us-gov-west-1", // Isolated regions, AWS GovCloud (US)
"https://ec2.us-gov-west-1.amazonaws.com",
"https://s3-us-gov-west-1.amazonaws.com",
"",
true,
true,
"",
"https://sns.us-gov-west-1.amazonaws.com",
"https://sqs.us-gov-west-1.amazonaws.com",
"https://iam.us-gov.amazonaws.com",
}
var EUWest = Region{
"eu-west-1", // EU (Ireland)
"https://ec2.eu-west-1.amazonaws.com",
"https://s3-eu-west-1.amazonaws.com",
"",
true,
true,
"https://sdb.eu-west-1.amazonaws.com",
"https://sns.eu-west-1.amazonaws.com",
"https://sqs.eu-west-1.amazonaws.com",
"https://iam.amazonaws.com",
}
var EUCentral = Region{
"eu-central-1", // EU (Frankfurt)
"https://ec2.eu-central-1.amazonaws.com",
"https://s3-eu-central-1.amazonaws.com",
"",
true,
true,
"",
"https://sns.eu-central-1.amazonaws.com",
"https://sqs.eu-central-1.amazonaws.com",
"https://iam.amazonaws.com",
}
var APSoutheast = Region{
"ap-southeast-1", // Asia Pacific (Singapore)
"https://ec2.ap-southeast-1.amazonaws.com",
"https://s3-ap-southeast-1.amazonaws.com",
"",
true,
true,
"https://sdb.ap-southeast-1.amazonaws.com",
"https://sns.ap-southeast-1.amazonaws.com",
"https://sqs.ap-southeast-1.amazonaws.com",
"https://iam.amazonaws.com",
}
var APSoutheast2 = Region{
"ap-southeast-2", //Asia Pacific (Sydney)
"https://ec2.ap-southeast-2.amazonaws.com",
"https://s3-ap-southeast-2.amazonaws.com",
"",
true,
true,
"https://sdb.ap-southeast-2.amazonaws.com",
"https://sns.ap-southeast-2.amazonaws.com",
"https://sqs.ap-southeast-2.amazonaws.com",
"https://iam.amazonaws.com",
}
var APNortheast = Region{
"ap-northeast-1", //Asia Pacific (Tokyo)
"https://ec2.ap-northeast-1.amazonaws.com",
"https://s3-ap-northeast-1.amazonaws.com",
"",
true,
true,
"https://sdb.ap-northeast-1.amazonaws.com",
"https://sns.ap-northeast-1.amazonaws.com",
"https://sqs.ap-northeast-1.amazonaws.com",
"https://iam.amazonaws.com",
}
var SAEast = Region{
"sa-east-1", // South America (Sao Paulo)
"https://ec2.sa-east-1.amazonaws.com",
"https://s3-sa-east-1.amazonaws.com",
"",
true,
true,
"https://sdb.sa-east-1.amazonaws.com",
"https://sns.sa-east-1.amazonaws.com",
"https://sqs.sa-east-1.amazonaws.com",
"https://iam.amazonaws.com",
}
var CNNorth = Region{
"cn-north-1", // Isolated regions, China (Beijing)
"https://ec2.cn-north-1.amazonaws.com.cn",
"https://s3.cn-north-1.amazonaws.com.cn",
"",
true,
true,
"https://sdb.cn-north-1.amazonaws.com.cn",
"https://sns.cn-north-1.amazonaws.com.cn",
"https://sqs.cn-north-1.amazonaws.com.cn",
"https://iam.cn-north-1.amazonaws.com.cn",
}
var Regions = map[string]Region{
APNortheast.Name: APNortheast,
APSoutheast.Name: APSoutheast,
APSoutheast2.Name: APSoutheast2,
EUWest.Name: EUWest,
EUCentral.Name: EUCentral,
USEast.Name: USEast,
USWest.Name: USWest,
USWest2.Name: USWest2,
USGovWest.Name: USGovWest,
SAEast.Name: SAEast,
CNNorth.Name: CNNorth,
}
type Auth struct {
AccessKey, SecretKey string
}
var unreserved = make([]bool, 128)
var hex = "0123456789ABCDEF"
func init() {
// RFC3986
u := "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz01234567890-_.~"
for _, c := range u {
unreserved[c] = true
}
}
// EnvAuth creates an Auth based on environment information.
// The AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY environment
// variables are used as the first preference, but EC2_ACCESS_KEY
// and EC2_SECRET_KEY or AWS_ACCESS_KEY and AWS_SECRET_KEY
// environment variables are also supported.
func EnvAuth() (auth Auth, err error) {
auth.AccessKey = os.Getenv("AWS_ACCESS_KEY_ID")
auth.SecretKey = os.Getenv("AWS_SECRET_ACCESS_KEY")
// first fallback: try the EC2_ environment variables
if auth.AccessKey == "" && auth.SecretKey == "" {
auth.AccessKey = os.Getenv("EC2_ACCESS_KEY")
auth.SecretKey = os.Getenv("EC2_SECRET_KEY")
}
// second fallback: try the AWS_ environment variables
if auth.AccessKey == "" && auth.SecretKey == "" {
auth.AccessKey = os.Getenv("AWS_ACCESS_KEY")
auth.SecretKey = os.Getenv("AWS_SECRET_KEY")
}
if auth.AccessKey == "" {
err = errors.New("AWS_ACCESS_KEY_ID not found in environment")
}
if auth.SecretKey == "" {
err = errors.New("AWS_SECRET_ACCESS_KEY not found in environment")
}
return
}
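
A minimal sketch of the usual call site, assuming the credentials are exported in the environment:

auth, err := aws.EnvAuth()
if err != nil {
	log.Fatal(err) // e.g. "AWS_ACCESS_KEY_ID not found in environment"
}
fmt.Println("using access key", auth.AccessKey)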
// Encode takes a string and URI-encodes it in a way suitable
// to be used in AWS signatures.
func Encode(s string) string {
encode := false
for i := 0; i != len(s); i++ {
c := s[i]
if c > 127 || !unreserved[c] {
encode = true
break
}
}
if !encode {
return s
}
e := make([]byte, len(s)*3)
ei := 0
for i := 0; i != len(s); i++ {
c := s[i]
if c > 127 || !unreserved[c] {
e[ei] = '%'
e[ei+1] = hex[c>>4]
e[ei+2] = hex[c&0xF]
ei += 3
} else {
e[ei] = c
ei++
}
}
return string(e[:ei])
}
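
Two representative inputs, consistent with the package tests further down:

aws.Encode("foo")         // -> "foo" (all unreserved, returned as-is)
aws.Encode("foo bar/baz") // -> "foo%20bar%2Fbaz" (space and slash escaped, uppercase hex)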

View File

@ -1,84 +0,0 @@
package aws_test
import (
"os"
"strings"
"testing"
. "gopkg.in/check.v1"
"gopkg.in/amz.v3/aws"
)
func Test(t *testing.T) {
TestingT(t)
}
var _ = Suite(&S{})
type S struct {
environ []string
}
func (s *S) SetUpSuite(c *C) {
s.environ = os.Environ()
}
func (s *S) TearDownTest(c *C) {
os.Clearenv()
for _, kv := range s.environ {
l := strings.SplitN(kv, "=", 2)
os.Setenv(l[0], l[1])
}
}
func (s *S) TestEnvAuthNoSecret(c *C) {
os.Clearenv()
_, err := aws.EnvAuth()
c.Assert(err, ErrorMatches, "AWS_SECRET_ACCESS_KEY not found in environment")
}
func (s *S) TestEnvAuthNoAccess(c *C) {
os.Clearenv()
os.Setenv("AWS_SECRET_ACCESS_KEY", "foo")
_, err := aws.EnvAuth()
c.Assert(err, ErrorMatches, "AWS_ACCESS_KEY_ID not found in environment")
}
func (s *S) TestEnvAuth(c *C) {
os.Clearenv()
os.Setenv("AWS_SECRET_ACCESS_KEY", "secret")
os.Setenv("AWS_ACCESS_KEY_ID", "access")
auth, err := aws.EnvAuth()
c.Assert(err, IsNil)
c.Assert(auth, Equals, aws.Auth{SecretKey: "secret", AccessKey: "access"})
}
func (s *S) TestEnvAuthLegacy(c *C) {
os.Clearenv()
os.Setenv("EC2_SECRET_KEY", "secret")
os.Setenv("EC2_ACCESS_KEY", "access")
auth, err := aws.EnvAuth()
c.Assert(err, IsNil)
c.Assert(auth, Equals, aws.Auth{SecretKey: "secret", AccessKey: "access"})
}
func (s *S) TestEnvAuthAws(c *C) {
os.Clearenv()
os.Setenv("AWS_SECRET_KEY", "secret")
os.Setenv("AWS_ACCESS_KEY", "access")
auth, err := aws.EnvAuth()
c.Assert(err, IsNil)
c.Assert(auth, Equals, aws.Auth{SecretKey: "secret", AccessKey: "access"})
}
func (s *S) TestEncode(c *C) {
c.Assert(aws.Encode("foo"), Equals, "foo")
c.Assert(aws.Encode("/"), Equals, "%2F")
}
func (s *S) TestRegionsAreNamed(c *C) {
for n, r := range aws.Regions {
c.Assert(n, Equals, r.Name)
}
}

View File

@ -1,447 +0,0 @@
package aws
import (
"bytes"
"crypto/hmac"
"crypto/sha256"
"encoding/base64"
"fmt"
"io"
"io/ioutil"
"log"
"net/http"
"net/url"
"sort"
"strings"
"time"
)
var debug = log.New(
// Remove the C-style comment at the front of the next line to emit debug information.
/*os.Stdout, //*/ ioutil.Discard,
"DEBUG: ",
log.LstdFlags,
)
type Signer func(*http.Request, Auth) error
// Ensure our signers meet the interface
var _ Signer = SignV2
var _ Signer = SignV4Factory("", "")
type hasher func(io.Reader) (string, error)
const (
ISO8601BasicFormat = "20060102T150405Z"
ISO8601BasicFormatShort = "20060102"
)
// SignV2 signs an HTTP request utilizing version 2 of the AWS
// signature, and the given credentials.
func SignV2(req *http.Request, auth Auth) (err error) {
queryVals := req.URL.Query()
queryVals.Set("AWSAccessKeyId", auth.AccessKey)
queryVals.Set("SignatureVersion", "2")
queryVals.Set("SignatureMethod", "HmacSHA256")
uriStr := canonicalURI(req.URL)
queryStr := canonicalQueryString(queryVals)
payload := new(bytes.Buffer)
if err := errorCollector(
fprintfWrapper(payload, "%s\n", requestMethodVerb(req.Method)),
fprintfWrapper(payload, "%s\n", req.Host),
fprintfWrapper(payload, "%s\n", uriStr),
fprintfWrapper(payload, "%s", queryStr),
); err != nil {
return err
}
hash := hmac.New(sha256.New, []byte(auth.SecretKey))
hash.Write(payload.Bytes())
signature := make([]byte, base64.StdEncoding.EncodedLen(hash.Size()))
base64.StdEncoding.Encode(signature, hash.Sum(nil))
queryVals.Set("Signature", string(signature))
req.URL.RawQuery = queryVals.Encode()
return nil
}
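
A sketch of signing a query-style request with SignV2; the endpoint and credentials here are placeholders:

req, err := http.NewRequest("GET", "https://sdb.amazonaws.com/", nil)
if err != nil {
	log.Fatal(err)
}
query := req.URL.Query()
query.Add("Action", "ListDomains")
req.URL.RawQuery = query.Encode()

if err := aws.SignV2(req, aws.Auth{AccessKey: "access", SecretKey: "secret"}); err != nil {
	log.Fatal(err)
}
// req.URL.RawQuery now also carries AWSAccessKeyId, SignatureVersion,
// SignatureMethod and Signature.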
// SignV4Factory returns a version 4 Signer which will utilize the
// given region name.
func SignV4Factory(regionName, serviceName string) Signer {
return func(req *http.Request, auth Auth) error {
return SignV4(req, auth, regionName, serviceName)
}
}
func SignV4URL(req *http.Request, auth Auth, regionName, svcName string, expires time.Duration) error {
reqTime, err := requestTime(req)
if err != nil {
return err
}
req.Header.Del("date")
credScope := credentialScope(reqTime, regionName, svcName)
queryVals := req.URL.Query()
queryVals.Set("X-Amz-Algorithm", "AWS4-HMAC-SHA256")
queryVals.Set("X-Amz-Credential", auth.AccessKey+"/"+credScope)
queryVals.Set("X-Amz-Date", reqTime.Format(ISO8601BasicFormat))
queryVals.Set("X-Amz-Expires", fmt.Sprintf("%d", int(expires.Seconds())))
queryVals.Set("X-Amz-SignedHeaders", "host")
req.URL.RawQuery = queryVals.Encode()
_, canonReqHash, _, err := canonicalRequest(req, sha256Hasher, false)
if err != nil {
return err
}
var strToSign string
if strToSign, err = stringToSign(reqTime, canonReqHash, credScope); err != nil {
return err
}
key := signingKey(reqTime, auth.SecretKey, regionName, svcName)
signature := fmt.Sprintf("%x", hmacHasher(key, strToSign))
debug.Printf("strToSign:\n\"\"\"\n%s\n\"\"\"", strToSign)
queryVals.Set("X-Amz-Signature", signature)
req.URL.RawQuery = queryVals.Encode()
return nil
}
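
A sketch of presigning a GET, assuming auth holds credentials (e.g. from EnvAuth); the request must already carry a parseable date header, since requestTime below fails otherwise:

req, err := http.NewRequest("GET", "https://examplebucket.s3.amazonaws.com/test.txt", nil)
if err != nil {
	log.Fatal(err)
}
req.Header.Set("date", time.Now().UTC().Format(time.RFC1123))

if err := aws.SignV4URL(req, auth, aws.USEast.Name, "s3", 15*time.Minute); err != nil {
	log.Fatal(err)
}
// req.URL.String() is now a presigned URL valid for 15 minutes.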
// SignV4 signs an HTTP request utilizing version 4 of the AWS
// signature, and the given credentials.
func SignV4(req *http.Request, auth Auth, regionName, svcName string) (err error) {
var reqTime time.Time
if reqTime, err = requestTime(req); err != nil {
return err
}
// Remove any existing authorization headers as they will corrupt
// the signing.
delete(req.Header, "Authorization")
delete(req.Header, "authorization")
credScope := credentialScope(reqTime, regionName, svcName)
_, canonReqHash, sortedHdrNames, err := canonicalRequest(req, sha256Hasher, true)
if err != nil {
return err
}
var strToSign string
if strToSign, err = stringToSign(reqTime, canonReqHash, credScope); err != nil {
return err
}
key := signingKey(reqTime, auth.SecretKey, regionName, svcName)
signature := fmt.Sprintf("%x", hmacHasher(key, strToSign))
debug.Printf("strToSign:\n\"\"\"\n%s\n\"\"\"", strToSign)
var authHdrVal string
if authHdrVal, err = authHeaderString(
req.Header,
auth.AccessKey,
signature,
credScope,
sortedHdrNames,
); err != nil {
return err
}
req.Header.Set("Authorization", authHdrVal)
return nil
}
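
And the header-signing variant, again assuming auth holds valid credentials:

req, err := http.NewRequest("GET", "https://examplebucket.s3.amazonaws.com/test.txt", nil)
if err != nil {
	log.Fatal(err)
}
req.Header.Set("x-amz-date", time.Now().UTC().Format(aws.ISO8601BasicFormat))

if err := aws.SignV4(req, auth, aws.USEast.Name, "s3"); err != nil {
	log.Fatal(err)
}
// The Authorization header now carries the AWS4-HMAC-SHA256 credential,
// signed-header list and signature.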
// Task 1: Create a Canonical Request.
// Returns the canonical request, and its hash.
func canonicalRequest(
req *http.Request,
hasher hasher,
calcPayHash bool,
) (canReq, canReqHash string, sortedHdrNames []string, err error) {
payHash := "UNSIGNED-PAYLOAD"
if calcPayHash {
if payHash, err = payloadHash(req, hasher); err != nil {
return
}
req.Header.Set("x-amz-content-sha256", payHash)
}
sortedHdrNames = sortHeaderNames(req.Header, "host")
var canHdr string
if canHdr, err = canonicalHeaders(sortedHdrNames, req.Host, req.Header); err != nil {
return
}
debug.Printf("canHdr:\n\"\"\"\n%s\n\"\"\"", canHdr)
debug.Printf("signedHeader: %s\n\n", strings.Join(sortedHdrNames, ";"))
uriStr := canonicalURI(req.URL)
queryStr := canonicalQueryString(req.URL.Query())
c := new(bytes.Buffer)
if err := errorCollector(
fprintfWrapper(c, "%s\n", requestMethodVerb(req.Method)),
fprintfWrapper(c, "%s\n", uriStr),
fprintfWrapper(c, "%s\n", queryStr),
fprintfWrapper(c, "%s\n", canHdr),
fprintfWrapper(c, "%s\n", strings.Join(sortedHdrNames, ";")),
fprintfWrapper(c, "%s", payHash),
); err != nil {
return "", "", nil, err
}
canReq = c.String()
debug.Printf("canReq:\n\"\"\"\n%s\n\"\"\"", canReq)
canReqHash, err = hasher(bytes.NewBuffer([]byte(canReq)))
return canReq, canReqHash, sortedHdrNames, err
}
// Task 2: Create a string to Sign
// Returns a string in the defined format to sign for the authorization header.
func stringToSign(
t time.Time,
canonReqHash string,
credScope string,
) (string, error) {
w := new(bytes.Buffer)
if err := errorCollector(
fprintfWrapper(w, "AWS4-HMAC-SHA256\n"),
fprintfWrapper(w, "%s\n", t.Format(ISO8601BasicFormat)),
fprintfWrapper(w, "%s\n", credScope),
fprintfWrapper(w, "%s", canonReqHash),
); err != nil {
return "", err
}
return w.String(), nil
}
// Task 3: Calculate the Signature
// Returns a derived signing key.
func signingKey(t time.Time, secretKey, regionName, svcName string) []byte {
kSecret := secretKey
kDate := hmacHasher([]byte("AWS4"+kSecret), t.Format(ISO8601BasicFormatShort))
kRegion := hmacHasher(kDate, regionName)
kService := hmacHasher(kRegion, svcName)
kSigning := hmacHasher(kService, "aws4_request")
return kSigning
}
// Task 4: Add the Signing Information to the Request
// Returns a string to be placed in the Authorization header for the request.
func authHeaderString(
header http.Header,
accessKey,
signature string,
credScope string,
sortedHeaderNames []string,
) (string, error) {
w := new(bytes.Buffer)
if err := errorCollector(
fprintfWrapper(w, "AWS4-HMAC-SHA256 "),
fprintfWrapper(w, "Credential=%s/%s, ", accessKey, credScope),
fprintfWrapper(w, "SignedHeaders=%s, ", strings.Join(sortedHeaderNames, ";")),
fprintfWrapper(w, "Signature=%s", signature),
); err != nil {
return "", err
}
return w.String(), nil
}
func canonicalURI(u *url.URL) string {
// The algorithm states that if the path is empty, just use "/".
if u.Path == "" {
return "/"
}
// Each path segment must be URI-encoded.
segments := strings.Split(u.Path, "/")
for i, segment := range segments {
segments[i] = goToAwsUrlEncoding(url.QueryEscape(segment))
}
return strings.Join(segments, "/")
}
func canonicalQueryString(queryVals url.Values) string {
// AWS dictates that if duplicate keys exist, their values be
// sorted as well.
for _, values := range queryVals {
sort.Strings(values)
}
return goToAwsUrlEncoding(queryVals.Encode())
}
func goToAwsUrlEncoding(urlEncoded string) string {
// AWS dictates that we use %20 for encoding spaces rather than +.
// All significant +s should already be encoded into their
// hexadecimal equivalents before doing the string replace.
return strings.Replace(urlEncoded, "+", "%20", -1)
}
func canonicalHeaders(sortedHeaderNames []string, host string, hdr http.Header) (string, error) {
buffer := new(bytes.Buffer)
for _, hName := range sortedHeaderNames {
hdrVals := host
if hName != "host" {
canonHdrKey := http.CanonicalHeaderKey(hName)
sortedHdrVals := hdr[canonHdrKey]
sort.Strings(sortedHdrVals)
hdrVals = strings.Join(sortedHdrVals, ",")
}
if _, err := fmt.Fprintf(buffer, "%s:%s\n", hName, hdrVals); err != nil {
return "", err
}
}
// There is intentionally a hanging newline at the end of the
// header list.
return buffer.String(), nil
}
// Returns a SHA256 checksum of the request body. Represented as a
// lowercase hexadecimal string.
func payloadHash(req *http.Request, hasher hasher) (string, error) {
if req.Body == nil {
return hasher(bytes.NewBuffer(nil))
}
return hasher(req.Body)
}
// Retrieve the header names, lower-case them, and sort them.
func sortHeaderNames(header http.Header, injectedNames ...string) []string {
sortedNames := injectedNames
for hName := range header {
sortedNames = append(sortedNames, strings.ToLower(hName))
}
sort.Strings(sortedNames)
return sortedNames
}
func hmacHasher(key []byte, value string) []byte {
h := hmac.New(sha256.New, key)
h.Write([]byte(value))
return h.Sum(nil)
}
func sha256Hasher(payloadReader io.Reader) (string, error) {
hasher := sha256.New()
_, err := io.Copy(hasher, payloadReader)
return fmt.Sprintf("%x", hasher.Sum(nil)), err
}
func credentialScope(t time.Time, regionName, svcName string) string {
return fmt.Sprintf(
"%s/%s/%s/aws4_request",
t.Format(ISO8601BasicFormatShort),
regionName,
svcName,
)
}
// We do a lot of fmt.Fprintfs in this package. Create a higher-order
// function to elide the bytes written return value so we can submit
// these calls to an error collector.
func fprintfWrapper(w io.Writer, format string, vals ...interface{}) func() error {
return func() error {
_, err := fmt.Fprintf(w, format, vals...)
return err
}
}
// Poor man's maybe monad.
func errorCollector(writers ...func() error) error {
for _, writer := range writers {
if err := writer(); err != nil {
return err
}
}
return nil
}
// Retrieve the request time from the request. We will attempt to
// parse whatever we find, but we will not invent a request date on
// the user's behalf.
func requestTime(req *http.Request) (time.Time, error) {
// Time formats to try. We want to do everything we can to accept
// all time formats, but ultimately we may fail. Ideally this would
// live at package scope so it is not rebuilt for every request.
var timeFormats = []string{
time.RFC822,
ISO8601BasicFormat,
time.RFC1123,
time.ANSIC,
time.UnixDate,
time.RubyDate,
time.RFC822Z,
time.RFC850,
time.RFC1123Z,
time.RFC3339,
time.RFC3339Nano,
time.Kitchen,
}
// Get a date header.
var date string
if date = req.Header.Get("x-amz-date"); date == "" {
if date = req.Header.Get("date"); date == "" {
return time.Time{}, fmt.Errorf(`Could not retrieve a request date. Please provide one in either "x-amz-date", or "date".`)
}
}
// Start attempting to parse
for _, format := range timeFormats {
if parsedTime, err := time.Parse(format, date); err == nil {
return parsedTime, nil
}
}
return time.Time{}, fmt.Errorf(
"Could not parse the given date. Please utilize one of the following formats: %s",
strings.Join(timeFormats, ","),
)
}
// http.Request's Method member returns the entire method. Derive the
// verb.
func requestMethodVerb(rawMethod string) (verb string) {
verbPlus := strings.SplitN(rawMethod, " ", 2)
switch {
case len(verbPlus) == 0 || verbPlus[0] == "": // Per docs, Method will be empty if it's GET.
verb = "GET"
default:
verb = verbPlus[0]
}
return verb
}

View File

@ -1,285 +0,0 @@
package aws
import (
"bytes"
"fmt"
"net/http"
"time"
. "gopkg.in/check.v1"
)
var _ = Suite(&SigningSuite{})
type SigningSuite struct{}
// TODO(katco-): The signing methodology is a "one size fits all"
// approach. The hashes we check against don't include headers that
// are added in as requisite parts for S3. That doesn't mean the tests
// are invalid, or that signing is broken for these examples, but as
// long as we're adding headers in, it's impossible to know what the new
// signature should be. We should re-evaluate these later. See:
// https://github.com/go-amz/amz/issues/29
const v4skipReason = `Extra headers present - cannot predict generated signature (issue #29).`
// EC2 REST authentication docs: http://goo.gl/fQmAN
var testAuth = Auth{"user", "secret"}
func (s *SigningSuite) TestV4SignedUrl(c *C) {
auth := Auth{"AKIAIOSFODNN7EXAMPLE", "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"}
req, err := http.NewRequest("GET", "https://examplebucket.s3.amazonaws.com/test.txt", nil)
req.Header.Add("date", "Fri, 24 May 2013 00:00:00 GMT")
c.Assert(err, IsNil)
err = SignV4URL(req, auth, USEast.Name, "s3", 86400*time.Second)
c.Assert(err, IsNil)
c.Check(req.URL.String(), Equals, "https://examplebucket.s3.amazonaws.com/test.txt?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIAIOSFODNN7EXAMPLE%2F20130524%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Date=20130524T000000Z&X-Amz-Expires=86400&X-Amz-Signature=aeeed9bbccd4d02ee5c0109b86d86835f995330da4c265957d157751f604d404&X-Amz-SignedHeaders=host")
}
func (s *SigningSuite) TestV4SignedUrlReserved(c *C) {
auth := Auth{"AKIAIOSFODNN7EXAMPLE", "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"}
req, err := http.NewRequest("GET", "https://examplebucket.s3.amazonaws.com/some:reserved,characters", nil)
req.Header.Add("date", "Fri, 24 May 2013 00:00:00 GMT")
c.Assert(err, IsNil)
err = SignV4URL(req, auth, USEast.Name, "s3", 86400*time.Second)
c.Assert(err, IsNil)
c.Check(req.URL.String(), Equals, "https://examplebucket.s3.amazonaws.com/some:reserved,characters?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIAIOSFODNN7EXAMPLE%2F20130524%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Date=20130524T000000Z&X-Amz-Expires=86400&X-Amz-Signature=ac81e03593d6fc22ac045b9353b0242da755be2af80b981eb13917d8b9cf20a4&X-Amz-SignedHeaders=host")
}
func (s *SigningSuite) TestV4StringToSign(c *C) {
mockTime, err := time.Parse(time.RFC3339, "2011-09-09T23:36:00Z")
c.Assert(err, IsNil)
stringToSign, err := stringToSign(
mockTime,
"3511de7e95d28ecd39e9513b642aee07e54f4941150d8df8bf94b328ef7e55e2",
"20110909/us-east-1/iam/aws4_request",
)
c.Assert(err, IsNil)
const expected = `AWS4-HMAC-SHA256
20110909T233600Z
20110909/us-east-1/iam/aws4_request
3511de7e95d28ecd39e9513b642aee07e54f4941150d8df8bf94b328ef7e55e2`
c.Assert(stringToSign, Equals, expected)
}
func (s *SigningSuite) TestV4CanonicalRequest(c *C) {
c.Skip(v4skipReason)
body := new(bytes.Buffer)
_, err := fmt.Fprint(body, "Action=ListUsers&Version=2010-05-08")
c.Assert(err, IsNil)
req, err := http.NewRequest("POST", "https://iam.amazonaws.com", body)
c.Assert(err, IsNil)
req.Header.Add("content-type", "application/x-www-form-urlencoded; charset=utf-8")
req.Header.Add("host", req.URL.Host)
req.Header.Add("x-amz-date", "20110909T233600Z")
canonReq, canonReqHash, _, err := canonicalRequest(
req,
sha256Hasher,
true,
)
c.Assert(err, IsNil)
const expected = `POST
/
content-type:application/x-www-form-urlencoded; charset=utf-8
host:iam.amazonaws.com
x-amz-date:20110909T233600Z
content-type;host;x-amz-date
b6359072c78d70ebee1e81adcbab4f01bf2c23245fa365ef83fe8f1f955085e2`
c.Assert(canonReq, Equals, expected)
c.Assert(canonReqHash, Equals, "3511de7e95d28ecd39e9513b642aee07e54f4941150d8df8bf94b328ef7e55e2")
}
func (s *SigningSuite) TestV4SigningKey(c *C) {
c.Skip(v4skipReason)
mockTime, err := time.Parse(time.RFC3339, "2011-09-09T23:36:00Z")
c.Assert(err, IsNil)
c.Assert(
fmt.Sprintf("%v", signingKey(mockTime, testAuth.SecretKey, USEast.Name, "iam")),
Equals,
"[152 241 216 137 254 196 244 66 26 220 82 43 171 12 225 248 46 105 41 194 98 237 21 229 169 76 144 239 209 227 176 231]")
}
func (s *SigningSuite) TestV4BasicSignatureV4(c *C) {
c.Skip(v4skipReason)
body := new(bytes.Buffer)
req, err := http.NewRequest("POST / http/1.1", "https://host.foo.com", body)
c.Assert(err, IsNil)
req.Header.Add("Host", req.URL.Host)
req.Header.Add("Date", "Mon, 09 Sep 2011 23:36:00 GMT")
testAuth := Auth{
AccessKey: "AKIDEXAMPLE",
SecretKey: "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY",
}
err = SignV4(req, testAuth, USEast.Name, "host")
c.Assert(err, IsNil)
c.Assert(req.Header.Get("Authorization"), Equals, `AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request,SignedHeaders=date;host,Signature=22902d79e148b64e7571c3565769328423fe276eae4b26f83afceda9e767f726`)
}
func (s *SigningSuite) TestV4MoreCompleteSignature(c *C) {
req, err := http.NewRequest("GET", "https://examplebucket.s3.amazonaws.com/test.txt", nil)
c.Assert(err, IsNil)
req.Header.Set("x-amz-date", "20130524T000000Z")
req.Header.Set("Range", "bytes=0-9")
testAuth := Auth{
AccessKey: "AKIAIOSFODNN7EXAMPLE",
SecretKey: "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY",
}
err = SignV4(req, testAuth, USEast.Name, "s3")
c.Assert(err, IsNil)
c.Check(req.Header.Get("Authorization"), Equals, "AWS4-HMAC-SHA256 Credential=AKIAIOSFODNN7EXAMPLE/20130524/us-east-1/s3/aws4_request, SignedHeaders=host;range;x-amz-content-sha256;x-amz-date, Signature=f0e8bdb87c964420e857bd35b5d6ed310bd44f0170aba48dd91039c6036bdb41")
}
//
// v2 Tests
//
func (s *SigningSuite) TestV2BasicSignature(c *C) {
req, err := http.NewRequest("GET", "http://localhost/path", nil)
c.Assert(err, IsNil)
SignV2(req, testAuth)
query := req.URL.Query()
c.Assert(query.Get("SignatureVersion"), Equals, "2")
c.Assert(query.Get("SignatureMethod"), Equals, "HmacSHA256")
expected := "6lSe5QyXum0jMVc7cOUz32/52ZnL7N5RyKRk/09yiK4="
c.Assert(query.Get("Signature"), Equals, expected)
}
func (s *SigningSuite) TestV2ParamSignature(c *C) {
req, err := http.NewRequest("GET", "http://localhost/path", nil)
c.Assert(err, IsNil)
query := req.URL.Query()
for i := 1; i <= 3; i++ {
query.Add(fmt.Sprintf("param%d", i), fmt.Sprintf("value%d", i))
}
req.URL.RawQuery = query.Encode()
SignV2(req, testAuth)
expected := "XWOR4+0lmK8bD8CGDGZ4kfuSPbb2JibLJiCl/OPu1oU="
c.Assert(req.URL.Query().Get("Signature"), Equals, expected)
}
func (s *SigningSuite) TestV2ManyParams(c *C) {
req, err := http.NewRequest("GET", "http://localhost/path", nil)
c.Assert(err, IsNil)
query := req.URL.Query()
orderedVals := []int{10, 2, 3, 4, 5, 6, 7, 8, 9, 1}
for i, val := range orderedVals {
query.Add(fmt.Sprintf("param%d", i+1), fmt.Sprintf("value%d", val))
}
req.URL.RawQuery = query.Encode()
SignV2(req, testAuth)
expected := "di0sjxIvezUgQ1SIL6i+C/H8lL+U0CQ9frLIak8jkVg="
c.Assert(req.URL.Query().Get("Signature"), Equals, expected)
}
func (s *SigningSuite) TestV2Escaping(c *C) {
req, err := http.NewRequest("GET", "http://localhost/path", nil)
c.Assert(err, IsNil)
query := req.URL.Query()
query.Add("Nonce", "+ +")
req.URL.RawQuery = query.Encode()
err = SignV2(req, testAuth)
c.Assert(err, IsNil)
query = req.URL.Query()
c.Assert(query.Get("Nonce"), Equals, "+ +")
expected := "bqffDELReIqwjg/W0DnsnVUmfLK4wXVLO4/LuG+1VFA="
c.Assert(query.Get("Signature"), Equals, expected)
}
func (s *SigningSuite) TestV2SignatureExample1(c *C) {
req, err := http.NewRequest("GET", "http://sdb.amazonaws.com/", nil)
c.Assert(err, IsNil)
query := req.URL.Query()
query.Add("Timestamp", "2009-02-01T12:53:20+00:00")
query.Add("Version", "2007-11-07")
query.Add("Action", "ListDomains")
req.URL.RawQuery = query.Encode()
SignV2(req, Auth{"access", "secret"})
expected := "okj96/5ucWBSc1uR2zXVfm6mDHtgfNv657rRtt/aunQ="
c.Assert(req.URL.Query().Get("Signature"), Equals, expected)
}
// Tests example from:
// http://docs.aws.amazon.com/general/latest/gr/signature-version-2.html
// Specifically, good for testing case when URL does not contain a /
func (s *SigningSuite) TestV2SignatureTutorialExample(c *C) {
req, err := http.NewRequest("GET", "https://elasticmapreduce.amazonaws.com/", nil)
c.Assert(err, IsNil)
query := req.URL.Query()
query.Add("Timestamp", "2011-10-03T15:19:30")
query.Add("Version", "2009-03-31")
query.Add("Action", "DescribeJobFlows")
req.URL.RawQuery = query.Encode()
testAuth := Auth{"AKIAIOSFODNN7EXAMPLE", "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"}
err = SignV2(req, testAuth)
c.Assert(err, IsNil)
c.Assert(req.URL.Query().Get("Signature"), Equals, "i91nKc4PWAt0JJIdXwz9HxZCJDdiy6cf/Mj6vPxyYIs=")
}
// https://bugs.launchpad.net/goamz/+bug/1022749
func (s *SigningSuite) TestSignatureWithEndpointPath(c *C) {
req, err := http.NewRequest("GET", "http://localhost:4444/services/Cloud", nil)
c.Assert(err, IsNil)
queryStr := req.URL.Query()
queryStr.Add("Action", "RebootInstances")
queryStr.Add("Version", "2011-12-15")
queryStr.Add("InstanceId.1", "i-10a64379")
queryStr.Add("Timestamp", time.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC).In(time.UTC).Format(time.RFC3339))
req.URL.RawQuery = queryStr.Encode()
err = SignV2(req, Auth{"abc", "123"})
c.Assert(err, IsNil)
c.Assert(req.URL.Query().Get("Signature"), Equals, "gdG/vEm+c6ehhhfkrJy3+wuVzw/rzKR42TYelMwti7M=")
err = req.ParseForm()
c.Assert(err, IsNil)
c.Assert(req.Form["Signature"], DeepEquals, []string{"gdG/vEm+c6ehhhfkrJy3+wuVzw/rzKR42TYelMwti7M="})
}

View File

@ -1,33 +0,0 @@
package s3
import (
"net/http"
"gopkg.in/amz.v3/aws"
)
var originalStrategy = attempts
func BuildError(resp *http.Response) error {
return buildError(resp)
}
func SetAttemptStrategy(s *aws.AttemptStrategy) {
if s == nil {
attempts = originalStrategy
} else {
attempts = *s
}
}
func AttemptStrategy() aws.AttemptStrategy {
return attempts
}
func SetListPartsMax(n int) {
listPartsMax = n
}
func SetListMultiMax(n int) {
listMultiMax = n
}

View File

@ -1,502 +0,0 @@
package s3
import (
"bytes"
"crypto/md5"
"encoding/base64"
"encoding/hex"
"encoding/xml"
"errors"
"io"
"net/http"
"sort"
"strconv"
)
// Multi represents an unfinished multipart upload.
//
// Multipart uploads allow sending big objects in smaller chunks.
// After all parts have been sent, the upload must be explicitly
// completed by calling Complete with the list of parts.
//
// See http://goo.gl/vJfTG for an overview of multipart uploads.
type Multi struct {
Bucket *Bucket
Key string
UploadId string
}
// That's the default. Here just for testing.
var listMultiMax = 1000
type listMultiResp struct {
NextKeyMarker string
NextUploadIdMarker string
IsTruncated bool
Upload []Multi
CommonPrefixes []string `xml:"CommonPrefixes>Prefix"`
}
// ListMulti returns the list of unfinished multipart uploads in b.
//
// The prefix parameter limits the response to keys that begin with the
// specified prefix. You can use prefixes to separate a bucket into different
// groupings of keys (to get the feeling of folders, for example).
//
// The delim parameter causes the response to group all of the keys that
// share a common prefix up to the next delimiter in a single entry within
// the CommonPrefixes field. You can use delimiters to separate a bucket
// into different groupings of keys, similar to how folders would work.
//
// See http://goo.gl/ePioY for details.
func (b *Bucket) ListMulti(prefix, delim string) (multis []*Multi, prefixes []string, err error) {
req, err := http.NewRequest("GET", b.Region.ResolveS3BucketEndpoint(b.Name), nil)
if err != nil {
return nil, nil, err
}
query := req.URL.Query()
query.Add("uploads", "")
query.Add("max-uploads", strconv.FormatInt(int64(listMultiMax), 10))
query.Add("prefix", prefix)
query.Add("delimiter", delim)
req.URL.RawQuery = query.Encode()
for attempt := attempts.Start(); attempt.Next(); {
addAmazonDateHeader(req.Header)
// We need to re-sign every iteration because the query parameters change.
if err := b.S3.Sign(req, b.Auth); err != nil {
return nil, nil, err
}
resp, err := requestRetryLoop(req, attempts)
if err == nil {
if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent {
err = buildError(resp)
}
}
if err != nil {
if shouldRetry(err) && attempt.HasNext() {
continue
}
return nil, nil, err
}
var multiResp listMultiResp
if err := xml.NewDecoder(resp.Body).Decode(&multiResp); err != nil {
return nil, nil, err
}
resp.Body.Close()
for i := range multiResp.Upload {
multi := &multiResp.Upload[i]
multi.Bucket = b
multis = append(multis, multi)
}
prefixes = append(prefixes, multiResp.CommonPrefixes...)
if !multiResp.IsTruncated {
return multis, prefixes, nil
}
query := req.URL.Query()
query.Set("key-marker", multiResp.NextKeyMarker)
query.Set("upload-id-marker", multiResp.NextUploadIdMarker)
req.URL.RawQuery = query.Encode()
// Last request worked; restart our counter.
attempt = attempts.Start()
}
panic("unreachable")
}
// Multi returns a multipart upload handler for the provided key
// inside b. If a multipart upload exists for key, it is returned,
// otherwise a new multipart upload is initiated with contType and perm.
func (b *Bucket) Multi(key, contType string, perm ACL) (*Multi, error) {
multis, _, err := b.ListMulti(key, "")
if err != nil && !hasCode(err, "NoSuchUpload") {
return nil, err
}
for _, m := range multis {
if m.Key == key {
return m, nil
}
}
return b.InitMulti(key, contType, perm)
}
// InitMulti initializes a new multipart upload at the provided
// key inside b and returns a value for manipulating it.
//
// See http://goo.gl/XP8kL for details.
func (b *Bucket) InitMulti(key string, contType string, perm ACL) (*Multi, error) {
req, err := http.NewRequest("POST", b.Region.ResolveS3BucketEndpoint(b.Name), nil)
if err != nil {
return nil, err
}
req.URL.Path += key
query := req.URL.Query()
query.Add("uploads", "")
req.URL.RawQuery = query.Encode()
req.Header.Add("Content-Type", contType)
req.Header.Add("Content-Length", "0")
req.Header.Add("x-amz-acl", string(perm))
addAmazonDateHeader(req.Header)
if err := b.S3.Sign(req, b.Auth); err != nil {
return nil, err
}
resp, err := requestRetryLoop(req, attempts)
if err == nil {
if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent {
err = buildError(resp)
}
}
if err != nil {
return nil, err
}
var multiResp struct {
UploadId string `xml:"UploadId"`
}
if err := xml.NewDecoder(resp.Body).Decode(&multiResp); err != nil {
return nil, err
}
return &Multi{Bucket: b, Key: key, UploadId: multiResp.UploadId}, nil
}
// PutPart sends part n of the multipart upload, reading all the content from r.
// Each part, except for the last one, must be at least 5MB in size.
//
// See http://goo.gl/pqZer for details.
func (m *Multi) PutPart(n int, r io.ReadSeeker) (Part, error) {
partSize, _, md5b64, err := seekerInfo(r)
if err != nil {
return Part{}, err
}
return m.putPart(n, r, partSize, md5b64)
}
func (m *Multi) putPart(n int, r io.ReadSeeker, partSize int64, md5b64 string) (Part, error) {
_, err := r.Seek(0, 0)
if err != nil {
return Part{}, err
}
req, err := http.NewRequest("PUT", m.Bucket.Region.ResolveS3BucketEndpoint(m.Bucket.Name), r)
if err != nil {
return Part{}, err
}
req.Close = true
req.URL.Path += m.Key
req.ContentLength = partSize
query := req.URL.Query()
query.Add("uploadId", m.UploadId)
query.Add("partNumber", strconv.FormatInt(int64(n), 10))
req.URL.RawQuery = query.Encode()
req.Header.Add("Content-MD5", md5b64)
addAmazonDateHeader(req.Header)
if err := m.Bucket.S3.Sign(req, m.Bucket.Auth); err != nil {
return Part{}, err
}
// Signing may read the request body.
if _, err := r.Seek(0, 0); err != nil {
return Part{}, err
}
resp, err := requestRetryLoop(req, attempts)
if err != nil {
return Part{}, err
}
// Close the body only after we know the response is valid.
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent {
return Part{}, buildError(resp)
}
part := Part{n, resp.Header.Get("ETag"), partSize}
if part.ETag == "" {
return Part{}, errors.New("part upload succeeded with no ETag")
}
return part, nil
}
func seekerInfo(r io.ReadSeeker) (size int64, md5hex string, md5b64 string, err error) {
_, err = r.Seek(0, 0)
if err != nil {
return 0, "", "", err
}
digest := md5.New()
size, err = io.Copy(digest, r)
if err != nil {
return 0, "", "", err
}
sum := digest.Sum(nil)
md5hex = hex.EncodeToString(sum)
md5b64 = base64.StdEncoding.EncodeToString(sum)
return size, md5hex, md5b64, nil
}
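
For intuition, here is what the helper computes for a small in-memory payload (it is unexported, so this only runs inside the package):

size, md5hex, md5b64, err := seekerInfo(strings.NewReader("hello"))
// size   == 5
// md5hex == "5d41402abc4b2a76b9719d911017c592"
// md5b64 == "XUFAKrxLKna5cZ2REBfFkg=="
// err    == nil; the reader is rewound to the start before hashing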
type Part struct {
N int `xml:"PartNumber"`
ETag string
Size int64
}
type partSlice []Part
func (s partSlice) Len() int { return len(s) }
func (s partSlice) Less(i, j int) bool { return s[i].N < s[j].N }
func (s partSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
type listPartsResp struct {
NextPartNumberMarker string
IsTruncated bool
Part []Part
}
// That's the default. Here just for testing.
var listPartsMax = 1000
// ListParts returns the list of previously uploaded parts in m,
// ordered by part number.
//
// See http://goo.gl/ePioY for details.
func (m *Multi) ListParts() ([]Part, error) {
req, err := http.NewRequest("GET", m.Bucket.Region.ResolveS3BucketEndpoint(m.Bucket.Name), nil)
if err != nil {
return nil, err
}
req.Close = true
req.URL.Path += m.Key
query := req.URL.Query()
query.Add("uploadId", m.UploadId)
query.Add("max-parts", strconv.FormatInt(int64(listPartsMax), 10))
req.URL.RawQuery = query.Encode()
var parts partSlice
for attempt := attempts.Start(); attempt.Next(); {
addAmazonDateHeader(req.Header)
// We need to resign every iteration because we're changing the URL.
if err := m.Bucket.S3.Sign(req, m.Bucket.Auth); err != nil {
return nil, err
}
resp, err := http.DefaultClient.Do(req)
if err != nil {
return nil, err
} else if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent {
err = buildError(resp)
}
if err != nil {
if shouldRetry(err) && attempt.HasNext() {
continue
}
return nil, err
}
var listResp listPartsResp
if err := xml.NewDecoder(resp.Body).Decode(&listResp); err != nil {
return nil, err
}
parts = append(parts, listResp.Part...)
if !listResp.IsTruncated {
break
}
query.Set("part-number-marker", listResp.NextPartNumberMarker)
req.URL.RawQuery = query.Encode()
// Last request worked; restart our counter.
attempt = attempts.Start()
}
sort.Sort(parts)
return parts, nil
}
type ReaderAtSeeker interface {
io.ReaderAt
io.ReadSeeker
}
// PutAll sends all of r via a multipart upload with parts no larger
// than partSize bytes, which must be set to at least 5MB.
// Parts previously uploaded are either reused if their checksum
// and size match the new part, or otherwise overwritten with the
// new content.
// PutAll returns all the parts of m (reused or not).
func (m *Multi) PutAll(r ReaderAtSeeker, partSize int64) ([]Part, error) {
old, err := m.ListParts()
if err != nil && !hasCode(err, "NoSuchUpload") {
return nil, err
}
reuse := 0 // Index of next old part to consider reusing.
current := 1 // Part number of latest good part handled.
totalSize, err := r.Seek(0, 2)
if err != nil {
return nil, err
}
first := true // Must send at least one empty part if the file is empty.
var result []Part
NextSection:
for offset := int64(0); offset < totalSize || first; offset += partSize {
first = false
if offset+partSize > totalSize {
partSize = totalSize - offset
}
section := io.NewSectionReader(r, offset, partSize)
_, md5hex, md5b64, err := seekerInfo(section)
if err != nil {
return nil, err
}
for reuse < len(old) && old[reuse].N <= current {
// Looks like this part was already sent.
part := &old[reuse]
etag := `"` + md5hex + `"`
if part.N == current && part.Size == partSize && part.ETag == etag {
// Checksum matches. Reuse the old part.
result = append(result, *part)
current++
continue NextSection
}
reuse++
}
// Part wasn't found or doesn't match. Send it.
part, err := m.putPart(current, section, partSize, md5b64)
if err != nil {
return nil, err
}
result = append(result, part)
current++
}
return result, nil
}
type completeUpload struct {
XMLName xml.Name `xml:"CompleteMultipartUpload"`
Parts completeParts `xml:"Part"`
}
type completePart struct {
PartNumber int
ETag string
}
type completeParts []completePart
func (p completeParts) Len() int { return len(p) }
func (p completeParts) Less(i, j int) bool { return p[i].PartNumber < p[j].PartNumber }
func (p completeParts) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
// Complete assembles the given previously uploaded parts into the
// final object. This operation may take several minutes.
//
// See http://goo.gl/2Z7Tw for details.
func (m *Multi) Complete(parts []Part) error {
var c completeUpload
for _, p := range parts {
c.Parts = append(c.Parts, completePart{p.N, p.ETag})
}
sort.Sort(c.Parts)
data, err := xml.Marshal(&c)
if err != nil {
return err
}
body := bytes.NewReader(data)
req, err := http.NewRequest(
"POST",
m.Bucket.Region.ResolveS3BucketEndpoint(m.Bucket.Name),
body,
)
if err != nil {
return err
}
req.Close = true
req.ContentLength = int64(len(data))
req.URL.Path += m.Key
query := req.URL.Query()
query.Add("uploadId", m.UploadId)
req.URL.RawQuery = query.Encode()
addAmazonDateHeader(req.Header)
if err := m.Bucket.S3.Sign(req, m.Bucket.Auth); err != nil {
return err
}
// Signing may read the request body.
if _, err := body.Seek(0, 0); err != nil {
return err
}
resp, err := requestRetryLoop(req, attempts)
if err != nil {
return err
}
if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent {
return buildError(resp)
}
return nil
}
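
Putting the pieces together, a sketch of the full multipart flow for this removed API, assuming b is an *s3.Bucket obtained from New and Bucket below and backup.tar is a local file (s3.Private is the canned private ACL defined elsewhere in this package):

f, err := os.Open("backup.tar")
if err != nil {
	log.Fatal(err)
}
defer f.Close()

multi, err := b.InitMulti("backup.tar", "application/x-tar", s3.Private)
if err != nil {
	log.Fatal(err)
}
// *os.File satisfies ReaderAtSeeker; parts must be at least 5MB.
parts, err := multi.PutAll(f, 5*1024*1024)
if err != nil {
	multi.Abort() // best effort; see the caveats on Abort below
	log.Fatal(err)
}
if err := multi.Complete(parts); err != nil {
	log.Fatal(err)
}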
// Abort deletes an unfinished multipart upload and any previously
// uploaded parts for it.
//
// After a multipart upload is aborted, no additional parts can be
// uploaded using it. However, if any part uploads are currently in
// progress, those part uploads might or might not succeed. As a result,
// it might be necessary to abort a given multipart upload multiple
// times in order to completely free all storage consumed by all parts.
//
// NOTE: If the described scenario happens to you, please report back to
// the goamz authors with details. In the future such retrying should be
// handled internally, but it's not clear what happens precisely (Is an
// error returned? Is the issue completely undetectable?).
//
// See http://goo.gl/dnyJw for details.
func (m *Multi) Abort() error {
req, err := http.NewRequest("DELETE", m.Bucket.Region.ResolveS3BucketEndpoint(m.Bucket.Name), nil)
if err != nil {
return err
}
req.URL.Path += m.Key
query := req.URL.Query()
query.Add("uploadId", m.UploadId)
req.URL.RawQuery = query.Encode()
addAmazonDateHeader(req.Header)
if err := m.Bucket.S3.Sign(req, m.Bucket.Auth); err != nil {
return err
}
_, err = requestRetryLoop(req, attempts)
return err
}

View File

@ -1,383 +0,0 @@
package s3_test
import (
"encoding/xml"
"io"
"io/ioutil"
"strings"
. "gopkg.in/check.v1"
"gopkg.in/amz.v3/s3"
)
func (s *S) TestInitMulti(c *C) {
testServer.Response(200, nil, InitMultiResultDump)
b, err := s.s3.Bucket("sample")
c.Assert(err, IsNil)
multi, err := b.InitMulti("multi", "text/plain", s3.Private)
c.Assert(err, IsNil)
req := testServer.WaitRequest()
c.Assert(req.Method, Equals, "POST")
c.Assert(req.URL.Path, Equals, "/sample/multi")
c.Assert(req.Header["Content-Type"], DeepEquals, []string{"text/plain"})
c.Assert(req.Header["X-Amz-Acl"], DeepEquals, []string{"private"})
c.Assert(req.Form["uploads"], DeepEquals, []string{""})
c.Assert(multi.UploadId, Matches, "JNbR_[A-Za-z0-9.]+QQ--")
}
func (s *S) TestMultiNoPreviousUpload(c *C) {
// Don't retry the NoSuchUpload error.
s3.RetryAttempts(false)
testServer.Response(404, nil, NoSuchUploadErrorDump)
testServer.Response(200, nil, InitMultiResultDump)
b, err := s.s3.Bucket("sample")
c.Assert(err, IsNil)
multi, err := b.Multi("multi", "text/plain", s3.Private)
c.Assert(err, IsNil)
req := testServer.WaitRequest()
c.Assert(req.Method, Equals, "GET")
c.Assert(req.URL.Path, Equals, "/sample/")
c.Assert(req.Form["uploads"], DeepEquals, []string{""})
c.Assert(req.Form["prefix"], DeepEquals, []string{"multi"})
req = testServer.WaitRequest()
c.Assert(req.Method, Equals, "POST")
c.Assert(req.URL.Path, Equals, "/sample/multi")
c.Assert(req.Form["uploads"], DeepEquals, []string{""})
c.Assert(multi.UploadId, Matches, "JNbR_[A-Za-z0-9.]+QQ--")
}
func (s *S) TestMultiReturnOld(c *C) {
testServer.Response(200, nil, ListMultiResultDump)
b, err := s.s3.Bucket("sample")
c.Assert(err, IsNil)
multi, err := b.Multi("multi1", "text/plain", s3.Private)
c.Assert(err, IsNil)
c.Assert(multi.Key, Equals, "multi1")
c.Assert(multi.UploadId, Equals, "iUVug89pPvSswrikD")
req := testServer.WaitRequest()
c.Assert(req.Method, Equals, "GET")
c.Assert(req.URL.Path, Equals, "/sample/")
c.Assert(req.Form["uploads"], DeepEquals, []string{""})
c.Assert(req.Form["prefix"], DeepEquals, []string{"multi1"})
}
func (s *S) TestListParts(c *C) {
testServer.Response(200, nil, InitMultiResultDump)
testServer.Response(200, nil, ListPartsResultDump1)
testServer.Response(404, nil, NoSuchUploadErrorDump) // :-(
testServer.Response(200, nil, ListPartsResultDump2)
b, err := s.s3.Bucket("sample")
c.Assert(err, IsNil)
multi, err := b.InitMulti("multi", "text/plain", s3.Private)
c.Assert(err, IsNil)
parts, err := multi.ListParts()
c.Assert(err, IsNil)
c.Assert(parts, HasLen, 3)
c.Assert(parts[0].N, Equals, 1)
c.Assert(parts[0].Size, Equals, int64(5))
c.Assert(parts[0].ETag, Equals, `"ffc88b4ca90a355f8ddba6b2c3b2af5c"`)
c.Assert(parts[1].N, Equals, 2)
c.Assert(parts[1].Size, Equals, int64(5))
c.Assert(parts[1].ETag, Equals, `"d067a0fa9dc61a6e7195ca99696b5a89"`)
c.Assert(parts[2].N, Equals, 3)
c.Assert(parts[2].Size, Equals, int64(5))
c.Assert(parts[2].ETag, Equals, `"49dcd91231f801159e893fb5c6674985"`)
testServer.WaitRequest()
req := testServer.WaitRequest()
c.Assert(req.Method, Equals, "GET")
c.Assert(req.URL.Path, Equals, "/sample/multi")
c.Assert(req.Form.Get("uploadId"), Matches, "JNbR_[A-Za-z0-9.]+QQ--")
c.Assert(req.Form["max-parts"], DeepEquals, []string{"1000"})
testServer.WaitRequest() // The internal error.
req = testServer.WaitRequest()
c.Assert(req.Method, Equals, "GET")
c.Assert(req.URL.Path, Equals, "/sample/multi")
c.Assert(req.Form.Get("uploadId"), Matches, "JNbR_[A-Za-z0-9.]+QQ--")
c.Assert(req.Form["max-parts"], DeepEquals, []string{"1000"})
c.Assert(req.Form["part-number-marker"], DeepEquals, []string{"2"})
}
func (s *S) TestPutPart(c *C) {
headers := map[string]string{
"ETag": `"26f90efd10d614f100252ff56d88dad8"`,
}
testServer.Response(200, nil, InitMultiResultDump)
testServer.Response(200, headers, "")
b, err := s.s3.Bucket("sample")
c.Assert(err, IsNil)
multi, err := b.InitMulti("multi", "text/plain", s3.Private)
c.Assert(err, IsNil)
part, err := multi.PutPart(1, strings.NewReader("<part 1>"))
c.Assert(err, IsNil)
c.Assert(part.N, Equals, 1)
c.Assert(part.Size, Equals, int64(8))
c.Assert(part.ETag, Equals, headers["ETag"])
testServer.WaitRequest()
req := testServer.WaitRequest()
c.Assert(req.Method, Equals, "PUT")
c.Assert(req.URL.Path, Equals, "/sample/multi")
c.Assert(req.Form.Get("uploadId"), Matches, "JNbR_[A-Za-z0-9.]+QQ--")
c.Assert(req.Form["partNumber"], DeepEquals, []string{"1"})
c.Assert(req.Header["Content-Length"], DeepEquals, []string{"8"})
c.Assert(req.Header["Content-Md5"], DeepEquals, []string{"JvkO/RDWFPEAJS/1bYja2A=="})
}
func readAll(r io.Reader) string {
data, err := ioutil.ReadAll(r)
if err != nil {
panic(err)
}
return string(data)
}
func (s *S) TestPutAllNoPreviousUpload(c *C) {
// Don't retry the NoSuchUpload error.
s3.RetryAttempts(false)
etag1 := map[string]string{"ETag": `"etag1"`}
etag2 := map[string]string{"ETag": `"etag2"`}
etag3 := map[string]string{"ETag": `"etag3"`}
testServer.Response(200, nil, InitMultiResultDump)
testServer.Response(404, nil, NoSuchUploadErrorDump)
testServer.Response(200, etag1, "")
testServer.Response(200, etag2, "")
testServer.Response(200, etag3, "")
b, err := s.s3.Bucket("sample")
c.Assert(err, IsNil)
multi, err := b.InitMulti("multi", "text/plain", s3.Private)
c.Assert(err, IsNil)
parts, err := multi.PutAll(strings.NewReader("part1part2last"), 5)
c.Assert(err, IsNil)
c.Assert(parts, HasLen, 3)
c.Check(parts[0].ETag, Equals, `"etag1"`)
c.Check(parts[1].ETag, Equals, `"etag2"`)
c.Check(parts[2].ETag, Equals, `"etag3"`)
// Init
testServer.WaitRequest()
// List old parts. Won't find anything.
req := testServer.WaitRequest()
c.Assert(req.Method, Equals, "GET")
c.Assert(req.URL.Path, Equals, "/sample/multi")
// Send part 1.
req = testServer.WaitRequest()
c.Assert(req.Method, Equals, "PUT")
c.Assert(req.URL.Path, Equals, "/sample/multi")
c.Assert(req.Form["partNumber"], DeepEquals, []string{"1"})
c.Assert(req.Header["Content-Length"], DeepEquals, []string{"5"})
c.Assert(readAll(req.Body), Equals, "part1")
// Send part 2.
req = testServer.WaitRequest()
c.Assert(req.Method, Equals, "PUT")
c.Assert(req.URL.Path, Equals, "/sample/multi")
c.Assert(req.Form["partNumber"], DeepEquals, []string{"2"})
c.Assert(req.Header["Content-Length"], DeepEquals, []string{"5"})
c.Assert(readAll(req.Body), Equals, "part2")
// Send part 3 with shorter body.
req = testServer.WaitRequest()
c.Assert(req.Method, Equals, "PUT")
c.Assert(req.URL.Path, Equals, "/sample/multi")
c.Assert(req.Form["partNumber"], DeepEquals, []string{"3"})
c.Assert(req.Header["Content-Length"], DeepEquals, []string{"4"})
c.Assert(readAll(req.Body), Equals, "last")
}
func (s *S) TestPutAllZeroSizeFile(c *C) {
// Don't retry the NoSuchUpload error.
s3.RetryAttempts(false)
etag1 := map[string]string{"ETag": `"etag1"`}
testServer.Response(200, nil, InitMultiResultDump)
testServer.Response(404, nil, NoSuchUploadErrorDump)
testServer.Response(200, etag1, "")
b, err := s.s3.Bucket("sample")
c.Assert(err, IsNil)
multi, err := b.InitMulti("multi", "text/plain", s3.Private)
c.Assert(err, IsNil)
// Must send at least one part, so that completing it will work.
parts, err := multi.PutAll(strings.NewReader(""), 5)
c.Assert(parts, HasLen, 1)
c.Assert(parts[0].ETag, Equals, `"etag1"`)
c.Assert(err, IsNil)
// Init
testServer.WaitRequest()
// List old parts. Won't find anything.
req := testServer.WaitRequest()
c.Assert(req.Method, Equals, "GET")
c.Assert(req.URL.Path, Equals, "/sample/multi")
// Send empty part.
req = testServer.WaitRequest()
c.Assert(req.Method, Equals, "PUT")
c.Assert(req.URL.Path, Equals, "/sample/multi")
c.Assert(req.Form["partNumber"], DeepEquals, []string{"1"})
c.Assert(req.Header["Content-Length"], DeepEquals, []string{"0"})
c.Assert(readAll(req.Body), Equals, "")
}
func (s *S) TestPutAllResume(c *C) {
etag2 := map[string]string{"ETag": `"etag2"`}
testServer.Response(200, nil, InitMultiResultDump)
testServer.Response(200, nil, ListPartsResultDump1)
testServer.Response(200, nil, ListPartsResultDump2)
testServer.Response(200, etag2, "")
b, err := s.s3.Bucket("sample")
c.Assert(err, IsNil)
multi, err := b.InitMulti("multi", "text/plain", s3.Private)
c.Assert(err, IsNil)
// "part1" and "part3" match the checksums in ResultDump1.
// The middle one is a mismatch (it refers to "part2").
parts, err := multi.PutAll(strings.NewReader("part1partXpart3"), 5)
c.Assert(parts, HasLen, 3)
c.Assert(parts[0].N, Equals, 1)
c.Assert(parts[0].Size, Equals, int64(5))
c.Assert(parts[0].ETag, Equals, `"ffc88b4ca90a355f8ddba6b2c3b2af5c"`)
c.Assert(parts[1].N, Equals, 2)
c.Assert(parts[1].Size, Equals, int64(5))
c.Assert(parts[1].ETag, Equals, `"etag2"`)
c.Assert(parts[2].N, Equals, 3)
c.Assert(parts[2].Size, Equals, int64(5))
c.Assert(parts[2].ETag, Equals, `"49dcd91231f801159e893fb5c6674985"`)
c.Assert(err, IsNil)
// Init
testServer.WaitRequest()
// List old parts, broken in two requests.
for i := 0; i < 2; i++ {
req := testServer.WaitRequest()
c.Assert(req.Method, Equals, "GET")
c.Assert(req.URL.Path, Equals, "/sample/multi")
}
// Send part 2, as it didn't match the checksum.
req := testServer.WaitRequest()
c.Assert(req.Method, Equals, "PUT")
c.Assert(req.URL.Path, Equals, "/sample/multi")
c.Assert(req.Form["partNumber"], DeepEquals, []string{"2"})
c.Assert(req.Header["Content-Length"], DeepEquals, []string{"5"})
c.Assert(readAll(req.Body), Equals, "partX")
}
func (s *S) TestMultiComplete(c *C) {
testServer.Response(200, nil, InitMultiResultDump)
// Note the 200 response. Completing will hold the connection on some
// kind of long poll, and may return a late error even after a 200.
testServer.Response(200, nil, InternalErrorDump)
testServer.Response(200, nil, "")
b, err := s.s3.Bucket("sample")
c.Assert(err, IsNil)
multi, err := b.InitMulti("multi", "text/plain", s3.Private)
c.Assert(err, IsNil)
err = multi.Complete([]s3.Part{{2, `"ETag2"`, 32}, {1, `"ETag1"`, 64}})
c.Assert(err, IsNil)
// Grab the 2nd request.
req := testServer.WaitRequests(2)[1]
c.Assert(req.Method, Equals, "POST")
c.Assert(req.URL.Path, Equals, "/sample/multi")
c.Assert(req.Form.Get("uploadId"), Matches, "JNbR_[A-Za-z0-9.]+QQ--")
var payload struct {
XMLName xml.Name
Part []struct {
PartNumber int
ETag string
}
}
err = xml.NewDecoder(req.Body).Decode(&payload)
c.Assert(err, IsNil)
c.Assert(payload.XMLName.Local, Equals, "CompleteMultipartUpload")
c.Assert(len(payload.Part), Equals, 2)
c.Assert(payload.Part[0].PartNumber, Equals, 1)
c.Assert(payload.Part[0].ETag, Equals, `"ETag1"`)
c.Assert(payload.Part[1].PartNumber, Equals, 2)
c.Assert(payload.Part[1].ETag, Equals, `"ETag2"`)
}
func (s *S) TestMultiAbort(c *C) {
testServer.Response(200, nil, InitMultiResultDump)
testServer.Response(200, nil, "")
b, err := s.s3.Bucket("sample")
c.Assert(err, IsNil)
multi, err := b.InitMulti("multi", "text/plain", s3.Private)
c.Assert(err, IsNil)
err = multi.Abort()
c.Assert(err, IsNil)
testServer.WaitRequest()
req := testServer.WaitRequest()
c.Assert(req.Method, Equals, "DELETE")
c.Assert(req.URL.Path, Equals, "/sample/multi")
c.Assert(req.Form.Get("uploadId"), Matches, "JNbR_[A-Za-z0-9.]+QQ--")
}
func (s *S) TestListMulti(c *C) {
testServer.Response(200, nil, ListMultiResultDump)
b, err := s.s3.Bucket("sample")
c.Assert(err, IsNil)
multis, prefixes, err := b.ListMulti("", "/")
c.Assert(err, IsNil)
c.Assert(prefixes, DeepEquals, []string{"a/", "b/"})
c.Assert(multis, HasLen, 2)
c.Assert(multis[0].Key, Equals, "multi1")
c.Assert(multis[0].UploadId, Equals, "iUVug89pPvSswrikD")
c.Assert(multis[1].Key, Equals, "multi2")
c.Assert(multis[1].UploadId, Equals, "DkirwsSvPp98guVUi")
req := testServer.WaitRequest()
c.Assert(req.Method, Equals, "GET")
c.Assert(req.URL.Path, Equals, "/sample/")
c.Assert(req.Form["uploads"], DeepEquals, []string{""})
c.Assert(req.Form["prefix"], DeepEquals, []string{""})
c.Assert(req.Form["delimiter"], DeepEquals, []string{"/"})
c.Assert(req.Form["max-uploads"], DeepEquals, []string{"1000"})
}

View File

@ -1,198 +0,0 @@
package s3_test
var GetObjectErrorDump = `
<?xml version="1.0" encoding="UTF-8"?>
<Error><Code>NoSuchBucket</Code><Message>The specified bucket does not exist</Message>
<BucketName>non-existent-bucket</BucketName><RequestId>3F1B667FAD71C3D8</RequestId>
<HostId>L4ee/zrm1irFXY5F45fKXIRdOf9ktsKY/8TDVawuMK2jWRb1RF84i1uBzkdNqS5D</HostId></Error>
`
var GetListResultDump1 = `
<?xml version="1.0" encoding="UTF-8"?>
<ListBucketResult xmlns="http://s3.amazonaws.com/doc/2006-03-01">
<Name>quotes</Name>
<Prefix>N</Prefix>
<IsTruncated>false</IsTruncated>
<Contents>
<Key>Nelson</Key>
<LastModified>2006-01-01T12:00:00.000Z</LastModified>
<ETag>&quot;828ef3fdfa96f00ad9f27c383fc9ac7f&quot;</ETag>
<Size>5</Size>
<StorageClass>STANDARD</StorageClass>
<Owner>
<ID>bcaf161ca5fb16fd081034f</ID>
<DisplayName>webfile</DisplayName>
</Owner>
</Contents>
<Contents>
<Key>Neo</Key>
<LastModified>2006-01-01T12:00:00.000Z</LastModified>
<ETag>&quot;828ef3fdfa96f00ad9f27c383fc9ac7f&quot;</ETag>
<Size>4</Size>
<StorageClass>STANDARD</StorageClass>
<Owner>
<ID>bcaf1ffd86a5fb16fd081034f</ID>
<DisplayName>webfile</DisplayName>
</Owner>
</Contents>
</ListBucketResult>
`
var GetListResultDump2 = `
<ListBucketResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Name>example-bucket</Name>
<Prefix>photos/2006/</Prefix>
<Marker>some-marker</Marker>
<MaxKeys>1000</MaxKeys>
<Delimiter>/</Delimiter>
<IsTruncated>false</IsTruncated>
<CommonPrefixes>
<Prefix>photos/2006/feb/</Prefix>
</CommonPrefixes>
<CommonPrefixes>
<Prefix>photos/2006/jan/</Prefix>
</CommonPrefixes>
</ListBucketResult>
`
var InitMultiResultDump = `
<?xml version="1.0" encoding="UTF-8"?>
<InitiateMultipartUploadResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Bucket>sample</Bucket>
<Key>multi</Key>
<UploadId>JNbR_cMdwnGiD12jKAd6WK2PUkfj2VxA7i4nCwjE6t71nI9Tl3eVDPFlU0nOixhftH7I17ZPGkV3QA.l7ZD.QQ--</UploadId>
</InitiateMultipartUploadResult>
`
var ListPartsResultDump1 = `
<?xml version="1.0" encoding="UTF-8"?>
<ListPartsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Bucket>sample</Bucket>
<Key>multi</Key>
<UploadId>JNbR_cMdwnGiD12jKAd6WK2PUkfj2VxA7i4nCwjE6t71nI9Tl3eVDPFlU0nOixhftH7I17ZPGkV3QA.l7ZD.QQ--</UploadId>
<Initiator>
<ID>bb5c0f63b0b25f2d099c</ID>
<DisplayName>joe</DisplayName>
</Initiator>
<Owner>
<ID>bb5c0f63b0b25f2d099c</ID>
<DisplayName>joe</DisplayName>
</Owner>
<StorageClass>STANDARD</StorageClass>
<PartNumberMarker>0</PartNumberMarker>
<NextPartNumberMarker>2</NextPartNumberMarker>
<MaxParts>2</MaxParts>
<IsTruncated>true</IsTruncated>
<Part>
<PartNumber>1</PartNumber>
<LastModified>2013-01-30T13:45:51.000Z</LastModified>
<ETag>&quot;ffc88b4ca90a355f8ddba6b2c3b2af5c&quot;</ETag>
<Size>5</Size>
</Part>
<Part>
<PartNumber>2</PartNumber>
<LastModified>2013-01-30T13:45:52.000Z</LastModified>
<ETag>&quot;d067a0fa9dc61a6e7195ca99696b5a89&quot;</ETag>
<Size>5</Size>
</Part>
</ListPartsResult>
`
var ListPartsResultDump2 = `
<?xml version="1.0" encoding="UTF-8"?>
<ListPartsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Bucket>sample</Bucket>
<Key>multi</Key>
<UploadId>JNbR_cMdwnGiD12jKAd6WK2PUkfj2VxA7i4nCwjE6t71nI9Tl3eVDPFlU0nOixhftH7I17ZPGkV3QA.l7ZD.QQ--</UploadId>
<Initiator>
<ID>bb5c0f63b0b25f2d099c</ID>
<DisplayName>joe</DisplayName>
</Initiator>
<Owner>
<ID>bb5c0f63b0b25f2d099c</ID>
<DisplayName>joe</DisplayName>
</Owner>
<StorageClass>STANDARD</StorageClass>
<PartNumberMarker>2</PartNumberMarker>
<NextPartNumberMarker>3</NextPartNumberMarker>
<MaxParts>2</MaxParts>
<IsTruncated>false</IsTruncated>
<Part>
<PartNumber>3</PartNumber>
<LastModified>2013-01-30T13:46:50.000Z</LastModified>
<ETag>&quot;49dcd91231f801159e893fb5c6674985&quot;</ETag>
<Size>5</Size>
</Part>
</ListPartsResult>
`
var ListMultiResultDump = `
<?xml version="1.0"?>
<ListMultipartUploadsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Bucket>goamz-test-bucket-us-east-1-akiajk3wyewhctyqbf7a</Bucket>
<KeyMarker/>
<UploadIdMarker/>
<NextKeyMarker>multi1</NextKeyMarker>
<NextUploadIdMarker>iUVug89pPvSswrikD72p8uO62EzhNtpDxRmwC5WSiWDdK9SfzmDqe3xpP1kMWimyimSnz4uzFc3waVM5ufrKYQ--</NextUploadIdMarker>
<Delimiter>/</Delimiter>
<MaxUploads>1000</MaxUploads>
<IsTruncated>false</IsTruncated>
<Upload>
<Key>multi1</Key>
<UploadId>iUVug89pPvSswrikD</UploadId>
<Initiator>
<ID>bb5c0f63b0b25f2d0</ID>
<DisplayName>gustavoniemeyer</DisplayName>
</Initiator>
<Owner>
<ID>bb5c0f63b0b25f2d0</ID>
<DisplayName>gustavoniemeyer</DisplayName>
</Owner>
<StorageClass>STANDARD</StorageClass>
<Initiated>2013-01-30T18:15:47.000Z</Initiated>
</Upload>
<Upload>
<Key>multi2</Key>
<UploadId>DkirwsSvPp98guVUi</UploadId>
<Initiator>
<ID>bb5c0f63b0b25f2d0</ID>
<DisplayName>joe</DisplayName>
</Initiator>
<Owner>
<ID>bb5c0f63b0b25f2d0</ID>
<DisplayName>joe</DisplayName>
</Owner>
<StorageClass>STANDARD</StorageClass>
<Initiated>2013-01-30T18:15:47.000Z</Initiated>
</Upload>
<CommonPrefixes>
<Prefix>a/</Prefix>
</CommonPrefixes>
<CommonPrefixes>
<Prefix>b/</Prefix>
</CommonPrefixes>
</ListMultipartUploadsResult>
`
var NoSuchUploadErrorDump = `
<?xml version="1.0" encoding="UTF-8"?>
<Error>
<Code>NoSuchUpload</Code>
<Message>Not relevant</Message>
<BucketName>sample</BucketName>
<RequestId>3F1B667FAD71C3D8</RequestId>
<HostId>kjhwqk</HostId>
</Error>
`
var InternalErrorDump = `
<?xml version="1.0" encoding="UTF-8"?>
<Error>
<Code>InternalError</Code>
<Message>Not relevant</Message>
<BucketName>sample</BucketName>
<RequestId>3F1B667FAD71C3D8</RequestId>
<HostId>kjhwqk</HostId>
</Error>
`


@ -1,566 +0,0 @@
//
// goamz - Go packages to interact with the Amazon Web Services.
//
// https://wiki.ubuntu.com/goamz
//
// Copyright (c) 2011 Canonical Ltd.
//
package s3
import (
"bytes"
"encoding/xml"
"fmt"
"io"
"io/ioutil"
"log"
"net"
"net/http"
"net/url"
"strconv"
"strings"
"time"
"gopkg.in/amz.v3/aws"
)
const debug = false
// The S3 type encapsulates operations with an S3 region.
type S3 struct {
aws.Auth
aws.Region
Sign aws.Signer
private byte // Reserve the right of using private data.
}
// The Bucket type encapsulates operations with an S3 bucket.
type Bucket struct {
*S3
Name string
}
// The Owner type represents the owner of the object in an S3 bucket.
type Owner struct {
ID string
DisplayName string
}
var (
attempts = defaultAttempts
defaultAttempts = aws.AttemptStrategy{
Min: 5,
Total: 5 * time.Second,
Delay: 200 * time.Millisecond,
}
)
// RetryAttempts sets whether failing S3 requests may be retried to cope
// with eventual consistency or temporary failures. It should not be
// called while operations are in progress.
func RetryAttempts(retry bool) {
if retry {
attempts = defaultAttempts
} else {
attempts = aws.AttemptStrategy{}
}
}
// New creates a new S3.
func New(auth aws.Auth, region aws.Region) *S3 {
return &S3{auth, region, aws.SignV4Factory(region.Name, "s3"), 0}
}
// Bucket returns a Bucket with the given name.
func (s3 *S3) Bucket(name string) (*Bucket, error) {
if strings.IndexAny(name, "/:@") >= 0 {
return nil, fmt.Errorf("bad S3 bucket: %q", name)
}
if s3.Region.S3BucketEndpoint != "" || s3.Region.S3LowercaseBucket {
name = strings.ToLower(name)
}
return &Bucket{s3, name}, nil
}
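// Illustrative usage sketch (an addition for clarity, not part of the
// original file): construct a client, opt out of retries, and derive a
// bucket handle. Credentials and the bucket name are placeholders.
//
//	auth := aws.Auth{"access-key", "secret-key"}
//	cli := s3.New(auth, aws.USEast)
//	s3.RetryAttempts(false) // disable the default retry strategy
//	b, err := cli.Bucket("my-bucket")
//	if err != nil {
//		// names containing '/', ':' or '@' are rejected
//	}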
var createBucketConfiguration = `<CreateBucketConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<LocationConstraint>%s</LocationConstraint>
</CreateBucketConfiguration>`
// locationConstraint returns a *strings.Reader specifying a
// LocationConstraint if required for the region.
//
// See http://goo.gl/bh9Kq for details.
func (s3 *S3) locationConstraint() *strings.Reader {
constraint := ""
if s3.Region.S3LocationConstraint {
constraint = fmt.Sprintf(createBucketConfiguration, s3.Region.Name)
}
return strings.NewReader(constraint)
}
type ACL string
const (
Private = ACL("private")
PublicRead = ACL("public-read")
PublicReadWrite = ACL("public-read-write")
AuthenticatedRead = ACL("authenticated-read")
BucketOwnerRead = ACL("bucket-owner-read")
BucketOwnerFull = ACL("bucket-owner-full-control")
)
// Put inserts an object into the S3 bucket.
//
// See http://goo.gl/FEBPD for details.
func (b *Bucket) Put(path string, data []byte, contType string, perm ACL) error {
body := bytes.NewReader(data)
return b.PutReader(path, body, int64(len(data)), contType, perm)
}
// PutBucket creates a new bucket.
//
// See http://goo.gl/ndjnR for details.
func (b *Bucket) PutBucket(perm ACL) error {
body := b.locationConstraint()
req, err := http.NewRequest("PUT", b.ResolveS3BucketEndpoint(b.Name), body)
if err != nil {
return err
}
req.Close = true
addAmazonDateHeader(req.Header)
req.Header.Add("x-amz-acl", string(perm))
if err := b.S3.Sign(req, b.Auth); err != nil {
return err
}
// Signing may read the request body.
if _, err := body.Seek(0, 0); err != nil {
return err
}
_, err = http.DefaultClient.Do(req)
return err
}
// DelBucket removes an existing S3 bucket. All objects in the bucket must
// be removed before the bucket itself can be removed.
//
// See http://goo.gl/GoBrY for details.
func (b *Bucket) DelBucket() (err error) {
req, err := http.NewRequest("DELETE", b.ResolveS3BucketEndpoint(b.Name), nil)
if err != nil {
return err
}
req.Close = true
addAmazonDateHeader(req.Header)
if err := b.S3.Sign(req, b.Auth); err != nil {
return err
}
resp, err := requestRetryLoop(req, attempts)
if err != nil {
return err
}
resp.Body.Close()
return nil
}
// Get retrieves an object from an S3 bucket.
//
// See http://goo.gl/isCO7 for details.
func (b *Bucket) Get(path string) (data []byte, err error) {
body, err := b.GetReader(path)
if err != nil {
return nil, err
}
defer body.Close()
return ioutil.ReadAll(body)
}
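// Illustrative sketch (not part of the original file): a Put/Get round
// trip from a client's perspective.
//
//	err := b.Put("greeting", []byte("hello"), "text/plain", s3.Private)
//	if err == nil {
//		data, _ := b.Get("greeting")
//		_ = data // []byte("hello")
//	}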
// GetReader retrieves an object from an S3 bucket. It is the caller's
// responsibility to call Close on rc when finished reading.
func (b *Bucket) GetReader(path string) (rc io.ReadCloser, err error) {
req, err := http.NewRequest("GET", b.Region.ResolveS3BucketEndpoint(b.Name), nil)
if err != nil {
return nil, err
}
req.Close = true
req.URL.Path += path
addAmazonDateHeader(req.Header)
if err := b.S3.Sign(req, b.Auth); err != nil {
return nil, err
}
resp, err := requestRetryLoop(req, attempts)
if err != nil {
return nil, err
}
if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent {
return nil, buildError(resp)
}
return resp.Body, nil
}
// PutReader inserts an object into the S3 bucket by consuming data
// from r until EOF. Passing an io.ReadSeeker as r avoids buffering
// the whole body in memory for request signing.
func (b *Bucket) PutReader(path string, r io.Reader, length int64, contType string, perm ACL) error {
return b.PutReaderWithHeader(path, r, length, contType, perm, http.Header{})
}
// PutReaderWithHeader inserts an object into the S3 bucket by
// consuming data from r until EOF. It also adds the provided headers
// to the request. Passing an io.ReadSeeker as r avoids buffering the
// whole body in memory for request signing.
func (b *Bucket) PutReaderWithHeader(path string, r io.Reader, length int64, contType string, perm ACL, hdrs http.Header) error {
// Convert the reader to a ReadSeeker so we can seek after
// signing.
seeker, ok := r.(io.ReadSeeker)
if !ok {
content, err := ioutil.ReadAll(r)
if err != nil {
return err
}
seeker = bytes.NewReader(content)
}
req, err := http.NewRequest("PUT", b.Region.ResolveS3BucketEndpoint(b.Name), seeker)
if err != nil {
return err
}
req.Header = hdrs
req.Close = true
req.URL.Path += path
req.ContentLength = length
req.Header.Add("Content-Type", contType)
req.Header.Add("x-amz-acl", string(perm))
addAmazonDateHeader(req.Header)
// Determine the current offset.
const seekFromPos = 1
prevPos, err := seeker.Seek(0, seekFromPos)
if err != nil {
return err
}
if err := b.S3.Sign(req, b.Auth); err != nil {
return err
}
// Signing may read the request body.
if _, err := seeker.Seek(prevPos, 0); err != nil {
return err
}
resp, err := http.DefaultClient.Do(req)
if err != nil {
return err
}
if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent {
return buildError(resp) // closes body
}
resp.Body.Close()
return nil
}
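// Illustrative sketch (not part of the original file): stream an object
// with an extra Cache-Control header. A bytes.Reader is an io.ReadSeeker,
// so the body is not re-buffered for signing.
//
//	buf := bytes.NewReader(payload)
//	err := b.PutReaderWithHeader("path", buf, int64(buf.Len()), "text/plain",
//		s3.Private, http.Header{"Cache-Control": []string{"max-age=5"}})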
// Del removes an object from the S3 bucket.
//
// See http://goo.gl/APeTt for details.
func (b *Bucket) Del(path string) error {
req, err := http.NewRequest("DELETE", b.ResolveS3BucketEndpoint(b.Name), nil)
if err != nil {
return err
}
req.Close = true
req.URL.Path += path
addAmazonDateHeader(req.Header)
if err := b.S3.Sign(req, b.Auth); err != nil {
return err
}
resp, err := http.DefaultClient.Do(req)
if err != nil {
return err
}
resp.Body.Close()
return nil
}
// The ListResp type holds the results of a List bucket operation.
type ListResp struct {
Name string
Prefix string
Delimiter string
Marker string
NextMarker string
MaxKeys int
// IsTruncated is true if the results have been truncated because
// there are more keys and prefixes than can fit in MaxKeys.
// N.B. this is the opposite sense to that documented (incorrectly) in
// http://goo.gl/YjQTc
IsTruncated bool
Contents []Key
CommonPrefixes []string `xml:">Prefix"`
}
// The Key type represents an item stored in an S3 bucket.
type Key struct {
Key string
LastModified string
Size int64
// ETag gives the hex-encoded MD5 sum of the contents,
// surrounded with double-quotes.
ETag string
StorageClass string
Owner Owner
}
// List returns information about objects in an S3 bucket.
//
// The prefix parameter limits the response to keys that begin with the
// specified prefix.
//
// The delim parameter causes the response to group all of the keys that
// share a common prefix up to the next delimiter in a single entry within
// the CommonPrefixes field. You can use delimiters to separate a bucket
// into different groupings of keys, similar to how folders would work.
//
// The marker parameter specifies the key to start with when listing objects
// in a bucket. Amazon S3 lists objects in alphabetical order and
// will return keys alphabetically greater than the marker.
//
// The max parameter specifies how many keys + common prefixes to return in
// the response. The default is 1000.
//
// For example, given these keys in a bucket:
//
// index.html
// index2.html
// photos/2006/January/sample.jpg
// photos/2006/February/sample2.jpg
// photos/2006/February/sample3.jpg
// photos/2006/February/sample4.jpg
//
// Listing this bucket with delimiter set to "/" would yield the
// following result:
//
// &ListResp{
// Name: "sample-bucket",
// MaxKeys: 1000,
// Delimiter: "/",
// Contents: []Key{
// {Key: "index.html"}, {Key: "index2.html"},
// },
// CommonPrefixes: []string{
// "photos/",
// },
// }
//
// Listing the same bucket with delimiter set to "/" and prefix set to
// "photos/2006/" would yield the following result:
//
// &ListResp{
// Name: "sample-bucket",
// MaxKeys: 1000,
// Delimiter: "/",
// Prefix: "photos/2006/",
// CommonPrefixes: []string{
// "photos/2006/February/",
// "photos/2006/January/",
// },
// }
//
// See http://goo.gl/YjQTc for details.
func (b *Bucket) List(prefix, delim, marker string, max int) (*ListResp, error) {
req, err := http.NewRequest("GET", b.ResolveS3BucketEndpoint(b.Name), nil)
if err != nil {
return nil, err
}
req.Close = true
query := req.URL.Query()
query.Add("prefix", prefix)
query.Add("delimiter", delim)
query.Add("marker", marker)
if max != 0 {
query.Add("max-keys", strconv.FormatInt(int64(max), 10))
}
req.URL.RawQuery = query.Encode()
addAmazonDateHeader(req.Header)
if err := b.S3.Sign(req, b.Auth); err != nil {
return nil, err
}
resp, err := requestRetryLoop(req, attempts)
if err != nil {
return nil, err
}
if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent {
return nil, buildError(resp) // closes body
}
var result ListResp
err = xml.NewDecoder(resp.Body).Decode(&result)
resp.Body.Close()
return &result, err
}
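// Illustrative sketch (not part of the original file): list the common
// prefixes below "photos/2006/", as in the doc comment above.
//
//	resp, err := b.List("photos/2006/", "/", "", 0)
//	if err == nil {
//		for _, p := range resp.CommonPrefixes {
//			fmt.Println(p) // e.g. "photos/2006/January/"
//		}
//	}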
// URL returns a non-signed URL that allows retrieving the
// object at path. It only works if the object is publicly
// readable (see SignedURL).
func (b *Bucket) URL(path string) string {
return b.ResolveS3BucketEndpoint(b.Name) + path
}
// SignedURL returns a URL which can be used to fetch objects without
// signing for the given duration.
func (b *Bucket) SignedURL(path string, expires time.Duration) (string, error) {
req, err := http.NewRequest("GET", b.URL(path), nil)
if err != nil {
return "", err
}
req.Header.Add("date", time.Now().Format(aws.ISO8601BasicFormat))
if err := aws.SignV4URL(req, b.Auth, b.Region.Name, "s3", expires); err != nil {
return "", err
}
return req.URL.String(), nil
}
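// Illustrative sketch (not part of the original file): fetch a private
// object through a URL that stays valid for one hour.
//
//	url, err := b.SignedURL("name", time.Hour)
//	if err == nil {
//		resp, err := http.Get(url)
//		if err == nil {
//			defer resp.Body.Close()
//		}
//	}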
type request struct {
method string
bucket string
path string
signpath string
params url.Values
headers http.Header
baseurl string
payload io.Reader
prepared bool
}
func (req *request) url() (*url.URL, error) {
u, err := url.Parse(req.baseurl)
if err != nil {
return nil, fmt.Errorf("bad S3 endpoint URL %q: %v", req.baseurl, err)
}
u.RawQuery = req.params.Encode()
u.Path = req.path
return u, nil
}
// Error represents an error in an operation with S3.
type Error struct {
StatusCode int // HTTP status code (200, 403, ...)
Code string // S3 error code ("NoSuchBucket", ...)
Message string // The human-oriented error message
BucketName string
RequestId string
HostId string
}
func (e *Error) Error() string {
return e.Message
}
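// Illustrative sketch (not part of the original file): callers can
// type-assert to *s3.Error to inspect the S3 error code.
//
//	if s3err, ok := err.(*s3.Error); ok && s3err.Code == "NoSuchBucket" {
//		// create the bucket and retry
//	}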
func buildError(r *http.Response) error {
if debug {
log.Printf("got error (status code %v)", r.StatusCode)
data, err := ioutil.ReadAll(r.Body)
if err != nil {
log.Printf("\tread error: %v", err)
} else {
log.Printf("\tdata:\n%s\n\n", data)
}
r.Body = ioutil.NopCloser(bytes.NewBuffer(data))
}
err := Error{}
// TODO return error if Unmarshal fails?
xml.NewDecoder(r.Body).Decode(&err)
r.Body.Close()
err.StatusCode = r.StatusCode
if err.Message == "" {
err.Message = r.Status
}
if debug {
log.Printf("err: %#v\n", err)
}
return &err
}
func shouldRetry(err error) bool {
if err == nil {
return false
}
switch err {
case io.ErrUnexpectedEOF, io.EOF:
return true
}
switch e := err.(type) {
case *net.DNSError:
return true
case *net.OpError:
switch e.Op {
case "read", "write":
return true
}
case *Error:
switch e.Code {
case "InternalError", "NoSuchUpload", "NoSuchBucket":
return true
}
}
return false
}
func hasCode(err error, code string) bool {
s3err, ok := err.(*Error)
return ok && s3err.Code == code
}
// requestRetryLoop attempts to send the request until the given
// strategy says to stop.
func requestRetryLoop(req *http.Request, retryStrat aws.AttemptStrategy) (*http.Response, error) {
for attempt := retryStrat.Start(); attempt.Next(); {
if debug {
log.Printf("Full URL (in loop): %v", req.URL)
}
resp, err := http.DefaultClient.Do(req)
if err != nil {
if shouldRetry(err) && attempt.HasNext() {
continue
}
return nil, fmt.Errorf("making request: %v", err)
}
if debug {
log.Printf("Full response (in loop): %v", resp)
}
return resp, nil
}
return nil, fmt.Errorf("could not complete the request within the specified retry attempts")
}
func addAmazonDateHeader(header http.Header) {
header.Set("x-amz-date", time.Now().In(time.UTC).Format(aws.ISO8601BasicFormat))
}


@ -1,321 +0,0 @@
package s3_test
import (
"bytes"
"io/ioutil"
"net/http"
"testing"
"time"
. "gopkg.in/check.v1"
"gopkg.in/amz.v3/aws"
"gopkg.in/amz.v3/s3"
"gopkg.in/amz.v3/testutil"
)
func Test(t *testing.T) {
TestingT(t)
}
type S struct {
s3 *s3.S3
}
var _ = Suite(&S{})
var testServer = testutil.NewHTTPServer()
func (s *S) SetUpSuite(c *C) {
testServer.Start()
s.s3 = s3.New(
aws.Auth{"abc", "123"},
aws.Region{
Name: "faux-region-1",
S3Endpoint: testServer.URL,
},
)
}
func (s *S) TearDownSuite(c *C) {
s3.SetAttemptStrategy(nil)
testServer.Stop()
}
func (s *S) SetUpTest(c *C) {
attempts := aws.AttemptStrategy{
Total: 300 * time.Millisecond,
Delay: 100 * time.Millisecond,
}
s3.SetAttemptStrategy(&attempts)
}
func (s *S) TearDownTest(c *C) {
testServer.Flush()
}
// PutBucket docs: http://goo.gl/kBTCu
func (s *S) TestPutBucket(c *C) {
testServer.Response(200, nil, "")
b, err := s.s3.Bucket("bucket")
c.Assert(err, IsNil)
err = b.PutBucket(s3.Private)
c.Assert(err, IsNil)
req := testServer.WaitRequest()
c.Assert(req.Method, Equals, "PUT")
c.Assert(req.URL.Path, Equals, "/bucket/")
c.Assert(req.Header["Date"], Not(Equals), "")
}
func (s *S) TestURL(c *C) {
testServer.Response(200, nil, "content")
b, err := s.s3.Bucket("bucket")
c.Assert(err, IsNil)
url := b.URL("name")
r, err := http.Get(url)
c.Assert(err, IsNil)
data, err := ioutil.ReadAll(r.Body)
r.Body.Close()
c.Assert(err, IsNil)
c.Assert(string(data), Equals, "content")
req := testServer.WaitRequest()
c.Assert(req.Method, Equals, "GET")
c.Assert(req.URL.Path, Equals, "/bucket/name")
}
// DeleteBucket docs: http://goo.gl/GoBrY
func (s *S) TestDelBucket(c *C) {
testServer.Response(204, nil, "")
b, err := s.s3.Bucket("bucket")
c.Assert(err, IsNil)
err = b.DelBucket()
c.Assert(err, IsNil)
req := testServer.WaitRequest()
c.Assert(req.Method, Equals, "DELETE")
c.Assert(req.URL.Path, Equals, "/bucket/")
c.Assert(req.Header["Date"], Not(Equals), "")
}
// GetObject docs: http://goo.gl/isCO7
func (s *S) TestGet(c *C) {
testServer.Response(200, nil, "content")
b, err := s.s3.Bucket("bucket")
c.Assert(err, IsNil)
data, err := b.Get("name")
req := testServer.WaitRequest()
c.Assert(req.Method, Equals, "GET")
c.Assert(req.URL.Path, Equals, "/bucket/name")
c.Assert(req.Header["Date"], Not(Equals), "")
c.Assert(err, IsNil)
c.Assert(string(data), Equals, "content")
}
func (s *S) TestGetReader(c *C) {
testServer.Response(200, nil, "content")
b, err := s.s3.Bucket("bucket")
c.Assert(err, IsNil)
rc, err := b.GetReader("name")
c.Assert(err, IsNil)
data, err := ioutil.ReadAll(rc)
rc.Close()
c.Assert(err, IsNil)
c.Assert(string(data), Equals, "content")
req := testServer.WaitRequest()
c.Assert(req.Method, Equals, "GET")
c.Assert(req.URL.Path, Equals, "/bucket/name")
c.Assert(req.Header["Date"], Not(Equals), "")
}
func (s *S) TestGetNotFound(c *C) {
for i := 0; i < 10; i++ {
testServer.Response(404, nil, GetObjectErrorDump)
}
b, err := s.s3.Bucket("non-existent-bucket")
c.Assert(err, IsNil)
data, err := b.Get("non-existent")
req := testServer.WaitRequest()
c.Assert(req.Method, Equals, "GET")
c.Assert(req.URL.Path, Equals, "/non-existent-bucket/non-existent")
c.Assert(req.Header["Date"], Not(Equals), "")
s3err, _ := err.(*s3.Error)
c.Assert(s3err, NotNil)
c.Assert(s3err.StatusCode, Equals, 404)
c.Assert(s3err.BucketName, Equals, "non-existent-bucket")
c.Assert(s3err.RequestId, Equals, "3F1B667FAD71C3D8")
c.Assert(s3err.HostId, Equals, "L4ee/zrm1irFXY5F45fKXIRdOf9ktsKY/8TDVawuMK2jWRb1RF84i1uBzkdNqS5D")
c.Assert(s3err.Code, Equals, "NoSuchBucket")
c.Assert(s3err.Message, Equals, "The specified bucket does not exist")
c.Assert(s3err.Error(), Equals, "The specified bucket does not exist")
c.Assert(data, IsNil)
}
// PutObject docs: http://goo.gl/FEBPD
func (s *S) TestPutObject(c *C) {
testServer.Response(200, nil, "")
b, err := s.s3.Bucket("bucket")
c.Assert(err, IsNil)
err = b.Put("name", []byte("content"), "content-type", s3.Private)
c.Assert(err, IsNil)
req := testServer.WaitRequest()
c.Assert(req.Method, Equals, "PUT")
c.Assert(req.URL.Path, Equals, "/bucket/name")
c.Assert(req.Header["Date"], Not(DeepEquals), []string{""})
c.Assert(req.Header["Content-Type"], DeepEquals, []string{"content-type"})
c.Assert(req.Header["Content-Length"], DeepEquals, []string{"7"})
//c.Assert(req.Header["Content-MD5"], DeepEquals, "...")
c.Assert(req.Header["X-Amz-Acl"], DeepEquals, []string{"private"})
}
func (s *S) TestPutReader(c *C) {
testServer.Response(200, nil, "")
b, err := s.s3.Bucket("bucket")
c.Assert(err, IsNil)
buf := bytes.NewReader([]byte("content"))
err = b.PutReader("name", buf, int64(buf.Len()), "content-type", s3.Private)
c.Assert(err, IsNil)
req := testServer.WaitRequest()
c.Assert(req.Method, Equals, "PUT")
c.Assert(req.URL.Path, Equals, "/bucket/name")
c.Assert(req.Header["Date"], Not(DeepEquals), []string{""})
c.Assert(req.Header["Content-Type"], DeepEquals, []string{"content-type"})
c.Assert(req.Header["Content-Length"], DeepEquals, []string{"7"})
//c.Assert(req.Header["Content-MD5"], Equals, "...")
c.Assert(req.Header["X-Amz-Acl"], DeepEquals, []string{"private"})
}
func (s *S) TestPutReaderWithHeader(c *C) {
testServer.Response(200, nil, "")
b, err := s.s3.Bucket("bucket")
c.Assert(err, IsNil)
buf := bytes.NewReader([]byte("content"))
err = b.PutReaderWithHeader("name", buf, int64(buf.Len()), "content-type", s3.Private, http.Header{
"Cache-Control": []string{"max-age=5"},
})
c.Assert(err, IsNil)
req := testServer.WaitRequest()
c.Assert(req.Method, Equals, "PUT")
c.Assert(req.URL.Path, Equals, "/bucket/name")
c.Assert(req.Header["Date"], Not(DeepEquals), []string{""})
c.Assert(req.Header["Content-Type"], DeepEquals, []string{"content-type"})
c.Assert(req.Header["Content-Length"], DeepEquals, []string{"7"})
c.Assert(req.Header["X-Amz-Acl"], DeepEquals, []string{"private"})
c.Assert(req.Header["Cache-Control"], DeepEquals, []string{"max-age=5"})
}
// DelObject docs: http://goo.gl/APeTt
func (s *S) TestDelObject(c *C) {
testServer.Response(200, nil, "")
b, err := s.s3.Bucket("bucket")
c.Assert(err, IsNil)
err = b.Del("name")
c.Assert(err, IsNil)
req := testServer.WaitRequest()
c.Assert(req.Method, Equals, "DELETE")
c.Assert(req.URL.Path, Equals, "/bucket/name")
c.Assert(req.Header["Date"], Not(Equals), "")
}
// Bucket List Objects docs: http://goo.gl/YjQTc
func (s *S) TestList(c *C) {
testServer.Response(200, nil, GetListResultDump1)
b, err := s.s3.Bucket("quotes")
c.Assert(err, IsNil)
data, err := b.List("N", "", "", 0)
c.Assert(err, IsNil)
req := testServer.WaitRequest()
c.Assert(req.Method, Equals, "GET")
c.Assert(req.URL.Path, Equals, "/quotes/")
c.Assert(req.Header["Date"], Not(Equals), "")
c.Assert(req.Form["prefix"], DeepEquals, []string{"N"})
c.Assert(req.Form["delimiter"], DeepEquals, []string{""})
c.Assert(req.Form["marker"], DeepEquals, []string{""})
c.Assert(req.Form["max-keys"], DeepEquals, []string(nil))
c.Assert(data.Name, Equals, "quotes")
c.Assert(data.Prefix, Equals, "N")
c.Assert(data.IsTruncated, Equals, false)
c.Assert(len(data.Contents), Equals, 2)
c.Assert(data.Contents[0].Key, Equals, "Nelson")
c.Assert(data.Contents[0].LastModified, Equals, "2006-01-01T12:00:00.000Z")
c.Assert(data.Contents[0].ETag, Equals, `"828ef3fdfa96f00ad9f27c383fc9ac7f"`)
c.Assert(data.Contents[0].Size, Equals, int64(5))
c.Assert(data.Contents[0].StorageClass, Equals, "STANDARD")
c.Assert(data.Contents[0].Owner.ID, Equals, "bcaf161ca5fb16fd081034f")
c.Assert(data.Contents[0].Owner.DisplayName, Equals, "webfile")
c.Assert(data.Contents[1].Key, Equals, "Neo")
c.Assert(data.Contents[1].LastModified, Equals, "2006-01-01T12:00:00.000Z")
c.Assert(data.Contents[1].ETag, Equals, `"828ef3fdfa96f00ad9f27c383fc9ac7f"`)
c.Assert(data.Contents[1].Size, Equals, int64(4))
c.Assert(data.Contents[1].StorageClass, Equals, "STANDARD")
c.Assert(data.Contents[1].Owner.ID, Equals, "bcaf1ffd86a5fb16fd081034f")
c.Assert(data.Contents[1].Owner.DisplayName, Equals, "webfile")
}
func (s *S) TestListWithDelimiter(c *C) {
testServer.Response(200, nil, GetListResultDump2)
b, err := s.s3.Bucket("quotes")
c.Assert(err, IsNil)
data, err := b.List("photos/2006/", "/", "some-marker", 1000)
c.Assert(err, IsNil)
req := testServer.WaitRequest()
c.Assert(req.Method, Equals, "GET")
c.Assert(req.URL.Path, Equals, "/quotes/")
c.Assert(req.Header["Date"], Not(Equals), "")
c.Assert(req.Form["prefix"], DeepEquals, []string{"photos/2006/"})
c.Assert(req.Form["delimiter"], DeepEquals, []string{"/"})
c.Assert(req.Form["marker"], DeepEquals, []string{"some-marker"})
c.Assert(req.Form["max-keys"], DeepEquals, []string{"1000"})
c.Assert(data.Name, Equals, "example-bucket")
c.Assert(data.Prefix, Equals, "photos/2006/")
c.Assert(data.Delimiter, Equals, "/")
c.Assert(data.Marker, Equals, "some-marker")
c.Assert(data.IsTruncated, Equals, false)
c.Assert(len(data.Contents), Equals, 0)
c.Assert(data.CommonPrefixes, DeepEquals, []string{"photos/2006/feb/", "photos/2006/jan/"})
}
func (s *S) TestRetryAttempts(c *C) {
s3.SetAttemptStrategy(nil)
orig := s3.AttemptStrategy()
s3.RetryAttempts(false)
c.Assert(s3.AttemptStrategy(), Equals, aws.AttemptStrategy{})
s3.RetryAttempts(true)
c.Assert(s3.AttemptStrategy(), Equals, orig)
}


@ -1,610 +0,0 @@
package s3_test
import (
"bytes"
"crypto/md5"
"fmt"
"io/ioutil"
"net"
"net/http"
"sort"
"strings"
"time"
. "gopkg.in/check.v1"
"gopkg.in/amz.v3/aws"
"gopkg.in/amz.v3/s3"
"gopkg.in/amz.v3/testutil"
)
// AmazonServer represents an Amazon S3 server.
type AmazonServer struct {
auth aws.Auth
}
func (s *AmazonServer) SetUp(c *C) {
auth, err := aws.EnvAuth()
if err != nil {
c.Fatal(err.Error())
}
s.auth = auth
}
var _ = Suite(&AmazonClientSuite{Region: aws.USEast})
var _ = Suite(&AmazonClientSuite{Region: aws.EUWest})
var _ = Suite(&AmazonDomainClientSuite{Region: aws.USEast})
// AmazonClientSuite tests the client against a live S3 server.
type AmazonClientSuite struct {
aws.Region
srv AmazonServer
ClientTests
}
func (s *AmazonClientSuite) SetUpSuite(c *C) {
if !testutil.Amazon {
c.Skip("live tests against AWS disabled (no -amazon)")
}
s.srv.SetUp(c)
s.s3 = s3.New(s.srv.auth, s.Region)
// In case tests were interrupted in the middle before.
s.ClientTests.Cleanup()
}
func (s *AmazonClientSuite) TearDownTest(c *C) {
s.ClientTests.Cleanup()
}
// AmazonDomainClientSuite tests the client against a live S3
// server using bucket names in the endpoint domain name rather
// than the request path.
type AmazonDomainClientSuite struct {
aws.Region
srv AmazonServer
ClientTests
}
func (s *AmazonDomainClientSuite) SetUpSuite(c *C) {
if !testutil.Amazon {
c.Skip("live tests against AWS disabled (no -amazon)")
}
s.srv.SetUp(c)
region := s.Region
region.S3BucketEndpoint = "https://${bucket}.s3.amazonaws.com"
s.s3 = s3.New(s.srv.auth, region)
s.ClientTests.Cleanup()
}
func (s *AmazonDomainClientSuite) TearDownTest(c *C) {
s.ClientTests.Cleanup()
}
// ClientTests defines integration tests designed to test the client.
// It is not used as a test suite in itself, but embedded within
// another type.
type ClientTests struct {
s3 *s3.S3
}
func (s *ClientTests) Cleanup() {
killBucket(testBucket(s.s3))
}
func testBucket(s *s3.S3) *s3.Bucket {
// Watch out! If this function is changed so that the generated name
// matches a bucket somebody else owns, killBucket will happily remove
// *everything* inside that bucket.
key := s.Auth.AccessKey
if len(key) >= 8 {
key = s.Auth.AccessKey[:8]
}
b, err := s.Bucket(strings.ToLower(fmt.Sprintf(
"goamz-%s-%s-%s",
s.Region.Name,
key,
// Add in the time element to help isolate tests from one
// another.
time.Now().Format("20060102T150405.999999999"),
)))
if err != nil {
panic(err)
}
return b
}
var attempts = aws.AttemptStrategy{
Min: 5,
Total: 20 * time.Second,
Delay: 100 * time.Millisecond,
}
func killBucket(b *s3.Bucket) {
var err error
for attempt := attempts.Start(); attempt.Next(); {
err = b.DelBucket()
if err == nil {
return
}
if _, ok := err.(*net.DNSError); ok {
return
}
e, ok := err.(*s3.Error)
if ok && e.Code == "NoSuchBucket" {
return
}
if ok && e.Code == "BucketNotEmpty" {
// Errors are ignored here. Just retry.
resp, err := b.List("", "", "", 1000)
if err == nil {
for _, key := range resp.Contents {
_ = b.Del(key.Key)
}
}
multis, _, _ := b.ListMulti("", "")
for _, m := range multis {
_ = m.Abort()
}
}
}
message := "cannot delete test bucket"
if err != nil {
message += ": " + err.Error()
}
panic(message)
}
func (s *ClientTests) TestSignedUrl(c *C) {
b := testBucket(s.s3)
err := b.PutBucket(s3.PublicRead)
c.Assert(err, IsNil)
s.testSignedUrl(c, b, "name")
// Test that various special characters get escaped properly.
s.testSignedUrl(c, b, "&@$=:,!-_.*'( )")
}
func (s *ClientTests) testSignedUrl(c *C, b *s3.Bucket, name string) {
err := b.Put(name, []byte("test for signed URLs."), "text/plain", s3.Private)
c.Assert(err, IsNil)
defer b.Del(name)
req, err := http.NewRequest("GET", b.URL(name), nil)
c.Assert(err, IsNil)
resp, err := http.DefaultClient.Do(req)
c.Assert(err, IsNil)
err = s3.BuildError(resp)
c.Check(err, NotNil)
c.Check(err.(*s3.Error).Code, Equals, "AccessDenied")
url, err := b.SignedURL(name, 24*time.Hour)
c.Assert(err, IsNil)
req, err = http.NewRequest("GET", url, nil)
c.Assert(err, IsNil)
resp, err = http.DefaultClient.Do(req)
c.Assert(err, IsNil)
body, err := ioutil.ReadAll(resp.Body)
c.Assert(err, IsNil)
c.Check(string(body), Equals, "test for signed URLs.")
}
func (s *ClientTests) TestBasicFunctionality(c *C) {
b := testBucket(s.s3)
err := b.PutBucket(s3.PublicRead)
c.Assert(err, IsNil)
err = b.Put("name", []byte("yo!"), "text/plain", s3.PublicRead)
c.Assert(err, IsNil)
defer b.Del("name")
data, err := b.Get("name")
c.Assert(err, IsNil)
c.Assert(string(data), Equals, "yo!")
buf := bytes.NewReader([]byte("hey!"))
err = b.PutReader("name2", buf, int64(buf.Len()), "text/plain", s3.Private)
c.Assert(err, IsNil)
defer b.Del("name2")
rc, err := b.GetReader("name2")
c.Assert(err, IsNil)
data, err = ioutil.ReadAll(rc)
c.Check(err, IsNil)
c.Check(string(data), Equals, "hey!")
rc.Close()
data, err = b.Get("name2")
c.Assert(err, IsNil)
c.Assert(string(data), Equals, "hey!")
err = b.Del("name")
c.Assert(err, IsNil)
err = b.Del("name2")
c.Assert(err, IsNil)
err = b.DelBucket()
c.Assert(err, IsNil)
}
func (s *ClientTests) TestGetNotFound(c *C) {
b, err := s.s3.Bucket("goamz-" + s.s3.Auth.AccessKey)
c.Assert(err, IsNil)
data, err := b.Get("non-existent")
s3err, _ := err.(*s3.Error)
c.Assert(s3err, NotNil)
c.Assert(s3err.StatusCode, Equals, 404)
c.Assert(s3err.Code, Equals, "NoSuchBucket")
c.Assert(s3err.Message, Equals, "The specified bucket does not exist")
c.Assert(data, IsNil)
}
// Communicate with all endpoints to see if they are alive.
func (s *ClientTests) TestRegions(c *C) {
type result struct {
aws.Region
error
}
results := make(chan result, len(aws.Regions))
for _, region := range aws.Regions {
go func(r aws.Region) {
s := s3.New(s.s3.Auth, r)
b, err := s.Bucket("goamz-" + s.Auth.AccessKey)
if !c.Check(err, IsNil) {
return
}
_, err = b.Get("non-existent")
if !c.Check(err, NotNil) {
return
}
results <- result{r, err}
}(region)
}
for range aws.Regions {
result := <-results
if s3_err, ok := result.error.(*s3.Error); ok {
if result.Region == aws.CNNorth && s3_err.Code == "InvalidAccessKeyId" {
c.Log("You must utilize an account specifically for CNNorth.")
continue
}
c.Check(s3_err.Code, Matches, "NoSuchBucket")
} else if _, ok = result.error.(*net.DNSError); ok {
// Okay as well.
} else {
c.Errorf("Non-S3 error: %s", result.error)
}
}
}
var objectNames = []string{
"index.html",
"index2.html",
"photos/2006/February/sample2.jpg",
"photos/2006/February/sample3.jpg",
"photos/2006/February/sample4.jpg",
"photos/2006/January/sample.jpg",
"test/bar",
"test/foo",
}
func keys(names ...string) []s3.Key {
ks := make([]s3.Key, len(names))
for i, name := range names {
ks[i].Key = name
}
return ks
}
// Because ListResp also carries all the parameters of the
// request, we use it to specify both the request parameters
// and the expected results. The Contents field is
// used only for the key names inside it.
var listTests = []s3.ListResp{
// normal list.
{
Contents: keys(objectNames...),
}, {
Marker: objectNames[0],
Contents: keys(objectNames[1:]...),
}, {
Marker: objectNames[0] + "a",
Contents: keys(objectNames[1:]...),
}, {
Marker: "z",
},
// limited results.
{
MaxKeys: 2,
Contents: keys(objectNames[0:2]...),
IsTruncated: true,
}, {
MaxKeys: 2,
Marker: objectNames[0],
Contents: keys(objectNames[1:3]...),
IsTruncated: true,
}, {
MaxKeys: 2,
Marker: objectNames[len(objectNames)-2],
Contents: keys(objectNames[len(objectNames)-1:]...),
},
// with delimiter
{
Delimiter: "/",
CommonPrefixes: []string{"photos/", "test/"},
Contents: keys("index.html", "index2.html"),
}, {
Delimiter: "/",
Prefix: "photos/2006/",
CommonPrefixes: []string{"photos/2006/February/", "photos/2006/January/"},
}, {
Delimiter: "/",
Prefix: "t",
CommonPrefixes: []string{"test/"},
}, {
Delimiter: "/",
MaxKeys: 1,
Contents: keys("index.html"),
IsTruncated: true,
}, {
Delimiter: "/",
MaxKeys: 1,
Marker: "index2.html",
CommonPrefixes: []string{"photos/"},
IsTruncated: true,
}, {
Delimiter: "/",
MaxKeys: 1,
Marker: "photos/",
CommonPrefixes: []string{"test/"},
IsTruncated: false,
}, {
Delimiter: "Feb",
CommonPrefixes: []string{"photos/2006/Feb"},
Contents: keys("index.html", "index2.html", "photos/2006/January/sample.jpg", "test/bar", "test/foo"),
},
}
func (s *ClientTests) TestDoublePutBucket(c *C) {
b := testBucket(s.s3)
err := b.PutBucket(s3.PublicRead)
c.Assert(err, IsNil)
err = b.PutBucket(s3.PublicRead)
if err != nil {
c.Assert(err, FitsTypeOf, new(s3.Error))
c.Assert(err.(*s3.Error).Code, Equals, "BucketAlreadyOwnedByYou")
}
}
func (s *ClientTests) TestBucketList(c *C) {
b := testBucket(s.s3)
defer b.DelBucket()
err := b.PutBucket(s3.Private)
c.Assert(err, IsNil)
objData := make(map[string][]byte)
for i, path := range objectNames {
data := []byte(strings.Repeat("a", i))
err := b.Put(path, data, "text/plain", s3.Private)
c.Assert(err, IsNil)
defer b.Del(path)
objData[path] = data
}
for i, t := range listTests {
c.Logf("test %d", i)
resp, err := b.List(t.Prefix, t.Delimiter, t.Marker, t.MaxKeys)
c.Assert(err, IsNil)
c.Check(resp.Name, Equals, b.Name)
c.Check(resp.Delimiter, Equals, t.Delimiter)
c.Check(resp.IsTruncated, Equals, t.IsTruncated)
c.Check(resp.CommonPrefixes, DeepEquals, t.CommonPrefixes)
checkContents(c, resp.Contents, objData, t.Contents)
}
}
func etag(data []byte) string {
sum := md5.New()
sum.Write(data)
return fmt.Sprintf(`"%x"`, sum.Sum(nil))
}
func checkContents(c *C, contents []s3.Key, data map[string][]byte, expected []s3.Key) {
c.Assert(contents, HasLen, len(expected))
for i, k := range contents {
c.Check(k.Key, Equals, expected[i].Key)
// TODO mtime
c.Check(k.Size, Equals, int64(len(data[k.Key])))
c.Check(k.ETag, Equals, etag(data[k.Key]))
}
}
func (s *ClientTests) TestMultiInitPutList(c *C) {
b := testBucket(s.s3)
err := b.PutBucket(s3.Private)
c.Assert(err, IsNil)
multi, err := b.InitMulti("multi", "text/plain", s3.Private)
c.Assert(err, IsNil)
c.Assert(multi.UploadId, Matches, ".+")
defer multi.Abort()
var sent []s3.Part
for i := 0; i < 5; i++ {
p, err := multi.PutPart(i+1, strings.NewReader(fmt.Sprintf("<part %d>", i+1)))
c.Assert(err, IsNil)
c.Assert(p.N, Equals, i+1)
c.Assert(p.Size, Equals, int64(8))
c.Assert(p.ETag, Matches, ".+")
sent = append(sent, p)
}
s3.SetListPartsMax(2)
parts, err := multi.ListParts()
c.Assert(err, IsNil)
c.Assert(parts, HasLen, len(sent))
for i := range parts {
c.Assert(parts[i].N, Equals, sent[i].N)
c.Assert(parts[i].Size, Equals, sent[i].Size)
c.Assert(parts[i].ETag, Equals, sent[i].ETag)
}
err = multi.Complete(parts)
s3err, failed := err.(*s3.Error)
c.Assert(failed, Equals, true)
c.Assert(s3err.Code, Equals, "EntityTooSmall")
err = multi.Abort()
c.Assert(err, IsNil)
_, err = multi.ListParts()
s3err, ok := err.(*s3.Error)
c.Assert(ok, Equals, true)
c.Assert(s3err.Code, Equals, "NoSuchUpload")
}
// This may take a minute or more due to the minimum part size S3
// accepts for multipart uploads.
func (s *ClientTests) TestMultiComplete(c *C) {
b := testBucket(s.s3)
err := b.PutBucket(s3.Private)
c.Assert(err, IsNil)
multi, err := b.InitMulti("multi", "text/plain", s3.Private)
c.Assert(err, IsNil)
c.Assert(multi.UploadId, Matches, ".+")
defer multi.Abort()
// Minimum size S3 accepts for all but the last part is 5MB.
data1 := make([]byte, 5*1024*1024)
data2 := []byte("<part 2>")
part1, err := multi.PutPart(1, bytes.NewReader(data1))
c.Assert(err, IsNil)
part2, err := multi.PutPart(2, bytes.NewReader(data2))
c.Assert(err, IsNil)
// Purposefully reversed. The order requirement must be handled.
err = multi.Complete([]s3.Part{part2, part1})
c.Assert(err, IsNil)
data, err := b.Get("multi")
c.Assert(err, IsNil)
c.Assert(len(data), Equals, len(data1)+len(data2))
for i := range data1 {
if data[i] != data1[i] {
c.Fatalf("uploaded object at byte %d: want %d, got %d", i, data1[i], data[i])
}
}
c.Assert(string(data[len(data1):]), Equals, string(data2))
}
type multiList []*s3.Multi
func (l multiList) Len() int { return len(l) }
func (l multiList) Less(i, j int) bool { return l[i].Key < l[j].Key }
func (l multiList) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
func (s *ClientTests) TestListMulti(c *C) {
b := testBucket(s.s3)
err := b.PutBucket(s3.Private)
c.Assert(err, IsNil)
// Ensure an empty state before testing its behavior.
multis, _, err := b.ListMulti("", "")
c.Assert(err, IsNil)
for _, m := range multis {
err := m.Abort()
c.Assert(err, IsNil)
}
keys := []string{
"a/multi2",
"a/multi3",
"b/multi4",
"multi1",
}
for _, key := range keys {
m, err := b.InitMulti(key, "", s3.Private)
c.Assert(err, IsNil)
defer m.Abort()
}
// Amazon's implementation of the multiple-request listing for
// multipart uploads in progress seems broken in multiple ways.
// (next tokens are not provided, etc).
//s3.SetListMultiMax(2)
multis, prefixes, err := b.ListMulti("", "")
c.Assert(err, IsNil)
for attempt := attempts.Start(); attempt.Next() && len(multis) < len(keys); {
multis, prefixes, err = b.ListMulti("", "")
c.Assert(err, IsNil)
}
sort.Sort(multiList(multis))
c.Assert(prefixes, IsNil)
var gotKeys []string
for _, m := range multis {
gotKeys = append(gotKeys, m.Key)
}
c.Assert(gotKeys, DeepEquals, keys)
for _, m := range multis {
c.Assert(m.Bucket, Equals, b)
c.Assert(m.UploadId, Matches, ".+")
}
multis, prefixes, err = b.ListMulti("", "/")
for attempt := attempts.Start(); attempt.Next() && len(prefixes) < 2; {
multis, prefixes, err = b.ListMulti("", "/")
c.Assert(err, IsNil)
}
c.Assert(err, IsNil)
c.Assert(prefixes, DeepEquals, []string{"a/", "b/"})
c.Assert(multis, HasLen, 1)
c.Assert(multis[0].Bucket, Equals, b)
c.Assert(multis[0].Key, Equals, "multi1")
c.Assert(multis[0].UploadId, Matches, ".+")
for attempt := attempts.Start(); attempt.Next() && len(multis) < 2; {
multis, prefixes, err = b.ListMulti("", "")
c.Assert(err, IsNil)
}
multis, prefixes, err = b.ListMulti("a/", "/")
c.Assert(err, IsNil)
c.Assert(prefixes, IsNil)
c.Assert(multis, HasLen, 2)
c.Assert(multis[0].Bucket, Equals, b)
c.Assert(multis[0].Key, Equals, "a/multi2")
c.Assert(multis[0].UploadId, Matches, ".+")
c.Assert(multis[1].Bucket, Equals, b)
c.Assert(multis[1].Key, Equals, "a/multi3")
c.Assert(multis[1].UploadId, Matches, ".+")
}
func (s *ClientTests) TestMultiPutAllZeroLength(c *C) {
b := testBucket(s.s3)
err := b.PutBucket(s3.Private)
c.Assert(err, IsNil)
multi, err := b.InitMulti("multi", "text/plain", s3.Private)
c.Assert(err, IsNil)
defer multi.Abort()
// This tests an edge case. Amazon requires at least one
// part for multipart uploads to work, even if the part is empty.
parts, err := multi.PutAll(strings.NewReader(""), 5*1024*1024)
c.Assert(err, IsNil)
c.Assert(parts, HasLen, 1)
c.Assert(parts[0].Size, Equals, int64(0))
c.Assert(parts[0].ETag, Equals, `"d41d8cd98f00b204e9800998ecf8427e"`)
err = multi.Complete(parts)
c.Assert(err, IsNil)
}


@ -1,77 +0,0 @@
package s3_test
import (
. "gopkg.in/check.v1"
"gopkg.in/amz.v3/aws"
"gopkg.in/amz.v3/s3"
"gopkg.in/amz.v3/s3/s3test"
)
type LocalServer struct {
auth aws.Auth
region aws.Region
srv *s3test.Server
config *s3test.Config
}
func (s *LocalServer) SetUp(c *C) {
srv, err := s3test.NewServer(s.config)
c.Assert(err, IsNil)
c.Assert(srv, NotNil)
s.srv = srv
s.region = aws.Region{
Name: "faux-region-1",
S3Endpoint: srv.URL(),
S3LocationConstraint: true, // s3test server requires a LocationConstraint
}
}
// LocalServerSuite defines tests that will run
// against the local s3test server. It includes
// selected tests from ClientTests;
// when the s3test functionality is sufficient, it should
// include all of them, and ClientTests can be simply embedded.
type LocalServerSuite struct {
srv LocalServer
clientTests ClientTests
}
var (
// run tests twice, once in us-east-1 mode, once not.
_ = Suite(&LocalServerSuite{})
_ = Suite(&LocalServerSuite{
srv: LocalServer{
config: &s3test.Config{
Send409Conflict: true,
},
},
})
)
func (s *LocalServerSuite) SetUpSuite(c *C) {
s.srv.SetUp(c)
s.clientTests.s3 = s3.New(s.srv.auth, s.srv.region)
s.clientTests.Cleanup()
}
func (s *LocalServerSuite) TearDownTest(c *C) {
s.clientTests.Cleanup()
}
func (s *LocalServerSuite) TestBasicFunctionality(c *C) {
s.clientTests.TestBasicFunctionality(c)
}
func (s *LocalServerSuite) TestGetNotFound(c *C) {
s.clientTests.TestGetNotFound(c)
}
func (s *LocalServerSuite) TestBucketList(c *C) {
s.clientTests.TestBucketList(c)
}
func (s *LocalServerSuite) TestDoublePutBucket(c *C) {
s.clientTests.TestDoublePutBucket(c)
}


@ -1,629 +0,0 @@
package s3test
import (
"bytes"
"crypto/md5"
"encoding/hex"
"encoding/xml"
"fmt"
"io"
"io/ioutil"
"log"
"net"
"net/http"
"net/url"
"regexp"
"sort"
"strconv"
"strings"
"sync"
"time"
"gopkg.in/amz.v3/s3"
)
const debug = false
type s3Error struct {
statusCode int
XMLName struct{} `xml:"Error"`
Code string
Message string
BucketName string
RequestId string
HostId string
}
type action struct {
srv *Server
w http.ResponseWriter
req *http.Request
reqId string
}
// Config controls the internal behaviour of the Server. A nil config is the default
// and behaves as if all configurations assume their default behaviour. Once passed
// to NewServer, the configuration must not be modified.
type Config struct {
// Send409Conflict controls how the Server will respond to calls to PUT on a
// previously existing bucket. The default is false, and corresponds to the
// us-east-1 s3 endpoint. Setting this value to true emulates the behaviour of
// all other regions.
// http://docs.amazonwebservices.com/AmazonS3/latest/API/ErrorResponses.html
Send409Conflict bool
}
func (c *Config) send409Conflict() bool {
if c != nil {
return c.Send409Conflict
}
return false
}
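// Illustrative sketch (not part of the original file): start a fake
// server that emulates non-us-east-1 bucket creation and point a client
// at it, as the test suites in this package do.
//
//	srv, err := s3test.NewServer(&s3test.Config{Send409Conflict: true})
//	// ...
//	region := aws.Region{
//		Name:                 "faux-region-1",
//		S3Endpoint:           srv.URL(),
//		S3LocationConstraint: true, // the test server requires one
//	}
//	cli := s3.New(aws.Auth{"key", "secret"}, region)
//	defer srv.Quit()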
// Server is a fake S3 server for testing purposes.
// All of the data for the server is kept in memory.
type Server struct {
url string
reqId int
listener net.Listener
mu sync.Mutex
buckets map[string]*bucket
config *Config
}
type bucket struct {
name string
acl s3.ACL
ctime time.Time
objects map[string]*object
}
type object struct {
name string
mtime time.Time
meta http.Header // metadata to return with requests.
checksum []byte // also held as Content-MD5 in meta.
data []byte
}
// A resource encapsulates the subject of an HTTP request.
// The resource referred to may or may not exist
// when the request is made.
type resource interface {
put(a *action) interface{}
get(a *action) interface{}
post(a *action) interface{}
delete(a *action) interface{}
}
func NewServer(config *Config) (*Server, error) {
l, err := net.Listen("tcp", "localhost:0")
if err != nil {
return nil, fmt.Errorf("cannot listen on localhost: %v", err)
}
srv := &Server{
listener: l,
url: "http://" + l.Addr().String(),
buckets: make(map[string]*bucket),
config: config,
}
go http.Serve(l, http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
srv.serveHTTP(w, req)
}))
return srv, nil
}
// Quit closes down the server.
func (srv *Server) Quit() {
srv.listener.Close()
}
// URL returns a URL for the server.
func (srv *Server) URL() string {
return srv.url
}
func fatalf(code int, codeStr string, errf string, a ...interface{}) {
panic(&s3Error{
statusCode: code,
Code: codeStr,
Message: fmt.Sprintf(errf, a...),
})
}
// serveHTTP serves the S3 protocol.
func (srv *Server) serveHTTP(w http.ResponseWriter, req *http.Request) {
// ignore error from ParseForm as it's usually spurious.
req.ParseForm()
srv.mu.Lock()
defer srv.mu.Unlock()
if debug {
log.Printf("s3test %q %q", req.Method, req.URL)
}
a := &action{
srv: srv,
w: w,
req: req,
reqId: fmt.Sprintf("%09X", srv.reqId),
}
srv.reqId++
var r resource
defer func() {
switch err := recover().(type) {
case *s3Error:
switch r := r.(type) {
case objectResource:
err.BucketName = r.bucket.name
case bucketResource:
err.BucketName = r.name
}
err.RequestId = a.reqId
// TODO HostId
w.Header().Set("Content-Type", "application/xml")
w.WriteHeader(err.statusCode)
xmlMarshal(w, err)
case nil:
default:
panic(err)
}
}()
r = srv.resourceForURL(req.URL)
var resp interface{}
switch req.Method {
case "PUT":
resp = r.put(a)
case "GET", "HEAD":
resp = r.get(a)
case "DELETE":
resp = r.delete(a)
case "POST":
resp = r.post(a)
default:
fatalf(400, "MethodNotAllowed", "unknown http request method %q", req.Method)
}
if resp != nil && req.Method != "HEAD" {
xmlMarshal(w, resp)
}
}
// xmlMarshal is the same as xml.Marshal except that
// it panics on error. The marshalling should not fail,
// but we want to know if it does.
func xmlMarshal(w io.Writer, x interface{}) {
if err := xml.NewEncoder(w).Encode(x); err != nil {
panic(fmt.Errorf("error marshalling %#v: %v", x, err))
}
}
// In a fully implemented test server, each of these would have
// its own resource type.
var unimplementedBucketResourceNames = map[string]bool{
"acl": true,
"lifecycle": true,
"policy": true,
"location": true,
"logging": true,
"notification": true,
"versions": true,
"requestPayment": true,
"versioning": true,
"website": true,
"uploads": true,
}
var unimplementedObjectResourceNames = map[string]bool{
"uploadId": true,
"acl": true,
"torrent": true,
"uploads": true,
}
var pathRegexp = regexp.MustCompile("/(([^/]+)(/(.*))?)?")
// resourceForURL returns a resource object for the given URL.
func (srv *Server) resourceForURL(u *url.URL) (r resource) {
m := pathRegexp.FindStringSubmatch(u.Path)
if m == nil {
fatalf(404, "InvalidURI", "Couldn't parse the specified URI")
}
bucketName := m[2]
objectName := m[4]
if bucketName == "" {
return nullResource{} // root
}
b := bucketResource{
name: bucketName,
bucket: srv.buckets[bucketName],
}
q := u.Query()
if objectName == "" {
for name := range q {
if unimplementedBucketResourceNames[name] {
return nullResource{}
}
}
return b
}
if b.bucket == nil {
fatalf(404, "NoSuchBucket", "The specified bucket does not exist")
}
objr := objectResource{
name: objectName,
version: q.Get("versionId"),
bucket: b.bucket,
}
for name := range q {
if unimplementedObjectResourceNames[name] {
return nullResource{}
}
}
if obj := objr.bucket.objects[objr.name]; obj != nil {
objr.object = obj
}
return objr
}
// nullResource has error stubs for all resource methods.
type nullResource struct{}
func notAllowed() interface{} {
fatalf(400, "MethodNotAllowed", "The specified method is not allowed against this resource")
return nil
}
func (nullResource) put(a *action) interface{} { return notAllowed() }
func (nullResource) get(a *action) interface{} { return notAllowed() }
func (nullResource) post(a *action) interface{} { return notAllowed() }
func (nullResource) delete(a *action) interface{} { return notAllowed() }
const timeFormat = "2006-01-02T15:04:05.000Z07:00"
type bucketResource struct {
name string
bucket *bucket // non-nil if the bucket already exists.
}
// GET on a bucket lists the objects in the bucket.
// http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketGET.html
func (r bucketResource) get(a *action) interface{} {
if r.bucket == nil {
fatalf(404, "NoSuchBucket", "The specified bucket does not exist")
}
delimiter := a.req.Form.Get("delimiter")
marker := a.req.Form.Get("marker")
maxKeys := -1
if s := a.req.Form.Get("max-keys"); s != "" {
i, err := strconv.Atoi(s)
if err != nil || i < 0 {
fatalf(400, "InvalidArgument", "invalid value for max-keys: %q", s)
}
maxKeys = i
}
prefix := a.req.Form.Get("prefix")
a.w.Header().Set("Content-Type", "application/xml")
if a.req.Method == "HEAD" {
return nil
}
var objs orderedObjects
// first get all matching objects and arrange them in alphabetical order.
for name, obj := range r.bucket.objects {
if strings.HasPrefix(name, prefix) {
objs = append(objs, obj)
}
}
sort.Sort(objs)
if maxKeys <= 0 {
maxKeys = 1000
}
resp := &s3.ListResp{
Name: r.bucket.name,
Prefix: prefix,
Delimiter: delimiter,
Marker: marker,
MaxKeys: maxKeys,
}
var prefixes []string
for _, obj := range objs {
if !strings.HasPrefix(obj.name, prefix) {
continue
}
name := obj.name
isPrefix := false
if delimiter != "" {
if i := strings.Index(obj.name[len(prefix):], delimiter); i >= 0 {
name = obj.name[:len(prefix)+i+len(delimiter)]
if prefixes != nil && prefixes[len(prefixes)-1] == name {
continue
}
isPrefix = true
}
}
if name <= marker {
continue
}
if len(resp.Contents)+len(prefixes) >= maxKeys {
resp.IsTruncated = true
break
}
if isPrefix {
prefixes = append(prefixes, name)
} else {
// Contents contains only keys not found in CommonPrefixes
resp.Contents = append(resp.Contents, obj.s3Key())
}
}
resp.CommonPrefixes = prefixes
return resp
}
// orderedObjects holds a slice of objects that can be sorted
// by name.
type orderedObjects []*object
func (s orderedObjects) Len() int {
return len(s)
}
func (s orderedObjects) Swap(i, j int) {
s[i], s[j] = s[j], s[i]
}
func (s orderedObjects) Less(i, j int) bool {
return s[i].name < s[j].name
}
func (obj *object) s3Key() s3.Key {
return s3.Key{
Key: obj.name,
LastModified: obj.mtime.Format(timeFormat),
Size: int64(len(obj.data)),
ETag: fmt.Sprintf(`"%x"`, obj.checksum),
// TODO StorageClass
// TODO Owner
}
}
// DELETE on a bucket deletes the bucket if it's not empty.
func (r bucketResource) delete(a *action) interface{} {
b := r.bucket
if b == nil {
fatalf(404, "NoSuchBucket", "The specified bucket does not exist")
}
if len(b.objects) > 0 {
fatalf(400, "BucketNotEmpty", "The bucket you tried to delete is not empty")
}
delete(a.srv.buckets, b.name)
return nil
}
// PUT on a bucket creates the bucket.
// http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUT.html
func (r bucketResource) put(a *action) interface{} {
var created bool
if r.bucket == nil {
if !validBucketName(r.name) {
fatalf(400, "InvalidBucketName", "The specified bucket is not valid")
}
if loc := locationConstraint(a); loc == "" {
fatalf(400, "InvalidRequets", "The unspecified location constraint is incompatible for the region specific endpoint this request was sent to.")
}
// TODO validate acl
r.bucket = &bucket{
name: r.name,
// TODO default acl
objects: make(map[string]*object),
}
a.srv.buckets[r.name] = r.bucket
created = true
}
if !created && a.srv.config.send409Conflict() {
fatalf(409, "BucketAlreadyOwnedByYou", "Your previous request to create the named bucket succeeded and you already own it.")
}
r.bucket.acl = s3.ACL(a.req.Header.Get("x-amz-acl"))
return nil
}
func (bucketResource) post(a *action) interface{} {
fatalf(400, "Method", "bucket POST method not available")
return nil
}
// validBucketName returns whether name is a valid bucket name.
// Here are the rules, from:
// http://docs.amazonwebservices.com/AmazonS3/2006-03-01/dev/BucketRestrictions.html
//
// Can contain lowercase letters, numbers, periods (.), underscores (_),
// and dashes (-). You can use uppercase letters for buckets only in the
// US Standard region.
//
// Must start with a number or letter
//
// Must be between 3 and 255 characters long
//
// There's one extra rule (must not be formatted as an IP address, e.g. 192.168.5.4),
// but the real S3 server does not seem to check that rule, so we will not
// check it either.
//
func validBucketName(name string) bool {
if len(name) < 3 || len(name) > 255 {
return false
}
r := name[0]
if !(r >= '0' && r <= '9' || r >= 'a' && r <= 'z') {
return false
}
for _, r := range name {
switch {
case r >= '0' && r <= '9':
case r >= 'a' && r <= 'z':
case r == '_' || r == '-':
case r == '.':
default:
return false
}
}
return true
}
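// Illustrative examples of the rules above (not part of the original
// file):
//
//	validBucketName("my-bucket.2006") // true
//	validBucketName("ab")             // false: shorter than 3 characters
//	validBucketName("My-Bucket")      // false: uppercase is rejected here
//	validBucketName("-bucket")        // false: must start with a letter or digit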
var responseParams = map[string]bool{
"content-type": true,
"content-language": true,
"expires": true,
"cache-control": true,
"content-disposition": true,
"content-encoding": true,
}
type objectResource struct {
name string
version string
bucket *bucket // always non-nil.
object *object // may be nil.
}
// GET on an object gets the contents of the object.
// http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectGET.html
func (objr objectResource) get(a *action) interface{} {
obj := objr.object
if obj == nil {
fatalf(404, "NoSuchKey", "The specified key does not exist.")
}
h := a.w.Header()
// add metadata
for name, d := range obj.meta {
h[name] = d
}
// override header values in response to request parameters.
for name, vals := range a.req.Form {
if strings.HasPrefix(name, "response-") {
name = name[len("response-"):]
if !responseParams[name] {
continue
}
h.Set(name, vals[0])
}
}
if r := a.req.Header.Get("Range"); r != "" {
fatalf(400, "NotImplemented", "range unimplemented")
}
// TODO Last-Modified-Since
// TODO If-Modified-Since
// TODO If-Unmodified-Since
// TODO If-Match
// TODO If-None-Match
// TODO Connection: close ??
// TODO x-amz-request-id
h.Set("Content-Length", fmt.Sprint(len(obj.data)))
h.Set("ETag", hex.EncodeToString(obj.checksum))
h.Set("Last-Modified", obj.mtime.Format(time.RFC1123))
if a.req.Method == "HEAD" {
return nil
}
// TODO avoid holding the lock when writing data.
_, err := a.w.Write(obj.data)
if err != nil {
// we can't do much except just log the fact.
log.Printf("error writing data: %v", err)
}
return nil
}
var metaHeaders = map[string]bool{
"Content-MD5": true,
"x-amz-acl": true,
"Content-Type": true,
"Content-Encoding": true,
"Content-Disposition": true,
}
// PUT on an object creates the object.
func (objr objectResource) put(a *action) interface{} {
// TODO Cache-Control header
// TODO Expires header
// TODO x-amz-server-side-encryption
// TODO x-amz-storage-class
// TODO is this correct, or should we erase all previous metadata?
obj := objr.object
if obj == nil {
obj = &object{
name: objr.name,
meta: make(http.Header),
}
}
var expectHash []byte
if c := a.req.Header.Get("Content-MD5"); c != "" {
var err error
expectHash, err = hex.DecodeString(c)
if err != nil || len(expectHash) != md5.Size {
fatalf(400, "InvalidDigest", "The Content-MD5 you specified was invalid")
}
}
sum := md5.New()
// TODO avoid holding lock while reading data.
data, err := ioutil.ReadAll(io.TeeReader(a.req.Body, sum))
if err != nil {
fatalf(400, "TODO", "read error")
}
gotHash := sum.Sum(nil)
if expectHash != nil && !bytes.Equal(gotHash, expectHash) {
fatalf(400, "BadDigest", "The Content-MD5 you specified did not match what we received")
}
if a.req.ContentLength >= 0 && int64(len(data)) != a.req.ContentLength {
fatalf(400, "IncompleteBody", "You did not provide the number of bytes specified by the Content-Length HTTP header")
}
// PUT request has been successful - save data and metadata
for key, values := range a.req.Header {
key = http.CanonicalHeaderKey(key)
if metaHeaders[key] || strings.HasPrefix(key, "X-Amz-Meta-") {
obj.meta[key] = values
}
}
obj.data = data
obj.checksum = gotHash
obj.mtime = time.Now()
objr.bucket.objects[objr.name] = obj
return nil
}
func (objr objectResource) delete(a *action) interface{} {
delete(objr.bucket.objects, objr.name)
return nil
}
func (objr objectResource) post(a *action) interface{} {
fatalf(400, "MethodNotAllowed", "The specified method is not allowed against this resource")
return nil
}
type CreateBucketConfiguration struct {
LocationConstraint string
}
// locationConstraint parses the <CreateBucketConfiguration /> request body (if present).
// If there is no body, an empty string will be returned.
func locationConstraint(a *action) string {
var body bytes.Buffer
if _, err := io.Copy(&body, a.req.Body); err != nil {
fatalf(400, "InvalidRequest", err.Error())
}
if body.Len() == 0 {
return ""
}
var loc CreateBucketConfiguration
if err := xml.NewDecoder(&body).Decode(&loc); err != nil {
fatalf(400, "InvalidRequest", err.Error())
}
return loc.LocationConstraint
}
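// Illustrative sketch (not part of the original file): the request body
// parsed above matches the createBucketConfiguration template in the
// client package, e.g.
//
//	<CreateBucketConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
//	  <LocationConstraint>faux-region-1</LocationConstraint>
//	</CreateBucketConfiguration>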


@ -5,9 +5,12 @@ import (
"fmt"
"io"
"io/ioutil"
"math/rand"
"sort"
"testing"
crand "crypto/rand"
"github.com/restic/restic/backend"
. "github.com/restic/restic/test"
)
@ -37,6 +40,117 @@ func testBackendConfig(b backend.Backend, t *testing.T) {
}
}
func testGetReader(b backend.Backend, t testing.TB) {
length := rand.Intn(1<<24) + 2000
data := make([]byte, length)
_, err := io.ReadFull(crand.Reader, data)
OK(t, err)
blob, err := b.Create()
OK(t, err)
id := backend.Hash(data)
_, err = blob.Write(data)
OK(t, err)
OK(t, blob.Finalize(backend.Data, id.String()))
for i := 0; i < 500; i++ {
l := rand.Intn(length + 2000)
o := rand.Intn(length + 2000)
d := data
if o < len(d) {
d = d[o:]
} else {
o = len(d)
d = d[:0]
}
if l > 0 && l < len(d) {
d = d[:l]
}
rd, err := b.GetReader(backend.Data, id.String(), uint(o), uint(l))
OK(t, err)
buf, err := ioutil.ReadAll(rd)
OK(t, err)
if !bytes.Equal(buf, d) {
t.Fatalf("data not equal")
}
}
OK(t, b.Remove(backend.Data, id.String()))
}
func testWrite(b backend.Backend, t testing.TB) {
length := rand.Intn(1<<23) + 2000
data := make([]byte, length)
_, err := io.ReadFull(crand.Reader, data)
OK(t, err)
id := backend.Hash(data)
for i := 0; i < 10; i++ {
blob, err := b.Create()
OK(t, err)
o := 0
for o < len(data) {
l := rand.Intn(len(data) - o)
if len(data)-o < 20 {
l = len(data) - o
}
n, err := blob.Write(data[o : o+l])
OK(t, err)
if n != l {
t.Fatalf("wrong number of bytes written, want %v, got %v", l, n)
}
o += l
}
name := fmt.Sprintf("%s-%d", id, i)
OK(t, blob.Finalize(backend.Data, name))
rd, err := b.Get(backend.Data, name)
OK(t, err)
buf, err := ioutil.ReadAll(rd)
OK(t, err)
if len(buf) != len(data) {
t.Fatalf("number of bytes does not match, want %v, got %v", len(data), len(buf))
}
if !bytes.Equal(buf, data) {
t.Fatalf("data not equal")
}
}
}
func store(t testing.TB, b backend.Backend, tpe backend.Type, data []byte) {
id := backend.Hash(data)
blob, err := b.Create()
OK(t, err)
_, err = blob.Write(data)
OK(t, err)
OK(t, blob.Finalize(tpe, id.String()))
}
func read(t testing.TB, rd io.Reader, expectedData []byte) {
buf, err := ioutil.ReadAll(rd)
OK(t, err)
if expectedData != nil {
Equals(t, expectedData, buf)
}
}
func testBackend(b backend.Backend, t *testing.T) {
testBackendConfig(b, t)
@ -70,41 +184,34 @@ func testBackend(b backend.Backend, t *testing.T) {
// add files
for _, test := range TestStrings {
// store string in backend
store(t, b, tpe, []byte(test.data))

// test Get()
rd, err := b.Get(tpe, test.id)
OK(t, err)
Assert(t, rd != nil, "Get() returned nil")
read(t, rd, []byte(test.data))
OK(t, rd.Close())

// test GetReader()
rd, err = b.GetReader(tpe, test.id, 0, uint(len(test.data)))
OK(t, err)
Assert(t, rd != nil, "GetReader() returned nil")
read(t, rd, []byte(test.data))
OK(t, rd.Close())

// try to read it out with an offset and a length
start := 1
end := len(test.data) - 2
length := end - start
rd, err = b.GetReader(tpe, test.id, uint(start), uint(length))
OK(t, err)
Assert(t, rd != nil, "GetReader() returned nil")
read(t, rd, []byte(test.data[start:end]))
OK(t, rd.Close())
}
// test adding the first file again
@ -161,7 +268,6 @@ func testBackend(b backend.Backend, t *testing.T) {
found, err := b.Test(tpe, id.String())
OK(t, err)
Assert(t, found, fmt.Sprintf("id %q was not found before removal", id))
OK(t, b.Remove(tpe, id.String()))
@ -170,6 +276,8 @@ func testBackend(b backend.Backend, t *testing.T) {
Assert(t, !found, fmt.Sprintf("id %q found after removal", id))
}
}
}
testGetReader(b, t)
testWrite(b, t)
}

backend/local/config.go Normal file

@ -0,0 +1,15 @@
package local
import (
"errors"
"strings"
)
// ParseConfig parses a local backend config.
func ParseConfig(cfg string) (interface{}, error) {
if !strings.HasPrefix(cfg, "local:") {
return nil, errors.New(`invalid format, prefix "local:" not found`)
}
return cfg[6:], nil
}
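
A minimal usage sketch for this parser (the repository path is made up):

package main

import (
	"fmt"
	"log"

	"github.com/restic/restic/backend/local"
)

func main() {
	cfg, err := local.ParseConfig("local:/srv/restic-repo")
	if err != nil {
		log.Fatal(err)
	}
	// ParseConfig strips the "local:" prefix and returns the remaining path.
	fmt.Println(cfg.(string)) // /srv/restic-repo
}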


@ -21,7 +21,7 @@ type Local struct {
open map[string][]*os.File // Contains open files. Guarded by 'mu'.
}
// Open opens the local backend as specified by config.
func Open(dir string) (*Local, error) {
items := []string{
dir,


@ -125,17 +125,21 @@ func memCreate(be *MemoryBackend) (Blob, error) {
return blob, nil
}
// ReadCloser wraps a reader and adds a noop Close method if rd does not implement io.Closer.
func ReadCloser(rd io.Reader) io.ReadCloser {
return readCloser{rd}
}
// readCloser wraps a reader and adds a noop Close method if rd does not implement io.Closer.
type readCloser struct {
io.Reader
}
func (rd readCloser) Close() error {
if r, ok := rd.Reader.(io.Closer); ok {
return r.Close()
}
return nil
}

backend/s3/config.go Normal file

@ -0,0 +1,81 @@
package s3
import (
"errors"
"net/url"
"strings"
)
// Config contains all configuration necessary to connect to an s3 compatible
// server.
type Config struct {
Endpoint string
UseHTTP bool
KeyID, Secret string
Bucket string
}
// ParseConfig parses the string s and extracts the s3 config. The two
// supported configuration formats are s3://host/bucketname and
// s3:host/bucketname. The host can also be a valid s3 region name, or an
// http(s) URL of an s3-compatible server.
func ParseConfig(s string) (interface{}, error) {
if strings.HasPrefix(s, "s3://") {
s = s[5:]
data := strings.SplitN(s, "/", 2)
if len(data) != 2 {
return nil, errors.New("s3: invalid format, host/region or bucket name not found")
}
cfg := Config{
Endpoint: data[0],
Bucket: data[1],
}
return cfg, nil
}
data := strings.SplitN(s, ":", 2)
if len(data) != 2 {
return nil, errors.New("s3: invalid format")
}
if data[0] != "s3" {
return nil, errors.New(`s3: config does not start with "s3"`)
}
s = data[1]
cfg := Config{}
rest := strings.Split(s, "/")
if len(rest) < 2 {
return nil, errors.New("s3: region or bucket not found")
}
if len(rest) == 2 {
// assume that just a region name and a bucket has been specified, in
// the format region/bucket
cfg.Endpoint = rest[0]
cfg.Bucket = rest[1]
} else {
// assume that a URL has been specified, parse it and use the path as
// the bucket name.
url, err := url.Parse(s)
if err != nil {
return nil, err
}
if url.Path == "" {
return nil, errors.New("s3: bucket name not found")
}
cfg.Endpoint = url.Host
if url.Scheme == "http" {
cfg.UseHTTP = true
}
cfg.Bucket = url.Path[1:]
}
return cfg, nil
}

backend/s3/config_test.go Normal file

@ -0,0 +1,42 @@
package s3
import "testing"
var configTests = []struct {
s string
cfg Config
}{
{"s3://eu-central-1/bucketname", Config{
Endpoint: "eu-central-1",
Bucket: "bucketname",
}},
{"s3:eu-central-1/foobar", Config{
Endpoint: "eu-central-1",
Bucket: "foobar",
}},
{"s3:https://hostname:9999/foobar", Config{
Endpoint: "hostname:9999",
Bucket: "foobar",
}},
{"s3:http://hostname:9999/foobar", Config{
Endpoint: "hostname:9999",
Bucket: "foobar",
UseHTTP: true,
}},
}
func TestParseConfig(t *testing.T) {
for i, test := range configTests {
cfg, err := ParseConfig(test.s)
if err != nil {
t.Errorf("test %d failed: %v", i, err)
continue
}
if cfg != test.cfg {
t.Errorf("test %d: wrong config, want:\n %v\ngot:\n %v",
i, test.cfg, cfg)
continue
}
}
}

backend/s3/cont_reader.go Normal file

@ -0,0 +1,16 @@
package s3
import "io"
// ContinuousReader implements an io.Reader on top of an io.ReaderAt, advancing
// an offset.
type ContinuousReader struct {
R io.ReaderAt
Offset int64
}
func (c *ContinuousReader) Read(p []byte) (int, error) {
n, err := c.R.ReadAt(p, c.Offset)
c.Offset += int64(n)
return n, err
}
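
A minimal sketch of how ContinuousReader can be used, with strings.Reader standing in for the io.ReaderAt (payload and offset are made up):

package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"strings"

	"github.com/restic/restic/backend/s3"
)

func main() {
	// skip the 4-byte prefix "s3: " by starting at offset 4
	ra := strings.NewReader("s3: continuous reading")
	cr := &s3.ContinuousReader{R: ra, Offset: 4}

	data, err := ioutil.ReadAll(cr)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(data)) // continuous reading
}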


@ -3,15 +3,13 @@ package s3
import (
"bytes"
"errors"
"fmt"
"io"
"io/ioutil"
"strings"
"gopkg.in/amz.v3/aws"
"gopkg.in/amz.v3/s3"
"github.com/minio/minio-go"
"github.com/restic/restic/backend"
"github.com/restic/restic/debug"
)
const maxKeysInList = 1000
@ -26,41 +24,48 @@ func s3path(t backend.Type, name string) string {
}
type S3Backend struct {
client minio.CloudStorageClient
connChan chan struct{}
bucketname string
}
// Open opens the S3 backend at bucket and region. The bucket is created if it
// does not exist yet.
func Open(cfg Config) (backend.Backend, error) {
debug.Log("s3.Open", "open, config %#v", cfg)

client, err := minio.New(cfg.Endpoint, cfg.KeyID, cfg.Secret, cfg.UseHTTP)
if err != nil {
return nil, err
}

be := &S3Backend{client: client, bucketname: cfg.Bucket}
be.createConnections()

if err := client.BucketExists(cfg.Bucket); err != nil {
debug.Log("s3.Open", "BucketExists(%v) returned err %v, trying to create the bucket", cfg.Bucket, err)

// create new bucket with default ACL in default region
err = client.MakeBucket(cfg.Bucket, "", "")
if err != nil {
return nil, err
}
}

return be, nil
}
func (be *S3Backend) createConnections() {
be.connChan = make(chan struct{}, connLimit)
for i := 0; i < connLimit; i++ {
be.connChan <- struct{}{}
}
}
// Location returns this backend's location (the bucket name).
func (be *S3Backend) Location() string {
return be.path
return be.bucketname
}
type s3Blob struct {
@ -93,6 +98,7 @@ func (bb *s3Blob) Size() uint {
}
func (bb *s3Blob) Finalize(t backend.Type, name string) error {
debug.Log("s3.blob.Finalize()", "bucket %v, finalize %v, %d bytes", bb.b.bucketname, name, bb.buf.Len())
if bb.final {
return errors.New("Already finalized")
}
@ -102,16 +108,30 @@ func (bb *s3Blob) Finalize(t backend.Type, name string) error {
path := s3path(t, name)
// Check key does not already exist
_, err := bb.b.client.StatObject(bb.b.bucketname, path)
if err == nil {
debug.Log("s3.blob.Finalize()", "%v already exists", name)
return errors.New("key already exists")
}
expectedBytes := bb.buf.Len()
<-bb.b.connChan
debug.Log("s3.Finalize", "PutObject(%v, %v, %v, %v)",
bb.b.bucketname, path, int64(bb.buf.Len()), "binary/octet-stream")
n, err := bb.b.client.PutObject(bb.b.bucketname, path, bb.buf, "binary/octet-stream")
debug.Log("s3.Finalize", "finalized %v -> n %v, err %#v", path, n, err)
bb.b.connChan <- struct{}{}
bb.buf.Reset()
if err != nil {
return err
}
if n != int64(expectedBytes) {
return errors.New("could not store all bytes")
}
return nil
}
// Create creates a new Blob. The data is available only after Finalize()
@ -129,36 +149,45 @@ func (be *S3Backend) Create() (backend.Blob, error) {
// name. The reader should be closed after draining it.
func (be *S3Backend) Get(t backend.Type, name string) (io.ReadCloser, error) {
path := s3path(t, name)
rc, err := be.client.GetObject(be.bucketname, path)
debug.Log("s3.Get", "%v %v -> err %v", t, name, err)
if err != nil {
return nil, err
}
return rc, nil
}
// GetReader returns an io.ReadCloser for the Blob with the given name of
// type t at offset and length. If length is 0, the reader reads until EOF.
func (be *S3Backend) GetReader(t backend.Type, name string, offset, length uint) (io.ReadCloser, error) {
debug.Log("s3.GetReader", "%v %v, offset %v len %v", t, name, offset, length)
path := s3path(t, name)
obj, err := be.client.GetObject(be.bucketname, path)
if err != nil {
debug.Log("s3.GetReader", " err %v", err)
return nil, err
}
if offset > 0 {
_, err = obj.Seek(int64(offset), 0)
if err != nil {
return nil, err
}
}
if length == 0 {
return obj, nil
}
return backend.LimitReadCloser(obj, int64(length)), nil
}
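
A minimal sketch of reading a byte range through this method, assuming a backend.Backend value returned by Open (names and the range are illustrative):

package example

import (
	"io/ioutil"
	"log"

	"github.com/restic/restic/backend"
)

// readRange reads length bytes starting at offset from the data blob with the
// given name; with length == 0 it reads to EOF, as documented above.
func readRange(be backend.Backend, name string, offset, length uint) []byte {
	rd, err := be.GetReader(backend.Data, name, offset, length)
	if err != nil {
		log.Fatal(err)
	}
	defer rd.Close()

	buf, err := ioutil.ReadAll(rd)
	if err != nil {
		log.Fatal(err)
	}
	return buf
}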
// Test returns true if a blob of the given type and name exists in the backend.
func (be *S3Backend) Test(t backend.Type, name string) (bool, error) {
found := false
path := s3path(t, name)
_, err := be.client.StatObject(be.bucketname, path)
if err == nil {
found = true
}
@ -170,45 +199,26 @@ func (be *S3Backend) Test(t backend.Type, name string) (bool, error) {
// Remove removes the blob with the given name and type.
func (be *S3Backend) Remove(t backend.Type, name string) error {
path := s3path(t, name)
err := be.client.RemoveObject(be.bucketname, path)
debug.Log("s3.Remove", "%v %v -> err %v", t, name, err)
return err
}
// List returns a channel that yields all names of blobs of type t. A
// goroutine is started for this. If the channel done is closed, sending
// stops.
func (be *S3Backend) List(t backend.Type, done <-chan struct{}) <-chan string {
debug.Log("s3.List", "listing %v", t)
ch := make(chan string)
prefix := s3path(t, "")
listresp := be.client.ListObjects(be.bucketname, prefix, true, done)
go func() {
defer close(ch)
for obj := range listresp {
m := strings.TrimPrefix(obj.Key, prefix)
if m == "" {
continue
}
@ -224,24 +234,37 @@ func (be *S3Backend) List(t backend.Type, done <-chan struct{}) <-chan string {
return ch
}
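
A minimal usage sketch for List; closing done stops the listing goroutine early if the caller returns before draining the channel:

package example

import (
	"fmt"

	"github.com/restic/restic/backend"
)

func printSnapshots(be backend.Backend) {
	done := make(chan struct{})
	defer close(done)

	// the channel is closed by the backend once all names have been sent
	for name := range be.List(backend.Snapshot, done) {
		fmt.Println(name)
	}
}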
// Remove keys for a specified backend type.
func (be *S3Backend) removeKeys(t backend.Type) error {
done := make(chan struct{})
defer close(done)
for key := range be.List(t, done) {
err := be.Remove(t, key)
if err != nil {
return err
}
}
return nil
}
// Delete removes all restic keys in the bucket. It will not remove the bucket itself.
func (be *S3Backend) Delete() error {
alltypes := []backend.Type{
backend.Data,
backend.Key,
backend.Lock,
backend.Snapshot,
backend.Index}
for _, t := range alltypes {
err := be.removeKeys(t)
if err != nil {
return err
}
}
return be.Remove(backend.Config, "")
}
// Close does nothing

backend/s3/s3_test.go Normal file

@ -0,0 +1,7 @@
package s3
import "testing"
func TestGetReader(t *testing.T) {
}


@ -1,54 +1,42 @@
package backend_test
import (
"net/url"
"os"
"testing"
"gopkg.in/amz.v3/aws"
"gopkg.in/amz.v3/s3"
"gopkg.in/amz.v3/s3/s3test"
bes3 "github.com/restic/restic/backend/s3"
"github.com/restic/restic/backend/s3"
. "github.com/restic/restic/test"
)
type deleter interface {
Delete() error
}
func TestS3Backend(t *testing.T) {
if TestS3Server == "" {
t.Skip("s3 test server not available")
}
url, err := url.Parse(TestS3Server)
OK(t, err)
cfg := s3.Config{
Endpoint: url.Host,
Bucket: "restictestbucket",
KeyID: os.Getenv("AWS_ACCESS_KEY_ID"),
Secret: os.Getenv("AWS_SECRET_ACCESS_KEY"),
}
if url.Scheme == "http" {
cfg.UseHTTP = true
}
be, err := s3.Open(cfg)
OK(t, err)
testBackend(be, t)
del := be.(deleter)
OK(t, del.Delete())
}

backend/sftp/config.go Normal file

@ -0,0 +1,68 @@
package sftp
import (
"errors"
"net/url"
"strings"
)
// Config collects all information required to connect to an sftp server.
type Config struct {
User, Host, Dir string
}
// ParseConfig extracts all information for the sftp connection from the string s.
func ParseConfig(s string) (interface{}, error) {
if strings.HasPrefix(s, "sftp://") {
return parseFormat1(s)
}
// otherwise assume the second format, sftp:user@host:path
return parseFormat2(s)
}
// parseFormat1 parses the first format, a URL of the form
// "sftp://[user@]host/path".
func parseFormat1(s string) (Config, error) {
url, err := url.Parse(s)
if err != nil {
return Config{}, err
}
cfg := Config{
Host: url.Host,
Dir: url.Path[1:],
}
if url.User != nil {
cfg.User = url.User.Username()
}
return cfg, nil
}
// parseFormat2 parses the second format, sftp:user@host:path
func parseFormat2(s string) (cfg Config, err error) {
// split user/host and path at the second colon
data := strings.SplitN(s, ":", 3)
if len(data) < 3 {
return Config{}, errors.New("sftp: invalid format, hostname or path not found")
}
if data[0] != "sftp" {
return Config{}, errors.New(`invalid format, does not start with "sftp:"`)
}
userhost := data[1]
cfg.Dir = data[2]
data = strings.SplitN(userhost, "@", 2)
if len(data) == 2 {
cfg.User = data[0]
cfg.Host = data[1]
} else {
cfg.Host = userhost
}
return cfg, nil
}
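
A minimal sketch exercising both formats accepted by ParseConfig (user, host and path are made up):

package main

import (
	"fmt"
	"log"

	"github.com/restic/restic/backend/sftp"
)

func main() {
	for _, s := range []string{
		"sftp://user@host/srv/repo",
		"sftp:user@host:/srv/repo",
	} {
		cfg, err := sftp.ParseConfig(s)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("%#v\n", cfg)
	}
}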

Some files were not shown because too many files have changed in this diff.