2
2
mirror of https://github.com/octoleo/restic.git synced 2024-11-26 23:06:32 +00:00

Merge pull request #553 from restic/update-minio-go

Update minio-go
This commit is contained in:
Alexander Neumann 2016-07-29 21:23:39 +02:00
commit c0fb2c306d
39 changed files with 3004 additions and 1111 deletions

2
vendor/manifest vendored
View File

@ -28,7 +28,7 @@
{ {
"importpath": "github.com/minio/minio-go", "importpath": "github.com/minio/minio-go",
"repository": "https://github.com/minio/minio-go", "repository": "https://github.com/minio/minio-go",
"revision": "a8babf4220d5dd7240d011bdb7be567b439460f9", "revision": "76b385d8c68e7079c5fe6182570a6bd51cb36905",
"branch": "master" "branch": "master"
}, },
{ {

View File

@ -1,536 +0,0 @@
## API Documentation
### Minio client object creation
Minio client object is created using minio-go:
```go
package main
import (
"fmt"
"github.com/minio/minio-go"
)
func main() {
secure := true // Make HTTPS requests by default.
s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", secure)
if err != nil {
fmt.Println(err)
return
}
}
```
s3Client can be used to perform operations on S3 storage. APIs are described below.
### Bucket operations
* [`MakeBucket`](#MakeBucket)
* [`ListBuckets`](#ListBuckets)
* [`BucketExists`](#BucketExists)
* [`RemoveBucket`](#RemoveBucket)
* [`ListObjects`](#ListObjects)
* [`ListIncompleteUploads`](#ListIncompleteUploads)
### Object operations
* [`GetObject`](#GetObject)
* [`PutObject`](#PutObject)
* [`CopyObject`](#CopyObject)
* [`StatObject`](#StatObject)
* [`RemoveObject`](#RemoveObject)
* [`RemoveIncompleteUpload`](#RemoveIncompleteUpload)
### File operations.
* [`FPutObject`](#FPutObject)
* [`FGetObject`](#FGetObject)
### Bucket policy operations.
* [`SetBucketPolicy`](#SetBucketPolicy)
* [`GetBucketPolicy`](#GetBucketPolicy)
* [`RemoveBucketPolicy`](#RemoveBucketPolicy)
### Presigned operations
* [`PresignedGetObject`](#PresignedGetObject)
* [`PresignedPutObject`](#PresignedPutObject)
* [`PresignedPostPolicy`](#PresignedPostPolicy)
### Bucket operations
---------------------------------------
<a name="MakeBucket">
#### MakeBucket(bucketName string, location string) error
Create a new bucket.
__Parameters__
* `bucketName` _string_ - Name of the bucket.
* `location` _string_ - region valid values are _us-west-1_, _us-west-2_, _eu-west-1_, _eu-central-1_, _ap-southeast-1_, _ap-northeast-1_, _ap-southeast-2_, _sa-east-1_
__Example__
```go
err := s3Client.MakeBucket("mybucket", "us-west-1")
if err != nil {
fmt.Println(err)
return
}
fmt.Println("Successfully created mybucket.")
```
---------------------------------------
<a name="ListBuckets">
#### ListBuckets() ([]BucketInfo, error)
Lists all buckets.
`bucketList` lists bucket in the format:
* `bucket.Name` _string_: bucket name
* `bucket.CreationDate` time.Time : date when bucket was created
__Example__
```go
buckets, err := s3Client.ListBuckets()
if err != nil {
fmt.Println(err)
return
}
for _, bucket := range buckets {
fmt.Println(bucket)
}
```
---------------------------------------
<a name="BucketExists">
#### BucketExists(bucketName string) error
Check if bucket exists.
__Parameters__
* `bucketName` _string_ : name of the bucket
__Example__
```go
err := s3Client.BucketExists("mybucket")
if err != nil {
fmt.Println(err)
return
}
```
---------------------------------------
<a name="RemoveBucket">
#### RemoveBucket(bucketName string) error
Remove a bucket.
__Parameters__
* `bucketName` _string_ : name of the bucket
__Example__
```go
err := s3Client.RemoveBucket("mybucket")
if err != nil {
fmt.Println(err)
return
}
```
---------------------------------------
<a name="GetBucketPolicy">
#### GetBucketPolicy(bucketName string, objectPrefix string) error
Get access permissions on a bucket or a prefix.
__Parameters__
* `bucketName` _string_ : name of the bucket
* `objectPrefix` _string_ : name of the object prefix
__Example__
```go
bucketPolicy, err := s3Client.GetBucketPolicy("mybucket", "")
if err != nil {
fmt.Println(err)
return
}
fmt.Println("Access permissions for mybucket is", bucketPolicy)
```
---------------------------------------
<a name="SetBucketPolicy">
#### SetBucketPolicy(bucketName string, objectPrefix string, policy BucketPolicy) error
Set access permissions on bucket or an object prefix.
__Parameters__
* `bucketName` _string_: name of the bucket
* `objectPrefix` _string_ : name of the object prefix
* `policy` _BucketPolicy_: policy can be _BucketPolicyNone_, _BucketPolicyReadOnly_, _BucketPolicyReadWrite_, _BucketPolicyWriteOnly_
__Example__
```go
err := s3Client.SetBucketPolicy("mybucket", "myprefix", BucketPolicyReadWrite)
if err != nil {
fmt.Println(err)
return
}
```
---------------------------------------
<a name="RemoveBucketPolicy">
#### RemoveBucketPolicy(bucketName string, objectPrefix string) error
Remove existing permissions on bucket or an object prefix.
__Parameters__
* `bucketName` _string_: name of the bucket
* `objectPrefix` _string_ : name of the object prefix
__Example__
```go
err := s3Client.RemoveBucketPolicy("mybucket", "myprefix")
if err != nil {
fmt.Println(err)
return
}
```
---------------------------------------
<a name="ListObjects">
#### ListObjects(bucketName string, prefix string, recursive bool, doneCh chan struct{}) <-chan ObjectInfo
List objects in a bucket.
__Parameters__
* `bucketName` _string_: name of the bucket
* `objectPrefix` _string_: the prefix of the objects that should be listed
* `recursive` _bool_: `true` indicates recursive style listing and `false` indicates directory style listing delimited by '/'
* `doneCh` chan struct{} : channel for pro-actively closing the internal go routine
__Return Value__
* `<-chan ObjectInfo` _chan ObjectInfo_: Read channel for all the objects in the bucket, the object is of the format:
* `objectInfo.Key` _string_: name of the object
* `objectInfo.Size` _int64_: size of the object
* `objectInfo.ETag` _string_: etag of the object
* `objectInfo.LastModified` _time.Time_: modified time stamp
__Example__
```go
// Create a done channel to control 'ListObjects' go routine.
doneCh := make(chan struct{})
// Indicate to our routine to exit cleanly upon return.
defer close(doneCh)
isRecursive := true
objectCh := s3Client.ListObjects("mybucket", "myprefix", isRecursive, doneCh)
for object := range objectCh {
if object.Err != nil {
fmt.Println(object.Err)
return
}
fmt.Println(object)
}
```
---------------------------------------
<a name="ListIncompleteUploads">
#### ListIncompleteUploads(bucketName string, prefix string, recursive bool, doneCh chan struct{}) <-chan ObjectMultipartInfo
List partially uploaded objects in a bucket.
__Parameters__
* `bucketName` _string_: name of the bucket
* `prefix` _string_: prefix of the object names that are partially uploaded
* `recursive` bool: directory style listing when false, recursive listing when true
* `doneCh` chan struct{} : channel for pro-actively closing the internal go routine
__Return Value__
* `<-chan ObjectMultipartInfo` _chan ObjectMultipartInfo_ : emits multipart objects of the format:
* `multiPartObjInfo.Key` _string_: name of the incomplete object
* `multiPartObjInfo.UploadID` _string_: upload ID of the incomplete object
* `multiPartObjInfo.Size` _int64_: size of the incompletely uploaded object
__Example__
```go
// Create a done channel to control 'ListObjects' go routine.
doneCh := make(chan struct{})
// Indicate to our routine to exit cleanly upon return.
defer close(doneCh)
isRecursive := true
multiPartObjectCh := s3Client.ListIncompleteUploads("mybucket", "myprefix", isRecursive, doneCh)
for multiPartObject := range multiPartObjectCh {
if multiPartObject.Err != nil {
fmt.Println(multiPartObject.Err)
return
}
fmt.Println(multiPartObject)
}
```
---------------------------------------
### Object operations
<a name="GetObject">
#### GetObject(bucketName string, objectName string) *Object
Download an object.
__Parameters__
* `bucketName` _string_: name of the bucket
* `objectName` _string_: name of the object
__Return Value__
* `object` _*Object_ : _Object_ represents object reader.
__Example__
```go
object, err := s3Client.GetObject("mybucket", "photo.jpg")
if err != nil {
fmt.Println(err)
return
}
localFile, err := os.Create("/tmp/local-file")
if err != nil {
fmt.Println(err)
return
}
if _, err := io.Copy(localFile, object); err != nil {
fmt.Println(err)
return
}
```
---------------------------------------
---------------------------------------
<a name="FGetObject">
#### FGetObject(bucketName string, objectName string, filePath string) error
Download an object and save it as a file in the local filesystem. Returns an `error` in case of failure, `nil` on success.
__Parameters__
* `bucketName` _string_: name of the bucket
* `objectName` _string_: name of the object
* `filePath` _string_: path to which the object data will be written to
__Example__
```go
err := s3Client.FGetObject("mybucket", "photo.jpg", "/tmp/photo.jpg")
if err != nil {
fmt.Println(err)
return
}
```
---------------------------------------
<a name="PutObject">
#### PutObject(bucketName string, objectName string, reader io.Reader, contentType string) (n int, err error)
Upload contents from `io.Reader` to objectName.
__Parameters__
* `bucketName` _string_: name of the bucket
* `objectName` _string_: name of the object
* `reader` _io.Reader_: Any golang object implementing io.Reader
* `contentType` _string_: content type of the object.
__Example__
```go
file, err := os.Open("my-testfile")
if err != nil {
fmt.Println(err)
return
}
defer file.Close()
n, err := s3Client.PutObject("my-bucketname", "my-objectname", file, "application/octet-stream")
if err != nil {
fmt.Println(err)
return
}
```
---------------------------------------
<a name="CopyObject">
#### CopyObject(bucketName string, objectName string, objectSource string, conditions CopyConditions) error
Copy a source object into a new object with the provided name in the provided bucket.
__Parameters__
* `bucketName` _string_: name of the bucket
* `objectName` _string_: name of the object
* `objectSource` _string_: name of the object source.
* `conditions` _CopyConditions_: Collection of supported CopyObject conditions. ['x-amz-copy-source', 'x-amz-copy-source-if-match', 'x-amz-copy-source-if-none-match', 'x-amz-copy-source-if-unmodified-since', 'x-amz-copy-source-if-modified-since']
__Example__
```go
// All following conditions are allowed and can be combined together.
// Set copy conditions.
var copyConds = minio.NewCopyConditions()
// Set modified condition, copy object modified since 2014 April.
copyConds.SetModified(time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC))
// Set unmodified condition, copy object unmodified since 2014 April.
// copyConds.SetUnmodified(time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC))
// Set matching ETag condition, copy object which matches the following ETag.
// copyConds.SetMatchETag("31624deb84149d2f8ef9c385918b653a")
// Set matching ETag except condition, copy object which does not match the following ETag.
// copyConds.SetMatchETagExcept("31624deb84149d2f8ef9c385918b653a")
err := s3Client.CopyObject("my-bucketname", "my-objectname", "/my-sourcebucketname/my-sourceobjectname", copyConds)
if err != nil {
fmt.Println(err)
return
}
```
---------------------------------------
<a name="FPutObject">
#### FPutObject(bucketName string, objectName string, filePath string, contentType string) error
Uploads the object using contents from a file
__Parameters__
* `bucketName` _string_: name of the bucket
* `objectName` _string_: name of the object
* `filePath` _string_: file path of the file to be uploaded
* `contentType` _string_: content type of the object
__Example__
```go
n, err := s3Client.FPutObject("my-bucketname", "my-objectname", "/tmp/my-filename.csv", "application/csv")
if err != nil {
fmt.Println(err)
return
}
```
---------------------------------------
<a name="StatObject">
#### StatObject(bucketName string, objectName string) (ObjectInfo, error)
Get metadata of an object.
__Parameters__
* `bucketName` _string_: name of the bucket
* `objectName` _string_: name of the object
__Return Value__
`objInfo` _ObjectInfo_ : object stat info for following format:
* `objInfo.Size` _int64_: size of the object
* `objInfo.ETag` _string_: etag of the object
* `objInfo.ContentType` _string_: Content-Type of the object
* `objInfo.LastModified` _string_: modified time stamp
__Example__
```go
objInfo, err := s3Client.StatObject("mybucket", "photo.jpg")
if err != nil {
fmt.Println(err)
return
}
fmt.Println(objInfo)
```
---------------------------------------
<a name="RemoveObject">
#### RemoveObject(bucketName string, objectName string) error
Remove an object.
__Parameters__
* `bucketName` _string_: name of the bucket
* `objectName` _string_: name of the object
__Example__
```go
err := s3Client.RemoveObject("mybucket", "photo.jpg")
if err != nil {
fmt.Println(err)
return
}
```
---------------------------------------
<a name="RemoveIncompleteUpload">
#### RemoveIncompleteUpload(bucketName string, objectName string) error
Remove a partially uploaded object.
__Parameters__
* `bucketName` _string_: name of the bucket
* `objectName` _string_: name of the object
__Example__
```go
err := s3Client.RemoveIncompleteUpload("mybucket", "photo.jpg")
if err != nil {
fmt.Println(err)
return
}
```
### Presigned operations
---------------------------------------
<a name="PresignedGetObject">
#### PresignedGetObject(bucketName, objectName string, expiry time.Duration, reqParams url.Values) (*url.URL, error)
Generate a presigned URL for GET.
__Parameters__
* `bucketName` _string_: name of the bucket.
* `objectName` _string_: name of the object.
* `expiry` _time.Duration_: expiry in seconds.
* `reqParams` _url.Values_ : additional response header overrides supports _response-expires_, _response-content-type_, _response-cache-control_, _response-content-disposition_
__Example__
```go
// Set request parameters for content-disposition.
reqParams := make(url.Values)
reqParams.Set("response-content-disposition", "attachment; filename=\"your-filename.txt\"")
// Generates a presigned url which expires in a day.
presignedURL, err := s3Client.PresignedGetObject("mybucket", "photo.jpg", time.Second * 24 * 60 * 60, reqParams)
if err != nil {
fmt.Println(err)
return
}
```
---------------------------------------
<a name="PresignedPutObject">
#### PresignedPutObject(bucketName string, objectName string, expiry time.Duration) (*url.URL, error)
Generate a presigned URL for PUT.
<blockquote>
NOTE: you can upload to S3 only with specified object name.
</blockquote>
__Parameters__
* `bucketName` _string_: name of the bucket
* `objectName` _string_: name of the object
* `expiry` _time.Duration_: expiry in seconds
__Example__
```go
// Generates a url which expires in a day.
presignedURL, err := s3Client.PresignedPutObject("mybucket", "photo.jpg", time.Second * 24 * 60 * 60)
if err != nil {
fmt.Println(err)
return
}
```
---------------------------------------
<a name="PresignedPostPolicy">
#### PresignedPostPolicy(policy PostPolicy) (*url.URL, map[string]string, error)
PresignedPostPolicy we can provide policies specifying conditions restricting
what you want to allow in a POST request, such as bucket name where objects can be
uploaded, key name prefixes that you want to allow for the object being created and more.
We need to create our policy first:
```go
policy := minio.NewPostPolicy()
```
Apply upload policy restrictions:
```go
policy.SetBucket("my-bucketname")
policy.SetKey("my-objectname")
policy.SetExpires(time.Now().UTC().AddDate(0, 0, 10)) // expires in 10 days
// Only allow 'png' images.
policy.SetContentType("image/png")
// Only allow content size in range 1KB to 1MB.
policy.SetContentLengthRange(1024, 1024*1024)
```
Get the POST form key/value object:
```go
url, formData, err := s3Client.PresignedPostPolicy(policy)
if err != nil {
fmt.Println(err)
return
}
```
POST your content from the command line using `curl`:
```go
fmt.Printf("curl ")
for k, v := range formData {
fmt.Printf("-F %s=%s ", k, v)
}
fmt.Printf("-F file=@/etc/bash.bashrc ")
fmt.Printf("%s\n", url)
```

View File

@ -1,83 +0,0 @@
## Ubuntu (Kylin) 14.04
### Build Dependencies
This installation guide is based on Ubuntu 14.04+ on x86-64 platform.
##### Install Git, GCC
```sh
$ sudo apt-get install git build-essential
```
##### Install Go 1.5+
Download Go 1.5+ from [https://golang.org/dl/](https://golang.org/dl/).
```sh
$ wget https://storage.googleapis.com/golang/go1.5.1.linux-amd64.tar.gz
$ mkdir -p ${HOME}/bin/
$ mkdir -p ${HOME}/go/
$ tar -C ${HOME}/bin/ -xzf go1.5.1.linux-amd64.tar.gz
```
##### Setup GOROOT and GOPATH
Add the following exports to your ``~/.bashrc``. Environment variable GOROOT specifies the location of your golang binaries
and GOPATH specifies the location of your project workspace.
```sh
export GOROOT=${HOME}/bin/go
export GOPATH=${HOME}/go
export PATH=$PATH:${HOME}/bin/go/bin:${GOPATH}/bin
```
```sh
$ source ~/.bashrc
```
##### Testing it all
```sh
$ go env
```
## OS X (Yosemite) 10.10
### Build Dependencies
This installation document assumes OS X Yosemite 10.10+ on x86-64 platform.
##### Install brew
```sh
$ ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
```
##### Install Git, Python
```sh
$ brew install git python
```
##### Install Go 1.5+
Install golang binaries using `brew`
```sh
$ brew install go
$ mkdir -p $HOME/go
```
##### Setup GOROOT and GOPATH
Add the following exports to your ``~/.bash_profile``. Environment variable GOROOT specifies the location of your golang binaries
and GOPATH specifies the location of your project workspace.
```sh
export GOPATH=${HOME}/go
export GOVERSION=$(brew list go | head -n 1 | cut -d '/' -f 6)
export GOROOT=$(brew --prefix)/Cellar/go/${GOVERSION}/libexec
export PATH=$PATH:${GOPATH}/bin
```
##### Source the new environment
```sh
$ source ~/.bash_profile
```
##### Testing it all
```sh
$ go env
```

View File

@ -1,106 +1,230 @@
# Minio Go Library for Amazon S3 Compatible Cloud Storage [![Gitter](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/minio/minio?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) # Minio Golang Library for Amazon S3 Compatible Cloud Storage [![Gitter](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/Minio/minio?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
The Minio Golang Client SDK provides simple APIs to access any Amazon S3 compatible object storage server.
## Description **List of supported cloud storage providers.**
Minio Go library is a simple client library for S3 compatible cloud storage servers. Supports AWS Signature Version 4 and 2. AWS Signature Version 4 is chosen as default. - AWS Signature Version 4
List of supported cloud storage providers.
- AWS Signature Version 4
- Amazon S3 - Amazon S3
- Minio - Minio
- AWS Signature Version 2 - AWS Signature Version 2
- Google Cloud Storage (Compatibility Mode) - Google Cloud Storage (Compatibility Mode)
- Openstack Swift + Swift3 middleware - Openstack Swift + Swift3 middleware
- Ceph Object Gateway - Ceph Object Gateway
- Riak CS - Riak CS
## Install This quickstart guide will show you how to install the client SDK and execute an example Golang program. For a complete list of APIs and examples, please take a look at the [Golang Client API Reference](https://docs.minio.io/docs/golang-client-api-reference) documentation.
If you do not have a working Golang environment, please follow [Install Golang](./INSTALLGO.md). This document assumes that you have a working [Golang](https://docs.minio.io/docs/how-to-install-golang) setup in place.
## Download from Github
```sh ```sh
$ go get github.com/minio/minio-go
$ go get -u github.com/minio/minio-go
``` ```
## Initialize Minio Client
## Example You need four items in order to connect to Minio object storage server.
### ListBuckets()
This example shows how to List your buckets.
| Params | Description|
| :--- | :--- |
| endpoint | URL to object storage service. |
| accessKeyID | Access key is like user ID that uniquely identifies your account. |
| secretAccessKey | Secret key is the password to your account. |
|secure | Set this value to 'true' to enable secure (HTTPS) access. |
```go ```go
package main package main
import ( import (
"log" "fmt"
"github.com/minio/minio-go" "github.com/minio/minio-go"
) )
func main() { func main() {
// Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access. // Use a secure connection.
// This boolean value is the last argument for New(). ssl := true
// New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically // Initialize minio client object.
// determined based on the Endpoint value. minioClient, err := minio.New("play.minio.io:9000", "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", ssl)
secure := true // Defaults to HTTPS requests. if err != nil {
s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESS-KEY-HERE", "YOUR-SECRET-KEY-HERE", secure) fmt.Println(err)
if err != nil { return
log.Fatalln(err) }
}
buckets, err := s3Client.ListBuckets()
if err != nil {
log.Fatalln(err)
}
for _, bucket := range buckets {
log.Println(bucket)
}
} }
``` ```
## Documentation ## Quick Start Example - File Uploader
[API documentation](./API.md) This example program connects to an object storage server, makes a bucket on the server and then uploads a file to the bucket.
## Examples
### Bucket Operations.
* [MakeBucket(bucketName, location) error](examples/s3/makebucket.go)
* [BucketExists(bucketName) error](examples/s3/bucketexists.go)
* [RemoveBucket(bucketName) error](examples/s3/removebucket.go)
* [ListBuckets() []BucketInfo](examples/s3/listbuckets.go)
* [ListObjects(bucketName, objectPrefix, recursive, chan<- struct{}) <-chan ObjectInfo](examples/s3/listobjects.go)
* [ListIncompleteUploads(bucketName, prefix, recursive, chan<- struct{}) <-chan ObjectMultipartInfo](examples/s3/listincompleteuploads.go)
### Object Operations.
* [PutObject(bucketName, objectName, io.Reader, contentType) error](examples/s3/putobject.go)
* [GetObject(bucketName, objectName) (*Object, error)](examples/s3/getobject.go)
* [StatObject(bucketName, objectName) (ObjectInfo, error)](examples/s3/statobject.go)
* [RemoveObject(bucketName, objectName) error](examples/s3/removeobject.go)
* [RemoveIncompleteUpload(bucketName, objectName) <-chan error](examples/s3/removeincompleteupload.go)
### File Object Operations. We will use the Minio server running at [https://play.minio.io:9000](https://play.minio.io:9000) in this example. Feel free to use this service for testing and development. Access credentials shown in this example are open to the public.
* [FPutObject(bucketName, objectName, filePath, contentType) (size, error)](examples/s3/fputobject.go)
* [FGetObject(bucketName, objectName, filePath) error](examples/s3/fgetobject.go)
### Presigned Operations. #### FileUploader.go
* [PresignedGetObject(bucketName, objectName, time.Duration, url.Values) (*url.URL, error)](examples/s3/presignedgetobject.go)
* [PresignedPutObject(bucketName, objectName, time.Duration) (*url.URL, error)](examples/s3/presignedputobject.go)
* [PresignedPostPolicy(NewPostPolicy()) (*url.URL, map[string]string, error)](examples/s3/presignedpostpolicy.go)
### Bucket Policy Operations. ```go
* [SetBucketPolicy(bucketName, objectPrefix, BucketPolicy) error](examples/s3/setbucketpolicy.go)
* [GetBucketPolicy(bucketName, objectPrefix) (BucketPolicy, error)](examples/s3/getbucketpolicy.go)
* [RemoveBucketPolicy(bucketName, objectPrefix) error](examples/s3/removebucketpolicy.go)
### API Reference package main
[![GoDoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](http://godoc.org/github.com/minio/minio-go) import "fmt"
import (
"log"
"github.com/minio/minio-go"
)
func main() {
// Use a secure connection.
ssl := true
// Initialize minio client object.
minioClient, err := minio.New("play.minio.io:9000", "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", ssl)
if err != nil {
log.Fatalln(err)
}
// Make a new bucket called mymusic.
err = minioClient.MakeBucket("mymusic", "us-east-1")
if err != nil {
log.Fatalln(err)
}
fmt.Println("Successfully created mymusic")
// Upload the zip file with FPutObject.
n, err := minioClient.FPutObject("mymusic", "golden-oldies.zip", "/tmp/golden-oldies.zip", "application/zip")
if err != nil {
log.Fatalln(err)
}
log.Printf("Successfully uploaded golden-oldies.zip of size %d\n", n)
}
```
#### Run FileUploader
```sh
$ go run FileUploader.go
$ Successfully created mymusic
$ Successfully uploaded golden-oldies.zip of size 17MiB
$ mc ls play/mymusic/
[2016-05-27 16:02:16 PDT] 17MiB golden-oldies.zip
```
## API Reference
The full API Reference is available here.
* [Complete API Reference](https://docs.minio.io/docs/golang-client-api-reference)
### API Reference : Bucket Operations
* [`MakeBucket`](https://docs.minio.io/docs/golang-client-api-reference#MakeBucket)
* [`ListBuckets`](https://docs.minio.io/docs/golang-client-api-reference#ListBuckets)
* [`BucketExists`](https://docs.minio.io/docs/golang-client-api-reference#BucketExists)
* [`RemoveBucket`](https://docs.minio.io/docs/golang-client-api-reference#RemoveBucket)
* [`ListObjects`](https://docs.minio.io/docs/golang-client-api-reference#ListObjects)
* [`ListObjectsV2`](https://docs.minio.io/docs/golang-client-api-reference#ListObjectsV2)
* [`ListIncompleteUploads`](https://docs.minio.io/docs/golang-client-api-reference#ListIncompleteUploads)
### API Reference : Bucket policy Operations
* [`SetBucketPolicy`](https://docs.minio.io/docs/golang-client-api-reference#SetBucketPolicy)
* [`GetBucketPolicy`](https://docs.minio.io/docs/golang-client-api-reference#GetBucketPolicy)
### API Reference : Bucket notification Operations
* [`SetBucketNotification`](https://docs.minio.io/docs/golang-client-api-reference#SetBucketNotification)
* [`GetBucketNotification`](https://docs.minio.io/docs/golang-client-api-reference#GetBucketNotification)
* [`DeleteBucketNotification`](https://docs.minio.io/docs/golang-client-api-reference#DeleteBucketNotification)
### API Reference : File Object Operations
* [`FPutObject`](https://docs.minio.io/docs/golang-client-api-reference#FPutObject)
* [`FGetObject`](https://docs.minio.io/docs/golang-client-api-reference#FGetObject)
* [`CopyObject`](https://docs.minio.io/docs/golang-client-api-reference#CopyObject)
### API Reference : Object Operations
* [`GetObject`](https://docs.minio.io/docs/golang-client-api-reference#GetObject)
* [`PutObject`](https://docs.minio.io/docs/golang-client-api-reference#PutObject)
* [`StatObject`](https://docs.minio.io/docs/golang-client-api-reference#StatObject)
* [`RemoveObject`](https://docs.minio.io/docs/golang-client-api-reference#RemoveObject)
* [`RemoveIncompleteUpload`](https://docs.minio.io/docs/golang-client-api-reference#RemoveIncompleteUpload)
### API Reference : Presigned Operations
* [`PresignedGetObject`](https://docs.minio.io/docs/golang-client-api-reference#PresignedGetObject)
* [`PresignedPutObject`](https://docs.minio.io/docs/golang-client-api-reference#PresignedPutObject)
* [`PresignedPostPolicy`](https://docs.minio.io/docs/golang-client-api-reference#PresignedPostPolicy)
## Full Examples
#### Full Examples : Bucket Operations
* [listbuckets.go](https://github.com/minio/minio-go/blob/master/examples/s3/listbuckets.go)
* [listobjects.go](https://github.com/minio/minio-go/blob/master/examples/s3/listobjects.go)
* [bucketexists.go](https://github.com/minio/minio-go/blob/master/examples/s3/bucketexists.go)
* [makebucket.go](https://github.com/minio/minio-go/blob/master/examples/s3/makebucket.go)
* [removebucket.go](https://github.com/minio/minio-go/blob/master/examples/s3/removebucket.go)
* [listincompleteuploads.go](https://github.com/minio/minio-go/blob/master/examples/s3/listincompleteuploads.go)
#### Full Examples : Bucket policy Operations
* [setbucketpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketpolicy.go)
* [getbucketpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketpolicy.go)
#### Full Examples : Bucket notification Operations
* [setbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketnotification.go)
* [getbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketnotification.go)
* [deletebucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/deletebucketnotification.go)
#### Full Examples : File Object Operations
* [fputobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/fputobject.go)
* [fgetobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/fgetobject.go)
* [copyobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/copyobject.go)
#### Full Examples : Object Operations
* [putobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/putobject.go)
* [getobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/getobject.go)
* [listobjects.go](https://github.com/minio/minio-go/blob/master/examples/s3/listobjects.go)
* [listobjectsV2.go](https://github.com/minio/minio-go/blob/master/examples/s3/listobjectsV2.go)
* [removeobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeobject.go)
* [statobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/statobject.go)
#### Full Examples : Presigned Operations
* [presignedgetobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedgetobject.go)
* [presignedputobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedputobject.go)
* [presignedpostpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedpostpolicy.go)
## Explore Further
* [Complete Documentation](https://docs.minio.io)
* [Minio Golang Client SDK API Reference](https://docs.minio.io/docs/golang-client-api-reference)
* [Go Music Player App- Full Application Example ](https://docs.minio.io/docs/go-music-player-app)
## Contribute ## Contribute
[Contributors Guide](./CONTRIBUTING.md) [Contributors Guide](https://github.com/minio/minio-go/blob/master/CONTRIBUTING.md)
[![Build Status](https://travis-ci.org/minio/minio-go.svg)](https://travis-ci.org/minio/minio-go)
[![Build status](https://ci.appveyor.com/api/projects/status/1d05e6nvxcelmrak?svg=true)](https://ci.appveyor.com/project/harshavardhana/minio-go)
[![Build Status](https://travis-ci.org/minio/minio-go.svg)](https://travis-ci.org/minio/minio-go) [![Build status](https://ci.appveyor.com/api/projects/status/1ep7n2resn6fk1w6?svg=true)](https://ci.appveyor.com/project/harshavardhana/minio-go)

View File

@ -67,6 +67,10 @@ func (c Client) GetObject(bucketName, objectName string) (*Object, error) {
case req := <-reqCh: case req := <-reqCh:
// Offset changes fetch the new object at an Offset. // Offset changes fetch the new object at an Offset.
if req.DidOffsetChange { if req.DidOffsetChange {
if httpReader != nil {
// Close previously opened http reader.
httpReader.Close()
}
// Read from offset. // Read from offset.
httpReader, _, err = c.getObject(bucketName, objectName, req.Offset, 0) httpReader, _, err = c.getObject(bucketName, objectName, req.Offset, 0)
if err != nil { if err != nil {

View File

@ -53,6 +53,179 @@ func (c Client) ListBuckets() ([]BucketInfo, error) {
return listAllMyBucketsResult.Buckets.Bucket, nil return listAllMyBucketsResult.Buckets.Bucket, nil
} }
/// Bucket Read Operations.

// ListObjectsV2 lists all objects matching the objectPrefix from
// the specified bucket. If recursion is enabled it would list
// all subdirectories and all its contents.
//
// Your input parameters are just bucketName, objectPrefix, recursive
// and a done channel for pro-actively closing the internal go
// routine. If you enable recursive as 'true' this function will
// return back all the objects in a given bucket name and object
// prefix.
//
//   api := client.New(....)
//   // Create a done channel.
//   doneCh := make(chan struct{})
//   defer close(doneCh)
//   // Recursively list all objects in 'mytestbucket'
//   recursive := true
//   for message := range api.ListObjectsV2("mytestbucket", "starthere", recursive, doneCh) {
//       fmt.Println(message)
//   }
//
func (c Client) ListObjectsV2(bucketName, objectPrefix string, recursive bool, doneCh <-chan struct{}) <-chan ObjectInfo {
	// Allocate new list objects channel. Buffer of one lets the
	// validation error sends below complete without a receiver.
	objectStatCh := make(chan ObjectInfo, 1)
	// Default listing is delimited at "/"
	delimiter := "/"
	if recursive {
		// If recursive we do not delimit.
		delimiter = ""
	}
	// Validate bucket name.
	if err := isValidBucketName(bucketName); err != nil {
		defer close(objectStatCh)
		objectStatCh <- ObjectInfo{
			Err: err,
		}
		return objectStatCh
	}
	// Validate incoming object prefix.
	if err := isValidObjectPrefix(objectPrefix); err != nil {
		defer close(objectStatCh)
		objectStatCh <- ObjectInfo{
			Err: err,
		}
		return objectStatCh
	}
	// Initiate list objects goroutine here.
	go func(objectStatCh chan<- ObjectInfo) {
		defer close(objectStatCh)
		// Save continuationToken for next request.
		var continuationToken string
		for {
			// Get list of objects a maximum of 1000 per request.
			result, err := c.listObjectsV2Query(bucketName, objectPrefix, continuationToken, delimiter, 1000)
			if err != nil {
				objectStatCh <- ObjectInfo{
					Err: err,
				}
				return
			}
			// If contents are available loop through and send over channel.
			for _, object := range result.Contents {
				select {
				// Send object content.
				case objectStatCh <- object:
				// If receives done from the caller, return here.
				case <-doneCh:
					return
				}
			}
			// Send all common prefixes if any.
			// NOTE: prefixes are only present if the request is delimited.
			for _, obj := range result.CommonPrefixes {
				object := ObjectInfo{}
				object.Key = obj.Prefix
				object.Size = 0
				select {
				// Send object prefixes.
				case objectStatCh <- object:
				// If receives done from the caller, return here.
				case <-doneCh:
					return
				}
			}
			// Listing ends when the result is not truncated.
			if !result.IsTruncated {
				return
			}
			// Defensive: a truncated listing must carry a non-empty
			// continuation token; without one we would re-request the
			// same page forever against a non-conforming server.
			if result.NextContinuationToken == "" {
				objectStatCh <- ObjectInfo{
					Err: ErrInvalidArgument("Truncated listing response is missing NextContinuationToken."),
				}
				return
			}
			// Save continuation token for the next request.
			continuationToken = result.NextContinuationToken
		}
	}(objectStatCh)
	return objectStatCh
}
// listObjectsV2Query - (List Objects V2) - List some or all (up to 1000) of the objects in a bucket.
//
// You can use the request parameters as selection criteria to return a subset of the objects in a bucket.
// request parameters :-
// ---------
// ?continuation-token - Specifies the key to start with when listing objects in a bucket.
// ?delimiter - A delimiter is a character you use to group keys.
// ?prefix - Limits the response to keys that begin with the specified prefix.
// ?max-keys - Sets the maximum number of keys returned in the response body.
func (c Client) listObjectsV2Query(bucketName, objectPrefix, continuationToken, delimiter string, maxkeys int) (listBucketV2Result, error) {
	// Validate bucket name.
	if err := isValidBucketName(bucketName); err != nil {
		return listBucketV2Result{}, err
	}
	// Validate object prefix.
	if err := isValidObjectPrefix(objectPrefix); err != nil {
		return listBucketV2Result{}, err
	}
	// Get resources properly escaped and lined up before
	// using them in http request.
	urlValues := make(url.Values)
	// Always set list-type in ListObjects V2
	urlValues.Set("list-type", "2")
	// Set object prefix.
	if objectPrefix != "" {
		urlValues.Set("prefix", objectPrefix)
	}
	// Set continuation token
	if continuationToken != "" {
		urlValues.Set("continuation-token", continuationToken)
	}
	// Set delimiter.
	if delimiter != "" {
		urlValues.Set("delimiter", delimiter)
	}
	// maxkeys should default to 1000 or less. Guard against zero and
	// negative values as well, which would otherwise be forwarded to
	// the server verbatim.
	if maxkeys <= 0 || maxkeys > 1000 {
		maxkeys = 1000
	}
	// Set max keys.
	urlValues.Set("max-keys", fmt.Sprintf("%d", maxkeys))
	// Execute GET on bucket to list objects.
	resp, err := c.executeMethod("GET", requestMetadata{
		bucketName:  bucketName,
		queryValues: urlValues,
	})
	defer closeResponse(resp)
	if err != nil {
		return listBucketV2Result{}, err
	}
	if resp != nil {
		if resp.StatusCode != http.StatusOK {
			return listBucketV2Result{}, httpRespToErrorResponse(resp, bucketName, "")
		}
	}
	// Decode listObjects V2 XML response.
	listBucketResult := listBucketV2Result{}
	err = xmlDecoder(resp.Body, &listBucketResult)
	if err != nil {
		return listBucketResult, err
	}
	return listBucketResult, nil
}
// ListObjects - (List Objects) - List some objects or all recursively. // ListObjects - (List Objects) - List some objects or all recursively.
// //
// ListObjects lists all objects matching the objectPrefix from // ListObjects lists all objects matching the objectPrefix from
@ -158,8 +331,6 @@ func (c Client) ListObjects(bucketName, objectPrefix string, recursive bool, don
return objectStatCh return objectStatCh
} }
/// Bucket Read Operations.
// listObjects - (List Objects) - List some or all (up to 1000) of the objects in a bucket. // listObjects - (List Objects) - List some or all (up to 1000) of the objects in a bucket.
// //
// You can use the request parameters as selection criteria to return a subset of the objects in a bucket. // You can use the request parameters as selection criteria to return a subset of the objects in a bucket.

View File

@ -0,0 +1,69 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package minio
import (
"net/http"
"net/url"
)
// GetBucketNotification - get bucket notification at a given path.
// Returns the notification configuration currently set on bucketName.
func (c Client) GetBucketNotification(bucketName string) (bucketNotification BucketNotification, err error) {
	// Reject malformed bucket names up front.
	if err := isValidBucketName(bucketName); err != nil {
		return BucketNotification{}, err
	}
	n, err := c.getBucketNotification(bucketName)
	if err != nil {
		return BucketNotification{}, err
	}
	return n, nil
}
// getBucketNotification - request the server for the notification rules
// configured on bucketName.
func (c Client) getBucketNotification(bucketName string) (BucketNotification, error) {
	urlValues := make(url.Values)
	// The empty "notification" query parameter selects the bucket
	// notification sub-resource.
	urlValues.Set("notification", "")
	// Execute GET on bucket to fetch its notification configuration.
	resp, err := c.executeMethod("GET", requestMetadata{
		bucketName:  bucketName,
		queryValues: urlValues,
	})
	// NOTE(review): defer before the error check assumes closeResponse
	// tolerates a nil response — the same pattern is used throughout
	// this package (see listObjectsV2Query, removeBucketPolicy).
	defer closeResponse(resp)
	if err != nil {
		return BucketNotification{}, err
	}
	return processBucketNotificationResponse(bucketName, resp)
}
// processBucketNotificationResponse - processes the GetNotification http
// response from the server, converting a non-200 status into an error
// response and otherwise decoding the XML body.
func processBucketNotificationResponse(bucketName string, resp *http.Response) (BucketNotification, error) {
	// Anything other than 200 OK is an API-level failure.
	if resp.StatusCode != http.StatusOK {
		return BucketNotification{}, httpRespToErrorResponse(resp, bucketName, "")
	}
	// Decode the notification configuration XML.
	var notification BucketNotification
	if err := xmlDecoder(resp.Body, &notification); err != nil {
		return BucketNotification{}, err
	}
	return notification, nil
}

View File

@ -22,12 +22,13 @@ import (
"time" "time"
) )
// supportedGetReqParams - supported request parameters for GET // supportedGetReqParams - supported request parameters for GET presigned request.
// presigned request.
var supportedGetReqParams = map[string]struct{}{ var supportedGetReqParams = map[string]struct{}{
"response-expires": {}, "response-expires": {},
"response-content-type": {}, "response-content-type": {},
"response-cache-control": {}, "response-cache-control": {},
"response-content-language": {},
"response-content-encoding": {},
"response-content-disposition": {}, "response-content-disposition": {},
} }
@ -66,8 +67,7 @@ func (c Client) presignURL(method string, bucketName string, objectName string,
return nil, ErrInvalidArgument(k + " unsupported request parameter for presigned GET.") return nil, ErrInvalidArgument(k + " unsupported request parameter for presigned GET.")
} }
} }
// Save the request parameters to be used in presigning for // Save the request parameters to be used in presigning for GET request.
// GET request.
reqMetadata.queryValues = reqParams reqMetadata.queryValues = reqParams
} }

View File

@ -88,7 +88,10 @@ func (c Client) makeBucketRequest(bucketName string, location string) (*http.Req
// is the preferred method here. The final location of the // is the preferred method here. The final location of the
// 'bucket' is provided through XML LocationConstraint data with // 'bucket' is provided through XML LocationConstraint data with
// the request. // the request.
targetURL := *c.endpointURL targetURL, err := url.Parse(c.endpointURL)
if err != nil {
return nil, err
}
targetURL.Path = "/" + bucketName + "/" targetURL.Path = "/" + bucketName + "/"
// get a new HTTP request for the method. // get a new HTTP request for the method.
@ -163,8 +166,8 @@ func (c Client) SetBucketPolicy(bucketName string, objectPrefix string, bucketPo
} }
// For bucket policy set to 'none' we need to remove the policy. // For bucket policy set to 'none' we need to remove the policy.
if bucketPolicy == BucketPolicyNone && policy.Statements == nil { if bucketPolicy == BucketPolicyNone && policy.Statements == nil {
// No policies to set, return success. // No policy exists on the given prefix so return with ErrNoSuchBucketPolicy.
return nil return ErrNoSuchBucketPolicy(fmt.Sprintf("No policy exists on %s/%s", bucketName, objectPrefix))
} }
// Remove any previous policies at this path. // Remove any previous policies at this path.
statements := removeBucketPolicyStatement(policy.Statements, bucketName, objectPrefix) statements := removeBucketPolicyStatement(policy.Statements, bucketName, objectPrefix)
@ -176,10 +179,19 @@ func (c Client) SetBucketPolicy(bucketName string, objectPrefix string, bucketPo
} }
statements = append(statements, generatedStatements...) statements = append(statements, generatedStatements...)
// No change in the statements indicates an attempt of setting 'none' on a prefix // No change in the statements indicates either an attempt of setting 'none'
// which doesn't have a pre-existing policy. // on a prefix which doesn't have a pre-existing policy, or setting a policy
// on a prefix which already has the same policy.
if reflect.DeepEqual(policy.Statements, statements) { if reflect.DeepEqual(policy.Statements, statements) {
return ErrNoSuchBucketPolicy(fmt.Sprintf("No policy exists on %s/%s", bucketName, objectPrefix)) // If policy being set is 'none' return an error, otherwise return nil to
// prevent the unnecessary request from being sent
var err error
if bucketPolicy == BucketPolicyNone {
err = ErrNoSuchBucketPolicy(fmt.Sprintf("No policy exists on %s/%s", bucketName, objectPrefix))
} else {
err = nil
}
return err
} }
policy.Statements = statements policy.Statements = statements
@ -232,3 +244,72 @@ func (c Client) putBucketPolicy(bucketName string, policy BucketAccessPolicy) er
} }
return nil return nil
} }
// removeBucketPolicy removes all policies on a bucket by deleting its
// "policy" sub-resource.
func (c Client) removeBucketPolicy(bucketName string) error {
	// Input validation.
	if err := isValidBucketName(bucketName); err != nil {
		return err
	}
	// Get resources properly escaped and lined up before
	// using them in http request.
	urlValues := make(url.Values)
	urlValues.Set("policy", "")
	// Execute DELETE on the bucket's policy sub-resource.
	resp, err := c.executeMethod("DELETE", requestMetadata{
		bucketName:  bucketName,
		queryValues: urlValues,
	})
	defer closeResponse(resp)
	if err != nil {
		return err
	}
	// Surface server-side failures instead of silently ignoring them:
	// a successful policy delete returns 204 No Content.
	if resp != nil && resp.StatusCode != http.StatusNoContent {
		return httpRespToErrorResponse(resp, bucketName, "")
	}
	return nil
}
// SetBucketNotification saves a new bucket notification.
// The supplied configuration replaces whatever is currently set on the
// bucket.
func (c Client) SetBucketNotification(bucketName string, bucketNotification BucketNotification) error {
	// Input validation.
	if err := isValidBucketName(bucketName); err != nil {
		return err
	}
	// Marshal the configuration up front so the request body can be
	// sized and checksummed.
	payload, err := xml.Marshal(bucketNotification)
	if err != nil {
		return err
	}
	// The empty "notification" query parameter selects the bucket
	// notification sub-resource.
	query := make(url.Values)
	query.Set("notification", "")
	// Execute PUT to upload the new bucket notification configuration.
	resp, err := c.executeMethod("PUT", requestMetadata{
		bucketName:         bucketName,
		queryValues:        query,
		contentBody:        bytes.NewReader(payload),
		contentLength:      int64(len(payload)),
		contentMD5Bytes:    sumMD5(payload),
		contentSHA256Bytes: sum256(payload),
	})
	defer closeResponse(resp)
	if err != nil {
		return err
	}
	if resp != nil && resp.StatusCode != http.StatusOK {
		return httpRespToErrorResponse(resp, bucketName, "")
	}
	return nil
}
// DeleteBucketNotification - Remove bucket notification clears all previously specified config.
// Implemented by uploading an empty BucketNotification configuration.
func (c Client) DeleteBucketNotification(bucketName string) error {
	return c.SetBucketNotification(bucketName, BucketNotification{})
}

View File

@ -24,6 +24,7 @@ import (
"io" "io"
"io/ioutil" "io/ioutil"
"net/http" "net/http"
"net/url"
"testing" "testing"
) )
@ -33,11 +34,14 @@ func TestMakeBucketRequest(t *testing.T) {
// Used for asserting with the actual request generated. // Used for asserting with the actual request generated.
createExpectedRequest := func(c *Client, bucketName string, location string, req *http.Request) (*http.Request, error) { createExpectedRequest := func(c *Client, bucketName string, location string, req *http.Request) (*http.Request, error) {
targetURL := *c.endpointURL targetURL, err := url.Parse(c.endpointURL)
if err != nil {
return nil, err
}
targetURL.Path = "/" + bucketName + "/" targetURL.Path = "/" + bucketName + "/"
// get a new HTTP request for the method. // get a new HTTP request for the method.
req, err := http.NewRequest("PUT", targetURL.String(), nil) req, err = http.NewRequest("PUT", targetURL.String(), nil)
if err != nil { if err != nil {
return nil, err return nil, err
} }

View File

@ -17,11 +17,10 @@
package minio package minio
import ( import (
"crypto/md5"
"crypto/sha256"
"fmt" "fmt"
"hash" "hash"
"io" "io"
"io/ioutil"
"math" "math"
"os" "os"
) )
@ -101,15 +100,10 @@ func optimalPartInfo(objectSize int64) (totalPartsCount int, partSize int64, las
// //
// Stages reads from offsets into the buffer, if buffer is nil it is // Stages reads from offsets into the buffer, if buffer is nil it is
// initialized to optimalBufferSize. // initialized to optimalBufferSize.
func (c Client) hashCopyBuffer(writer io.Writer, reader io.ReaderAt, buf []byte) (md5Sum, sha256Sum []byte, size int64, err error) { func hashCopyBuffer(hashAlgorithms map[string]hash.Hash, hashSums map[string][]byte, writer io.Writer, reader io.ReaderAt, buf []byte) (size int64, err error) {
// MD5 and SHA256 hasher. hashWriter := writer
var hashMD5, hashSHA256 hash.Hash for _, v := range hashAlgorithms {
// MD5 and SHA256 hasher. hashWriter = io.MultiWriter(hashWriter, v)
hashMD5 = md5.New()
hashWriter := io.MultiWriter(writer, hashMD5)
if c.signature.isV4() {
hashSHA256 = sha256.New()
hashWriter = io.MultiWriter(writer, hashMD5, hashSHA256)
} }
// Buffer is nil, initialize. // Buffer is nil, initialize.
@ -126,15 +120,15 @@ func (c Client) hashCopyBuffer(writer io.Writer, reader io.ReaderAt, buf []byte)
readAtSize, rerr := reader.ReadAt(buf, readAtOffset) readAtSize, rerr := reader.ReadAt(buf, readAtOffset)
if rerr != nil { if rerr != nil {
if rerr != io.EOF { if rerr != io.EOF {
return nil, nil, 0, rerr return 0, rerr
} }
} }
writeSize, werr := hashWriter.Write(buf[:readAtSize]) writeSize, werr := hashWriter.Write(buf[:readAtSize])
if werr != nil { if werr != nil {
return nil, nil, 0, werr return 0, werr
} }
if readAtSize != writeSize { if readAtSize != writeSize {
return nil, nil, 0, fmt.Errorf("Read size was not completely written to writer. wanted %d, got %d - %s", readAtSize, writeSize, reportIssue) return 0, fmt.Errorf("Read size was not completely written to writer. wanted %d, got %d - %s", readAtSize, writeSize, reportIssue)
} }
readAtOffset += int64(writeSize) readAtOffset += int64(writeSize)
size += int64(writeSize) size += int64(writeSize)
@ -143,52 +137,17 @@ func (c Client) hashCopyBuffer(writer io.Writer, reader io.ReaderAt, buf []byte)
} }
} }
// Finalize md5 sum and sha256 sum. for k, v := range hashAlgorithms {
md5Sum = hashMD5.Sum(nil) hashSums[k] = v.Sum(nil)
if c.signature.isV4() {
sha256Sum = hashSHA256.Sum(nil)
} }
return md5Sum, sha256Sum, size, err return size, err
} }
// hashCopy is identical to hashCopyN except that it doesn't take // hashCopyN - Calculates chosen hashes up to partSize amount of bytes.
// any size argument. func hashCopyN(hashAlgorithms map[string]hash.Hash, hashSums map[string][]byte, writer io.Writer, reader io.Reader, partSize int64) (size int64, err error) {
func (c Client) hashCopy(writer io.Writer, reader io.Reader) (md5Sum, sha256Sum []byte, size int64, err error) { hashWriter := writer
// MD5 and SHA256 hasher. for _, v := range hashAlgorithms {
var hashMD5, hashSHA256 hash.Hash hashWriter = io.MultiWriter(hashWriter, v)
// MD5 and SHA256 hasher.
hashMD5 = md5.New()
hashWriter := io.MultiWriter(writer, hashMD5)
if c.signature.isV4() {
hashSHA256 = sha256.New()
hashWriter = io.MultiWriter(writer, hashMD5, hashSHA256)
}
// Using copyBuffer to copy in large buffers, default buffer
// for io.Copy of 32KiB is too small.
size, err = io.Copy(hashWriter, reader)
if err != nil {
return nil, nil, 0, err
}
// Finalize md5 sum and sha256 sum.
md5Sum = hashMD5.Sum(nil)
if c.signature.isV4() {
sha256Sum = hashSHA256.Sum(nil)
}
return md5Sum, sha256Sum, size, err
}
// hashCopyN - Calculates Md5sum and SHA256sum for up to partSize amount of bytes.
func (c Client) hashCopyN(writer io.Writer, reader io.Reader, partSize int64) (md5Sum, sha256Sum []byte, size int64, err error) {
// MD5 and SHA256 hasher.
var hashMD5, hashSHA256 hash.Hash
// MD5 and SHA256 hasher.
hashMD5 = md5.New()
hashWriter := io.MultiWriter(writer, hashMD5)
if c.signature.isV4() {
hashSHA256 = sha256.New()
hashWriter = io.MultiWriter(writer, hashMD5, hashSHA256)
} }
// Copies to input at writer. // Copies to input at writer.
@ -196,16 +155,14 @@ func (c Client) hashCopyN(writer io.Writer, reader io.Reader, partSize int64) (m
if err != nil { if err != nil {
// If not EOF return error right here. // If not EOF return error right here.
if err != io.EOF { if err != io.EOF {
return nil, nil, 0, err return 0, err
} }
} }
// Finalize md5shum and sha256 sum. for k, v := range hashAlgorithms {
md5Sum = hashMD5.Sum(nil) hashSums[k] = v.Sum(nil)
if c.signature.isV4() {
sha256Sum = hashSHA256.Sum(nil)
} }
return md5Sum, sha256Sum, size, err return size, err
} }
// getUploadID - fetch upload id if already present for an object name // getUploadID - fetch upload id if already present for an object name
@ -243,33 +200,26 @@ func (c Client) getUploadID(bucketName, objectName, contentType string) (uploadI
return uploadID, isNew, nil return uploadID, isNew, nil
} }
// computeHash - Calculates MD5 and SHA256 for an input read Seeker. // computeHash - Calculates hashes for an input read Seeker.
func (c Client) computeHash(reader io.ReadSeeker) (md5Sum, sha256Sum []byte, size int64, err error) { func computeHash(hashAlgorithms map[string]hash.Hash, hashSums map[string][]byte, reader io.ReadSeeker) (size int64, err error) {
// MD5 and SHA256 hasher. hashWriter := ioutil.Discard
var hashMD5, hashSHA256 hash.Hash for _, v := range hashAlgorithms {
// MD5 and SHA256 hasher. hashWriter = io.MultiWriter(hashWriter, v)
hashMD5 = md5.New()
hashWriter := io.MultiWriter(hashMD5)
if c.signature.isV4() {
hashSHA256 = sha256.New()
hashWriter = io.MultiWriter(hashMD5, hashSHA256)
} }
// If no buffer is provided, no need to allocate just use io.Copy. // If no buffer is provided, no need to allocate just use io.Copy.
size, err = io.Copy(hashWriter, reader) size, err = io.Copy(hashWriter, reader)
if err != nil { if err != nil {
return nil, nil, 0, err return 0, err
} }
// Seek back reader to the beginning location. // Seek back reader to the beginning location.
if _, err := reader.Seek(0, 0); err != nil { if _, err := reader.Seek(0, 0); err != nil {
return nil, nil, 0, err return 0, err
} }
// Finalize md5shum and sha256 sum. for k, v := range hashAlgorithms {
md5Sum = hashMD5.Sum(nil) hashSums[k] = v.Sum(nil)
if c.signature.isV4() {
sha256Sum = hashSHA256.Sum(nil)
} }
return md5Sum, sha256Sum, size, nil return size, nil
} }

View File

@ -38,7 +38,7 @@ func (c Client) CopyObject(bucketName string, objectName string, objectSource st
} }
// Set copy source. // Set copy source.
customHeaders.Set("x-amz-copy-source", objectSource) customHeaders.Set("x-amz-copy-source", urlEncodePath(objectSource))
// Execute PUT on objectName. // Execute PUT on objectName.
resp, err := c.executeMethod("PUT", requestMetadata{ resp, err := c.executeMethod("PUT", requestMetadata{

View File

@ -17,8 +17,11 @@
package minio package minio
import ( import (
"crypto/md5"
"crypto/sha256"
"encoding/hex" "encoding/hex"
"fmt" "fmt"
"hash"
"io" "io"
"io/ioutil" "io/ioutil"
"mime" "mime"
@ -176,10 +179,17 @@ func (c Client) putObjectMultipartFromFile(bucketName, objectName string, fileRe
// Get a section reader on a particular offset. // Get a section reader on a particular offset.
sectionReader := io.NewSectionReader(fileReader, totalUploadedSize, partSize) sectionReader := io.NewSectionReader(fileReader, totalUploadedSize, partSize)
// Calculates MD5 and SHA256 sum for a section reader. // Add hash algorithms that need to be calculated by computeHash()
var md5Sum, sha256Sum []byte // In case of a non-v4 signature or https connection, sha256 is not needed.
hashAlgos := make(map[string]hash.Hash)
hashSums := make(map[string][]byte)
hashAlgos["md5"] = md5.New()
if c.signature.isV4() && !c.secure {
hashAlgos["sha256"] = sha256.New()
}
var prtSize int64 var prtSize int64
md5Sum, sha256Sum, prtSize, err = c.computeHash(sectionReader) prtSize, err = computeHash(hashAlgos, hashSums, sectionReader)
if err != nil { if err != nil {
return 0, err return 0, err
} }
@ -191,14 +201,14 @@ func (c Client) putObjectMultipartFromFile(bucketName, objectName string, fileRe
// Verify if part should be uploaded. // Verify if part should be uploaded.
if shouldUploadPart(objectPart{ if shouldUploadPart(objectPart{
ETag: hex.EncodeToString(md5Sum), ETag: hex.EncodeToString(hashSums["md5"]),
PartNumber: partNumber, PartNumber: partNumber,
Size: prtSize, Size: prtSize,
}, partsInfo) { }, partsInfo) {
// Proceed to upload the part. // Proceed to upload the part.
var objPart objectPart var objPart objectPart
objPart, err = c.uploadPart(bucketName, objectName, uploadID, reader, partNumber, objPart, err = c.uploadPart(bucketName, objectName, uploadID, reader, partNumber,
md5Sum, sha256Sum, prtSize) hashSums["md5"], hashSums["sha256"], prtSize)
if err != nil { if err != nil {
return totalUploadedSize, err return totalUploadedSize, err
} }

View File

@ -18,8 +18,11 @@ package minio
import ( import (
"bytes" "bytes"
"crypto/md5"
"crypto/sha256"
"encoding/hex" "encoding/hex"
"encoding/xml" "encoding/xml"
"hash"
"io" "io"
"io/ioutil" "io/ioutil"
"net/http" "net/http"
@ -112,9 +115,18 @@ func (c Client) putObjectMultipartStream(bucketName, objectName string, reader i
tmpBuffer := new(bytes.Buffer) tmpBuffer := new(bytes.Buffer)
for partNumber <= totalPartsCount { for partNumber <= totalPartsCount {
// Calculates MD5 and SHA256 sum while copying partSize bytes
// into tmpBuffer. // Choose hash algorithms to be calculated by hashCopyN, avoid sha256
md5Sum, sha256Sum, prtSize, rErr := c.hashCopyN(tmpBuffer, reader, partSize) // with non-v4 signature request or HTTPS connection
hashSums := make(map[string][]byte)
hashAlgos := make(map[string]hash.Hash)
hashAlgos["md5"] = md5.New()
if c.signature.isV4() && !c.secure {
hashAlgos["sha256"] = sha256.New()
}
// Calculates hash sums while copying partSize bytes into tmpBuffer.
prtSize, rErr := hashCopyN(hashAlgos, hashSums, tmpBuffer, reader, partSize)
if rErr != nil { if rErr != nil {
if rErr != io.EOF { if rErr != io.EOF {
return 0, rErr return 0, rErr
@ -128,13 +140,13 @@ func (c Client) putObjectMultipartStream(bucketName, objectName string, reader i
// Verify if part should be uploaded. // Verify if part should be uploaded.
if shouldUploadPart(objectPart{ if shouldUploadPart(objectPart{
ETag: hex.EncodeToString(md5Sum), ETag: hex.EncodeToString(hashSums["md5"]),
PartNumber: partNumber, PartNumber: partNumber,
Size: prtSize, Size: prtSize,
}, partsInfo) { }, partsInfo) {
// Proceed to upload the part. // Proceed to upload the part.
var objPart objectPart var objPart objectPart
objPart, err = c.uploadPart(bucketName, objectName, uploadID, reader, partNumber, md5Sum, sha256Sum, prtSize) objPart, err = c.uploadPart(bucketName, objectName, uploadID, reader, partNumber, hashSums["md5"], hashSums["sha256"], prtSize)
if err != nil { if err != nil {
// Reset the temporary buffer upon any error. // Reset the temporary buffer upon any error.
tmpBuffer.Reset() tmpBuffer.Reset()
@ -351,11 +363,32 @@ func (c Client) completeMultipartUpload(bucketName, objectName, uploadID string,
return completeMultipartUploadResult{}, httpRespToErrorResponse(resp, bucketName, objectName) return completeMultipartUploadResult{}, httpRespToErrorResponse(resp, bucketName, objectName)
} }
} }
// Read resp.Body into a []bytes to parse for Error response inside the body
var b []byte
b, err = ioutil.ReadAll(resp.Body)
if err != nil {
return completeMultipartUploadResult{}, err
}
// Decode completed multipart upload response on success. // Decode completed multipart upload response on success.
completeMultipartUploadResult := completeMultipartUploadResult{} completeMultipartUploadResult := completeMultipartUploadResult{}
err = xmlDecoder(resp.Body, &completeMultipartUploadResult) err = xmlDecoder(bytes.NewReader(b), &completeMultipartUploadResult)
if err != nil { if err != nil {
// xml parsing failure due to presence an ill-formed xml fragment
return completeMultipartUploadResult, err return completeMultipartUploadResult, err
} else if completeMultipartUploadResult.Bucket == "" {
// xml's Decode method ignores well-formed xml that don't apply to the type of value supplied.
// In this case, it would leave completeMultipartUploadResult with the corresponding zero-values
// of the members.
// Decode completed multipart upload response on failure
completeMultipartUploadErr := ErrorResponse{}
err = xmlDecoder(bytes.NewReader(b), &completeMultipartUploadErr)
if err != nil {
// xml parsing failure due to presence an ill-formed xml fragment
return completeMultipartUploadResult, err
}
return completeMultipartUploadResult, completeMultipartUploadErr
} }
return completeMultipartUploadResult, nil return completeMultipartUploadResult, nil
} }

View File

@ -18,6 +18,9 @@ package minio
import ( import (
"bytes" "bytes"
"crypto/md5"
"crypto/sha256"
"hash"
"io" "io"
"io/ioutil" "io/ioutil"
"sort" "sort"
@ -144,10 +147,17 @@ func (c Client) putObjectMultipartFromReadAt(bucketName, objectName string, read
// Get a section reader on a particular offset. // Get a section reader on a particular offset.
sectionReader := io.NewSectionReader(reader, readOffset, missingPartSize) sectionReader := io.NewSectionReader(reader, readOffset, missingPartSize)
// Calculates MD5 and SHA256 sum for a section reader. // Choose the needed hash algorithms to be calculated by hashCopyBuffer.
var md5Sum, sha256Sum []byte // Sha256 is avoided in non-v4 signature requests or HTTPS connections
hashSums := make(map[string][]byte)
hashAlgos := make(map[string]hash.Hash)
hashAlgos["md5"] = md5.New()
if c.signature.isV4() && !c.secure {
hashAlgos["sha256"] = sha256.New()
}
var prtSize int64 var prtSize int64
md5Sum, sha256Sum, prtSize, err = c.hashCopyBuffer(tmpBuffer, sectionReader, readAtBuffer) prtSize, err = hashCopyBuffer(hashAlgos, hashSums, tmpBuffer, sectionReader, readAtBuffer)
if err != nil { if err != nil {
return 0, err return 0, err
} }
@ -159,7 +169,7 @@ func (c Client) putObjectMultipartFromReadAt(bucketName, objectName string, read
// Proceed to upload the part. // Proceed to upload the part.
var objPart objectPart var objPart objectPart
objPart, err = c.uploadPart(bucketName, objectName, uploadID, reader, partNumber, md5Sum, sha256Sum, prtSize) objPart, err = c.uploadPart(bucketName, objectName, uploadID, reader, partNumber, hashSums["md5"], hashSums["sha256"], prtSize)
if err != nil { if err != nil {
// Reset the buffer upon any error. // Reset the buffer upon any error.
tmpBuffer.Reset() tmpBuffer.Reset()

View File

@ -18,6 +18,9 @@ package minio
import ( import (
"bytes" "bytes"
"crypto/md5"
"crypto/sha256"
"hash"
"io" "io"
"io/ioutil" "io/ioutil"
"net/http" "net/http"
@ -193,11 +196,20 @@ func (c Client) putObjectSingle(bucketName, objectName string, reader io.Reader,
if size <= -1 { if size <= -1 {
size = maxSinglePutObjectSize size = maxSinglePutObjectSize
} }
var md5Sum, sha256Sum []byte
// Add the appropriate hash algorithms that need to be calculated by hashCopyN
// In case of non-v4 signature request or HTTPS connection, sha256 is not needed.
hashAlgos := make(map[string]hash.Hash)
hashSums := make(map[string][]byte)
hashAlgos["md5"] = md5.New()
if c.signature.isV4() && !c.secure {
hashAlgos["sha256"] = sha256.New()
}
if size <= minPartSize { if size <= minPartSize {
// Initialize a new temporary buffer. // Initialize a new temporary buffer.
tmpBuffer := new(bytes.Buffer) tmpBuffer := new(bytes.Buffer)
md5Sum, sha256Sum, size, err = c.hashCopyN(tmpBuffer, reader, size) size, err = hashCopyN(hashAlgos, hashSums, tmpBuffer, reader, size)
reader = bytes.NewReader(tmpBuffer.Bytes()) reader = bytes.NewReader(tmpBuffer.Bytes())
tmpBuffer.Reset() tmpBuffer.Reset()
} else { } else {
@ -208,7 +220,7 @@ func (c Client) putObjectSingle(bucketName, objectName string, reader io.Reader,
return 0, err return 0, err
} }
defer tmpFile.Close() defer tmpFile.Close()
md5Sum, sha256Sum, size, err = c.hashCopyN(tmpFile, reader, size) size, err = hashCopyN(hashAlgos, hashSums, tmpFile, reader, size)
// Seek back to beginning of the temporary file. // Seek back to beginning of the temporary file.
if _, err = tmpFile.Seek(0, 0); err != nil { if _, err = tmpFile.Seek(0, 0); err != nil {
return 0, err return 0, err
@ -222,7 +234,7 @@ func (c Client) putObjectSingle(bucketName, objectName string, reader io.Reader,
} }
} }
// Execute put object. // Execute put object.
st, err := c.putObjectDo(bucketName, objectName, reader, md5Sum, sha256Sum, size, contentType) st, err := c.putObjectDo(bucketName, objectName, reader, hashSums["md5"], hashSums["sha256"], size, contentType)
if err != nil { if err != nil {
return 0, err return 0, err
} }

View File

@ -50,54 +50,6 @@ func (c Client) RemoveBucket(bucketName string) error {
return nil return nil
} }
// RemoveBucketPolicy remove a bucket policy on given path.
// (This definition appears in this commit as a removal hunk; it was
// relocated elsewhere in the package.)
func (c Client) RemoveBucketPolicy(bucketName, objectPrefix string) error {
	// Input validation.
	if err := isValidBucketName(bucketName); err != nil {
		return err
	}
	if err := isValidObjectPrefix(objectPrefix); err != nil {
		return err
	}
	// Fetch the policy currently applied to this bucket/prefix.
	policy, err := c.getBucketPolicy(bucketName, objectPrefix)
	if err != nil {
		return err
	}
	// No bucket policy found, nothing to remove return success.
	if policy.Statements == nil {
		return nil
	}
	// Save new statements after removing requested bucket policy.
	policy.Statements = removeBucketPolicyStatement(policy.Statements, bucketName, objectPrefix)
	// Commit the update policy.
	return c.putBucketPolicy(bucketName, policy)
}
// Removes all policies on a bucket.
// (This definition appears in this commit as a removal hunk; it was
// relocated elsewhere in the package.)
func (c Client) removeBucketPolicy(bucketName string) error {
	// Input validation.
	if err := isValidBucketName(bucketName); err != nil {
		return err
	}
	// Get resources properly escaped and lined up before
	// using them in http request.
	urlValues := make(url.Values)
	urlValues.Set("policy", "")
	// Execute DELETE on objectName.
	// NOTE(review): the response status code is never checked, so a
	// server-side failure is silently treated as success — confirm
	// whether callers rely on this best-effort behavior.
	resp, err := c.executeMethod("DELETE", requestMetadata{
		bucketName:  bucketName,
		queryValues: urlValues,
	})
	defer closeResponse(resp)
	if err != nil {
		return err
	}
	return nil
}
// RemoveObject remove an object from a bucket. // RemoveObject remove an object from a bucket.
func (c Client) RemoveObject(bucketName, objectName string) error { func (c Client) RemoveObject(bucketName, objectName string) error {
// Input validation. // Input validation.

View File

@ -41,6 +41,35 @@ type commonPrefix struct {
Prefix string Prefix string
} }
// listBucketResult container for listObjects V2 response.
type listBucketV2Result struct {
// A response can contain CommonPrefixes only if you have
// specified a delimiter.
CommonPrefixes []commonPrefix
// Metadata about each object returned.
Contents []ObjectInfo
Delimiter string
// Encoding type used to encode object keys in the response.
EncodingType string
// A flag that indicates whether or not ListObjects returned all of the results
// that satisfied the search criteria.
IsTruncated bool
MaxKeys int64
Name string
// Hold the token that will be sent in the next request to fetch the next group of keys
NextContinuationToken string
ContinuationToken string
Prefix string
// FetchOwner and StartAfter are currently not used
FetchOwner string
StartAfter string
}
// listBucketResult container for listObjects response. // listBucketResult container for listObjects response.
type listBucketResult struct { type listBucketResult struct {
// A response can contain CommonPrefixes only if you have // A response can contain CommonPrefixes only if you have

View File

@ -53,7 +53,10 @@ type Client struct {
appName string appName string
appVersion string appVersion string
} }
endpointURL *url.URL endpointURL string
// Indicate whether we are using https or not
secure bool
// Needs allocation. // Needs allocation.
httpClient *http.Client httpClient *http.Client
@ -70,7 +73,7 @@ type Client struct {
// Global constants. // Global constants.
const ( const (
libraryName = "minio-go" libraryName = "minio-go"
libraryVersion = "1.0.1" libraryVersion = "2.0.1"
) )
// User Agent should always following the below style. // User Agent should always following the below style.
@ -163,8 +166,11 @@ func privateNew(endpoint, accessKeyID, secretAccessKey string, secure bool) (*Cl
clnt.anonymous = true clnt.anonymous = true
} }
// Remember whether we are using https or not
clnt.secure = secure
// Save endpoint URL, user agent for future uses. // Save endpoint URL, user agent for future uses.
clnt.endpointURL = endpointURL clnt.endpointURL = endpointURL.String()
// Instantiate http client and bucket location cache. // Instantiate http client and bucket location cache.
clnt.httpClient = &http.Client{ clnt.httpClient = &http.Client{
@ -583,10 +589,15 @@ func (c Client) newRequest(method string, metadata requestMetadata) (req *http.R
// set sha256 sum for signature calculation only with // set sha256 sum for signature calculation only with
// signature version '4'. // signature version '4'.
if c.signature.isV4() { if c.signature.isV4() {
req.Header.Set("X-Amz-Content-Sha256", hex.EncodeToString(sum256([]byte{}))) shaHeader := "UNSIGNED-PAYLOAD"
if metadata.contentSHA256Bytes != nil { if !c.secure {
req.Header.Set("X-Amz-Content-Sha256", hex.EncodeToString(metadata.contentSHA256Bytes)) if metadata.contentSHA256Bytes == nil {
shaHeader = hex.EncodeToString(sum256([]byte{}))
} else {
shaHeader = hex.EncodeToString(metadata.contentSHA256Bytes)
}
} }
req.Header.Set("X-Amz-Content-Sha256", shaHeader)
} }
} }
@ -621,14 +632,18 @@ func (c Client) setUserAgent(req *http.Request) {
// makeTargetURL make a new target url. // makeTargetURL make a new target url.
func (c Client) makeTargetURL(bucketName, objectName, bucketLocation string, queryValues url.Values) (*url.URL, error) { func (c Client) makeTargetURL(bucketName, objectName, bucketLocation string, queryValues url.Values) (*url.URL, error) {
// Save host. // Save host.
host := c.endpointURL.Host url, err := url.Parse(c.endpointURL)
if err != nil {
return nil, err
}
host := url.Host
// For Amazon S3 endpoint, try to fetch location based endpoint. // For Amazon S3 endpoint, try to fetch location based endpoint.
if isAmazonEndpoint(c.endpointURL) { if isAmazonEndpoint(c.endpointURL) {
// Fetch new host based on the bucket location. // Fetch new host based on the bucket location.
host = getS3Endpoint(bucketLocation) host = getS3Endpoint(bucketLocation)
} }
// Save scheme. // Save scheme.
scheme := c.endpointURL.Scheme scheme := url.Scheme
urlStr := scheme + "://" + host + "/" urlStr := scheme + "://" + host + "/"
// Make URL only if bucketName is available, otherwise use the // Make URL only if bucketName is available, otherwise use the

View File

@ -14,7 +14,7 @@
* limitations under the License. * limitations under the License.
*/ */
package minio_test package minio
import ( import (
"bytes" "bytes"
@ -28,8 +28,6 @@ import (
"os" "os"
"testing" "testing"
"time" "time"
"github.com/minio/minio-go"
) )
// Tests bucket re-create errors. // Tests bucket re-create errors.
@ -42,7 +40,7 @@ func TestMakeBucketErrorV2(t *testing.T) {
rand.Seed(time.Now().Unix()) rand.Seed(time.Now().Unix())
// Instantiate new minio client object. // Instantiate new minio client object.
c, err := minio.NewV2( c, err := NewV2(
"s3.amazonaws.com", "s3.amazonaws.com",
os.Getenv("ACCESS_KEY"), os.Getenv("ACCESS_KEY"),
os.Getenv("SECRET_KEY"), os.Getenv("SECRET_KEY"),
@ -69,8 +67,8 @@ func TestMakeBucketErrorV2(t *testing.T) {
t.Fatal("Error: make bucket should should fail for", bucketName) t.Fatal("Error: make bucket should should fail for", bucketName)
} }
// Verify valid error response from server. // Verify valid error response from server.
if minio.ToErrorResponse(err).Code != "BucketAlreadyExists" && if ToErrorResponse(err).Code != "BucketAlreadyExists" &&
minio.ToErrorResponse(err).Code != "BucketAlreadyOwnedByYou" { ToErrorResponse(err).Code != "BucketAlreadyOwnedByYou" {
t.Fatal("Error: Invalid error returned by server", err) t.Fatal("Error: Invalid error returned by server", err)
} }
if err = c.RemoveBucket(bucketName); err != nil { if err = c.RemoveBucket(bucketName); err != nil {
@ -88,7 +86,7 @@ func TestGetObjectClosedTwiceV2(t *testing.T) {
rand.Seed(time.Now().Unix()) rand.Seed(time.Now().Unix())
// Instantiate new minio client object. // Instantiate new minio client object.
c, err := minio.NewV2( c, err := NewV2(
"s3.amazonaws.com", "s3.amazonaws.com",
os.Getenv("ACCESS_KEY"), os.Getenv("ACCESS_KEY"),
os.Getenv("SECRET_KEY"), os.Getenv("SECRET_KEY"),
@ -173,7 +171,7 @@ func TestRemovePartiallyUploadedV2(t *testing.T) {
rand.Seed(time.Now().Unix()) rand.Seed(time.Now().Unix())
// Instantiate new minio client object. // Instantiate new minio client object.
c, err := minio.NewV2( c, err := NewV2(
"s3.amazonaws.com", "s3.amazonaws.com",
os.Getenv("ACCESS_KEY"), os.Getenv("ACCESS_KEY"),
os.Getenv("SECRET_KEY"), os.Getenv("SECRET_KEY"),
@ -240,7 +238,7 @@ func TestResumablePutObjectV2(t *testing.T) {
rand.Seed(time.Now().Unix()) rand.Seed(time.Now().Unix())
// Instantiate new minio client object. // Instantiate new minio client object.
c, err := minio.NewV2( c, err := NewV2(
"s3.amazonaws.com", "s3.amazonaws.com",
os.Getenv("ACCESS_KEY"), os.Getenv("ACCESS_KEY"),
os.Getenv("SECRET_KEY"), os.Getenv("SECRET_KEY"),
@ -351,7 +349,7 @@ func TestFPutObjectV2(t *testing.T) {
rand.Seed(time.Now().Unix()) rand.Seed(time.Now().Unix())
// Instantiate new minio client object. // Instantiate new minio client object.
c, err := minio.NewV2( c, err := NewV2(
"s3.amazonaws.com", "s3.amazonaws.com",
os.Getenv("ACCESS_KEY"), os.Getenv("ACCESS_KEY"),
os.Getenv("SECRET_KEY"), os.Getenv("SECRET_KEY"),
@ -499,7 +497,7 @@ func TestResumableFPutObjectV2(t *testing.T) {
rand.Seed(time.Now().Unix()) rand.Seed(time.Now().Unix())
// Instantiate new minio client object. // Instantiate new minio client object.
c, err := minio.NewV2( c, err := NewV2(
"s3.amazonaws.com", "s3.amazonaws.com",
os.Getenv("ACCESS_KEY"), os.Getenv("ACCESS_KEY"),
os.Getenv("SECRET_KEY"), os.Getenv("SECRET_KEY"),
@ -576,7 +574,7 @@ func TestMakeBucketRegionsV2(t *testing.T) {
rand.Seed(time.Now().Unix()) rand.Seed(time.Now().Unix())
// Instantiate new minio client object. // Instantiate new minio client object.
c, err := minio.NewV2( c, err := NewV2(
"s3.amazonaws.com", "s3.amazonaws.com",
os.Getenv("ACCESS_KEY"), os.Getenv("ACCESS_KEY"),
os.Getenv("SECRET_KEY"), os.Getenv("SECRET_KEY"),
@ -627,7 +625,7 @@ func TestGetObjectReadSeekFunctionalV2(t *testing.T) {
rand.Seed(time.Now().Unix()) rand.Seed(time.Now().Unix())
// Instantiate new minio client object. // Instantiate new minio client object.
c, err := minio.NewV2( c, err := NewV2(
"s3.amazonaws.com", "s3.amazonaws.com",
os.Getenv("ACCESS_KEY"), os.Getenv("ACCESS_KEY"),
os.Getenv("SECRET_KEY"), os.Getenv("SECRET_KEY"),
@ -765,7 +763,7 @@ func TestGetObjectReadAtFunctionalV2(t *testing.T) {
rand.Seed(time.Now().Unix()) rand.Seed(time.Now().Unix())
// Instantiate new minio client object. // Instantiate new minio client object.
c, err := minio.NewV2( c, err := NewV2(
"s3.amazonaws.com", "s3.amazonaws.com",
os.Getenv("ACCESS_KEY"), os.Getenv("ACCESS_KEY"),
os.Getenv("SECRET_KEY"), os.Getenv("SECRET_KEY"),
@ -906,7 +904,7 @@ func TestCopyObjectV2(t *testing.T) {
rand.Seed(time.Now().Unix()) rand.Seed(time.Now().Unix())
// Instantiate new minio client object // Instantiate new minio client object
c, err := minio.NewV2( c, err := NewV2(
"s3.amazonaws.com", "s3.amazonaws.com",
os.Getenv("ACCESS_KEY"), os.Getenv("ACCESS_KEY"),
os.Getenv("SECRET_KEY"), os.Getenv("SECRET_KEY"),
@ -958,7 +956,7 @@ func TestCopyObjectV2(t *testing.T) {
} }
// Set copy conditions. // Set copy conditions.
copyConds := minio.NewCopyConditions() copyConds := NewCopyConditions()
err = copyConds.SetModified(time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC)) err = copyConds.SetModified(time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC))
if err != nil { if err != nil {
t.Fatal("Error:", err) t.Fatal("Error:", err)
@ -1028,7 +1026,7 @@ func TestFunctionalV2(t *testing.T) {
// Seed random based on current time. // Seed random based on current time.
rand.Seed(time.Now().Unix()) rand.Seed(time.Now().Unix())
c, err := minio.NewV2( c, err := NewV2(
"s3.amazonaws.com", "s3.amazonaws.com",
os.Getenv("ACCESS_KEY"), os.Getenv("ACCESS_KEY"),
os.Getenv("SECRET_KEY"), os.Getenv("SECRET_KEY"),
@ -1075,7 +1073,7 @@ func TestFunctionalV2(t *testing.T) {
} }
// Make the bucket 'public read/write'. // Make the bucket 'public read/write'.
err = c.SetBucketPolicy(bucketName, "", minio.BucketPolicyReadWrite) err = c.SetBucketPolicy(bucketName, "", BucketPolicyReadWrite)
if err != nil { if err != nil {
t.Fatal("Error:", err) t.Fatal("Error:", err)
} }
@ -1144,6 +1142,18 @@ func TestFunctionalV2(t *testing.T) {
t.Fatal("Error: object " + objectName + " not found.") t.Fatal("Error: object " + objectName + " not found.")
} }
objFound = false
isRecursive = true // Recursive is true.
for obj := range c.ListObjects(bucketName, objectName, isRecursive, doneCh) {
if obj.Key == objectName {
objFound = true
break
}
}
if !objFound {
t.Fatal("Error: object " + objectName + " not found.")
}
incompObjNotFound := true incompObjNotFound := true
for objIncompl := range c.ListIncompleteUploads(bucketName, objectName, isRecursive, doneCh) { for objIncompl := range c.ListIncompleteUploads(bucketName, objectName, isRecursive, doneCh) {
if objIncompl.Key != "" { if objIncompl.Key != "" {

View File

@ -14,7 +14,7 @@
* limitations under the License. * limitations under the License.
*/ */
package minio_test package minio
import ( import (
"bytes" "bytes"
@ -28,8 +28,6 @@ import (
"os" "os"
"testing" "testing"
"time" "time"
"github.com/minio/minio-go"
) )
const letterBytes = "abcdefghijklmnopqrstuvwxyz01234569" const letterBytes = "abcdefghijklmnopqrstuvwxyz01234569"
@ -66,7 +64,7 @@ func TestMakeBucketError(t *testing.T) {
rand.Seed(time.Now().Unix()) rand.Seed(time.Now().Unix())
// Instantiate new minio client object. // Instantiate new minio client object.
c, err := minio.New( c, err := New(
"s3.amazonaws.com", "s3.amazonaws.com",
os.Getenv("ACCESS_KEY"), os.Getenv("ACCESS_KEY"),
os.Getenv("SECRET_KEY"), os.Getenv("SECRET_KEY"),
@ -93,8 +91,8 @@ func TestMakeBucketError(t *testing.T) {
t.Fatal("Error: make bucket should should fail for", bucketName) t.Fatal("Error: make bucket should should fail for", bucketName)
} }
// Verify valid error response from server. // Verify valid error response from server.
if minio.ToErrorResponse(err).Code != "BucketAlreadyExists" && if ToErrorResponse(err).Code != "BucketAlreadyExists" &&
minio.ToErrorResponse(err).Code != "BucketAlreadyOwnedByYou" { ToErrorResponse(err).Code != "BucketAlreadyOwnedByYou" {
t.Fatal("Error: Invalid error returned by server", err) t.Fatal("Error: Invalid error returned by server", err)
} }
if err = c.RemoveBucket(bucketName); err != nil { if err = c.RemoveBucket(bucketName); err != nil {
@ -112,7 +110,7 @@ func TestMakeBucketRegions(t *testing.T) {
rand.Seed(time.Now().Unix()) rand.Seed(time.Now().Unix())
// Instantiate new minio client object. // Instantiate new minio client object.
c, err := minio.New( c, err := New(
"s3.amazonaws.com", "s3.amazonaws.com",
os.Getenv("ACCESS_KEY"), os.Getenv("ACCESS_KEY"),
os.Getenv("SECRET_KEY"), os.Getenv("SECRET_KEY"),
@ -153,6 +151,162 @@ func TestMakeBucketRegions(t *testing.T) {
} }
} }
// Test PutObject using a large data to trigger multipart readat
func TestPutObjectReadAt(t *testing.T) {
if testing.Short() {
t.Skip("skipping functional tests for short runs")
}
// Seed random based on current time.
rand.Seed(time.Now().Unix())
// Instantiate new minio client object.
c, err := New(
"s3.amazonaws.com",
os.Getenv("ACCESS_KEY"),
os.Getenv("SECRET_KEY"),
true,
)
if err != nil {
t.Fatal("Error:", err)
}
// Enable tracing, write to stderr.
// c.TraceOn(os.Stderr)
// Set user agent.
c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
// Generate a new random bucket name.
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()))
// Make a new bucket.
err = c.MakeBucket(bucketName, "us-east-1")
if err != nil {
t.Fatal("Error:", err, bucketName)
}
// Generate data
buf := make([]byte, minPartSize*4)
// Save the data
objectName := randString(60, rand.NewSource(time.Now().UnixNano()))
n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), "binary/octet-stream")
if err != nil {
t.Fatal("Error:", err, bucketName, objectName)
}
if n != int64(len(buf)) {
t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", len(buf), n)
}
// Read the data back
r, err := c.GetObject(bucketName, objectName)
if err != nil {
t.Fatal("Error:", err, bucketName, objectName)
}
st, err := r.Stat()
if err != nil {
t.Fatal("Error:", err, bucketName, objectName)
}
if st.Size != int64(len(buf)) {
t.Fatalf("Error: number of bytes in stat does not match, want %v, got %v\n",
len(buf), st.Size)
}
if err := r.Close(); err != nil {
t.Fatal("Error:", err)
}
if err := r.Close(); err == nil {
t.Fatal("Error: object is already closed, should return error")
}
err = c.RemoveObject(bucketName, objectName)
if err != nil {
t.Fatal("Error: ", err)
}
err = c.RemoveBucket(bucketName)
if err != nil {
t.Fatal("Error:", err)
}
}
// Test listing partially uploaded objects.
func TestListPartiallyUploaded(t *testing.T) {
if testing.Short() {
t.Skip("skipping function tests for short runs")
}
// Seed random based on current time.
rand.Seed(time.Now().Unix())
// Instantiate new minio client object.
c, err := New(
"s3.amazonaws.com",
os.Getenv("ACCESS_KEY"),
os.Getenv("SECRET_KEY"),
true,
)
if err != nil {
t.Fatal("Error:", err)
}
// Set user agent.
c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
// Enable tracing, write to stdout.
// c.TraceOn(os.Stderr)
// Generate a new random bucket name.
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()))
// Make a new bucket.
err = c.MakeBucket(bucketName, "us-east-1")
if err != nil {
t.Fatal("Error:", err, bucketName)
}
reader, writer := io.Pipe()
go func() {
i := 0
for i < 25 {
_, err = io.CopyN(writer, crand.Reader, (minPartSize*2)/25)
if err != nil {
t.Fatal("Error:", err, bucketName)
}
i++
}
err := writer.CloseWithError(errors.New("Proactively closed to be verified later."))
if err != nil {
t.Fatal("Error:", err)
}
}()
objectName := bucketName + "-resumable"
_, err = c.PutObject(bucketName, objectName, reader, "application/octet-stream")
if err == nil {
t.Fatal("Error: PutObject should fail.")
}
if err.Error() != "Proactively closed to be verified later." {
t.Fatal("Error:", err)
}
doneCh := make(chan struct{})
defer close(doneCh)
isRecursive := true
multiPartObjectCh := c.ListIncompleteUploads(bucketName, objectName, isRecursive, doneCh)
for multiPartObject := range multiPartObjectCh {
if multiPartObject.Err != nil {
t.Fatalf("Error: Error when listing incomplete upload")
}
}
err = c.RemoveBucket(bucketName)
if err != nil {
t.Fatal("Error:", err)
}
}
// Test get object reader to not throw error on being closed twice. // Test get object reader to not throw error on being closed twice.
func TestGetObjectClosedTwice(t *testing.T) { func TestGetObjectClosedTwice(t *testing.T) {
if testing.Short() { if testing.Short() {
@ -163,7 +317,7 @@ func TestGetObjectClosedTwice(t *testing.T) {
rand.Seed(time.Now().Unix()) rand.Seed(time.Now().Unix())
// Instantiate new minio client object. // Instantiate new minio client object.
c, err := minio.New( c, err := New(
"s3.amazonaws.com", "s3.amazonaws.com",
os.Getenv("ACCESS_KEY"), os.Getenv("ACCESS_KEY"),
os.Getenv("SECRET_KEY"), os.Getenv("SECRET_KEY"),
@ -248,7 +402,7 @@ func TestRemovePartiallyUploaded(t *testing.T) {
rand.Seed(time.Now().Unix()) rand.Seed(time.Now().Unix())
// Instantiate new minio client object. // Instantiate new minio client object.
c, err := minio.New( c, err := New(
"s3.amazonaws.com", "s3.amazonaws.com",
os.Getenv("ACCESS_KEY"), os.Getenv("ACCESS_KEY"),
os.Getenv("SECRET_KEY"), os.Getenv("SECRET_KEY"),
@ -318,7 +472,7 @@ func TestResumablePutObject(t *testing.T) {
rand.Seed(time.Now().Unix()) rand.Seed(time.Now().Unix())
// Instantiate new minio client object. // Instantiate new minio client object.
c, err := minio.New( c, err := New(
"s3.amazonaws.com", "s3.amazonaws.com",
os.Getenv("ACCESS_KEY"), os.Getenv("ACCESS_KEY"),
os.Getenv("SECRET_KEY"), os.Getenv("SECRET_KEY"),
@ -350,12 +504,12 @@ func TestResumablePutObject(t *testing.T) {
} }
// Copy 11MiB worth of random data. // Copy 11MiB worth of random data.
n, err := io.CopyN(file, crand.Reader, 11*1024*1024) n, err := io.CopyN(file, crand.Reader, minPartSize*2)
if err != nil { if err != nil {
t.Fatal("Error:", err) t.Fatal("Error:", err)
} }
if n != int64(11*1024*1024) { if n != int64(minPartSize*2) {
t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", 11*1024*1024, n) t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", minPartSize*2, n)
} }
// Close the file pro-actively for windows. // Close the file pro-actively for windows.
@ -371,8 +525,8 @@ func TestResumablePutObject(t *testing.T) {
if err != nil { if err != nil {
t.Fatal("Error:", err) t.Fatal("Error:", err)
} }
if n != int64(11*1024*1024) { if n != int64(minPartSize*2) {
t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", 11*1024*1024, n) t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", minPartSize*2, n)
} }
// Get the uploaded object. // Get the uploaded object.
@ -428,7 +582,7 @@ func TestResumableFPutObject(t *testing.T) {
rand.Seed(time.Now().Unix()) rand.Seed(time.Now().Unix())
// Instantiate new minio client object. // Instantiate new minio client object.
c, err := minio.New( c, err := New(
"s3.amazonaws.com", "s3.amazonaws.com",
os.Getenv("ACCESS_KEY"), os.Getenv("ACCESS_KEY"),
os.Getenv("SECRET_KEY"), os.Getenv("SECRET_KEY"),
@ -458,12 +612,12 @@ func TestResumableFPutObject(t *testing.T) {
t.Fatal("Error:", err) t.Fatal("Error:", err)
} }
n, err := io.CopyN(file, crand.Reader, 11*1024*1024) n, err := io.CopyN(file, crand.Reader, minPartSize*2)
if err != nil { if err != nil {
t.Fatal("Error:", err) t.Fatal("Error:", err)
} }
if n != int64(11*1024*1024) { if n != int64(minPartSize*2) {
t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", 11*1024*1024, n) t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", minPartSize*2, n)
} }
// Close the file pro-actively for windows. // Close the file pro-actively for windows.
@ -478,8 +632,8 @@ func TestResumableFPutObject(t *testing.T) {
if err != nil { if err != nil {
t.Fatal("Error:", err) t.Fatal("Error:", err)
} }
if n != int64(11*1024*1024) { if n != int64(minPartSize*2) {
t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", 11*1024*1024, n) t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", minPartSize*2, n)
} }
err = c.RemoveObject(bucketName, objectName) err = c.RemoveObject(bucketName, objectName)
@ -498,8 +652,8 @@ func TestResumableFPutObject(t *testing.T) {
} }
} }
// Tests FPutObject hidden contentType setting // Tests FPutObject of a big file to trigger multipart
func TestFPutObject(t *testing.T) { func TestFPutObjectMultipart(t *testing.T) {
if testing.Short() { if testing.Short() {
t.Skip("skipping functional tests for short runs") t.Skip("skipping functional tests for short runs")
} }
@ -508,7 +662,7 @@ func TestFPutObject(t *testing.T) {
rand.Seed(time.Now().Unix()) rand.Seed(time.Now().Unix())
// Instantiate new minio client object. // Instantiate new minio client object.
c, err := minio.New( c, err := New(
"s3.amazonaws.com", "s3.amazonaws.com",
os.Getenv("ACCESS_KEY"), os.Getenv("ACCESS_KEY"),
os.Getenv("SECRET_KEY"), os.Getenv("SECRET_KEY"),
@ -533,18 +687,18 @@ func TestFPutObject(t *testing.T) {
t.Fatal("Error:", err, bucketName) t.Fatal("Error:", err, bucketName)
} }
// Make a temp file with 11*1024*1024 bytes of data. // Make a temp file with minPartSize*2 bytes of data.
file, err := ioutil.TempFile(os.TempDir(), "FPutObjectTest") file, err := ioutil.TempFile(os.TempDir(), "FPutObjectTest")
if err != nil { if err != nil {
t.Fatal("Error:", err) t.Fatal("Error:", err)
} }
n, err := io.CopyN(file, crand.Reader, 11*1024*1024) n, err := io.CopyN(file, crand.Reader, minPartSize*2)
if err != nil { if err != nil {
t.Fatal("Error:", err) t.Fatal("Error:", err)
} }
if n != int64(11*1024*1024) { if n != int64(minPartSize*2) {
t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", 11*1024*1024, n) t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", minPartSize*2, n)
} }
// Close the file pro-actively for windows. // Close the file pro-actively for windows.
@ -561,8 +715,87 @@ func TestFPutObject(t *testing.T) {
if err != nil { if err != nil {
t.Fatal("Error:", err) t.Fatal("Error:", err)
} }
if n != int64(11*1024*1024) { if n != int64(minPartSize*2) {
t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", 11*1024*1024, n) t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", minPartSize*2, n)
}
// Remove all objects and bucket and temp file
err = c.RemoveObject(bucketName, objectName+"-standard")
if err != nil {
t.Fatal("Error: ", err)
}
err = c.RemoveBucket(bucketName)
if err != nil {
t.Fatal("Error:", err)
}
}
// Tests FPutObject hidden contentType setting
func TestFPutObject(t *testing.T) {
if testing.Short() {
t.Skip("skipping functional tests for short runs")
}
// Seed random based on current time.
rand.Seed(time.Now().Unix())
// Instantiate new minio client object.
c, err := New(
"s3.amazonaws.com",
os.Getenv("ACCESS_KEY"),
os.Getenv("SECRET_KEY"),
true,
)
if err != nil {
t.Fatal("Error:", err)
}
// Enable tracing, write to stderr.
// c.TraceOn(os.Stderr)
// Set user agent.
c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
// Generate a new random bucket name.
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()))
// Make a new bucket.
err = c.MakeBucket(bucketName, "us-east-1")
if err != nil {
t.Fatal("Error:", err, bucketName)
}
// Make a temp file with minPartSize*2 bytes of data.
file, err := ioutil.TempFile(os.TempDir(), "FPutObjectTest")
if err != nil {
t.Fatal("Error:", err)
}
n, err := io.CopyN(file, crand.Reader, minPartSize*2)
if err != nil {
t.Fatal("Error:", err)
}
if n != int64(minPartSize*2) {
t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", minPartSize*2, n)
}
// Close the file pro-actively for windows.
err = file.Close()
if err != nil {
t.Fatal("Error:", err)
}
// Set base object name
objectName := bucketName + "FPutObject"
// Perform standard FPutObject with contentType provided (Expecting application/octet-stream)
n, err = c.FPutObject(bucketName, objectName+"-standard", file.Name(), "application/octet-stream")
if err != nil {
t.Fatal("Error:", err)
}
if n != int64(minPartSize*2) {
t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", minPartSize*2, n)
} }
// Perform FPutObject with no contentType provided (Expecting application/octet-stream) // Perform FPutObject with no contentType provided (Expecting application/octet-stream)
@ -570,8 +803,8 @@ func TestFPutObject(t *testing.T) {
if err != nil { if err != nil {
t.Fatal("Error:", err) t.Fatal("Error:", err)
} }
if n != int64(11*1024*1024) { if n != int64(minPartSize*2) {
t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", 11*1024*1024, n) t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", minPartSize*2, n)
} }
// Add extension to temp file name // Add extension to temp file name
@ -586,8 +819,8 @@ func TestFPutObject(t *testing.T) {
if err != nil { if err != nil {
t.Fatal("Error:", err) t.Fatal("Error:", err)
} }
if n != int64(11*1024*1024) { if n != int64(minPartSize*2) {
t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", 11*1024*1024, n) t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", minPartSize*2, n)
} }
// Check headers // Check headers
@ -656,7 +889,7 @@ func TestGetObjectReadSeekFunctional(t *testing.T) {
rand.Seed(time.Now().Unix()) rand.Seed(time.Now().Unix())
// Instantiate new minio client object. // Instantiate new minio client object.
c, err := minio.New( c, err := New(
"s3.amazonaws.com", "s3.amazonaws.com",
os.Getenv("ACCESS_KEY"), os.Getenv("ACCESS_KEY"),
os.Getenv("SECRET_KEY"), os.Getenv("SECRET_KEY"),
@ -794,7 +1027,7 @@ func TestGetObjectReadAtFunctional(t *testing.T) {
rand.Seed(time.Now().Unix()) rand.Seed(time.Now().Unix())
// Instantiate new minio client object. // Instantiate new minio client object.
c, err := minio.New( c, err := New(
"s3.amazonaws.com", "s3.amazonaws.com",
os.Getenv("ACCESS_KEY"), os.Getenv("ACCESS_KEY"),
os.Getenv("SECRET_KEY"), os.Getenv("SECRET_KEY"),
@ -926,6 +1159,106 @@ func TestGetObjectReadAtFunctional(t *testing.T) {
} }
} }
// Test Presigned Post Policy
func TestPresignedPostPolicy(t *testing.T) {
if testing.Short() {
t.Skip("Skipping functional tests for short runs")
}
// Seed random based on current time.
rand.Seed(time.Now().Unix())
// Instantiate new minio client object
c, err := NewV4(
"s3.amazonaws.com",
os.Getenv("ACCESS_KEY"),
os.Getenv("SECRET_KEY"),
true,
)
if err != nil {
t.Fatal("Error:", err)
}
// Enable tracing, write to stderr.
// c.TraceOn(os.Stderr)
// Set user agent.
c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
// Generate a new random bucket name.
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()))
// Make a new bucket in 'us-east-1' (source bucket).
err = c.MakeBucket(bucketName, "us-east-1")
if err != nil {
t.Fatal("Error:", err, bucketName)
}
// Generate data more than 32K
buf := make([]byte, rand.Intn(1<<20)+32*1024)
_, err = io.ReadFull(crand.Reader, buf)
if err != nil {
t.Fatal("Error:", err)
}
// Save the data
objectName := randString(60, rand.NewSource(time.Now().UnixNano()))
n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), "binary/octet-stream")
if err != nil {
t.Fatal("Error:", err, bucketName, objectName)
}
if n != int64(len(buf)) {
t.Fatalf("Error: number of bytes does not match want %v, got %v",
len(buf), n)
}
policy := NewPostPolicy()
if err := policy.SetBucket(""); err == nil {
t.Fatalf("Error: %s", err)
}
if err := policy.SetKey(""); err == nil {
t.Fatalf("Error: %s", err)
}
if err := policy.SetKeyStartsWith(""); err == nil {
t.Fatalf("Error: %s", err)
}
if err := policy.SetExpires(time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC)); err == nil {
t.Fatalf("Error: %s", err)
}
if err := policy.SetContentType(""); err == nil {
t.Fatalf("Error: %s", err)
}
if err := policy.SetContentLengthRange(1024*1024, 1024); err == nil {
t.Fatalf("Error: %s", err)
}
policy.SetBucket(bucketName)
policy.SetKey(objectName)
policy.SetExpires(time.Now().UTC().AddDate(0, 0, 10)) // expires in 10 days
policy.SetContentType("image/png")
policy.SetContentLengthRange(1024, 1024*1024)
_, _, err = c.PresignedPostPolicy(policy)
if err != nil {
t.Fatal("Error:", err)
}
policy = NewPostPolicy()
// Remove all objects and buckets
err = c.RemoveObject(bucketName, objectName)
if err != nil {
t.Fatal("Error:", err)
}
err = c.RemoveBucket(bucketName)
if err != nil {
t.Fatal("Error:", err)
}
}
// Tests copy object // Tests copy object
func TestCopyObject(t *testing.T) { func TestCopyObject(t *testing.T) {
if testing.Short() { if testing.Short() {
@ -935,7 +1268,7 @@ func TestCopyObject(t *testing.T) {
rand.Seed(time.Now().Unix()) rand.Seed(time.Now().Unix())
// Instantiate new minio client object // Instantiate new minio client object
c, err := minio.NewV4( c, err := NewV4(
"s3.amazonaws.com", "s3.amazonaws.com",
os.Getenv("ACCESS_KEY"), os.Getenv("ACCESS_KEY"),
os.Getenv("SECRET_KEY"), os.Getenv("SECRET_KEY"),
@ -986,12 +1319,45 @@ func TestCopyObject(t *testing.T) {
len(buf), n) len(buf), n)
} }
r, err := c.GetObject(bucketName, objectName)
if err != nil {
t.Fatal("Error:", err)
}
// Check the various fields of source object against destination object.
objInfo, err := r.Stat()
if err != nil {
t.Fatal("Error:", err)
}
// Set copy conditions. // Set copy conditions.
copyConds := minio.NewCopyConditions() copyConds := NewCopyConditions()
// Start by setting wrong conditions
err = copyConds.SetModified(time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC))
if err == nil {
t.Fatal("Error:", err)
}
err = copyConds.SetUnmodified(time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC))
if err == nil {
t.Fatal("Error:", err)
}
err = copyConds.SetMatchETag("")
if err == nil {
t.Fatal("Error:", err)
}
err = copyConds.SetMatchETagExcept("")
if err == nil {
t.Fatal("Error:", err)
}
err = copyConds.SetModified(time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC)) err = copyConds.SetModified(time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC))
if err != nil { if err != nil {
t.Fatal("Error:", err) t.Fatal("Error:", err)
} }
err = copyConds.SetMatchETag(objInfo.ETag)
if err != nil {
t.Fatal("Error:", err)
}
// Copy source. // Copy source.
copySource := bucketName + "/" + objectName copySource := bucketName + "/" + objectName
@ -1013,7 +1379,7 @@ func TestCopyObject(t *testing.T) {
t.Fatal("Error:", err) t.Fatal("Error:", err)
} }
// Check the various fields of source object against destination object. // Check the various fields of source object against destination object.
objInfo, err := reader.Stat() objInfo, err = reader.Stat()
if err != nil { if err != nil {
t.Fatal("Error:", err) t.Fatal("Error:", err)
} }
@ -1026,6 +1392,23 @@ func TestCopyObject(t *testing.T) {
objInfo.Size, objInfoCopy.Size) objInfo.Size, objInfoCopy.Size)
} }
// CopyObject again but with wrong conditions
copyConds = NewCopyConditions()
err = copyConds.SetUnmodified(time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC))
if err != nil {
t.Fatal("Error:", err)
}
err = copyConds.SetMatchETagExcept(objInfo.ETag)
if err != nil {
t.Fatal("Error:", err)
}
// Perform the Copy which should fail
err = c.CopyObject(bucketName+"-copy", objectName+"-copy", copySource, copyConds)
if err == nil {
t.Fatal("Error:", err, bucketName+"-copy", objectName+"-copy should fail")
}
// Remove all objects and buckets // Remove all objects and buckets
err = c.RemoveObject(bucketName, objectName) err = c.RemoveObject(bucketName, objectName)
if err != nil { if err != nil {
@ -1048,6 +1431,64 @@ func TestCopyObject(t *testing.T) {
} }
} }
func TestBucketNotification(t *testing.T) {
if testing.Short() {
t.Skip("skipping functional tests for the short runs")
}
// Seed random based on current time.
rand.Seed(time.Now().Unix())
c, err := New(
"s3.amazonaws.com",
os.Getenv("ACCESS_KEY"),
os.Getenv("SECRET_KEY"),
true,
)
if err != nil {
t.Fatal("Error:", err)
}
// Enable to debug
// c.TraceOn(os.Stderr)
// Set user agent.
c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
bucketName := os.Getenv("NOTIFY_BUCKET")
topicArn := NewArn("aws", os.Getenv("NOTIFY_SERVICE"), os.Getenv("NOTIFY_REGION"), os.Getenv("NOTIFY_ACCOUNTID"), os.Getenv("NOTIFY_RESOURCE"))
topicConfig := NewNotificationConfig(topicArn)
topicConfig.AddEvents(ObjectCreatedAll, ObjectRemovedAll)
topicConfig.AddFilterSuffix("jpg")
bNotification := BucketNotification{}
bNotification.AddTopic(topicConfig)
err = c.SetBucketNotification(bucketName, bNotification)
if err != nil {
t.Fatal("Error: ", err)
}
bNotification, err = c.GetBucketNotification(bucketName)
if err != nil {
t.Fatal("Error: ", err)
}
if len(bNotification.TopicConfigs) != 1 {
t.Fatal("Error: Topic config is empty")
}
if bNotification.TopicConfigs[0].Filter.S3Key.FilterRules[0].Value != "jpg" {
t.Fatal("Error: cannot get the suffix")
}
err = c.DeleteBucketNotification(bucketName)
if err != nil {
t.Fatal("Error: cannot delete bucket notification")
}
}
// Tests comprehensive list of all methods. // Tests comprehensive list of all methods.
func TestFunctional(t *testing.T) { func TestFunctional(t *testing.T) {
if testing.Short() { if testing.Short() {
@ -1057,7 +1498,7 @@ func TestFunctional(t *testing.T) {
// Seed random based on current time. // Seed random based on current time.
rand.Seed(time.Now().Unix()) rand.Seed(time.Now().Unix())
c, err := minio.New( c, err := New(
"s3.amazonaws.com", "s3.amazonaws.com",
os.Getenv("ACCESS_KEY"), os.Getenv("ACCESS_KEY"),
os.Getenv("SECRET_KEY"), os.Getenv("SECRET_KEY"),
@ -1112,7 +1553,7 @@ func TestFunctional(t *testing.T) {
t.Fatalf("Default bucket policy incorrect") t.Fatalf("Default bucket policy incorrect")
} }
// Set the bucket policy to 'public readonly'. // Set the bucket policy to 'public readonly'.
err = c.SetBucketPolicy(bucketName, "", minio.BucketPolicyReadOnly) err = c.SetBucketPolicy(bucketName, "", BucketPolicyReadOnly)
if err != nil { if err != nil {
t.Fatal("Error:", err) t.Fatal("Error:", err)
} }
@ -1126,7 +1567,7 @@ func TestFunctional(t *testing.T) {
} }
// Make the bucket 'public writeonly'. // Make the bucket 'public writeonly'.
err = c.SetBucketPolicy(bucketName, "", minio.BucketPolicyWriteOnly) err = c.SetBucketPolicy(bucketName, "", BucketPolicyWriteOnly)
if err != nil { if err != nil {
t.Fatal("Error:", err) t.Fatal("Error:", err)
} }
@ -1139,7 +1580,7 @@ func TestFunctional(t *testing.T) {
t.Fatalf("Expected bucket policy to be writeonly") t.Fatalf("Expected bucket policy to be writeonly")
} }
// Make the bucket 'public read/write'. // Make the bucket 'public read/write'.
err = c.SetBucketPolicy(bucketName, "", minio.BucketPolicyReadWrite) err = c.SetBucketPolicy(bucketName, "", BucketPolicyReadWrite)
if err != nil { if err != nil {
t.Fatal("Error:", err) t.Fatal("Error:", err)
} }
@ -1215,6 +1656,18 @@ func TestFunctional(t *testing.T) {
t.Fatal("Error: object " + objectName + " not found.") t.Fatal("Error: object " + objectName + " not found.")
} }
objFound = false
isRecursive = true // Recursive is true.
for obj := range c.ListObjectsV2(bucketName, objectName, isRecursive, doneCh) {
if obj.Key == objectName {
objFound = true
break
}
}
if !objFound {
t.Fatal("Error: object " + objectName + " not found.")
}
incompObjNotFound := true incompObjNotFound := true
for objIncompl := range c.ListIncompleteUploads(bucketName, objectName, isRecursive, doneCh) { for objIncompl := range c.ListIncompleteUploads(bucketName, objectName, isRecursive, doneCh) {
if objIncompl.Key != "" { if objIncompl.Key != "" {

View File

@ -341,13 +341,13 @@ func TestPartSize(t *testing.T) {
if err != nil { if err != nil {
t.Fatal("Error: ", err) t.Fatal("Error: ", err)
} }
if totalPartsCount != 9987 { if totalPartsCount != 9103 {
t.Fatalf("Error: expecting total parts count of 9987: got %v instead", totalPartsCount) t.Fatalf("Error: expecting total parts count of 9987: got %v instead", totalPartsCount)
} }
if partSize != 550502400 { if partSize != 603979776 {
t.Fatalf("Error: expecting part size of 550502400: got %v instead", partSize) t.Fatalf("Error: expecting part size of 550502400: got %v instead", partSize)
} }
if lastPartSize != 241172480 { if lastPartSize != 134217728 {
t.Fatalf("Error: expecting last part size of 241172480: got %v instead", lastPartSize) t.Fatalf("Error: expecting last part size of 241172480: got %v instead", lastPartSize)
} }
totalPartsCount, partSize, lastPartSize, err = optimalPartInfo(5000000000) totalPartsCount, partSize, lastPartSize, err = optimalPartInfo(5000000000)
@ -361,13 +361,13 @@ func TestPartSize(t *testing.T) {
if err != nil { if err != nil {
t.Fatal("Error:", err) t.Fatal("Error:", err)
} }
if totalPartsCount != 9987 { if totalPartsCount != 9103 {
t.Fatalf("Error: expecting total parts count of 9987: got %v instead", totalPartsCount) t.Fatalf("Error: expecting total parts count of 9987: got %v instead", totalPartsCount)
} }
if partSize != 550502400 { if partSize != 603979776 {
t.Fatalf("Error: expecting part size of 550502400: got %v instead", partSize) t.Fatalf("Error: expecting part size of 550502400: got %v instead", partSize)
} }
if lastPartSize != 241172480 { if lastPartSize != 134217728 {
t.Fatalf("Error: expecting last part size of 241172480: got %v instead", lastPartSize) t.Fatalf("Error: expecting last part size of 241172480: got %v instead", lastPartSize)
} }
} }

View File

@ -147,7 +147,10 @@ func (c Client) getBucketLocationRequest(bucketName string) (*http.Request, erro
urlValues.Set("location", "") urlValues.Set("location", "")
// Set get bucket location always as path style. // Set get bucket location always as path style.
targetURL := c.endpointURL targetURL, err := url.Parse(c.endpointURL)
if err != nil {
return nil, err
}
targetURL.Path = path.Join(bucketName, "") + "/" targetURL.Path = path.Join(bucketName, "") + "/"
targetURL.RawQuery = urlValues.Encode() targetURL.RawQuery = urlValues.Encode()

View File

@ -70,12 +70,15 @@ func TestGetBucketLocationRequest(t *testing.T) {
urlValues.Set("location", "") urlValues.Set("location", "")
// Set get bucket location always as path style. // Set get bucket location always as path style.
targetURL := c.endpointURL targetURL, err := url.Parse(c.endpointURL)
if err != nil {
return nil, err
}
targetURL.Path = path.Join(bucketName, "") + "/" targetURL.Path = path.Join(bucketName, "") + "/"
targetURL.RawQuery = urlValues.Encode() targetURL.RawQuery = urlValues.Encode()
// Get a new HTTP request for the method. // Get a new HTTP request for the method.
req, err := http.NewRequest("GET", targetURL.String(), nil) req, err = http.NewRequest("GET", targetURL.String(), nil)
if err != nil { if err != nil {
return nil, err return nil, err
} }

View File

@ -0,0 +1,140 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package minio
import (
"encoding/xml"
)
// S3 notification events
type Event string
const (
ObjectCreatedAll Event = "s3:ObjectCreated:*"
ObjectCreatePut = "s3:ObjectCreated:Put"
ObjectCreatedPost = "s3:ObjectCreated:Post"
ObjectCreatedCopy = "s3:ObjectCreated:Copy"
ObjectCreatedCompleteMultipartUpload = "sh:ObjectCreated:CompleteMultipartUpload"
ObjectRemovedAll = "s3:ObjectRemoved:*"
ObjectRemovedDelete = "s3:ObjectRemoved:Delete"
ObjectRemovedDeleteMarkerCreated = "s3:ObjectRemoved:DeleteMarkerCreated"
ObjectReducedRedundancyLostObject = "s3:ReducedRedundancyLostObject"
)
type FilterRule struct {
Name string `xml:"Name"`
Value string `xml:"Value"`
}
type S3Key struct {
FilterRules []FilterRule `xml:"FilterRule,omitempty"`
}
type Filter struct {
S3Key S3Key `xml:"S3Key,omitempty"`
}
// Arn - holds ARN information that will be sent to the web service
type Arn struct {
Partition string
Service string
Region string
AccountID string
Resource string
}
func NewArn(partition, service, region, accountID, resource string) Arn {
return Arn{Partition: partition,
Service: service,
Region: region,
AccountID: accountID,
Resource: resource}
}
func (arn Arn) String() string {
return "arn:" + arn.Partition + ":" + arn.Service + ":" + arn.Region + ":" + arn.AccountID + ":" + arn.Resource
}
// NotificationConfig - represents one single notification configuration
// such as topic, queue or lambda configuration.
type NotificationConfig struct {
Id string `xml:"Id,omitempty"`
Arn Arn `xml:"-"`
Events []Event `xml:"Event"`
Filter *Filter `xml:"Filter,omitempty"`
}
func NewNotificationConfig(arn Arn) NotificationConfig {
return NotificationConfig{Arn: arn}
}
func (t *NotificationConfig) AddEvents(events ...Event) {
t.Events = append(t.Events, events...)
}
func (t *NotificationConfig) AddFilterSuffix(suffix string) {
if t.Filter == nil {
t.Filter = &Filter{}
}
t.Filter.S3Key.FilterRules = append(t.Filter.S3Key.FilterRules, FilterRule{Name: "suffix", Value: suffix})
}
func (t *NotificationConfig) AddFilterPrefix(prefix string) {
if t.Filter == nil {
t.Filter = &Filter{}
}
t.Filter.S3Key.FilterRules = append(t.Filter.S3Key.FilterRules, FilterRule{Name: "prefix", Value: prefix})
}
// Topic notification config
type TopicConfig struct {
NotificationConfig
Topic string `xml:"Topic"`
}
type QueueConfig struct {
NotificationConfig
Queue string `xml:"Queue"`
}
type LambdaConfig struct {
NotificationConfig
Lambda string `xml:"CloudFunction"`
}
// BucketNotification - the struct that represents the whole XML to be sent to the web service
type BucketNotification struct {
XMLName xml.Name `xml:"NotificationConfiguration"`
LambdaConfigs []LambdaConfig `xml:"CloudFunctionConfiguration"`
TopicConfigs []TopicConfig `xml:"TopicConfiguration"`
QueueConfigs []QueueConfig `xml:"QueueConfiguration"`
}
func (b *BucketNotification) AddTopic(topicConfig NotificationConfig) {
config := TopicConfig{NotificationConfig: topicConfig, Topic: topicConfig.Arn.String()}
b.TopicConfigs = append(b.TopicConfigs, config)
}
func (b *BucketNotification) AddQueue(queueConfig NotificationConfig) {
config := QueueConfig{NotificationConfig: queueConfig, Queue: queueConfig.Arn.String()}
b.QueueConfigs = append(b.QueueConfigs, config)
}
func (b *BucketNotification) AddLambda(lambdaConfig NotificationConfig) {
config := LambdaConfig{NotificationConfig: lambdaConfig, Lambda: lambdaConfig.Arn.String()}
b.LambdaConfigs = append(b.LambdaConfigs, config)
}

View File

@ -74,7 +74,6 @@ type BucketAccessPolicy struct {
var ( var (
readWriteBucketActions = []string{ readWriteBucketActions = []string{
"s3:GetBucketLocation", "s3:GetBucketLocation",
"s3:ListBucket",
"s3:ListBucketMultipartUploads", "s3:ListBucketMultipartUploads",
// Add more bucket level read-write actions here. // Add more bucket level read-write actions here.
} }
@ -108,7 +107,6 @@ var (
var ( var (
readOnlyBucketActions = []string{ readOnlyBucketActions = []string{
"s3:GetBucketLocation", "s3:GetBucketLocation",
"s3:ListBucket",
// Add more bucket level read actions here. // Add more bucket level read actions here.
} }
readOnlyObjectActions = []string{ readOnlyObjectActions = []string{
@ -144,6 +142,9 @@ func isBucketPolicyReadWrite(statements []Statement, bucketName string, objectPr
sort.Strings(readWriteBucketActions) sort.Strings(readWriteBucketActions)
sort.Strings(readWriteObjectActions) sort.Strings(readWriteObjectActions)
for _, statement := range statements { for _, statement := range statements {
if statement.Principal.AWS[0] != "*" {
continue
}
for _, resource := range statement.Resources { for _, resource := range statement.Resources {
if resource == awsResourcePrefix+bucketName { if resource == awsResourcePrefix+bucketName {
if subsetActions(readWriteBucketActions, statement.Actions) { if subsetActions(readWriteBucketActions, statement.Actions) {
@ -166,6 +167,9 @@ func isBucketPolicyWriteOnly(statements []Statement, bucketName string, objectPr
sort.Strings(writeOnlyBucketActions) sort.Strings(writeOnlyBucketActions)
sort.Strings(writeOnlyObjectActions) sort.Strings(writeOnlyObjectActions)
for _, statement := range statements { for _, statement := range statements {
if statement.Principal.AWS[0] != "*" {
continue
}
for _, resource := range statement.Resources { for _, resource := range statement.Resources {
if resource == awsResourcePrefix+bucketName { if resource == awsResourcePrefix+bucketName {
if subsetActions(writeOnlyBucketActions, statement.Actions) { if subsetActions(writeOnlyBucketActions, statement.Actions) {
@ -188,6 +192,9 @@ func isBucketPolicyReadOnly(statements []Statement, bucketName string, objectPre
sort.Strings(readOnlyBucketActions) sort.Strings(readOnlyBucketActions)
sort.Strings(readOnlyObjectActions) sort.Strings(readOnlyObjectActions)
for _, statement := range statements { for _, statement := range statements {
if statement.Principal.AWS[0] != "*" {
continue
}
for _, resource := range statement.Resources { for _, resource := range statement.Resources {
if resource == awsResourcePrefix+bucketName { if resource == awsResourcePrefix+bucketName {
if subsetActions(readOnlyBucketActions, statement.Actions) { if subsetActions(readOnlyBucketActions, statement.Actions) {
@ -205,28 +212,76 @@ func isBucketPolicyReadOnly(statements []Statement, bucketName string, objectPre
return commonActions && readOnly return commonActions && readOnly
} }
// Removes read write bucket policy if found. // isAction - returns true if action is found amond the list of actions.
func removeBucketPolicyStatementReadWrite(statements []Statement, bucketName string, objectPrefix string) []Statement { func isAction(action string, actions []string) bool {
for _, act := range actions {
if action == act {
return true
}
}
return false
}
// removeReadBucketActions - removes readWriteBucket actions if found.
func removeReadBucketActions(statements []Statement, bucketName string) []Statement {
var newStatements []Statement var newStatements []Statement
var bucketResourceStatementRemoved bool var bucketActionsRemoved bool
for _, statement := range statements { for _, statement := range statements {
for _, resource := range statement.Resources { for _, resource := range statement.Resources {
if resource == awsResourcePrefix+bucketName && !bucketResourceStatementRemoved { if resource == awsResourcePrefix+bucketName && !bucketActionsRemoved {
var newActions []string var newActions []string
for _, action := range statement.Actions { for _, action := range statement.Actions {
switch action { if isAction(action, readWriteBucketActions) {
case "s3:GetBucketLocation", "s3:ListBucket", "s3:ListBucketMultipartUploads":
continue continue
} }
newActions = append(newActions, action) newActions = append(newActions, action)
} }
statement.Actions = newActions statement.Actions = newActions
bucketResourceStatementRemoved = true bucketActionsRemoved = true
} else if resource == awsResourcePrefix+bucketName+"/"+objectPrefix+"*" { }
}
if len(statement.Actions) != 0 {
newStatements = append(newStatements, statement)
}
}
return newStatements
}
// removeListBucketActions - removes "s3:ListBucket" action if found.
func removeListBucketAction(statements []Statement, bucketName string) []Statement {
var newStatements []Statement
var listBucketActionsRemoved bool
for _, statement := range statements {
for _, resource := range statement.Resources {
if resource == awsResourcePrefix+bucketName && !listBucketActionsRemoved {
var newActions []string var newActions []string
for _, action := range statement.Actions { for _, action := range statement.Actions {
switch action { if isAction(action, []string{"s3:ListBucket"}) {
case "s3:PutObject", "s3:AbortMultipartUpload", "s3:ListMultipartUploadParts", "s3:DeleteObject", "s3:GetObject": delete(statement.Conditions, "StringEquals")
continue
}
newActions = append(newActions, action)
}
statement.Actions = newActions
listBucketActionsRemoved = true
}
}
if len(statement.Actions) != 0 {
newStatements = append(newStatements, statement)
}
}
return newStatements
}
// removeWriteObjectActions - removes writeOnlyObject actions if found.
func removeWriteObjectActions(statements []Statement, bucketName string, objectPrefix string) []Statement {
var newStatements []Statement
for _, statement := range statements {
for _, resource := range statement.Resources {
if resource == awsResourcePrefix+bucketName+"/"+objectPrefix+"*" {
var newActions []string
for _, action := range statement.Actions {
if isAction(action, writeOnlyObjectActions) {
continue continue
} }
newActions = append(newActions, action) newActions = append(newActions, action)
@ -241,74 +296,72 @@ func removeBucketPolicyStatementReadWrite(statements []Statement, bucketName str
return newStatements return newStatements
} }
// removeReadObjectActions - removes "s3:GetObject" actions if found.
func removeReadObjectActions(statements []Statement, bucketName string, objectPrefix string) []Statement {
var newStatements []Statement
for _, statement := range statements {
for _, resource := range statement.Resources {
if resource == awsResourcePrefix+bucketName+"/"+objectPrefix+"*" {
var newActions []string
for _, action := range statement.Actions {
if isAction(action, []string{"s3:GetObject"}) {
continue
}
newActions = append(newActions, action)
}
statement.Actions = newActions
}
}
if len(statement.Actions) != 0 {
newStatements = append(newStatements, statement)
}
}
return newStatements
}
// removeReadWriteObjectActions - removes readWriteObject actions if found.
func removeReadWriteObjectActions(statements []Statement, bucketName string, objectPrefix string) []Statement {
var newStatements []Statement
for _, statement := range statements {
for _, resource := range statement.Resources {
if resource == awsResourcePrefix+bucketName+"/"+objectPrefix+"*" {
var newActions []string
for _, action := range statement.Actions {
if isAction(action, readWriteObjectActions) {
continue
}
newActions = append(newActions, action)
}
statement.Actions = newActions
}
}
if len(statement.Actions) != 0 {
newStatements = append(newStatements, statement)
}
}
return newStatements
}
// Removes read write bucket policy if found.
func removeBucketPolicyStatementReadWrite(statements []Statement, bucketName string, objectPrefix string) []Statement {
newStatements := removeReadBucketActions(statements, bucketName)
newStatements = removeListBucketAction(newStatements, bucketName)
newStatements = removeReadWriteObjectActions(newStatements, bucketName, objectPrefix)
return newStatements
}
// Removes write only bucket policy if found. // Removes write only bucket policy if found.
func removeBucketPolicyStatementWriteOnly(statements []Statement, bucketName string, objectPrefix string) []Statement { func removeBucketPolicyStatementWriteOnly(statements []Statement, bucketName string, objectPrefix string) []Statement {
var newStatements []Statement newStatements := removeReadBucketActions(statements, bucketName)
var bucketResourceStatementRemoved bool newStatements = removeWriteObjectActions(newStatements, bucketName, objectPrefix)
for _, statement := range statements {
for _, resource := range statement.Resources {
if resource == awsResourcePrefix+bucketName && !bucketResourceStatementRemoved {
var newActions []string
for _, action := range statement.Actions {
switch action {
case "s3:GetBucketLocation", "s3:ListBucketMultipartUploads":
continue
}
newActions = append(newActions, action)
}
statement.Actions = newActions
bucketResourceStatementRemoved = true
} else if resource == awsResourcePrefix+bucketName+"/"+objectPrefix+"*" {
var newActions []string
for _, action := range statement.Actions {
switch action {
case "s3:PutObject", "s3:AbortMultipartUpload", "s3:ListMultipartUploadParts", "s3:DeleteObject":
continue
}
newActions = append(newActions, action)
}
statement.Actions = newActions
}
}
if len(statement.Actions) != 0 {
newStatements = append(newStatements, statement)
}
}
return newStatements return newStatements
} }
// Removes read only bucket policy if found. // Removes read only bucket policy if found.
func removeBucketPolicyStatementReadOnly(statements []Statement, bucketName string, objectPrefix string) []Statement { func removeBucketPolicyStatementReadOnly(statements []Statement, bucketName string, objectPrefix string) []Statement {
var newStatements []Statement newStatements := removeReadBucketActions(statements, bucketName)
var bucketResourceStatementRemoved bool newStatements = removeListBucketAction(newStatements, bucketName)
for _, statement := range statements { newStatements = removeReadObjectActions(newStatements, bucketName, objectPrefix)
for _, resource := range statement.Resources {
if resource == awsResourcePrefix+bucketName && !bucketResourceStatementRemoved {
var newActions []string
for _, action := range statement.Actions {
switch action {
case "s3:GetBucketLocation", "s3:ListBucket":
continue
}
newActions = append(newActions, action)
}
statement.Actions = newActions
bucketResourceStatementRemoved = true
} else if resource == awsResourcePrefix+bucketName+"/"+objectPrefix+"*" {
var newActions []string
for _, action := range statement.Actions {
if action == "s3:GetObject" {
continue
}
newActions = append(newActions, action)
}
statement.Actions = newActions
}
}
if len(statement.Actions) != 0 {
newStatements = append(newStatements, statement)
}
}
return newStatements return newStatements
} }
@ -455,38 +508,66 @@ func generatePolicyStatement(bucketPolicy BucketPolicy, bucketName, objectPrefix
// Obtain statements for read-write BucketPolicy. // Obtain statements for read-write BucketPolicy.
func setReadWriteStatement(bucketName, objectPrefix string) []Statement { func setReadWriteStatement(bucketName, objectPrefix string) []Statement {
bucketResourceStatement := Statement{} bucketResourceStatement := Statement{}
objectResourceStatement := Statement{}
statements := []Statement{}
bucketResourceStatement.Effect = "Allow" bucketResourceStatement.Effect = "Allow"
bucketResourceStatement.Principal.AWS = []string{"*"} bucketResourceStatement.Principal.AWS = []string{"*"}
bucketResourceStatement.Resources = []string{fmt.Sprintf("%s%s", awsResourcePrefix, bucketName)} bucketResourceStatement.Resources = []string{fmt.Sprintf("%s%s", awsResourcePrefix, bucketName)}
bucketResourceStatement.Actions = readWriteBucketActions bucketResourceStatement.Actions = readWriteBucketActions
bucketListResourceStatement := Statement{}
bucketListResourceStatement.Effect = "Allow"
bucketListResourceStatement.Principal.AWS = []string{"*"}
bucketListResourceStatement.Resources = []string{fmt.Sprintf("%s%s", awsResourcePrefix, bucketName)}
bucketListResourceStatement.Actions = []string{"s3:ListBucket"}
// Object prefix is present, make sure to set the conditions for s3:ListBucket.
if objectPrefix != "" {
bucketListResourceStatement.Conditions = map[string]map[string]string{
"StringEquals": {
"s3:prefix": objectPrefix,
},
}
}
objectResourceStatement := Statement{}
objectResourceStatement.Effect = "Allow" objectResourceStatement.Effect = "Allow"
objectResourceStatement.Principal.AWS = []string{"*"} objectResourceStatement.Principal.AWS = []string{"*"}
objectResourceStatement.Resources = []string{fmt.Sprintf("%s%s", awsResourcePrefix, bucketName+"/"+objectPrefix+"*")} objectResourceStatement.Resources = []string{fmt.Sprintf("%s%s", awsResourcePrefix, bucketName+"/"+objectPrefix+"*")}
objectResourceStatement.Actions = readWriteObjectActions objectResourceStatement.Actions = readWriteObjectActions
// Save the read write policy. // Save the read write policy.
statements = append(statements, bucketResourceStatement, objectResourceStatement) statements := []Statement{}
statements = append(statements, bucketResourceStatement, bucketListResourceStatement, objectResourceStatement)
return statements return statements
} }
// Obtain statements for read only BucketPolicy. // Obtain statements for read only BucketPolicy.
func setReadOnlyStatement(bucketName, objectPrefix string) []Statement { func setReadOnlyStatement(bucketName, objectPrefix string) []Statement {
bucketResourceStatement := Statement{} bucketResourceStatement := Statement{}
objectResourceStatement := Statement{}
statements := []Statement{}
bucketResourceStatement.Effect = "Allow" bucketResourceStatement.Effect = "Allow"
bucketResourceStatement.Principal.AWS = []string{"*"} bucketResourceStatement.Principal.AWS = []string{"*"}
bucketResourceStatement.Resources = []string{fmt.Sprintf("%s%s", awsResourcePrefix, bucketName)} bucketResourceStatement.Resources = []string{fmt.Sprintf("%s%s", awsResourcePrefix, bucketName)}
bucketResourceStatement.Actions = readOnlyBucketActions bucketResourceStatement.Actions = readOnlyBucketActions
bucketListResourceStatement := Statement{}
bucketListResourceStatement.Effect = "Allow"
bucketListResourceStatement.Principal.AWS = []string{"*"}
bucketListResourceStatement.Resources = []string{fmt.Sprintf("%s%s", awsResourcePrefix, bucketName)}
bucketListResourceStatement.Actions = []string{"s3:ListBucket"}
// Object prefix is present, make sure to set the conditions for s3:ListBucket.
if objectPrefix != "" {
bucketListResourceStatement.Conditions = map[string]map[string]string{
"StringEquals": {
"s3:prefix": objectPrefix,
},
}
}
objectResourceStatement := Statement{}
objectResourceStatement.Effect = "Allow" objectResourceStatement.Effect = "Allow"
objectResourceStatement.Principal.AWS = []string{"*"} objectResourceStatement.Principal.AWS = []string{"*"}
objectResourceStatement.Resources = []string{fmt.Sprintf("%s%s", awsResourcePrefix, bucketName+"/"+objectPrefix+"*")} objectResourceStatement.Resources = []string{fmt.Sprintf("%s%s", awsResourcePrefix, bucketName+"/"+objectPrefix+"*")}
objectResourceStatement.Actions = readOnlyObjectActions objectResourceStatement.Actions = readOnlyObjectActions
statements := []Statement{}
// Save the read only policy. // Save the read only policy.
statements = append(statements, bucketResourceStatement, objectResourceStatement) statements = append(statements, bucketResourceStatement, bucketListResourceStatement, objectResourceStatement)
return statements return statements
} }

View File

@ -140,6 +140,7 @@ func TestsetReadOnlyStatement(t *testing.T) {
expectedReadOnlyStatement := func(bucketName, objectPrefix string) []Statement { expectedReadOnlyStatement := func(bucketName, objectPrefix string) []Statement {
bucketResourceStatement := &Statement{} bucketResourceStatement := &Statement{}
bucketListResourceStatement := &Statement{}
objectResourceStatement := &Statement{} objectResourceStatement := &Statement{}
statements := []Statement{} statements := []Statement{}
@ -147,12 +148,23 @@ func TestsetReadOnlyStatement(t *testing.T) {
bucketResourceStatement.Principal.AWS = []string{"*"} bucketResourceStatement.Principal.AWS = []string{"*"}
bucketResourceStatement.Resources = []string{fmt.Sprintf("%s%s", awsResourcePrefix, bucketName)} bucketResourceStatement.Resources = []string{fmt.Sprintf("%s%s", awsResourcePrefix, bucketName)}
bucketResourceStatement.Actions = readOnlyBucketActions bucketResourceStatement.Actions = readOnlyBucketActions
bucketListResourceStatement.Effect = "Allow"
bucketListResourceStatement.Principal.AWS = []string{"*"}
bucketListResourceStatement.Resources = []string{fmt.Sprintf("%s%s", awsResourcePrefix, bucketName)}
bucketListResourceStatement.Actions = []string{"s3:ListBucket"}
if objectPrefix != "" {
bucketListResourceStatement.Conditions = map[string]map[string]string{
"StringEquals": {
"s3:prefix": objectPrefix,
},
}
}
objectResourceStatement.Effect = "Allow" objectResourceStatement.Effect = "Allow"
objectResourceStatement.Principal.AWS = []string{"*"} objectResourceStatement.Principal.AWS = []string{"*"}
objectResourceStatement.Resources = []string{fmt.Sprintf("%s%s", awsResourcePrefix, bucketName+"/"+objectPrefix+"*")} objectResourceStatement.Resources = []string{fmt.Sprintf("%s%s", awsResourcePrefix, bucketName+"/"+objectPrefix+"*")}
objectResourceStatement.Actions = readOnlyObjectActions objectResourceStatement.Actions = readOnlyObjectActions
// Save the read only policy. // Save the read only policy.
statements = append(statements, *bucketResourceStatement, *objectResourceStatement) statements = append(statements, *bucketResourceStatement, *bucketListResourceStatement, *objectResourceStatement)
return statements return statements
} }
@ -221,6 +233,7 @@ func TestsetReadWriteStatement(t *testing.T) {
// Obtain statements for read-write BucketPolicy. // Obtain statements for read-write BucketPolicy.
expectedReadWriteStatement := func(bucketName, objectPrefix string) []Statement { expectedReadWriteStatement := func(bucketName, objectPrefix string) []Statement {
bucketResourceStatement := &Statement{} bucketResourceStatement := &Statement{}
bucketListResourceStatement := &Statement{}
objectResourceStatement := &Statement{} objectResourceStatement := &Statement{}
statements := []Statement{} statements := []Statement{}
@ -228,12 +241,23 @@ func TestsetReadWriteStatement(t *testing.T) {
bucketResourceStatement.Principal.AWS = []string{"*"} bucketResourceStatement.Principal.AWS = []string{"*"}
bucketResourceStatement.Resources = []string{fmt.Sprintf("%s%s", awsResourcePrefix, bucketName)} bucketResourceStatement.Resources = []string{fmt.Sprintf("%s%s", awsResourcePrefix, bucketName)}
bucketResourceStatement.Actions = readWriteBucketActions bucketResourceStatement.Actions = readWriteBucketActions
bucketListResourceStatement.Effect = "Allow"
bucketListResourceStatement.Principal.AWS = []string{"*"}
bucketListResourceStatement.Resources = []string{fmt.Sprintf("%s%s", awsResourcePrefix, bucketName)}
bucketListResourceStatement.Actions = []string{"s3:ListBucket"}
if objectPrefix != "" {
bucketListResourceStatement.Conditions = map[string]map[string]string{
"StringEquals": {
"s3:prefix": objectPrefix,
},
}
}
objectResourceStatement.Effect = "Allow" objectResourceStatement.Effect = "Allow"
objectResourceStatement.Principal.AWS = []string{"*"} objectResourceStatement.Principal.AWS = []string{"*"}
objectResourceStatement.Resources = []string{fmt.Sprintf("%s%s", awsResourcePrefix, bucketName+"/"+objectPrefix+"*")} objectResourceStatement.Resources = []string{fmt.Sprintf("%s%s", awsResourcePrefix, bucketName+"/"+objectPrefix+"*")}
objectResourceStatement.Actions = readWriteObjectActions objectResourceStatement.Actions = readWriteObjectActions
// Save the read write policy. // Save the read write policy.
statements = append(statements, *bucketResourceStatement, *objectResourceStatement) statements = append(statements, *bucketResourceStatement, *bucketListResourceStatement, *objectResourceStatement)
return statements return statements
} }
@ -312,9 +336,9 @@ func TestUnMarshalBucketPolicy(t *testing.T) {
// Setting these values to just a string and testing the unMarshalBucketPolicy // Setting these values to just a string and testing the unMarshalBucketPolicy
func TestUnMarshalBucketPolicyUntyped(t *testing.T) { func TestUnMarshalBucketPolicyUntyped(t *testing.T) {
obtainRaw := func(v interface{}, t *testing.T) []byte { obtainRaw := func(v interface{}, t *testing.T) []byte {
rawData, e := json.Marshal(v) rawData, err := json.Marshal(v)
if e != nil { if err != nil {
t.Fatal(e.Error()) t.Fatal(err)
} }
return rawData return rawData
} }
@ -338,18 +362,24 @@ func TestUnMarshalBucketPolicyUntyped(t *testing.T) {
statements := setReadOnlyStatement("my-bucket", "Asia/") statements := setReadOnlyStatement("my-bucket", "Asia/")
expectedBucketPolicy := BucketAccessPolicy{Statements: statements} expectedBucketPolicy := BucketAccessPolicy{Statements: statements}
accessPolicyUntyped := bucketAccessPolicyUntyped{} accessPolicyUntyped := bucketAccessPolicyUntyped{}
accessPolicyUntyped.Statement = make([]untypedStatement, 2) accessPolicyUntyped.Statement = make([]untypedStatement, len(statements))
accessPolicyUntyped.Statement[0].Effect = statements[0].Effect accessPolicyUntyped.Statement[0].Effect = statements[0].Effect
accessPolicyUntyped.Statement[0].Principal.AWS = obtainRaw(statements[0].Principal.AWS, t) accessPolicyUntyped.Statement[0].Principal.AWS = obtainRaw(statements[0].Principal.AWS[0], t)
accessPolicyUntyped.Statement[0].Action = obtainRaw(statements[0].Actions, t) accessPolicyUntyped.Statement[0].Action = obtainRaw(statements[0].Actions, t)
accessPolicyUntyped.Statement[0].Resource = obtainRaw(statements[0].Resources, t) accessPolicyUntyped.Statement[0].Resource = obtainRaw(statements[0].Resources, t)
// Setting the values are strings.
accessPolicyUntyped.Statement[1].Effect = statements[1].Effect accessPolicyUntyped.Statement[1].Effect = statements[1].Effect
accessPolicyUntyped.Statement[1].Principal.AWS = obtainRaw(statements[1].Principal.AWS[0], t) accessPolicyUntyped.Statement[1].Principal.AWS = obtainRaw(statements[1].Principal.AWS[0], t)
accessPolicyUntyped.Statement[1].Action = obtainRaw(statements[1].Actions[0], t) accessPolicyUntyped.Statement[1].Action = obtainRaw(statements[1].Actions, t)
accessPolicyUntyped.Statement[1].Resource = obtainRaw(statements[1].Resources[0], t) accessPolicyUntyped.Statement[1].Resource = obtainRaw(statements[1].Resources, t)
accessPolicyUntyped.Statement[1].Condition = statements[1].Conditions
// Setting the values are strings.
accessPolicyUntyped.Statement[2].Effect = statements[2].Effect
accessPolicyUntyped.Statement[2].Principal.AWS = obtainRaw(statements[2].Principal.AWS[0], t)
accessPolicyUntyped.Statement[2].Action = obtainRaw(statements[2].Actions[0], t)
accessPolicyUntyped.Statement[2].Resource = obtainRaw(statements[2].Resources[0], t)
inputPolicyBytes := obtainRaw(accessPolicyUntyped, t) inputPolicyBytes := obtainRaw(accessPolicyUntyped, t)
actualAccessPolicy, err := unMarshalBucketPolicy(inputPolicyBytes) actualAccessPolicy, err := unMarshalBucketPolicy(inputPolicyBytes)

View File

@ -20,7 +20,7 @@ package minio
// miniPartSize - minimum part size 5MiB per object after which // miniPartSize - minimum part size 5MiB per object after which
// putObject behaves internally as multipart. // putObject behaves internally as multipart.
const minPartSize = 1024 * 1024 * 5 const minPartSize = 1024 * 1024 * 64
// maxPartsCount - maximum number of parts for a single multipart session. // maxPartsCount - maximum number of parts for a single multipart session.
const maxPartsCount = 10000 const maxPartsCount = 10000

View File

@ -49,7 +49,7 @@ func NewCopyConditions() CopyConditions {
} }
// SetMatchETag - set match etag. // SetMatchETag - set match etag.
func (c CopyConditions) SetMatchETag(etag string) error { func (c *CopyConditions) SetMatchETag(etag string) error {
if etag == "" { if etag == "" {
return ErrInvalidArgument("ETag cannot be empty.") return ErrInvalidArgument("ETag cannot be empty.")
} }
@ -61,7 +61,7 @@ func (c CopyConditions) SetMatchETag(etag string) error {
} }
// SetMatchETagExcept - set match etag except. // SetMatchETagExcept - set match etag except.
func (c CopyConditions) SetMatchETagExcept(etag string) error { func (c *CopyConditions) SetMatchETagExcept(etag string) error {
if etag == "" { if etag == "" {
return ErrInvalidArgument("ETag cannot be empty.") return ErrInvalidArgument("ETag cannot be empty.")
} }
@ -73,7 +73,7 @@ func (c CopyConditions) SetMatchETagExcept(etag string) error {
} }
// SetUnmodified - set unmodified time since. // SetUnmodified - set unmodified time since.
func (c CopyConditions) SetUnmodified(modTime time.Time) error { func (c *CopyConditions) SetUnmodified(modTime time.Time) error {
if modTime.IsZero() { if modTime.IsZero() {
return ErrInvalidArgument("Modified since cannot be empty.") return ErrInvalidArgument("Modified since cannot be empty.")
} }
@ -85,7 +85,7 @@ func (c CopyConditions) SetUnmodified(modTime time.Time) error {
} }
// SetModified - set modified time since. // SetModified - set modified time since.
func (c CopyConditions) SetModified(modTime time.Time) error { func (c *CopyConditions) SetModified(modTime time.Time) error {
if modTime.IsZero() { if modTime.IsZero() {
return ErrInvalidArgument("Modified since cannot be empty.") return ErrInvalidArgument("Modified since cannot be empty.")
} }

File diff suppressed because it is too large Load Diff

View File

@ -38,9 +38,12 @@ func main() {
log.Fatalln(err) log.Fatalln(err)
} }
err = s3Client.RemoveBucketPolicy("my-bucketname", "my-objectprefix") // s3Client.TraceOn(os.Stderr)
err = s3Client.DeleteBucketNotification("my-bucketname")
if err != nil { if err != nil {
log.Fatalln(err) log.Fatalln(err)
} }
log.Println("Success")
log.Println("Bucket notification are successfully removed.")
} }

View File

@ -0,0 +1,55 @@
// +build ignore
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"log"
"github.com/minio/minio-go"
)
func main() {
// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY and my-bucketname are
// dummy values, please replace them with original values.
// Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
// This boolean value is the last argument for New().
// New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
// determined based on the Endpoint value.
s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
if err != nil {
log.Fatalln(err)
}
// s3Client.TraceOn(os.Stderr)
notifications, err := s3Client.GetBucketNotification("my-bucketname")
if err != nil {
log.Fatalln(err)
}
log.Println("Bucket notification are successfully retrieved.")
for _, topicConfig := range notifications.TopicConfigs {
for _, e := range topicConfig.Events {
log.Println(e + " event is enabled.")
}
}
}

View File

@ -40,13 +40,15 @@ func main() {
// s3Client.TraceOn(os.Stderr) // s3Client.TraceOn(os.Stderr)
// Fetch the policy at 'my-objectprefix'.
policy, err := s3Client.GetBucketPolicy("my-bucketname", "my-objectprefix") policy, err := s3Client.GetBucketPolicy("my-bucketname", "my-objectprefix")
if err != nil { if err != nil {
log.Fatalln(err) log.Fatalln(err)
} }
// Description of policy output. // Description of policy output.
// "none" - The specified bucket does not have a bucket policy. // "none" - The specified bucket does not have a bucket policy.
// "readonly" - Read only operatoins are allowed. // "readonly" - Read only operations are allowed.
// "writeonly" - Write only operations are allowed. // "writeonly" - Write only operations are allowed.
// "readwrite" - both read and write operations are allowed, the bucket is public. // "readwrite" - both read and write operations are allowed, the bucket is public.
log.Println("Success - ", policy) log.Println("Success - ", policy)

View File

@ -0,0 +1,57 @@
// +build ignore
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"fmt"
"github.com/minio/minio-go"
)
func main() {
// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname and my-prefixname
// are dummy values, please replace them with original values.
// Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
// This boolean value is the last argument for New().
// New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
// determined based on the Endpoint value.
s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
if err != nil {
fmt.Println(err)
return
}
// Create a done channel to control 'ListObjects' go routine.
doneCh := make(chan struct{})
// Indicate to our routine to exit cleanly upon return.
defer close(doneCh)
// List all objects from a bucket-name with a matching prefix.
for object := range s3Client.ListObjectsV2("my-bucketname", "my-prefixname", true, doneCh) {
if object.Err != nil {
fmt.Println(object.Err)
return
}
fmt.Println(object)
}
return
}

View File

@ -0,0 +1,85 @@
// +build ignore
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"log"
"github.com/minio/minio-go"
)
func main() {
// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY and my-bucketname are
// dummy values, please replace them with original values.
// Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
// This boolean value is the last argument for New().
// New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
// determined based on the Endpoint value.
s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
if err != nil {
log.Fatalln(err)
}
// s3Client.TraceOn(os.Stderr)
// ARN represents a notification channel that needs to be created in your S3 provider
// (e.g. http://docs.aws.amazon.com/sns/latest/dg/CreateTopic.html)
// An example of an ARN:
// arn:aws:sns:us-east-1:804064459714:UploadPhoto
// ^ ^ ^ ^ ^
// Provider __| | | | |
// | Region Account ID |_ Notification Name
// Service _|
//
// You should replace YOUR-PROVIDER, YOUR-SERVICE, YOUR-REGION, YOUR-ACCOUNT-ID and YOUR-RESOURCE
// with actual values that you receive from the S3 provider
// Here you create a new Topic notification
topicArn := minio.NewArn("YOUR-PROVIDER", "YOUR-SERVICE", "YOUR-REGION", "YOUR-ACCOUNT-ID", "YOUR-RESOURCE")
topicConfig := minio.NewNotificationConfig(topicArn)
topicConfig.AddEvents(minio.ObjectCreatedAll, minio.ObjectRemovedAll)
topicConfig.AddFilterPrefix("photos/")
topicConfig.AddFilterSuffix(".jpg")
// Create a new Queue notification
queueArn := minio.NewArn("YOUR-PROVIDER", "YOUR-SERVICE", "YOUR-REGION", "YOUR-ACCOUNT-ID", "YOUR-RESOURCE")
queueConfig := minio.NewNotificationConfig(queueArn)
queueConfig.AddEvents(minio.ObjectRemovedAll)
// Create a new Lambda (CloudFunction)
lambdaArn := minio.NewArn("YOUR-PROVIDER", "YOUR-SERVICE", "YOUR-REGION", "YOUR-ACCOUNT-ID", "YOUR-RESOURCE")
lambdaConfig := minio.NewNotificationConfig(lambdaArn)
lambdaConfig.AddEvents(minio.ObjectRemovedAll)
lambdaConfig.AddFilterSuffix(".swp")
// Now, set all previously created notification configs
bucketNotification := minio.BucketNotification{}
bucketNotification.AddTopic(topicConfig)
bucketNotification.AddQueue(queueConfig)
bucketNotification.AddLambda(lambdaConfig)
err = s3Client.SetBucketNotification("YOUR-BUCKET", bucketNotification)
if err != nil {
log.Fatalln("Error: " + err.Error())
}
log.Println("Success")
}

View File

@ -38,6 +38,13 @@ func main() {
log.Fatalln(err) log.Fatalln(err)
} }
// s3Client.TraceOn(os.Stderr)
// Description of policy input.
// minio.BucketPolicyNone - Remove any previously applied bucket policy at a prefix.
// minio.BucketPolicyReadOnly - Set read-only operations at a prefix.
// minio.BucketPolicyWriteOnly - Set write-only operations at a prefix.
// minio.BucketPolicyReadWrite - Set read-write operations at a prefix.
err = s3Client.SetBucketPolicy("my-bucketname", "my-objectprefix", minio.BucketPolicyReadWrite) err = s3Client.SetBucketPolicy("my-bucketname", "my-objectprefix", minio.BucketPolicyReadWrite)
if err != nil { if err != nil {
log.Fatalln(err) log.Fatalln(err)

View File

@ -65,25 +65,18 @@ func preSignV2(req http.Request, accessKeyID, secretAccessKey string, expires in
if accessKeyID == "" || secretAccessKey == "" { if accessKeyID == "" || secretAccessKey == "" {
return &req return &req
} }
d := time.Now().UTC() d := time.Now().UTC()
// Add date if not present.
if date := req.Header.Get("Date"); date == "" {
req.Header.Set("Date", d.Format(http.TimeFormat))
}
// Get encoded URL path.
path := encodeURL2Path(req.URL)
if len(req.URL.Query()) > 0 {
// Keep the usual queries unescaped for string to sign.
query, _ := url.QueryUnescape(queryEncode(req.URL.Query()))
path = path + "?" + query
}
// Find epoch expires when the request will expire. // Find epoch expires when the request will expire.
epochExpires := d.Unix() + expires epochExpires := d.Unix() + expires
// Get string to sign. // Add expires header if not present.
stringToSign := fmt.Sprintf("%s\n\n\n%d\n%s", req.Method, epochExpires, path) if expiresStr := req.Header.Get("Expires"); expiresStr == "" {
req.Header.Set("Expires", strconv.FormatInt(epochExpires, 10))
}
// Get presigned string to sign.
stringToSign := preStringifyHTTPReq(req)
hm := hmac.New(sha1.New, []byte(secretAccessKey)) hm := hmac.New(sha1.New, []byte(secretAccessKey))
hm.Write([]byte(stringToSign)) hm.Write([]byte(stringToSign))
@ -152,7 +145,7 @@ func signV2(req http.Request, accessKeyID, secretAccessKey string) *http.Request
} }
// Calculate HMAC for secretAccessKey. // Calculate HMAC for secretAccessKey.
stringToSign := getStringToSignV2(req) stringToSign := stringifyHTTPReq(req)
hm := hmac.New(sha1.New, []byte(secretAccessKey)) hm := hmac.New(sha1.New, []byte(secretAccessKey))
hm.Write([]byte(stringToSign)) hm.Write([]byte(stringToSign))
@ -174,30 +167,55 @@ func signV2(req http.Request, accessKeyID, secretAccessKey string) *http.Request
// StringToSign = HTTP-Verb + "\n" + // StringToSign = HTTP-Verb + "\n" +
// Content-Md5 + "\n" + // Content-Md5 + "\n" +
// Content-Type + "\n" + // Content-Type + "\n" +
// Date + "\n" + // Expires + "\n" +
// CanonicalizedProtocolHeaders + // CanonicalizedProtocolHeaders +
// CanonicalizedResource; // CanonicalizedResource;
func getStringToSignV2(req http.Request) string { func preStringifyHTTPReq(req http.Request) string {
buf := new(bytes.Buffer) buf := new(bytes.Buffer)
// Write standard headers. // Write standard headers.
writeDefaultHeaders(buf, req) writePreSignV2Headers(buf, req)
// Write canonicalized protocol headers if any. // Write canonicalized protocol headers if any.
writeCanonicalizedHeaders(buf, req) writeCanonicalizedHeaders(buf, req)
// Write canonicalized Query resources if any. // Write canonicalized Query resources if any.
writeCanonicalizedResource(buf, req) isPreSign := true
writeCanonicalizedResource(buf, req, isPreSign)
return buf.String() return buf.String()
} }
// writeDefaultHeader - write all default necessary headers // writePreSignV2Headers - write preSign v2 required headers.
func writeDefaultHeaders(buf *bytes.Buffer, req http.Request) { func writePreSignV2Headers(buf *bytes.Buffer, req http.Request) {
buf.WriteString(req.Method) buf.WriteString(req.Method + "\n")
buf.WriteByte('\n') buf.WriteString(req.Header.Get("Content-Md5") + "\n")
buf.WriteString(req.Header.Get("Content-Md5")) buf.WriteString(req.Header.Get("Content-Type") + "\n")
buf.WriteByte('\n') buf.WriteString(req.Header.Get("Expires") + "\n")
buf.WriteString(req.Header.Get("Content-Type")) }
buf.WriteByte('\n')
buf.WriteString(req.Header.Get("Date")) // From the Amazon docs:
buf.WriteByte('\n') //
// StringToSign = HTTP-Verb + "\n" +
// Content-Md5 + "\n" +
// Content-Type + "\n" +
// Date + "\n" +
// CanonicalizedProtocolHeaders +
// CanonicalizedResource;
func stringifyHTTPReq(req http.Request) string {
buf := new(bytes.Buffer)
// Write standard headers.
writeSignV2Headers(buf, req)
// Write canonicalized protocol headers if any.
writeCanonicalizedHeaders(buf, req)
// Write canonicalized Query resources if any.
isPreSign := false
writeCanonicalizedResource(buf, req, isPreSign)
return buf.String()
}
// writeSignV2Headers - write signV2 required headers.
func writeSignV2Headers(buf *bytes.Buffer, req http.Request) {
buf.WriteString(req.Method + "\n")
buf.WriteString(req.Header.Get("Content-Md5") + "\n")
buf.WriteString(req.Header.Get("Content-Type") + "\n")
buf.WriteString(req.Header.Get("Date") + "\n")
} }
// writeCanonicalizedHeaders - write canonicalized headers. // writeCanonicalizedHeaders - write canonicalized headers.
@ -245,12 +263,6 @@ var resourceList = []string{
"partNumber", "partNumber",
"policy", "policy",
"requestPayment", "requestPayment",
"response-cache-control",
"response-content-disposition",
"response-content-encoding",
"response-content-language",
"response-content-type",
"response-expires",
"torrent", "torrent",
"uploadId", "uploadId",
"uploads", "uploads",
@ -265,13 +277,22 @@ var resourceList = []string{
// CanonicalizedResource = [ "/" + Bucket ] + // CanonicalizedResource = [ "/" + Bucket ] +
// <HTTP-Request-URI, from the protocol name up to the query string> + // <HTTP-Request-URI, from the protocol name up to the query string> +
// [ sub-resource, if present. For example "?acl", "?location", "?logging", or "?torrent"]; // [ sub-resource, if present. For example "?acl", "?location", "?logging", or "?torrent"];
func writeCanonicalizedResource(buf *bytes.Buffer, req http.Request) { func writeCanonicalizedResource(buf *bytes.Buffer, req http.Request, isPreSign bool) {
// Save request URL. // Save request URL.
requestURL := req.URL requestURL := req.URL
// Get encoded URL path. // Get encoded URL path.
path := encodeURL2Path(requestURL) path := encodeURL2Path(requestURL)
if isPreSign {
// Get encoded URL path.
if len(requestURL.Query()) > 0 {
// Keep the usual queries unescaped for string to sign.
query, _ := url.QueryUnescape(queryEncode(requestURL.Query()))
path = path + "?" + query
}
buf.WriteString(path)
return
}
buf.WriteString(path) buf.WriteString(path)
if requestURL.RawQuery != "" { if requestURL.RawQuery != "" {
var n int var n int
vals, _ := url.ParseQuery(requestURL.RawQuery) vals, _ := url.ParseQuery(requestURL.RawQuery)

View File

@ -93,7 +93,7 @@ func getEndpointURL(endpoint string, secure bool) (*url.URL, error) {
} }
// Validate incoming endpoint URL. // Validate incoming endpoint URL.
if err := isValidEndpointURL(endpointURL); err != nil { if err := isValidEndpointURL(endpointURL.String()); err != nil {
return nil, err return nil, err
} }
return endpointURL, nil return endpointURL, nil
@ -153,12 +153,16 @@ func closeResponse(resp *http.Response) {
} }
// isVirtualHostSupported - verifies if bucketName can be part of // isVirtualHostSupported - verifies if bucketName can be part of
// virtual host. Currently only Amazon S3 and Google Cloud Storage would // virtual host. Currently only Amazon S3 and Google Cloud Storage
// support this. // would support this.
func isVirtualHostSupported(endpointURL *url.URL, bucketName string) bool { func isVirtualHostSupported(endpointURL string, bucketName string) bool {
url, err := url.Parse(endpointURL)
if err != nil {
return false
}
// bucketName can be valid but '.' in the hostname will fail SSL // bucketName can be valid but '.' in the hostname will fail SSL
// certificate validation. So do not use host-style for such buckets. // certificate validation. So do not use host-style for such buckets.
if endpointURL.Scheme == "https" && strings.Contains(bucketName, ".") { if url.Scheme == "https" && strings.Contains(bucketName, ".") {
return false return false
} }
// Return true for all other cases // Return true for all other cases
@ -166,58 +170,73 @@ func isVirtualHostSupported(endpointURL *url.URL, bucketName string) bool {
} }
// Match if it is exactly Amazon S3 endpoint. // Match if it is exactly Amazon S3 endpoint.
func isAmazonEndpoint(endpointURL *url.URL) bool { func isAmazonEndpoint(endpointURL string) bool {
if endpointURL == nil { if isAmazonChinaEndpoint(endpointURL) {
return false
}
if endpointURL.Host == "s3.amazonaws.com" {
return true return true
} }
if isAmazonChinaEndpoint(endpointURL) { url, err := url.Parse(endpointURL)
if err != nil {
return false
}
if url.Host == "s3.amazonaws.com" {
return true return true
} }
return false return false
} }
// Match if it is exactly Amazon S3 China endpoint. // Match if it is exactly Amazon S3 China endpoint.
// Customers who wish to use the new Beijing Region are required to sign up for a separate set of account credentials unique to the China (Beijing) Region. // Customers who wish to use the new Beijing Region are required
// Customers with existing AWS credentials will not be able to access resources in the new Region, and vice versa." // to sign up for a separate set of account credentials unique to
// the China (Beijing) Region. Customers with existing AWS credentials
// will not be able to access resources in the new Region, and vice versa.
// For more info https://aws.amazon.com/about-aws/whats-new/2013/12/18/announcing-the-aws-china-beijing-region/ // For more info https://aws.amazon.com/about-aws/whats-new/2013/12/18/announcing-the-aws-china-beijing-region/
func isAmazonChinaEndpoint(endpointURL *url.URL) bool { func isAmazonChinaEndpoint(endpointURL string) bool {
if endpointURL == nil { if endpointURL == "" {
return false return false
} }
if endpointURL.Host == "s3.cn-north-1.amazonaws.com.cn" { url, err := url.Parse(endpointURL)
if err != nil {
return false
}
if url.Host == "s3.cn-north-1.amazonaws.com.cn" {
return true return true
} }
return false return false
} }
// Match if it is exactly Google cloud storage endpoint. // Match if it is exactly Google cloud storage endpoint.
func isGoogleEndpoint(endpointURL *url.URL) bool { func isGoogleEndpoint(endpointURL string) bool {
if endpointURL == nil { if endpointURL == "" {
return false return false
} }
if endpointURL.Host == "storage.googleapis.com" { url, err := url.Parse(endpointURL)
if err != nil {
return false
}
if url.Host == "storage.googleapis.com" {
return true return true
} }
return false return false
} }
// Verify if input endpoint URL is valid. // Verify if input endpoint URL is valid.
func isValidEndpointURL(endpointURL *url.URL) error { func isValidEndpointURL(endpointURL string) error {
if endpointURL == nil { if endpointURL == "" {
return ErrInvalidArgument("Endpoint url cannot be empty.") return ErrInvalidArgument("Endpoint url cannot be empty.")
} }
if endpointURL.Path != "/" && endpointURL.Path != "" { url, err := url.Parse(endpointURL)
if err != nil {
return ErrInvalidArgument("Endpoint url cannot have fully qualified paths.") return ErrInvalidArgument("Endpoint url cannot have fully qualified paths.")
} }
if strings.Contains(endpointURL.Host, ".amazonaws.com") { if url.Path != "/" && url.Path != "" {
return ErrInvalidArgument("Endpoint url cannot have fully qualified paths.")
}
if strings.Contains(endpointURL, ".amazonaws.com") {
if !isAmazonEndpoint(endpointURL) { if !isAmazonEndpoint(endpointURL) {
return ErrInvalidArgument("Amazon S3 endpoint should be 's3.amazonaws.com'.") return ErrInvalidArgument("Amazon S3 endpoint should be 's3.amazonaws.com'.")
} }
} }
if strings.Contains(endpointURL.Host, ".googleapis.com") { if strings.Contains(endpointURL, ".googleapis.com") {
if !isGoogleEndpoint(endpointURL) { if !isGoogleEndpoint(endpointURL) {
return ErrInvalidArgument("Google Cloud Storage endpoint should be 'storage.googleapis.com'.") return ErrInvalidArgument("Google Cloud Storage endpoint should be 'storage.googleapis.com'.")
} }

View File

@ -111,9 +111,9 @@ func TestIsValidEndpointURL(t *testing.T) {
// Flag indicating whether the test is expected to pass or not. // Flag indicating whether the test is expected to pass or not.
shouldPass bool shouldPass bool
}{ }{
{"", nil, true}, {"", fmt.Errorf("Endpoint url cannot be empty."), false},
{"/", nil, true}, {"/", nil, true},
{"https://s3.amazonaws.com", nil, true}, {"https://s3.am1;4205;0cazonaws.com", nil, true},
{"https://s3.cn-north-1.amazonaws.com.cn", nil, true}, {"https://s3.cn-north-1.amazonaws.com.cn", nil, true},
{"https://s3.amazonaws.com/", nil, true}, {"https://s3.amazonaws.com/", nil, true},
{"https://storage.googleapis.com/", nil, true}, {"https://storage.googleapis.com/", nil, true},
@ -125,11 +125,7 @@ func TestIsValidEndpointURL(t *testing.T) {
} }
for i, testCase := range testCases { for i, testCase := range testCases {
endPoint, e := url.Parse(testCase.url) err := isValidEndpointURL(testCase.url)
if e != nil {
t.Fatalf("Test %d: Fatal err \"%s\"", i+1, e.Error())
}
err := isValidEndpointURL(endPoint)
if err != nil && testCase.shouldPass { if err != nil && testCase.shouldPass {
t.Errorf("Test %d: Expected to pass, but failed with: <ERROR> %s", i+1, err.Error()) t.Errorf("Test %d: Expected to pass, but failed with: <ERROR> %s", i+1, err.Error())
} }
@ -187,11 +183,7 @@ func TestIsVirtualHostSupported(t *testing.T) {
} }
for i, testCase := range testCases { for i, testCase := range testCases {
endPoint, e := url.Parse(testCase.url) result := isVirtualHostSupported(testCase.url, testCase.bucket)
if e != nil {
t.Fatalf("Test %d: Fatal err \"%s\"", i+1, e.Error())
}
result := isVirtualHostSupported(endPoint, testCase.bucket)
if testCase.result != result { if testCase.result != result {
t.Errorf("Test %d: Expected isVirtualHostSupported to be '%v' for input url \"%s\" and bucket \"%s\", but found it to be '%v' instead", i+1, testCase.result, testCase.url, testCase.bucket, result) t.Errorf("Test %d: Expected isVirtualHostSupported to be '%v' for input url \"%s\" and bucket \"%s\", but found it to be '%v' instead", i+1, testCase.result, testCase.url, testCase.bucket, result)
} }
@ -220,11 +212,7 @@ func TestIsAmazonEndpoint(t *testing.T) {
} }
for i, testCase := range testCases { for i, testCase := range testCases {
endPoint, e := url.Parse(testCase.url) result := isAmazonEndpoint(testCase.url)
if e != nil {
t.Fatalf("Test %d: Fatal err \"%s\"", i+1, e.Error())
}
result := isAmazonEndpoint(endPoint)
if testCase.result != result { if testCase.result != result {
t.Errorf("Test %d: Expected isAmazonEndpoint to be '%v' for input \"%s\", but found it to be '%v' instead", i+1, testCase.result, testCase.url, result) t.Errorf("Test %d: Expected isAmazonEndpoint to be '%v' for input \"%s\", but found it to be '%v' instead", i+1, testCase.result, testCase.url, result)
} }
@ -255,11 +243,7 @@ func TestIsAmazonChinaEndpoint(t *testing.T) {
} }
for i, testCase := range testCases { for i, testCase := range testCases {
endPoint, e := url.Parse(testCase.url) result := isAmazonChinaEndpoint(testCase.url)
if e != nil {
t.Fatalf("Test %d: Fatal err \"%s\"", i+1, e.Error())
}
result := isAmazonChinaEndpoint(endPoint)
if testCase.result != result { if testCase.result != result {
t.Errorf("Test %d: Expected isAmazonEndpoint to be '%v' for input \"%s\", but found it to be '%v' instead", i+1, testCase.result, testCase.url, result) t.Errorf("Test %d: Expected isAmazonEndpoint to be '%v' for input \"%s\", but found it to be '%v' instead", i+1, testCase.result, testCase.url, result)
} }
@ -288,11 +272,7 @@ func TestIsGoogleEndpoint(t *testing.T) {
} }
for i, testCase := range testCases { for i, testCase := range testCases {
endPoint, e := url.Parse(testCase.url) result := isGoogleEndpoint(testCase.url)
if e != nil {
t.Fatalf("Test %d: Fatal err \"%s\"", i+1, e.Error())
}
result := isGoogleEndpoint(endPoint)
if testCase.result != result { if testCase.result != result {
t.Errorf("Test %d: Expected isGoogleEndpoint to be '%v' for input \"%s\", but found it to be '%v' instead", i+1, testCase.result, testCase.url, result) t.Errorf("Test %d: Expected isGoogleEndpoint to be '%v' for input \"%s\", but found it to be '%v' instead", i+1, testCase.result, testCase.url, result)
} }