Merge pull request #507 from restic/debug-minio-on-darwin

Update minio-go

Commit a0ab9f2fdf
@@ -5,7 +5,7 @@ go:
   - 1.3.3
   - 1.4.3
   - 1.5.4
-  - 1.6.1
+  - 1.6.2

 os:
   - linux

@@ -33,6 +33,7 @@ install:
   - export GOBIN="$GOPATH/bin"
   - export PATH="$PATH:$GOBIN"
   - go env
+  - ulimit -n 2048

 script:
   - go run run_integration_tests.go

Vagrantfile (vendored, 10 lines changed)

@@ -87,16 +87,16 @@ Vagrant.configure(2) do |config|
   # fix permissions on synced folder
   config.vm.provision "fix perms", :type => :shell, :inline => fix_perms

-  # fix network card
-  config.vm.provider "virtualbox" do |v|
-    v.customize ["modifyvm", :id, "--nictype1", "virtio"]
-  end
-
   config.vm.define "linux" do |b|
     b.vm.box = "ubuntu/trusty64"
     b.vm.provision "packages linux", :type => :shell, :inline => packages_linux
     b.vm.provision "install gimme", :type => :shell, :inline => install_gimme
     b.vm.provision "prepare user", :type => :shell, :privileged => false, :inline => prepare_user("linux")

+    # fix network card
+    config.vm.provider "virtualbox" do |v|
+      v.customize ["modifyvm", :id, "--nictype1", "virtio"]
+    end
   end

   config.vm.define "freebsd" do |b|
@@ -16,7 +16,7 @@ const connLimit = 10

 // s3 is a backend which stores the data on an S3 endpoint.
 type s3 struct {
-  client     minio.CloudStorageClient
+  client     *minio.Client
   connChan   chan struct{}
   bucketname string
   prefix     string

@@ -39,7 +39,7 @@ func Open(cfg Config) (backend.Backend, error) {
   debug.Log("s3.Open", "BucketExists(%v) returned err %v, trying to create the bucket", cfg.Bucket, err)

   // create new bucket with default ACL in default region
-  err = client.MakeBucket(cfg.Bucket, "", "")
+  err = client.MakeBucket(cfg.Bucket, "")

   if err != nil {
     return nil, err
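For reference, a minimal sketch of how a caller would use the updated client type and the new two-argument MakeBucket signature. The endpoint, credentials, and bucket name below are placeholders (taken from the vendored API documentation further down), not restic's actual configuration values:

```go
package main

import (
	"log"

	"github.com/minio/minio-go"
)

func main() {
	// minio.New now hands back a concrete *minio.Client instead of the
	// minio.CloudStorageClient interface used before this update.
	client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", false)
	if err != nil {
		log.Fatal(err)
	}

	// MakeBucket dropped the ACL argument; only the bucket name and an
	// optional region are passed now.
	if err := client.MakeBucket("my-bucket", ""); err != nil {
		log.Fatal(err)
	}
}
```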
vendor/manifest (vendored, 4 lines changed)

@@ -28,8 +28,8 @@
   {
     "importpath": "github.com/minio/minio-go",
     "repository": "https://github.com/minio/minio-go",
-    "revision": "a4cd3caabd5f9c35ac100110eb60c2b80798f1af",
-    "branch": "HEAD"
+    "revision": "867b27701ad16db4a9f4dad40d28187ca8433ec9",
+    "branch": "master"
   },
   {
     "importpath": "github.com/pkg/sftp",

vendor/src/github.com/minio/minio-go/API.md (vendored, new file, 535 lines)

@@ -0,0 +1,535 @@
## API Documentation

### Minio client object creation
A Minio client object is created using minio-go:

```go
package main

import (
	"fmt"

	"github.com/minio/minio-go"
)

func main() {
	s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", false)
	if err != nil {
		fmt.Println(err)
		return
	}
}
```

s3Client can be used to perform operations on S3 storage. The APIs are described below.

### Bucket operations

* [`MakeBucket`](#MakeBucket)
* [`ListBuckets`](#ListBuckets)
* [`BucketExists`](#BucketExists)
* [`RemoveBucket`](#RemoveBucket)
* [`ListObjects`](#ListObjects)
* [`ListIncompleteUploads`](#ListIncompleteUploads)

### Object operations

* [`GetObject`](#GetObject)
* [`PutObject`](#PutObject)
* [`CopyObject`](#CopyObject)
* [`StatObject`](#StatObject)
* [`RemoveObject`](#RemoveObject)
* [`RemoveIncompleteUpload`](#RemoveIncompleteUpload)

### File operations

* [`FPutObject`](#FPutObject)
* [`FGetObject`](#FGetObject)

### Bucket policy operations

* [`SetBucketPolicy`](#SetBucketPolicy)
* [`GetBucketPolicy`](#GetBucketPolicy)
* [`RemoveBucketPolicy`](#RemoveBucketPolicy)

### Presigned operations

* [`PresignedGetObject`](#PresignedGetObject)
* [`PresignedPutObject`](#PresignedPutObject)
* [`PresignedPostPolicy`](#PresignedPostPolicy)

### Bucket operations
---------------------------------------
<a name="MakeBucket">
#### MakeBucket(bucketName string, location string) error
Create a new bucket.

__Parameters__
* `bucketName` _string_ - name of the bucket.
* `location` _string_ - region; valid values are _us-west-1_, _us-west-2_, _eu-west-1_, _eu-central-1_, _ap-southeast-1_, _ap-northeast-1_, _ap-southeast-2_, _sa-east-1_

__Example__
```go
err := s3Client.MakeBucket("mybucket", "us-west-1")
if err != nil {
	fmt.Println(err)
	return
}
fmt.Println("Successfully created mybucket.")
```
---------------------------------------
<a name="ListBuckets">
#### ListBuckets() ([]BucketInfo, error)
Lists all buckets.

`bucketList` lists buckets in the format:
* `bucket.Name` _string_: bucket name
* `bucket.CreationDate` _time.Time_: date when the bucket was created

__Example__
```go
buckets, err := s3Client.ListBuckets()
if err != nil {
	fmt.Println(err)
	return
}
for _, bucket := range buckets {
	fmt.Println(bucket)
}
```
---------------------------------------
<a name="BucketExists">
#### BucketExists(bucketName string) error
Check if a bucket exists.

__Parameters__
* `bucketName` _string_: name of the bucket

__Example__
```go
err := s3Client.BucketExists("mybucket")
if err != nil {
	fmt.Println(err)
	return
}
```
---------------------------------------
<a name="RemoveBucket">
#### RemoveBucket(bucketName string) error
Remove a bucket.

__Parameters__
* `bucketName` _string_: name of the bucket

__Example__
```go
err := s3Client.RemoveBucket("mybucket")
if err != nil {
	fmt.Println(err)
	return
}
```
---------------------------------------
<a name="GetBucketPolicy">
#### GetBucketPolicy(bucketName string, objectPrefix string) (BucketPolicy, error)
Get access permissions on a bucket or a prefix.

__Parameters__
* `bucketName` _string_: name of the bucket
* `objectPrefix` _string_: name of the object prefix

__Example__
```go
bucketPolicy, err := s3Client.GetBucketPolicy("mybucket", "")
if err != nil {
	fmt.Println(err)
	return
}
fmt.Println("Access permissions for mybucket are", bucketPolicy)
```
---------------------------------------
<a name="SetBucketPolicy">
#### SetBucketPolicy(bucketName string, objectPrefix string, policy BucketPolicy) error
Set access permissions on a bucket or an object prefix.

__Parameters__
* `bucketName` _string_: name of the bucket
* `objectPrefix` _string_: name of the object prefix
* `policy` _BucketPolicy_: policy can be _BucketPolicyNone_, _BucketPolicyReadOnly_, _BucketPolicyReadWrite_, _BucketPolicyWriteOnly_

__Example__
```go
err := s3Client.SetBucketPolicy("mybucket", "myprefix", BucketPolicyReadWrite)
if err != nil {
	fmt.Println(err)
	return
}
```
---------------------------------------
<a name="RemoveBucketPolicy">
#### RemoveBucketPolicy(bucketName string, objectPrefix string) error
Remove existing permissions on a bucket or an object prefix.

__Parameters__
* `bucketName` _string_: name of the bucket
* `objectPrefix` _string_: name of the object prefix

__Example__
```go
err := s3Client.RemoveBucketPolicy("mybucket", "myprefix")
if err != nil {
	fmt.Println(err)
	return
}
```

---------------------------------------
<a name="ListObjects">
#### ListObjects(bucketName string, prefix string, recursive bool, doneCh chan struct{}) <-chan ObjectInfo
List objects in a bucket.

__Parameters__
* `bucketName` _string_: name of the bucket
* `objectPrefix` _string_: the prefix of the objects that should be listed
* `recursive` _bool_: `true` indicates recursive-style listing and `false` indicates directory-style listing delimited by '/'
* `doneCh` _chan struct{}_: channel for pro-actively closing the internal goroutine

__Return Value__
* `<-chan ObjectInfo` _chan ObjectInfo_: read channel for all the objects in the bucket; each object is of the format:
  * `objectInfo.Key` _string_: name of the object
  * `objectInfo.Size` _int64_: size of the object
  * `objectInfo.ETag` _string_: etag of the object
  * `objectInfo.LastModified` _time.Time_: modified time stamp

__Example__
```go
// Create a done channel to control the 'ListObjects' goroutine.
doneCh := make(chan struct{})

// Indicate to our routine to exit cleanly upon return.
defer close(doneCh)

isRecursive := true
objectCh := s3Client.ListObjects("mybucket", "myprefix", isRecursive, doneCh)
for object := range objectCh {
	if object.Err != nil {
		fmt.Println(object.Err)
		return
	}
	fmt.Println(object)
}
```

---------------------------------------
<a name="ListIncompleteUploads">
#### ListIncompleteUploads(bucketName string, prefix string, recursive bool, doneCh chan struct{}) <-chan ObjectMultipartInfo
List partially uploaded objects in a bucket.

__Parameters__
* `bucketName` _string_: name of the bucket
* `prefix` _string_: prefix of the object names that are partially uploaded
* `recursive` _bool_: directory-style listing when false, recursive listing when true
* `doneCh` _chan struct{}_: channel for pro-actively closing the internal goroutine

__Return Value__
* `<-chan ObjectMultipartInfo` _chan ObjectMultipartInfo_: emits multipart objects of the format:
  * `multiPartObjInfo.Key` _string_: name of the incomplete object
  * `multiPartObjInfo.UploadID` _string_: upload ID of the incomplete object
  * `multiPartObjInfo.Size` _int64_: size of the incompletely uploaded object

__Example__
```go
// Create a done channel to control the 'ListIncompleteUploads' goroutine.
doneCh := make(chan struct{})

// Indicate to our routine to exit cleanly upon return.
defer close(doneCh)

isRecursive := true
multiPartObjectCh := s3Client.ListIncompleteUploads("mybucket", "myprefix", isRecursive, doneCh)
for multiPartObject := range multiPartObjectCh {
	if multiPartObject.Err != nil {
		fmt.Println(multiPartObject.Err)
		return
	}
	fmt.Println(multiPartObject)
}
```

---------------------------------------
### Object operations
<a name="GetObject">
#### GetObject(bucketName string, objectName string) (*Object, error)
Download an object.

__Parameters__
* `bucketName` _string_: name of the bucket
* `objectName` _string_: name of the object

__Return Value__
* `object` _*Object_: _Object_ represents an object reader.

__Example__
```go
object, err := s3Client.GetObject("mybucket", "photo.jpg")
if err != nil {
	fmt.Println(err)
	return
}
localFile, err := os.Create("/tmp/local-file")
if err != nil {
	fmt.Println(err)
	return
}
if _, err := io.Copy(localFile, object); err != nil {
	fmt.Println(err)
	return
}
```
---------------------------------------
|
||||||
|
#### FGetObject(bucketName string, objectName string, filePath string) error
|
||||||
|
Callback is called with `error` in case of error or `null` in case of success
|
||||||
|
|
||||||
|
__Parameters__
|
||||||
|
* `bucketName` _string_: name of the bucket
|
||||||
|
* `objectName` _string_: name of the object
|
||||||
|
* `filePath` _string_: path to which the object data will be written to
|
||||||
|
|
||||||
|
__Example__
|
||||||
|
```go
|
||||||
|
err := s3Client.FGetObject("mybucket", "photo.jpg", "/tmp/photo.jpg")
|
||||||
|
if err != nil {
|
||||||
|
fmt.Println(err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
```
|
||||||
|
---------------------------------------
|
||||||
|
<a name="PutObject">
|
||||||
|
#### PutObject(bucketName string, objectName string, reader io.Reader, contentType string) (n int, err error)
|
||||||
|
Upload contents from `io.Reader` to objectName.
|
||||||
|
|
||||||
|
__Parameters__
|
||||||
|
* `bucketName` _string_: name of the bucket
|
||||||
|
* `objectName` _string_: name of the object
|
||||||
|
* `reader` _io.Reader_: Any golang object implementing io.Reader
|
||||||
|
* `contentType` _string_: content type of the object.
|
||||||
|
|
||||||
|
__Example__
|
||||||
|
```go
|
||||||
|
file, err := os.Open("my-testfile")
|
||||||
|
if err != nil {
|
||||||
|
fmt.Println(err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
defer file.Close()
|
||||||
|
|
||||||
|
n, err := s3Client.PutObject("my-bucketname", "my-objectname", object, "application/octet-stream")
|
||||||
|
if err != nil {
|
||||||
|
fmt.Println(err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
---------------------------------------
|
||||||
|
<a name="CopyObject">
|
||||||
|
#### CopyObject(bucketName string, objectName string, objectSource string, conditions CopyConditions) error
|
||||||
|
Copy a source object into a new object with the provided name in the provided bucket.
|
||||||
|
|
||||||
|
__Parameters__
|
||||||
|
* `bucketName` _string_: name of the bucket
|
||||||
|
* `objectName` _string_: name of the object
|
||||||
|
* `objectSource` _string_: name of the object source.
|
||||||
|
* `conditions` _CopyConditions_: Collection of supported CopyObject conditions. ['x-amz-copy-source', 'x-amz-copy-source-if-match', 'x-amz-copy-source-if-none-match', 'x-amz-copy-source-if-unmodified-since', 'x-amz-copy-source-if-modified-since']
|
||||||
|
|
||||||
|
__Example__
|
||||||
|
```go
|
||||||
|
// All following conditions are allowed and can be combined together.
|
||||||
|
|
||||||
|
// Set copy conditions.
|
||||||
|
var copyConds = minio.NewCopyConditions()
|
||||||
|
// Set modified condition, copy object modified since 2014 April.
|
||||||
|
copyConds.SetModified(time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC))
|
||||||
|
|
||||||
|
// Set unmodified condition, copy object unmodified since 2014 April.
|
||||||
|
// copyConds.SetUnmodified(time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC))
|
||||||
|
|
||||||
|
// Set matching ETag condition, copy object which matches the following ETag.
|
||||||
|
// copyConds.SetMatchETag("31624deb84149d2f8ef9c385918b653a")
|
||||||
|
|
||||||
|
// Set matching ETag except condition, copy object which does not match the following ETag.
|
||||||
|
// copyConds.SetMatchETagExcept("31624deb84149d2f8ef9c385918b653a")
|
||||||
|
|
||||||
|
err := s3Client.CopyObject("my-bucketname", "my-objectname", "/my-sourcebucketname/my-sourceobjectname", copyConds)
|
||||||
|
if err != nil {
|
||||||
|
fmt.Println(err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
---------------------------------------
|
||||||
|
<a name="FPutObject">
|
||||||
|
#### FPutObject(bucketName string, objectName string, filePath string, contentType string) error
|
||||||
|
Uploads the object using contents from a file
|
||||||
|
|
||||||
|
__Parameters__
|
||||||
|
* `bucketName` _string_: name of the bucket
|
||||||
|
* `objectName` _string_: name of the object
|
||||||
|
* `filePath` _string_: file path of the file to be uploaded
|
||||||
|
* `contentType` _string_: content type of the object
|
||||||
|
|
||||||
|
__Example__
|
||||||
|
```go
|
||||||
|
n, err := s3Client.FPutObject("my-bucketname", "my-objectname", "/tmp/my-filename.csv", "application/csv")
|
||||||
|
if err != nil {
|
||||||
|
fmt.Println(err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
```
|
||||||
|
---------------------------------------
|
||||||
|
<a name="StatObject">
|
||||||
|
#### StatObject(bucketName string, objectName string) (ObjectInfo, error)
|
||||||
|
Get metadata of an object.
|
||||||
|
|
||||||
|
__Parameters__
|
||||||
|
* `bucketName` _string_: name of the bucket
|
||||||
|
* `objectName` _string_: name of the object
|
||||||
|
|
||||||
|
__Return Value__
|
||||||
|
`objInfo` _ObjectInfo_ : object stat info for following format:
|
||||||
|
* `objInfo.Size` _int64_: size of the object
|
||||||
|
* `objInfo.ETag` _string_: etag of the object
|
||||||
|
* `objInfo.ContentType` _string_: Content-Type of the object
|
||||||
|
* `objInfo.LastModified` _string_: modified time stamp
|
||||||
|
|
||||||
|
__Example__
|
||||||
|
```go
|
||||||
|
objInfo, err := s3Client.StatObject("mybucket", "photo.jpg")
|
||||||
|
if err != nil {
|
||||||
|
fmt.Println(err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
fmt.Println(objInfo)
|
||||||
|
```
|
||||||
|
---------------------------------------
|
||||||
|
<a name="RemoveObject">
|
||||||
|
#### RemoveObject(bucketName string, objectName string) error
|
||||||
|
Remove an object.
|
||||||
|
|
||||||
|
__Parameters__
|
||||||
|
* `bucketName` _string_: name of the bucket
|
||||||
|
* `objectName` _string_: name of the object
|
||||||
|
|
||||||
|
__Example__
|
||||||
|
```go
|
||||||
|
err := s3Client.RemoveObject("mybucket", "photo.jpg")
|
||||||
|
if err != nil {
|
||||||
|
fmt.Println(err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
```
|
||||||
|
---------------------------------------
|
||||||
|
<a name="RemoveIncompleteUpload">
|
||||||
|
#### RemoveIncompleteUpload(bucketName string, objectName string) error
|
||||||
|
Remove an partially uploaded object.
|
||||||
|
|
||||||
|
__Parameters__
|
||||||
|
* `bucketName` _string_: name of the bucket
|
||||||
|
* `objectName` _string_: name of the object
|
||||||
|
|
||||||
|
__Example__
|
||||||
|
```go
|
||||||
|
err := s3Client.RemoveIncompleteUpload("mybucket", "photo.jpg")
|
||||||
|
if err != nil {
|
||||||
|
fmt.Println(err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Presigned operations
|
||||||
|
---------------------------------------
|
||||||
|
<a name="PresignedGetObject">
|
||||||
|
#### PresignedGetObject(bucketName, objectName string, expiry time.Duration, reqParams url.Values) error
|
||||||
|
Generate a presigned URL for GET.
|
||||||
|
|
||||||
|
__Parameters__
|
||||||
|
* `bucketName` _string_: name of the bucket.
|
||||||
|
* `objectName` _string_: name of the object.
|
||||||
|
* `expiry` _time.Duration_: expiry in seconds.
|
||||||
|
* `reqParams` _url.Values_ : additional response header overrides supports _response-expires_, _response-content-type_, _response-cache-control_, _response-content-disposition_
|
||||||
|
|
||||||
|
__Example__
|
||||||
|
```go
|
||||||
|
// Set request parameters for content-disposition.
|
||||||
|
reqParams := make(url.Values)
|
||||||
|
reqParams.Set("response-content-disposition", "attachment; filename=\"your-filename.txt\"")
|
||||||
|
|
||||||
|
// Generates a presigned url which expires in a day.
|
||||||
|
presignedURL, err := s3Client.PresignedGetObject("mybucket", "photo.jpg", time.Second * 24 * 60 * 60, reqParams)
|
||||||
|
if err != nil {
|
||||||
|
fmt.Println(err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
---------------------------------------
|
||||||
|
<a name="PresignedPutObject">
|
||||||
|
#### PresignedPutObject(bucketName string, objectName string, expiry time.Duration) (string, error)
|
||||||
|
Generate a presigned URL for PUT.
|
||||||
|
<blockquote>
|
||||||
|
NOTE: you can upload to S3 only with specified object name.
|
||||||
|
</blockquote>
|
||||||
|
|
||||||
|
__Parameters__
|
||||||
|
* `bucketName` _string_: name of the bucket
|
||||||
|
* `objectName` _string_: name of the object
|
||||||
|
* `expiry` _time.Duration_: expiry in seconds
|
||||||
|
|
||||||
|
__Example__
|
||||||
|
```go
|
||||||
|
// Generates a url which expires in a day.
|
||||||
|
presignedURL, err := s3Client.PresignedPutObject("mybucket", "photo.jpg", time.Second * 24 * 60 * 60)
|
||||||
|
if err != nil {
|
||||||
|
fmt.Println(err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
---------------------------------------
|
||||||
|
<a name="PresignedPostPolicy">
|
||||||
|
#### PresignedPostPolicy(policy PostPolicy) (map[string]string, error)
|
||||||
|
PresignedPostPolicy we can provide policies specifying conditions restricting
|
||||||
|
what you want to allow in a POST request, such as bucket name where objects can be
|
||||||
|
uploaded, key name prefixes that you want to allow for the object being created and more.
|
||||||
|
|
||||||
|
We need to create our policy first:
|
||||||
|
```go
|
||||||
|
policy := minio.NewPostPolicy()
|
||||||
|
```
|
||||||
|
Apply upload policy restrictions:
|
||||||
|
```go
|
||||||
|
policy.SetBucket("my-bucketname")
|
||||||
|
policy.SetKey("my-objectname")
|
||||||
|
policy.SetExpires(time.Now().UTC().AddDate(0, 0, 10)) // expires in 10 days
|
||||||
|
|
||||||
|
// Only allow 'png' images.
|
||||||
|
policy.SetContentType("image/png")
|
||||||
|
|
||||||
|
// Only allow content size in range 1KB to 1MB.
|
||||||
|
policy.SetContentLengthRange(1024, 1024*1024)
|
||||||
|
```
|
||||||
|
Get the POST form key/value object:
|
||||||
|
```go
|
||||||
|
formData, err := s3Client.PresignedPostPolicy(policy)
|
||||||
|
if err != nil {
|
||||||
|
fmt.Println(err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
POST your content from the command line using `curl`:
|
||||||
|
```go
|
||||||
|
fmt.Printf("curl ")
|
||||||
|
for k, v := range m {
|
||||||
|
fmt.Printf("-F %s=%s ", k, v)
|
||||||
|
}
|
||||||
|
fmt.Printf("-F file=@/etc/bash.bashrc ")
|
||||||
|
fmt.Printf("https://my-bucketname.s3.amazonaws.com\n")
|
||||||
|
```
|
@@ -15,6 +15,8 @@
 - Run `go fmt`
 - Squash your commits into a single commit. `git rebase -i`. It's okay to force update your pull request.
 - Make sure `go test -race ./...` and `go build` completes.
+  NOTE: go test runs functional tests and requires you to have an AWS S3 account. Set them as the environment variables
+  ``ACCESS_KEY`` and ``SECRET_KEY``. To run a shorter version of the tests please use ``go test -short -race ./...``

 * Read [Effective Go](https://github.com/golang/go/wiki/CodeReviewComments) article from Golang project
     - `minio-go` project is strictly conformant with Golang style

@@ -71,7 +71,7 @@ export GOROOT=$(brew --prefix)/Cellar/go/${GOVERSION}/libexec
 export PATH=$PATH:${GOPATH}/bin
 ```

-##### Source the new enviornment
+##### Source the new environment

 ```sh
 $ source ~/.bash_profile

vendor/src/github.com/minio/minio-go/README.md (vendored, 15 lines changed)

@@ -61,12 +61,14 @@ func main() {

 ## Documentation

+[API documentation](./API.md)
+
+## Examples
+
 ### Bucket Operations.
-* [MakeBucket(bucketName, BucketACL, location) error](examples/s3/makebucket.go)
+* [MakeBucket(bucketName, location) error](examples/s3/makebucket.go)
 * [BucketExists(bucketName) error](examples/s3/bucketexists.go)
 * [RemoveBucket(bucketName) error](examples/s3/removebucket.go)
-* [GetBucketACL(bucketName) (BucketACL, error)](examples/s3/getbucketacl.go)
-* [SetBucketACL(bucketName, BucketACL) error)](examples/s3/setbucketacl.go)
 * [ListBuckets() []BucketInfo](examples/s3/listbuckets.go)
 * [ListObjects(bucketName, objectPrefix, recursive, chan<- struct{}) <-chan ObjectInfo](examples/s3/listobjects.go)
 * [ListIncompleteUploads(bucketName, prefix, recursive, chan<- struct{}) <-chan ObjectMultipartInfo](examples/s3/listincompleteuploads.go)

@@ -83,10 +85,15 @@ func main() {
 * [FGetObject(bucketName, objectName, filePath) error](examples/s3/fgetobject.go)

 ### Presigned Operations.
-* [PresignedGetObject(bucketName, objectName, time.Duration) (string, error)](examples/s3/presignedgetobject.go)
+* [PresignedGetObject(bucketName, objectName, time.Duration, url.Values) (string, error)](examples/s3/presignedgetobject.go)
 * [PresignedPutObject(bucketName, objectName, time.Duration) (string, error)](examples/s3/presignedputobject.go)
 * [PresignedPostPolicy(NewPostPolicy()) (map[string]string, error)](examples/s3/presignedpostpolicy.go)
+
+### Bucket Policy Operations.
+* [SetBucketPolicy(bucketName, objectPrefix, BucketPolicy) error](examples/s3/setbucketpolicy.go)
+* [GetBucketPolicy(bucketName, objectPrefix) (BucketPolicy, error)](examples/s3/getbucketpolicy.go)
+* [RemoveBucketPolicy(bucketName, objectPrefix) error](examples/s3/removebucketpolicy.go)
+
 ### API Reference

 [![GoDoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](http://godoc.org/github.com/minio/minio-go)

@@ -47,7 +47,7 @@ type ErrorResponse struct {

   // Region where the bucket is located. This header is returned
   // only in HEAD bucket and ListObjects response.
-  AmzBucketRegion string
+  Region string
 }

 // ToErrorResponse - Returns parsed ErrorResponse struct from body and

@@ -103,7 +103,7 @@ func httpRespToErrorResponse(resp *http.Response, bucketName, objectName string)
       BucketName: bucketName,
       RequestID:  resp.Header.Get("x-amz-request-id"),
       HostID:     resp.Header.Get("x-amz-id-2"),
-      AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"),
+      Region:     resp.Header.Get("x-amz-bucket-region"),
     }
   } else {
     errResp = ErrorResponse{

@@ -113,7 +113,7 @@ func httpRespToErrorResponse(resp *http.Response, bucketName, objectName string)
       Key:        objectName,
       RequestID:  resp.Header.Get("x-amz-request-id"),
       HostID:     resp.Header.Get("x-amz-id-2"),
-      AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"),
+      Region:     resp.Header.Get("x-amz-bucket-region"),
     }
   }
 case http.StatusForbidden:

@@ -124,7 +124,7 @@ func httpRespToErrorResponse(resp *http.Response, bucketName, objectName string)
     Key:        objectName,
     RequestID:  resp.Header.Get("x-amz-request-id"),
     HostID:     resp.Header.Get("x-amz-id-2"),
-    AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"),
+    Region:     resp.Header.Get("x-amz-bucket-region"),
   }
 case http.StatusConflict:
   errResp = ErrorResponse{

@@ -133,7 +133,7 @@ func httpRespToErrorResponse(resp *http.Response, bucketName, objectName string)
     BucketName: bucketName,
     RequestID:  resp.Header.Get("x-amz-request-id"),
     HostID:     resp.Header.Get("x-amz-id-2"),
-    AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"),
+    Region:     resp.Header.Get("x-amz-bucket-region"),
   }
 default:
   errResp = ErrorResponse{

@@ -142,21 +142,10 @@ func httpRespToErrorResponse(resp *http.Response, bucketName, objectName string)
       BucketName: bucketName,
       RequestID:  resp.Header.Get("x-amz-request-id"),
       HostID:     resp.Header.Get("x-amz-id-2"),
-      AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"),
+      Region:     resp.Header.Get("x-amz-bucket-region"),
     }
   }
 }

-// AccessDenied without a signature mismatch code, usually means
-// that the bucket policy has certain restrictions where some API
-// operations are not allowed. Handle this case so that top level
-// callers can interpret this easily and fall back if needed to a
-// lower functionality call. Read each individual API specific
-// code for such fallbacks.
-if errResp.Code == "AccessDenied" && errResp.Message == "Access Denied" {
-  errResp.Code = "NotImplemented"
-  errResp.Message = "Operation is not allowed according to your bucket policy."
-}
 return errResp
 }

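To illustrate the field rename on the caller side, here is a minimal sketch (not part of the commit) of inspecting a failed call with `ToErrorResponse`; it assumes the `s3Client` from the API documentation above, and the bucket name is a placeholder:

```go
// Hypothetical caller: turn a failed API call back into a structured
// ErrorResponse and read the renamed Region field.
err := s3Client.RemoveBucket("my-bucket") // placeholder bucket name
if err != nil {
	errResp := minio.ToErrorResponse(err)
	// Region replaces the old AmzBucketRegion field and carries the
	// value of the x-amz-bucket-region response header.
	fmt.Println(errResp.Code, errResp.Message, errResp.Region)
}
```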
vendor/src/github.com/minio/minio-go/api-error-response_test.go (vendored, new file, 277 lines)

@@ -0,0 +1,277 @@
/*
 * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *  http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package minio

import (
	"bytes"
	"encoding/xml"
	"fmt"
	"io/ioutil"
	"net/http"
	"reflect"
	"strconv"
	"testing"
)

// Tests validate the Error generator function for http response with error.
func TestHttpRespToErrorResponse(t *testing.T) {
	// 'genAPIErrorResponse' generates an ErrorResponse for a given APIError;
	// provides an encodable populated response value.
	genAPIErrorResponse := func(err APIError, bucketName string) ErrorResponse {
		var errResp = ErrorResponse{}
		errResp.Code = err.Code
		errResp.Message = err.Description
		errResp.BucketName = bucketName
		return errResp
	}

	// Encodes the response headers into XML format.
	encodeErr := func(response interface{}) []byte {
		var bytesBuffer bytes.Buffer
		bytesBuffer.WriteString(xml.Header)
		encode := xml.NewEncoder(&bytesBuffer)
		encode.Encode(response)
		return bytesBuffer.Bytes()
	}

	// 'createAPIErrorResponse' mocks an XML error response from the server.
	createAPIErrorResponse := func(APIErr APIError, bucketName string) *http.Response {
		// generate error response.
		// response body contains the XML error message.
		resp := &http.Response{}
		errorResponse := genAPIErrorResponse(APIErr, bucketName)
		encodedErrorResponse := encodeErr(errorResponse)
		// write Header.
		resp.StatusCode = APIErr.HTTPStatusCode
		resp.Body = ioutil.NopCloser(bytes.NewBuffer(encodedErrorResponse))

		return resp
	}

	// 'genErrResponse' constructs an error response based on the http status code.
	genErrResponse := func(resp *http.Response, code, message, bucketName, objectName string) ErrorResponse {
		errResp := ErrorResponse{
			Code:       code,
			Message:    message,
			BucketName: bucketName,
			Key:        objectName,
			RequestID:  resp.Header.Get("x-amz-request-id"),
			HostID:     resp.Header.Get("x-amz-id-2"),
			Region:     resp.Header.Get("x-amz-bucket-region"),
		}
		return errResp
	}

	// Generate invalid argument error.
	genInvalidError := func(message string) error {
		errResp := ErrorResponse{
			Code:      "InvalidArgument",
			Message:   message,
			RequestID: "minio",
		}
		return errResp
	}

	// Set common http response headers.
	setCommonHeaders := func(resp *http.Response) *http.Response {
		// set headers.
		resp.Header = make(http.Header)
		resp.Header.Set("x-amz-request-id", "xyz")
		resp.Header.Set("x-amz-id-2", "abc")
		resp.Header.Set("x-amz-bucket-region", "us-east-1")
		return resp
	}

	// Generate http response with empty body.
	// Set the StatusCode to the argument supplied.
	// Sets common headers.
	genEmptyBodyResponse := func(statusCode int) *http.Response {
		resp := &http.Response{}
		// set empty response body.
		resp.Body = ioutil.NopCloser(bytes.NewBuffer([]byte("")))
		// set headers.
		setCommonHeaders(resp)
		// set status code.
		resp.StatusCode = statusCode
		return resp
	}

	// Decode XML error message from the http response body.
	decodeXMLError := func(resp *http.Response, t *testing.T) error {
		var errResp ErrorResponse
		err := xmlDecoder(resp.Body, &errResp)
		if err != nil {
			t.Fatal("XML decoding of response body failed")
		}
		return errResp
	}

	// List of APIErrors used to generate/mock server side XML error response.
	APIErrors := []APIError{
		{
			Code:           "NoSuchBucketPolicy",
			Description:    "The specified bucket does not have a bucket policy.",
			HTTPStatusCode: http.StatusNotFound,
		},
	}

	// List of expected responses.
	// Used for asserting the actual response.
	expectedErrResponse := []error{
		genInvalidError("Response is empty. " + "Please report this issue at https://github.com/minio/minio-go/issues."),
		decodeXMLError(createAPIErrorResponse(APIErrors[0], "minio-bucket"), t),
		genErrResponse(setCommonHeaders(&http.Response{}), "NoSuchBucket", "The specified bucket does not exist.", "minio-bucket", ""),
		genErrResponse(setCommonHeaders(&http.Response{}), "NoSuchKey", "The specified key does not exist.", "minio-bucket", "Asia/"),
		genErrResponse(setCommonHeaders(&http.Response{}), "AccessDenied", "Access Denied.", "minio-bucket", ""),
		genErrResponse(setCommonHeaders(&http.Response{}), "Conflict", "Bucket not empty.", "minio-bucket", ""),
		genErrResponse(setCommonHeaders(&http.Response{}), "Bad Request", "Bad Request", "minio-bucket", ""),
	}

	// List of http responses to be used as input.
	inputResponses := []*http.Response{
		nil,
		createAPIErrorResponse(APIErrors[0], "minio-bucket"),
		genEmptyBodyResponse(http.StatusNotFound),
		genEmptyBodyResponse(http.StatusNotFound),
		genEmptyBodyResponse(http.StatusForbidden),
		genEmptyBodyResponse(http.StatusConflict),
		genEmptyBodyResponse(http.StatusBadRequest),
	}

	testCases := []struct {
		bucketName    string
		objectName    string
		inputHTTPResp *http.Response
		// expected results.
		expectedResult error
		// flag indicating whether tests should pass.
	}{
		{"minio-bucket", "", inputResponses[0], expectedErrResponse[0]},
		{"minio-bucket", "", inputResponses[1], expectedErrResponse[1]},
		{"minio-bucket", "", inputResponses[2], expectedErrResponse[2]},
		{"minio-bucket", "Asia/", inputResponses[3], expectedErrResponse[3]},
		{"minio-bucket", "", inputResponses[4], expectedErrResponse[4]},
		{"minio-bucket", "", inputResponses[5], expectedErrResponse[5]},
	}

	for i, testCase := range testCases {
		actualResult := httpRespToErrorResponse(testCase.inputHTTPResp, testCase.bucketName, testCase.objectName)
		if !reflect.DeepEqual(testCase.expectedResult, actualResult) {
			t.Errorf("Test %d: Expected result to be '%+v', but instead got '%+v'", i+1, testCase.expectedResult, actualResult)
		}
	}
}

// Test validates 'ErrEntityTooLarge' error response.
func TestErrEntityTooLarge(t *testing.T) {
	msg := fmt.Sprintf("Your proposed upload size ‘%d’ exceeds the maximum allowed object size ‘%d’ for single PUT operation.", 1000000, 99999)
	expectedResult := ErrorResponse{
		Code:       "EntityTooLarge",
		Message:    msg,
		BucketName: "minio-bucket",
		Key:        "Asia/",
	}
	actualResult := ErrEntityTooLarge(1000000, 99999, "minio-bucket", "Asia/")
	if !reflect.DeepEqual(expectedResult, actualResult) {
		t.Errorf("Expected result to be '%+v', but instead got '%+v'", expectedResult, actualResult)
	}
}

// Test validates 'ErrEntityTooSmall' error response.
func TestErrEntityTooSmall(t *testing.T) {
	msg := fmt.Sprintf("Your proposed upload size ‘%d’ is below the minimum allowed object size '0B' for single PUT operation.", -1)
	expectedResult := ErrorResponse{
		Code:       "EntityTooLarge",
		Message:    msg,
		BucketName: "minio-bucket",
		Key:        "Asia/",
	}
	actualResult := ErrEntityTooSmall(-1, "minio-bucket", "Asia/")
	if !reflect.DeepEqual(expectedResult, actualResult) {
		t.Errorf("Expected result to be '%+v', but instead got '%+v'", expectedResult, actualResult)
	}
}

// Test validates 'ErrUnexpectedEOF' error response.
func TestErrUnexpectedEOF(t *testing.T) {
	msg := fmt.Sprintf("Data read ‘%s’ is not equal to the size ‘%s’ of the input Reader.",
		strconv.FormatInt(100, 10), strconv.FormatInt(101, 10))
	expectedResult := ErrorResponse{
		Code:       "UnexpectedEOF",
		Message:    msg,
		BucketName: "minio-bucket",
		Key:        "Asia/",
	}
	actualResult := ErrUnexpectedEOF(100, 101, "minio-bucket", "Asia/")
	if !reflect.DeepEqual(expectedResult, actualResult) {
		t.Errorf("Expected result to be '%+v', but instead got '%+v'", expectedResult, actualResult)
	}
}

// Test validates 'ErrInvalidBucketName' error response.
func TestErrInvalidBucketName(t *testing.T) {
	expectedResult := ErrorResponse{
		Code:      "InvalidBucketName",
		Message:   "Invalid Bucket name",
		RequestID: "minio",
	}
	actualResult := ErrInvalidBucketName("Invalid Bucket name")
	if !reflect.DeepEqual(expectedResult, actualResult) {
		t.Errorf("Expected result to be '%+v', but instead got '%+v'", expectedResult, actualResult)
	}
}

// Test validates 'ErrInvalidObjectName' error response.
func TestErrInvalidObjectName(t *testing.T) {
	expectedResult := ErrorResponse{
		Code:      "NoSuchKey",
		Message:   "Invalid Object Key",
		RequestID: "minio",
	}
	actualResult := ErrInvalidObjectName("Invalid Object Key")
	if !reflect.DeepEqual(expectedResult, actualResult) {
		t.Errorf("Expected result to be '%+v', but instead got '%+v'", expectedResult, actualResult)
	}
}

// Test validates 'ErrInvalidParts' error response.
func TestErrInvalidParts(t *testing.T) {
	msg := fmt.Sprintf("Unexpected number of parts found Want %d, Got %d", 10, 9)
	expectedResult := ErrorResponse{
		Code:      "InvalidParts",
		Message:   msg,
		RequestID: "minio",
	}
	actualResult := ErrInvalidParts(10, 9)
	if !reflect.DeepEqual(expectedResult, actualResult) {
		t.Errorf("Expected result to be '%+v', but instead got '%+v'", expectedResult, actualResult)
	}
}

// Test validates 'ErrInvalidArgument' response.
func TestErrInvalidArgument(t *testing.T) {
	expectedResult := ErrorResponse{
		Code:      "InvalidArgument",
		Message:   "Invalid Argument",
		RequestID: "minio",
	}
	actualResult := ErrInvalidArgument("Invalid Argument")
	if !reflect.DeepEqual(expectedResult, actualResult) {
		t.Errorf("Expected result to be '%+v', but instead got '%+v'", expectedResult, actualResult)
	}
}
@@ -1,5 +1,5 @@
 /*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.

@@ -20,124 +20,12 @@ import (
   "errors"
   "fmt"
   "io"
-  "math"
   "net/http"
-  "net/url"
   "strings"
   "sync"
   "time"
 )

-// GetBucketACL - Get the permissions on an existing bucket.
-//
-// Returned values are:
-//
-//  private - Owner gets full access.
-//  public-read - Owner gets full access, others get read access.
-//  public-read-write - Owner gets full access, others get full access
-//  too.
-//  authenticated-read - Owner gets full access, authenticated users
-//  get read access.
-func (c Client) GetBucketACL(bucketName string) (BucketACL, error) {
-  // Input validation.
-  if err := isValidBucketName(bucketName); err != nil {
-    return "", err
-  }
-
-  // Set acl query.
-  urlValues := make(url.Values)
-  urlValues.Set("acl", "")
-
-  // Instantiate a new request.
-  req, err := c.newRequest("GET", requestMetadata{
-    bucketName:  bucketName,
-    queryValues: urlValues,
-  })
-  if err != nil {
-    return "", err
-  }
-
-  // Initiate the request.
-  resp, err := c.do(req)
-  defer closeResponse(resp)
-  if err != nil {
-    return "", err
-  }
-  if resp != nil {
-    if resp.StatusCode != http.StatusOK {
-      return "", httpRespToErrorResponse(resp, bucketName, "")
-    }
-  }
-
-  // Decode access control policy.
-  policy := accessControlPolicy{}
-  err = xmlDecoder(resp.Body, &policy)
-  if err != nil {
-    return "", err
-  }
-
-  // We need to avoid following de-serialization check for Google
-  // Cloud Storage. On Google Cloud Storage "private" canned ACL's
-  // policy do not have grant list. Treat it as a valid case, check
-  // for all other vendors.
-  if !isGoogleEndpoint(c.endpointURL) {
-    if policy.AccessControlList.Grant == nil {
-      errorResponse := ErrorResponse{
-        Code:            "InternalError",
-        Message:         "Access control Grant list is empty. " + reportIssue,
-        BucketName:      bucketName,
-        RequestID:       resp.Header.Get("x-amz-request-id"),
-        HostID:          resp.Header.Get("x-amz-id-2"),
-        AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"),
-      }
-      return "", errorResponse
-    }
-  }
-
-  // Boolean cues to identify the right canned acls.
-  var publicRead, publicWrite, authenticatedRead bool
-
-  // Handle grants.
-  grants := policy.AccessControlList.Grant
-  for _, g := range grants {
-    if g.Grantee.URI == "" && g.Permission == "FULL_CONTROL" {
-      continue
-    }
-    if g.Grantee.URI == "http://acs.amazonaws.com/groups/global/AuthenticatedUsers" && g.Permission == "READ" {
-      authenticatedRead = true
-      break
-    } else if g.Grantee.URI == "http://acs.amazonaws.com/groups/global/AllUsers" && g.Permission == "WRITE" {
-      publicWrite = true
-    } else if g.Grantee.URI == "http://acs.amazonaws.com/groups/global/AllUsers" && g.Permission == "READ" {
-      publicRead = true
-    }
-  }
-
-  // Verify if acl is authenticated read.
-  if authenticatedRead {
-    return BucketACL("authenticated-read"), nil
-  }
-  // Verify if acl is private.
-  if !publicWrite && !publicRead {
-    return BucketACL("private"), nil
-  }
-  // Verify if acl is public-read.
-  if !publicWrite && publicRead {
-    return BucketACL("public-read"), nil
-  }
-  // Verify if acl is public-read-write.
-  if publicRead && publicWrite {
-    return BucketACL("public-read-write"), nil
-  }
-
-  return "", ErrorResponse{
-    Code:       "NoSuchBucketPolicy",
-    Message:    "The specified bucket does not have a bucket policy.",
-    BucketName: bucketName,
-    RequestID:  "minio",
-  }
-}
-
 // GetObject - returns a seekable, readable object.
 func (c Client) GetObject(bucketName, objectName string) (*Object, error) {
   // Input validation.

@@ -147,8 +35,9 @@ func (c Client) GetObject(bucketName, objectName string) (*Object, error) {
   if err := isValidObjectName(objectName); err != nil {
     return nil, err
   }
-  // Send an explicit info to get the actual object size.
-  objectInfo, err := c.StatObject(bucketName, objectName)
+  // Start the request as soon as Get is initiated.
+  httpReader, objectInfo, err := c.getObject(bucketName, objectName, 0, 0)
   if err != nil {
     return nil, err
   }

@@ -160,8 +49,7 @@ func (c Client) GetObject(bucketName, objectName string) (*Object, error) {
   // Create done channel.
   doneCh := make(chan struct{})

-  // This routine feeds partial object data as and when the caller
-  // reads.
+  // This routine feeds partial object data as and when the caller reads.
   go func() {
     defer close(reqCh)
     defer close(resCh)

@@ -174,23 +62,27 @@ func (c Client) GetObject(bucketName, objectName string) (*Object, error) {
       return
     // Request message.
     case req := <-reqCh:
-      // Get shortest length.
-      // NOTE: Last remaining bytes are usually smaller than
-      // req.Buffer size. Use that as the final length.
-      length := math.Min(float64(len(req.Buffer)), float64(objectInfo.Size-req.Offset))
-      httpReader, _, err := c.getObject(bucketName, objectName, req.Offset, int64(length))
+      // Offset changes fetch the new object at an Offset.
+      if req.DidOffsetChange {
+        // Read from offset.
+        httpReader, _, err = c.getObject(bucketName, objectName, req.Offset, 0)
       if err != nil {
         resCh <- readResponse{
           Error: err,
         }
         return
       }
+      }
+
+      // Read at least req.Buffer bytes, if not we have
+      // reached our EOF.
       size, err := io.ReadFull(httpReader, req.Buffer)
       if err == io.ErrUnexpectedEOF {
         // If an EOF happens after reading some but not
        // all the bytes ReadFull returns ErrUnexpectedEOF
         err = io.EOF
       }
+      // Reply back how much was read.
       resCh <- readResponse{
         Size:  int(size),
         Error: err,

@@ -213,6 +105,7 @@ type readResponse struct {
 type readRequest struct {
   Buffer []byte
   Offset int64 // readAt offset.
+  DidOffsetChange bool
 }

 // Object represents an open object. It implements Read, ReadAt,

@@ -225,6 +118,7 @@ type Object struct {
   reqCh      chan<- readRequest
   resCh      <-chan readResponse
   doneCh     chan<- struct{}
+  prevOffset int64
   currOffset int64
   objectInfo ObjectInfo

@@ -247,7 +141,7 @@ func (o *Object) Read(b []byte) (n int, err error) {
   o.mutex.Lock()
   defer o.mutex.Unlock()

-  // Previous prevErr is which was saved in previous operation.
+  // prevErr is the previous error saved from the previous operation.
   if o.prevErr != nil || o.isClosed {
     return 0, o.prevErr
   }

@@ -257,13 +151,27 @@ func (o *Object) Read(b []byte) (n int, err error) {
     return 0, io.EOF
   }

-  // Send current information over control channel to indicate we
-  // are ready.
+  // Send current information over control channel to indicate we are ready.
   reqMsg := readRequest{}
-  // Send the offset and pointer to the buffer over the channel.
+  // Send the pointer to the buffer over the channel.
   reqMsg.Buffer = b
+
+  // Verify if offset has changed and currOffset is greater than
+  // previous offset. Perhaps due to Seek().
+  offsetChange := o.prevOffset - o.currOffset
+  if offsetChange < 0 {
+    offsetChange = -offsetChange
+  }
+  if offsetChange > 0 {
+    // Fetch the new reader at the current offset again.
     reqMsg.Offset = o.currOffset
+    reqMsg.DidOffsetChange = true
+  } else {
+    // No offset changes no need to fetch new reader, continue
+    // reading.
+    reqMsg.DidOffsetChange = false
+    reqMsg.Offset = 0
+  }

   // Send read request over the control channel.
   o.reqCh <- reqMsg

@@ -277,6 +185,9 @@ func (o *Object) Read(b []byte) (n int, err error) {
   // Update current offset.
   o.currOffset += bytesRead

+  // Save the current offset as previous offset.
+  o.prevOffset = o.currOffset
+
   if dataMsg.Error == nil {
     // If currOffset read is equal to objectSize
     // We have reached end of file, we return io.EOF.

@@ -320,7 +231,7 @@ func (o *Object) ReadAt(b []byte, offset int64) (n int, err error) {
   o.mutex.Lock()
   defer o.mutex.Unlock()

-  // prevErr is which was saved in previous operation.
+  // prevErr is the error which was saved in the previous operation.
   if o.prevErr != nil || o.isClosed {
     return 0, o.prevErr
   }

@@ -337,7 +248,16 @@ func (o *Object) ReadAt(b []byte, offset int64) (n int, err error) {

   // Send the offset and pointer to the buffer over the channel.
   reqMsg.Buffer = b
+
+  // For ReadAt offset always changes, minor optimization where
+  // offset same as currOffset we don't change the offset.
+  reqMsg.DidOffsetChange = offset != o.currOffset
+  if reqMsg.DidOffsetChange {
+    // Set new offset.
     reqMsg.Offset = offset
+    // Save new offset as current offset.
+    o.currOffset = offset
+  }

   // Send read request over the control channel.
   o.reqCh <- reqMsg

@@ -348,10 +268,16 @@ func (o *Object) ReadAt(b []byte, offset int64) (n int, err error) {
   // Bytes read.
   bytesRead := int64(dataMsg.Size)

+  // Update current offset.
+  o.currOffset += bytesRead
+
+  // Save current offset as previous offset before returning.
+  o.prevOffset = o.currOffset
+
   if dataMsg.Error == nil {
-    // If offset+bytes read is equal to objectSize
+    // If currentOffset is equal to objectSize
     // we have reached end of file, we return io.EOF.
-    if offset+bytesRead == o.objectInfo.Size {
+    if o.currOffset >= o.objectInfo.Size {
       return dataMsg.Size, io.EOF
     }
     return dataMsg.Size, nil
|
||||||
@ -381,7 +307,7 @@ func (o *Object) Seek(offset int64, whence int) (n int64, err error) {
|
|||||||
defer o.mutex.Unlock()
|
defer o.mutex.Unlock()
|
||||||
|
|
||||||
if o.prevErr != nil {
|
if o.prevErr != nil {
|
||||||
// At EOF seeking is legal, for any other errors we return.
|
// At EOF seeking is legal allow only io.EOF, for any other errors we return.
|
||||||
if o.prevErr != io.EOF {
|
if o.prevErr != io.EOF {
|
||||||
return 0, o.prevErr
|
return 0, o.prevErr
|
||||||
}
|
}
|
||||||
@ -391,6 +317,11 @@ func (o *Object) Seek(offset int64, whence int) (n int64, err error) {
|
|||||||
if offset < 0 && whence != 2 {
|
if offset < 0 && whence != 2 {
|
||||||
return 0, ErrInvalidArgument(fmt.Sprintf("Negative position not allowed for %d.", whence))
|
return 0, ErrInvalidArgument(fmt.Sprintf("Negative position not allowed for %d.", whence))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Save current offset as previous offset.
|
||||||
|
o.prevOffset = o.currOffset
|
||||||
|
|
||||||
|
// Switch through whence.
|
||||||
switch whence {
|
switch whence {
|
||||||
default:
|
default:
|
||||||
return 0, ErrInvalidArgument(fmt.Sprintf("Invalid whence %d", whence))
|
return 0, ErrInvalidArgument(fmt.Sprintf("Invalid whence %d", whence))
|
||||||
@ -484,8 +415,8 @@ func (c Client) getObject(bucketName, objectName string, offset, length int64) (
|
|||||||
customHeader.Set("Range", fmt.Sprintf("bytes=%d", length))
|
customHeader.Set("Range", fmt.Sprintf("bytes=%d", length))
|
||||||
}
|
}
|
||||||
|
|
||||||
// Instantiate a new request.
|
// Execute GET on objectName.
|
||||||
req, err := c.newRequest("GET", requestMetadata{
|
resp, err := c.executeMethod("GET", requestMetadata{
|
||||||
bucketName: bucketName,
|
bucketName: bucketName,
|
||||||
objectName: objectName,
|
objectName: objectName,
|
||||||
customHeader: customHeader,
|
customHeader: customHeader,
|
||||||
@ -493,11 +424,6 @@ func (c Client) getObject(bucketName, objectName string, offset, length int64) (
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, ObjectInfo{}, err
|
return nil, ObjectInfo{}, err
|
||||||
}
|
}
|
||||||
// Execute the request.
|
|
||||||
resp, err := c.do(req)
|
|
||||||
if err != nil {
|
|
||||||
return nil, ObjectInfo{}, err
|
|
||||||
}
|
|
||||||
if resp != nil {
|
if resp != nil {
|
||||||
if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusPartialContent {
|
if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusPartialContent {
|
||||||
return nil, ObjectInfo{}, httpRespToErrorResponse(resp, bucketName, objectName)
|
return nil, ObjectInfo{}, httpRespToErrorResponse(resp, bucketName, objectName)
|
||||||
@ -517,7 +443,7 @@ func (c Client) getObject(bucketName, objectName string, offset, length int64) (
|
|||||||
Message: msg,
|
Message: msg,
|
||||||
RequestID: resp.Header.Get("x-amz-request-id"),
|
RequestID: resp.Header.Get("x-amz-request-id"),
|
||||||
HostID: resp.Header.Get("x-amz-id-2"),
|
HostID: resp.Header.Get("x-amz-id-2"),
|
||||||
AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"),
|
Region: resp.Header.Get("x-amz-bucket-region"),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// Get content-type.
|
// Get content-type.
|
92
vendor/src/github.com/minio/minio-go/api-get-policy.go
vendored
Normal file
92
vendor/src/github.com/minio/minio-go/api-get-policy.go
vendored
Normal file
@ -0,0 +1,92 @@
|
|||||||
|
/*
|
||||||
|
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package minio
|
||||||
|
|
||||||
|
import (
|
||||||
|
"io"
|
||||||
|
"io/ioutil"
|
||||||
|
"net/http"
|
||||||
|
"net/url"
|
||||||
|
"sort"
|
||||||
|
)
|
||||||
|
|
||||||
|
// GetBucketPolicy - get bucket policy at a given path.
|
||||||
|
func (c Client) GetBucketPolicy(bucketName, objectPrefix string) (bucketPolicy BucketPolicy, err error) {
|
||||||
|
// Input validation.
|
||||||
|
if err := isValidBucketName(bucketName); err != nil {
|
||||||
|
return BucketPolicyNone, err
|
||||||
|
}
|
||||||
|
if err := isValidObjectPrefix(objectPrefix); err != nil {
|
||||||
|
return BucketPolicyNone, err
|
||||||
|
}
|
||||||
|
policy, err := c.getBucketPolicy(bucketName, objectPrefix)
|
||||||
|
if err != nil {
|
||||||
|
return BucketPolicyNone, err
|
||||||
|
}
|
||||||
|
return identifyPolicyType(policy, bucketName, objectPrefix), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Request server for policy.
|
||||||
|
func (c Client) getBucketPolicy(bucketName string, objectPrefix string) (BucketAccessPolicy, error) {
|
||||||
|
// Get resources properly escaped and lined up before
|
||||||
|
// using them in http request.
|
||||||
|
urlValues := make(url.Values)
|
||||||
|
urlValues.Set("policy", "")
|
||||||
|
|
||||||
|
// Execute GET on bucket to list objects.
|
||||||
|
resp, err := c.executeMethod("GET", requestMetadata{
|
||||||
|
bucketName: bucketName,
|
||||||
|
queryValues: urlValues,
|
||||||
|
})
|
||||||
|
|
||||||
|
defer closeResponse(resp)
|
||||||
|
if err != nil {
|
||||||
|
return BucketAccessPolicy{}, err
|
||||||
|
}
|
||||||
|
return processBucketPolicyResponse(bucketName, resp)
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
// processes the GetPolicy http response from the server.
|
||||||
|
func processBucketPolicyResponse(bucketName string, resp *http.Response) (BucketAccessPolicy, error) {
|
||||||
|
if resp != nil {
|
||||||
|
if resp.StatusCode != http.StatusOK {
|
||||||
|
errResponse := httpRespToErrorResponse(resp, bucketName, "")
|
||||||
|
if ToErrorResponse(errResponse).Code == "NoSuchBucketPolicy" {
|
||||||
|
return BucketAccessPolicy{Version: "2012-10-17"}, nil
|
||||||
|
}
|
||||||
|
return BucketAccessPolicy{}, errResponse
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Read access policy up to maxAccessPolicySize.
|
||||||
|
// http://docs.aws.amazon.com/AmazonS3/latest/dev/access-policy-language-overview.html
|
||||||
|
// bucket policies are limited to 20KB in size, using a limit reader.
|
||||||
|
bucketPolicyBuf, err := ioutil.ReadAll(io.LimitReader(resp.Body, maxAccessPolicySize))
|
||||||
|
if err != nil {
|
||||||
|
return BucketAccessPolicy{}, err
|
||||||
|
}
|
||||||
|
policy, err := unMarshalBucketPolicy(bucketPolicyBuf)
|
||||||
|
if err != nil {
|
||||||
|
return BucketAccessPolicy{}, err
|
||||||
|
}
|
||||||
|
// Sort the policy actions and resources for convenience.
|
||||||
|
for _, statement := range policy.Statements {
|
||||||
|
sort.Strings(statement.Actions)
|
||||||
|
sort.Strings(statement.Resources)
|
||||||
|
}
|
||||||
|
return policy, nil
|
||||||
|
}
|
102
vendor/src/github.com/minio/minio-go/api-get-policy_test.go
vendored
Normal file
102
vendor/src/github.com/minio/minio-go/api-get-policy_test.go
vendored
Normal file
@ -0,0 +1,102 @@
|
|||||||
|
/*
|
||||||
|
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package minio
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/json"
|
||||||
|
"io/ioutil"
|
||||||
|
"net/http"
|
||||||
|
"reflect"
|
||||||
|
"testing"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Mocks valid http response containing bucket policy from server.
|
||||||
|
func generatePolicyResponse(resp *http.Response, policy BucketAccessPolicy) (*http.Response, error) {
|
||||||
|
policyBytes, err := json.Marshal(policy)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
resp.StatusCode = http.StatusOK
|
||||||
|
resp.Body = ioutil.NopCloser(bytes.NewBuffer(policyBytes))
|
||||||
|
return resp, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Tests the processing of GetPolicy response from server.
|
||||||
|
func TestProcessBucketPolicyResopnse(t *testing.T) {
|
||||||
|
bucketAccesPolicies := []BucketAccessPolicy{
|
||||||
|
{Version: "1.0"},
|
||||||
|
{Version: "1.0", Statements: setReadOnlyStatement("minio-bucket", "")},
|
||||||
|
{Version: "1.0", Statements: setReadWriteStatement("minio-bucket", "Asia/")},
|
||||||
|
{Version: "1.0", Statements: setWriteOnlyStatement("minio-bucket", "Asia/India/")},
|
||||||
|
}
|
||||||
|
|
||||||
|
APIErrors := []APIError{
|
||||||
|
{
|
||||||
|
Code: "NoSuchBucketPolicy",
|
||||||
|
Description: "The specified bucket does not have a bucket policy.",
|
||||||
|
HTTPStatusCode: http.StatusNotFound,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
testCases := []struct {
|
||||||
|
bucketName string
|
||||||
|
isAPIError bool
|
||||||
|
apiErr APIError
|
||||||
|
// expected results.
|
||||||
|
expectedResult BucketAccessPolicy
|
||||||
|
err error
|
||||||
|
// flag indicating whether tests should pass.
|
||||||
|
shouldPass bool
|
||||||
|
}{
|
||||||
|
{"my-bucket", true, APIErrors[0], BucketAccessPolicy{Version: "2012-10-17"}, nil, true},
|
||||||
|
{"my-bucket", false, APIError{}, bucketAccesPolicies[0], nil, true},
|
||||||
|
{"my-bucket", false, APIError{}, bucketAccesPolicies[1], nil, true},
|
||||||
|
{"my-bucket", false, APIError{}, bucketAccesPolicies[2], nil, true},
|
||||||
|
{"my-bucket", false, APIError{}, bucketAccesPolicies[3], nil, true},
|
||||||
|
}
|
||||||
|
|
||||||
|
for i, testCase := range testCases {
|
||||||
|
inputResponse := &http.Response{}
|
||||||
|
var err error
|
||||||
|
if testCase.isAPIError {
|
||||||
|
inputResponse = generateErrorResponse(inputResponse, testCase.apiErr, testCase.bucketName)
|
||||||
|
} else {
|
||||||
|
inputResponse, err = generatePolicyResponse(inputResponse, testCase.expectedResult)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Test %d: Creation of valid response failed", i+1)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
actualResult, err := processBucketPolicyResponse("my-bucket", inputResponse)
|
||||||
|
if err != nil && testCase.shouldPass {
|
||||||
|
t.Errorf("Test %d: Expected to pass, but failed with: <ERROR> %s", i+1, err.Error())
|
||||||
|
}
|
||||||
|
if err == nil && !testCase.shouldPass {
|
||||||
|
t.Errorf("Test %d: Expected to fail with <ERROR> \"%s\", but passed instead", i+1, testCase.err.Error())
|
||||||
|
}
|
||||||
|
// Failed as expected, but does it fail for the expected reason.
|
||||||
|
if err != nil && !testCase.shouldPass {
|
||||||
|
if err.Error() != testCase.err.Error() {
|
||||||
|
t.Errorf("Test %d: Expected to fail with error \"%s\", but instead failed with error \"%s\" instead", i+1, testCase.err.Error(), err.Error())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if err == nil && testCase.shouldPass {
|
||||||
|
if !reflect.DeepEqual(testCase.expectedResult, actualResult) {
|
||||||
|
t.Errorf("Test %d: The expected BucketPolicy doesnt match the actual BucketPolicy", i+1)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
49
vendor/src/github.com/minio/minio-go/api-list.go
vendored
49
vendor/src/github.com/minio/minio-go/api-list.go
vendored
@ -1,5 +1,5 @@
|
|||||||
/*
|
/*
|
||||||
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
|
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
|
||||||
*
|
*
|
||||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
* you may not use this file except in compliance with the License.
|
* you may not use this file except in compliance with the License.
|
||||||
@ -34,13 +34,8 @@ import (
|
|||||||
// }
|
// }
|
||||||
//
|
//
|
||||||
func (c Client) ListBuckets() ([]BucketInfo, error) {
|
func (c Client) ListBuckets() ([]BucketInfo, error) {
|
||||||
// Instantiate a new request.
|
// Execute GET on service.
|
||||||
req, err := c.newRequest("GET", requestMetadata{})
|
resp, err := c.executeMethod("GET", requestMetadata{})
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
// Initiate the request.
|
|
||||||
resp, err := c.do(req)
|
|
||||||
defer closeResponse(resp)
|
defer closeResponse(resp)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
@ -82,7 +77,7 @@ func (c Client) ListBuckets() ([]BucketInfo, error) {
|
|||||||
//
|
//
|
||||||
func (c Client) ListObjects(bucketName, objectPrefix string, recursive bool, doneCh <-chan struct{}) <-chan ObjectInfo {
|
func (c Client) ListObjects(bucketName, objectPrefix string, recursive bool, doneCh <-chan struct{}) <-chan ObjectInfo {
|
||||||
// Allocate new list objects channel.
|
// Allocate new list objects channel.
|
||||||
objectStatCh := make(chan ObjectInfo, 1000)
|
objectStatCh := make(chan ObjectInfo)
|
||||||
// Default listing is delimited at "/"
|
// Default listing is delimited at "/"
|
||||||
delimiter := "/"
|
delimiter := "/"
|
||||||
if recursive {
|
if recursive {
|
||||||
@ -188,11 +183,11 @@ func (c Client) listObjectsQuery(bucketName, objectPrefix, objectMarker, delimit
|
|||||||
urlValues := make(url.Values)
|
urlValues := make(url.Values)
|
||||||
// Set object prefix.
|
// Set object prefix.
|
||||||
if objectPrefix != "" {
|
if objectPrefix != "" {
|
||||||
urlValues.Set("prefix", urlEncodePath(objectPrefix))
|
urlValues.Set("prefix", objectPrefix)
|
||||||
}
|
}
|
||||||
// Set object marker.
|
// Set object marker.
|
||||||
if objectMarker != "" {
|
if objectMarker != "" {
|
||||||
urlValues.Set("marker", urlEncodePath(objectMarker))
|
urlValues.Set("marker", objectMarker)
|
||||||
}
|
}
|
||||||
// Set delimiter.
|
// Set delimiter.
|
||||||
if delimiter != "" {
|
if delimiter != "" {
|
||||||
@ -206,16 +201,11 @@ func (c Client) listObjectsQuery(bucketName, objectPrefix, objectMarker, delimit
|
|||||||
// Set max keys.
|
// Set max keys.
|
||||||
urlValues.Set("max-keys", fmt.Sprintf("%d", maxkeys))
|
urlValues.Set("max-keys", fmt.Sprintf("%d", maxkeys))
|
||||||
|
|
||||||
// Initialize a new request.
|
// Execute GET on bucket to list objects.
|
||||||
req, err := c.newRequest("GET", requestMetadata{
|
resp, err := c.executeMethod("GET", requestMetadata{
|
||||||
bucketName: bucketName,
|
bucketName: bucketName,
|
||||||
queryValues: urlValues,
|
queryValues: urlValues,
|
||||||
})
|
})
|
||||||
if err != nil {
|
|
||||||
return listBucketResult{}, err
|
|
||||||
}
|
|
||||||
// Execute list buckets.
|
|
||||||
resp, err := c.do(req)
|
|
||||||
defer closeResponse(resp)
|
defer closeResponse(resp)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return listBucketResult{}, err
|
return listBucketResult{}, err
|
||||||
@ -264,7 +254,7 @@ func (c Client) ListIncompleteUploads(bucketName, objectPrefix string, recursive
|
|||||||
// listIncompleteUploads lists all incomplete uploads.
|
// listIncompleteUploads lists all incomplete uploads.
|
||||||
func (c Client) listIncompleteUploads(bucketName, objectPrefix string, recursive, aggregateSize bool, doneCh <-chan struct{}) <-chan ObjectMultipartInfo {
|
func (c Client) listIncompleteUploads(bucketName, objectPrefix string, recursive, aggregateSize bool, doneCh <-chan struct{}) <-chan ObjectMultipartInfo {
|
||||||
// Allocate channel for multipart uploads.
|
// Allocate channel for multipart uploads.
|
||||||
objectMultipartStatCh := make(chan ObjectMultipartInfo, 1000)
|
objectMultipartStatCh := make(chan ObjectMultipartInfo)
|
||||||
// Delimiter is set to "/" by default.
|
// Delimiter is set to "/" by default.
|
||||||
delimiter := "/"
|
delimiter := "/"
|
||||||
if recursive {
|
if recursive {
|
||||||
@ -366,7 +356,7 @@ func (c Client) listMultipartUploadsQuery(bucketName, keyMarker, uploadIDMarker,
|
|||||||
urlValues.Set("uploads", "")
|
urlValues.Set("uploads", "")
|
||||||
// Set object key marker.
|
// Set object key marker.
|
||||||
if keyMarker != "" {
|
if keyMarker != "" {
|
||||||
urlValues.Set("key-marker", urlEncodePath(keyMarker))
|
urlValues.Set("key-marker", keyMarker)
|
||||||
}
|
}
|
||||||
// Set upload id marker.
|
// Set upload id marker.
|
||||||
if uploadIDMarker != "" {
|
if uploadIDMarker != "" {
|
||||||
@ -374,7 +364,7 @@ func (c Client) listMultipartUploadsQuery(bucketName, keyMarker, uploadIDMarker,
|
|||||||
}
|
}
|
||||||
// Set prefix marker.
|
// Set prefix marker.
|
||||||
if prefix != "" {
|
if prefix != "" {
|
||||||
urlValues.Set("prefix", urlEncodePath(prefix))
|
urlValues.Set("prefix", prefix)
|
||||||
}
|
}
|
||||||
// Set delimiter.
|
// Set delimiter.
|
||||||
if delimiter != "" {
|
if delimiter != "" {
|
||||||
@ -388,16 +378,11 @@ func (c Client) listMultipartUploadsQuery(bucketName, keyMarker, uploadIDMarker,
|
|||||||
// Set max-uploads.
|
// Set max-uploads.
|
||||||
urlValues.Set("max-uploads", fmt.Sprintf("%d", maxUploads))
|
urlValues.Set("max-uploads", fmt.Sprintf("%d", maxUploads))
|
||||||
|
|
||||||
// Instantiate a new request.
|
// Execute GET on bucketName to list multipart uploads.
|
||||||
req, err := c.newRequest("GET", requestMetadata{
|
resp, err := c.executeMethod("GET", requestMetadata{
|
||||||
bucketName: bucketName,
|
bucketName: bucketName,
|
||||||
queryValues: urlValues,
|
queryValues: urlValues,
|
||||||
})
|
})
|
||||||
if err != nil {
|
|
||||||
return listMultipartUploadsResult{}, err
|
|
||||||
}
|
|
||||||
// Execute list multipart uploads request.
|
|
||||||
resp, err := c.do(req)
|
|
||||||
defer closeResponse(resp)
|
defer closeResponse(resp)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return listMultipartUploadsResult{}, err
|
return listMultipartUploadsResult{}, err
|
||||||
@ -510,16 +495,12 @@ func (c Client) listObjectPartsQuery(bucketName, objectName, uploadID string, pa
|
|||||||
// Set max parts.
|
// Set max parts.
|
||||||
urlValues.Set("max-parts", fmt.Sprintf("%d", maxParts))
|
urlValues.Set("max-parts", fmt.Sprintf("%d", maxParts))
|
||||||
|
|
||||||
req, err := c.newRequest("GET", requestMetadata{
|
// Execute GET on objectName to get list of parts.
|
||||||
|
resp, err := c.executeMethod("GET", requestMetadata{
|
||||||
bucketName: bucketName,
|
bucketName: bucketName,
|
||||||
objectName: objectName,
|
objectName: objectName,
|
||||||
queryValues: urlValues,
|
queryValues: urlValues,
|
||||||
})
|
})
|
||||||
if err != nil {
|
|
||||||
return listObjectPartsResult{}, err
|
|
||||||
}
|
|
||||||
// Exectue list object parts.
|
|
||||||
resp, err := c.do(req)
|
|
||||||
defer closeResponse(resp)
|
defer closeResponse(resp)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return listObjectPartsResult{}, err
|
return listObjectPartsResult{}, err
|
||||||
|
@ -1,5 +1,5 @@
|
|||||||
/*
|
/*
|
||||||
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
|
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
|
||||||
*
|
*
|
||||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
* you may not use this file except in compliance with the License.
|
* you may not use this file except in compliance with the License.
|
||||||
@ -18,13 +18,26 @@ package minio
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"errors"
|
"errors"
|
||||||
|
"net/url"
|
||||||
"time"
|
"time"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// supportedGetReqParams - supported request parameters for GET
|
||||||
|
// presigned request.
|
||||||
|
var supportedGetReqParams = map[string]struct{}{
|
||||||
|
"response-expires": {},
|
||||||
|
"response-content-type": {},
|
||||||
|
"response-cache-control": {},
|
||||||
|
"response-content-disposition": {},
|
||||||
|
}
|
||||||
|
|
||||||
// presignURL - Returns a presigned URL for an input 'method'.
|
// presignURL - Returns a presigned URL for an input 'method'.
|
||||||
// Expires maximum is 7days - ie. 604800 and minimum is 1.
|
// Expires maximum is 7days - ie. 604800 and minimum is 1.
|
||||||
func (c Client) presignURL(method string, bucketName string, objectName string, expires time.Duration) (url string, err error) {
|
func (c Client) presignURL(method string, bucketName string, objectName string, expires time.Duration, reqParams url.Values) (urlStr string, err error) {
|
||||||
// Input validation.
|
// Input validation.
|
||||||
|
if method == "" {
|
||||||
|
return "", ErrInvalidArgument("method cannot be empty.")
|
||||||
|
}
|
||||||
if err := isValidBucketName(bucketName); err != nil {
|
if err := isValidBucketName(bucketName); err != nil {
|
||||||
return "", err
|
return "", err
|
||||||
}
|
}
|
||||||
@ -35,35 +48,50 @@ func (c Client) presignURL(method string, bucketName string, objectName string,
|
|||||||
return "", err
|
return "", err
|
||||||
}
|
}
|
||||||
|
|
||||||
if method == "" {
|
// Convert expires into seconds.
|
||||||
return "", ErrInvalidArgument("method cannot be empty.")
|
|
||||||
}
|
|
||||||
|
|
||||||
expireSeconds := int64(expires / time.Second)
|
expireSeconds := int64(expires / time.Second)
|
||||||
// Instantiate a new request.
|
reqMetadata := requestMetadata{
|
||||||
// Since expires is set newRequest will presign the request.
|
|
||||||
req, err := c.newRequest(method, requestMetadata{
|
|
||||||
presignURL: true,
|
presignURL: true,
|
||||||
bucketName: bucketName,
|
bucketName: bucketName,
|
||||||
objectName: objectName,
|
objectName: objectName,
|
||||||
expires: expireSeconds,
|
expires: expireSeconds,
|
||||||
})
|
}
|
||||||
|
|
||||||
|
// For "GET" we are handling additional request parameters to
|
||||||
|
// override its response headers.
|
||||||
|
if method == "GET" {
|
||||||
|
// Verify if input map has unsupported params, if yes exit.
|
||||||
|
for k := range reqParams {
|
||||||
|
if _, ok := supportedGetReqParams[k]; !ok {
|
||||||
|
return "", ErrInvalidArgument(k + " unsupported request parameter for presigned GET.")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Save the request parameters to be used in presigning for
|
||||||
|
// GET request.
|
||||||
|
reqMetadata.queryValues = reqParams
|
||||||
|
}
|
||||||
|
|
||||||
|
// Instantiate a new request.
|
||||||
|
// Since expires is set newRequest will presign the request.
|
||||||
|
req, err := c.newRequest(method, reqMetadata)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", err
|
return "", err
|
||||||
}
|
}
|
||||||
return req.URL.String(), nil
|
return req.URL.String(), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// PresignedGetObject - Returns a presigned URL to access an object without credentials.
|
// PresignedGetObject - Returns a presigned URL to access an object
|
||||||
// Expires maximum is 7days - ie. 604800 and minimum is 1.
|
// without credentials. Expires maximum is 7days - ie. 604800 and
|
||||||
func (c Client) PresignedGetObject(bucketName string, objectName string, expires time.Duration) (url string, err error) {
|
// minimum is 1. Additionally you can override a set of response
|
||||||
return c.presignURL("GET", bucketName, objectName, expires)
|
// headers using the query parameters.
|
||||||
|
func (c Client) PresignedGetObject(bucketName string, objectName string, expires time.Duration, reqParams url.Values) (url string, err error) {
|
||||||
|
return c.presignURL("GET", bucketName, objectName, expires, reqParams)
|
||||||
}
|
}
|
||||||
|
|
||||||
// PresignedPutObject - Returns a presigned URL to upload an object without credentials.
|
// PresignedPutObject - Returns a presigned URL to upload an object without credentials.
|
||||||
// Expires maximum is 7days - ie. 604800 and minimum is 1.
|
// Expires maximum is 7days - ie. 604800 and minimum is 1.
|
||||||
func (c Client) PresignedPutObject(bucketName string, objectName string, expires time.Duration) (url string, err error) {
|
func (c Client) PresignedPutObject(bucketName string, objectName string, expires time.Duration) (url string, err error) {
|
||||||
return c.presignURL("PUT", bucketName, objectName, expires)
|
return c.presignURL("PUT", bucketName, objectName, expires, nil)
|
||||||
}
|
}
|
||||||
|
|
||||||
// PresignedPostPolicy - Returns POST form data to upload an object at a location.
|
// PresignedPostPolicy - Returns POST form data to upload an object at a location.
|
||||||
|
@ -1,5 +1,5 @@
|
|||||||
/*
|
/*
|
||||||
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
|
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
|
||||||
*
|
*
|
||||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
* you may not use this file except in compliance with the License.
|
* you may not use this file except in compliance with the License.
|
||||||
@ -18,8 +18,11 @@ package minio
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
|
"encoding/base64"
|
||||||
"encoding/hex"
|
"encoding/hex"
|
||||||
|
"encoding/json"
|
||||||
"encoding/xml"
|
"encoding/xml"
|
||||||
|
"fmt"
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
"net/http"
|
"net/http"
|
||||||
"net/url"
|
"net/url"
|
||||||
@ -27,28 +30,18 @@ import (
|
|||||||
|
|
||||||
/// Bucket operations
|
/// Bucket operations
|
||||||
|
|
||||||
// MakeBucket makes a new bucket.
|
// MakeBucket creates a new bucket with bucketName.
|
||||||
//
|
//
|
||||||
// Optional arguments are acl and location - by default all buckets are created
|
// Location is an optional argument, by default all buckets are
|
||||||
// with ``private`` acl and in US Standard region.
|
// created in US Standard Region.
|
||||||
//
|
|
||||||
// ACL valid values - http://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html
|
|
||||||
//
|
|
||||||
// private - owner gets full access [default].
|
|
||||||
// public-read - owner gets full access, all others get read access.
|
|
||||||
// public-read-write - owner gets full access, all others get full access too.
|
|
||||||
// authenticated-read - owner gets full access, authenticated users get read access.
|
|
||||||
//
|
//
|
||||||
// For Amazon S3 for more supported regions - http://docs.aws.amazon.com/general/latest/gr/rande.html
|
// For Amazon S3 for more supported regions - http://docs.aws.amazon.com/general/latest/gr/rande.html
|
||||||
// For Google Cloud Storage for more supported regions - https://cloud.google.com/storage/docs/bucket-locations
|
// For Google Cloud Storage for more supported regions - https://cloud.google.com/storage/docs/bucket-locations
|
||||||
func (c Client) MakeBucket(bucketName string, acl BucketACL, location string) error {
|
func (c Client) MakeBucket(bucketName string, location string) error {
|
||||||
// Validate the input arguments.
|
// Validate the input arguments.
|
||||||
if err := isValidBucketName(bucketName); err != nil {
|
if err := isValidBucketName(bucketName); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if !acl.isValidBucketACL() {
|
|
||||||
return ErrInvalidArgument("Unrecognized ACL " + acl.String())
|
|
||||||
}
|
|
||||||
|
|
||||||
// If location is empty, treat is a default region 'us-east-1'.
|
// If location is empty, treat is a default region 'us-east-1'.
|
||||||
if location == "" {
|
if location == "" {
|
||||||
@ -56,7 +49,7 @@ func (c Client) MakeBucket(bucketName string, acl BucketACL, location string) er
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Instantiate the request.
|
// Instantiate the request.
|
||||||
req, err := c.makeBucketRequest(bucketName, acl, location)
|
req, err := c.makeBucketRequest(bucketName, location)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@ -74,7 +67,7 @@ func (c Client) MakeBucket(bucketName string, acl BucketACL, location string) er
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Save the location into cache on a successfull makeBucket response.
|
// Save the location into cache on a successful makeBucket response.
|
||||||
c.bucketLocCache.Set(bucketName, location)
|
c.bucketLocCache.Set(bucketName, location)
|
||||||
|
|
||||||
// Return.
|
// Return.
|
||||||
@ -82,14 +75,11 @@ func (c Client) MakeBucket(bucketName string, acl BucketACL, location string) er
|
|||||||
}
|
}
|
||||||
|
|
||||||
// makeBucketRequest constructs request for makeBucket.
|
// makeBucketRequest constructs request for makeBucket.
|
||||||
func (c Client) makeBucketRequest(bucketName string, acl BucketACL, location string) (*http.Request, error) {
|
func (c Client) makeBucketRequest(bucketName string, location string) (*http.Request, error) {
|
||||||
// Validate input arguments.
|
// Validate input arguments.
|
||||||
if err := isValidBucketName(bucketName); err != nil {
|
if err := isValidBucketName(bucketName); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
if !acl.isValidBucketACL() {
|
|
||||||
return nil, ErrInvalidArgument("Unrecognized ACL " + acl.String())
|
|
||||||
}
|
|
||||||
|
|
||||||
// In case of Amazon S3. The make bucket issued on already
|
// In case of Amazon S3. The make bucket issued on already
|
||||||
// existing bucket would fail with 'AuthorizationMalformed' error
|
// existing bucket would fail with 'AuthorizationMalformed' error
|
||||||
@ -106,12 +96,6 @@ func (c Client) makeBucketRequest(bucketName string, acl BucketACL, location str
|
|||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
// by default bucket acl is set to private.
|
|
||||||
req.Header.Set("x-amz-acl", "private")
|
|
||||||
if acl != "" {
|
|
||||||
req.Header.Set("x-amz-acl", string(acl))
|
|
||||||
}
|
|
||||||
|
|
||||||
// set UserAgent for the request.
|
// set UserAgent for the request.
|
||||||
c.setUserAgent(req)
|
c.setUserAgent(req)
|
||||||
|
|
||||||
@ -131,9 +115,12 @@ func (c Client) makeBucketRequest(bucketName string, acl BucketACL, location str
|
|||||||
}
|
}
|
||||||
createBucketConfigBuffer := bytes.NewBuffer(createBucketConfigBytes)
|
createBucketConfigBuffer := bytes.NewBuffer(createBucketConfigBytes)
|
||||||
req.Body = ioutil.NopCloser(createBucketConfigBuffer)
|
req.Body = ioutil.NopCloser(createBucketConfigBuffer)
|
||||||
req.ContentLength = int64(createBucketConfigBuffer.Len())
|
req.ContentLength = int64(len(createBucketConfigBytes))
|
||||||
|
// Set content-md5.
|
||||||
|
req.Header.Set("Content-Md5", base64.StdEncoding.EncodeToString(sumMD5(createBucketConfigBytes)))
|
||||||
if c.signature.isV4() {
|
if c.signature.isV4() {
|
||||||
req.Header.Set("X-Amz-Content-Sha256", hex.EncodeToString(sum256(createBucketConfigBuffer.Bytes())))
|
// Set sha256.
|
||||||
|
req.Header.Set("X-Amz-Content-Sha256", hex.EncodeToString(sum256(createBucketConfigBytes)))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -150,60 +137,89 @@ func (c Client) makeBucketRequest(bucketName string, acl BucketACL, location str
|
|||||||
return req, nil
|
return req, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// SetBucketACL set the permissions on an existing bucket using access control lists (ACL).
|
// SetBucketPolicy set the access permissions on an existing bucket.
|
||||||
//
|
//
|
||||||
// For example
|
// For example
|
||||||
//
|
//
|
||||||
// private - owner gets full access [default].
|
// none - owner gets full access [default].
|
||||||
// public-read - owner gets full access, all others get read access.
|
// readonly - anonymous get access for everyone at a given object prefix.
|
||||||
// public-read-write - owner gets full access, all others get full access too.
|
// readwrite - anonymous list/put/delete access to a given object prefix.
|
||||||
// authenticated-read - owner gets full access, authenticated users get read access.
|
// writeonly - anonymous put/delete access to a given object prefix.
|
||||||
func (c Client) SetBucketACL(bucketName string, acl BucketACL) error {
|
func (c Client) SetBucketPolicy(bucketName string, objectPrefix string, bucketPolicy BucketPolicy) error {
|
||||||
// Input validation.
|
// Input validation.
|
||||||
if err := isValidBucketName(bucketName); err != nil {
|
if err := isValidBucketName(bucketName); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if !acl.isValidBucketACL() {
|
if err := isValidObjectPrefix(objectPrefix); err != nil {
|
||||||
return ErrInvalidArgument("Unrecognized ACL " + acl.String())
|
return err
|
||||||
|
}
|
||||||
|
if !bucketPolicy.isValidBucketPolicy() {
|
||||||
|
return ErrInvalidArgument(fmt.Sprintf("Invalid bucket policy provided. %s", bucketPolicy))
|
||||||
|
}
|
||||||
|
policy, err := c.getBucketPolicy(bucketName, objectPrefix)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
// For bucket policy set to 'none' we need to remove the policy.
|
||||||
|
if bucketPolicy == BucketPolicyNone && policy.Statements == nil {
|
||||||
|
// No policies to set, return success.
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
// Remove any previous policies at this path.
|
||||||
|
policy.Statements = removeBucketPolicyStatement(policy.Statements, bucketName, objectPrefix)
|
||||||
|
|
||||||
|
// generating []Statement for the given bucketPolicy.
|
||||||
|
statements, err := generatePolicyStatement(bucketPolicy, bucketName, objectPrefix)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
policy.Statements = append(policy.Statements, statements...)
|
||||||
|
// Save the updated policies.
|
||||||
|
return c.putBucketPolicy(bucketName, policy)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Saves a new bucket policy.
|
||||||
|
func (c Client) putBucketPolicy(bucketName string, policy BucketAccessPolicy) error {
|
||||||
|
// Input validation.
|
||||||
|
if err := isValidBucketName(bucketName); err != nil {
|
||||||
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
// Set acl query.
|
// If there are no policy statements, we should remove entire policy.
|
||||||
|
if len(policy.Statements) == 0 {
|
||||||
|
return c.removeBucketPolicy(bucketName)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get resources properly escaped and lined up before
|
||||||
|
// using them in http request.
|
||||||
urlValues := make(url.Values)
|
urlValues := make(url.Values)
|
||||||
urlValues.Set("acl", "")
|
urlValues.Set("policy", "")
|
||||||
|
|
||||||
// Add misc headers.
|
policyBytes, err := json.Marshal(&policy)
|
||||||
customHeader := make(http.Header)
|
|
||||||
|
|
||||||
if acl != "" {
|
|
||||||
customHeader.Set("x-amz-acl", acl.String())
|
|
||||||
} else {
|
|
||||||
customHeader.Set("x-amz-acl", "private")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Instantiate a new request.
|
|
||||||
req, err := c.newRequest("PUT", requestMetadata{
|
|
||||||
bucketName: bucketName,
|
|
||||||
queryValues: urlValues,
|
|
||||||
customHeader: customHeader,
|
|
||||||
})
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
// Initiate the request.
|
policyBuffer := bytes.NewReader(policyBytes)
|
||||||
resp, err := c.do(req)
|
reqMetadata := requestMetadata{
|
||||||
|
bucketName: bucketName,
|
||||||
|
queryValues: urlValues,
|
||||||
|
contentBody: policyBuffer,
|
||||||
|
contentLength: int64(len(policyBytes)),
|
||||||
|
contentMD5Bytes: sumMD5(policyBytes),
|
||||||
|
contentSHA256Bytes: sum256(policyBytes),
|
||||||
|
}
|
||||||
|
|
||||||
|
// Execute PUT to upload a new bucket policy.
|
||||||
|
resp, err := c.executeMethod("PUT", reqMetadata)
|
||||||
defer closeResponse(resp)
|
defer closeResponse(resp)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
if resp != nil {
|
if resp != nil {
|
||||||
// if error return.
|
if resp.StatusCode != http.StatusNoContent {
|
||||||
if resp.StatusCode != http.StatusOK {
|
|
||||||
return httpRespToErrorResponse(resp, bucketName, "")
|
return httpRespToErrorResponse(resp, bucketName, "")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// return
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
270
vendor/src/github.com/minio/minio-go/api-put-bucket_test.go
vendored
Normal file
270
vendor/src/github.com/minio/minio-go/api-put-bucket_test.go
vendored
Normal file
@ -0,0 +1,270 @@
|
|||||||
|
/*
|
||||||
|
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package minio
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/base64"
|
||||||
|
"encoding/hex"
|
||||||
|
"encoding/xml"
|
||||||
|
"io"
|
||||||
|
"io/ioutil"
|
||||||
|
"net/http"
|
||||||
|
"testing"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Tests validate http request formulated for creation of bucket.
|
||||||
|
func TestMakeBucketRequest(t *testing.T) {
|
||||||
|
// Generates expected http request for bucket creation.
|
||||||
|
// Used for asserting with the actual request generated.
|
||||||
|
createExpectedRequest := func(c *Client, bucketName string, location string, req *http.Request) (*http.Request, error) {
|
||||||
|
|
||||||
|
targetURL := *c.endpointURL
|
||||||
|
targetURL.Path = "/" + bucketName + "/"
|
||||||
|
|
||||||
|
// get a new HTTP request for the method.
|
||||||
|
req, err := http.NewRequest("PUT", targetURL.String(), nil)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// set UserAgent for the request.
|
||||||
|
c.setUserAgent(req)
|
||||||
|
|
||||||
|
// set sha256 sum for signature calculation only with signature version '4'.
|
||||||
|
if c.signature.isV4() {
|
||||||
|
req.Header.Set("X-Amz-Content-Sha256", hex.EncodeToString(sum256([]byte{})))
|
||||||
|
}
|
||||||
|
|
||||||
|
// If location is not 'us-east-1' create bucket location config.
|
||||||
|
if location != "us-east-1" && location != "" {
|
||||||
|
createBucketConfig := createBucketConfiguration{}
|
||||||
|
createBucketConfig.Location = location
|
||||||
|
var createBucketConfigBytes []byte
|
||||||
|
createBucketConfigBytes, err = xml.Marshal(createBucketConfig)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
createBucketConfigBuffer := bytes.NewBuffer(createBucketConfigBytes)
|
||||||
|
req.Body = ioutil.NopCloser(createBucketConfigBuffer)
|
||||||
|
req.ContentLength = int64(len(createBucketConfigBytes))
|
||||||
|
// Set content-md5.
|
||||||
|
req.Header.Set("Content-Md5", base64.StdEncoding.EncodeToString(sumMD5(createBucketConfigBytes)))
|
||||||
|
if c.signature.isV4() {
|
||||||
|
// Set sha256.
|
||||||
|
req.Header.Set("X-Amz-Content-Sha256", hex.EncodeToString(sum256(createBucketConfigBytes)))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sign the request.
|
||||||
|
if c.signature.isV4() {
|
||||||
|
// Signature calculated for MakeBucket request should be for 'us-east-1',
|
||||||
|
// regardless of the bucket's location constraint.
|
||||||
|
req = signV4(*req, c.accessKeyID, c.secretAccessKey, "us-east-1")
|
||||||
|
} else if c.signature.isV2() {
|
||||||
|
req = signV2(*req, c.accessKeyID, c.secretAccessKey)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Return signed request.
|
||||||
|
return req, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get Request body.
|
||||||
|
getReqBody := func(reqBody io.ReadCloser) (string, error) {
|
||||||
|
contents, err := ioutil.ReadAll(reqBody)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
return string(contents), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Info for 'Client' creation.
|
||||||
|
// Will be used as arguments for 'NewClient'.
|
||||||
|
type infoForClient struct {
|
||||||
|
endPoint string
|
||||||
|
accessKey string
|
||||||
|
secretKey string
|
||||||
|
enableInsecure bool
|
||||||
|
}
|
||||||
|
// dataset for 'NewClient' call.
|
||||||
|
info := []infoForClient{
|
||||||
|
// endpoint localhost.
|
||||||
|
// both access-key and secret-key are empty.
|
||||||
|
{"localhost:9000", "", "", false},
|
||||||
|
// both access-key are secret-key exists.
|
||||||
|
{"localhost:9000", "my-access-key", "my-secret-key", false},
|
||||||
|
// one of acess-key and secret-key are empty.
|
||||||
|
{"localhost:9000", "", "my-secret-key", false},
|
||||||
|
|
||||||
|
// endpoint amazon s3.
|
||||||
|
{"s3.amazonaws.com", "", "", false},
|
||||||
|
{"s3.amazonaws.com", "my-access-key", "my-secret-key", false},
|
||||||
|
{"s3.amazonaws.com", "my-acess-key", "", false},
|
||||||
|
|
||||||
|
// endpoint google cloud storage.
|
||||||
|
{"storage.googleapis.com", "", "", false},
|
||||||
|
{"storage.googleapis.com", "my-access-key", "my-secret-key", false},
|
||||||
|
{"storage.googleapis.com", "", "my-secret-key", false},
|
||||||
|
|
||||||
|
// endpoint custom domain running Minio server.
|
||||||
|
{"play.minio.io", "", "", false},
|
||||||
|
{"play.minio.io", "my-access-key", "my-secret-key", false},
|
||||||
|
{"play.minio.io", "my-acess-key", "", false},
|
||||||
|
}
|
||||||
|
|
||||||
|
testCases := []struct {
|
||||||
|
bucketName string
|
||||||
|
location string
|
||||||
|
// data for new client creation.
|
||||||
|
info infoForClient
|
||||||
|
// error in the output.
|
||||||
|
err error
|
||||||
|
// flag indicating whether tests should pass.
|
||||||
|
shouldPass bool
|
||||||
|
}{
|
||||||
|
// Test cases with Invalid bucket name.
|
||||||
|
{".mybucket", "", infoForClient{}, ErrInvalidBucketName("Bucket name cannot start or end with a '.' dot."), false},
|
||||||
|
{"mybucket.", "", infoForClient{}, ErrInvalidBucketName("Bucket name cannot start or end with a '.' dot."), false},
|
||||||
|
{"mybucket-", "", infoForClient{}, ErrInvalidBucketName("Bucket name contains invalid characters."), false},
|
||||||
|
{"my", "", infoForClient{}, ErrInvalidBucketName("Bucket name cannot be smaller than 3 characters."), false},
|
||||||
|
{"", "", infoForClient{}, ErrInvalidBucketName("Bucket name cannot be empty."), false},
|
||||||
|
{"my..bucket", "", infoForClient{}, ErrInvalidBucketName("Bucket name cannot have successive periods."), false},
|
||||||
|
|
||||||
|
// Test case with all valid values for S3 bucket location.
|
||||||
|
// Client is constructed using the info struct.
|
||||||
|
// case with empty location.
|
||||||
|
{"my-bucket", "", info[0], nil, true},
|
||||||
|
// case with location set to standard 'us-east-1'.
|
||||||
|
{"my-bucket", "us-east-1", info[0], nil, true},
|
||||||
|
// case with location set to a value different from 'us-east-1'.
|
||||||
|
{"my-bucket", "eu-central-1", info[0], nil, true},
|
||||||
|
|
||||||
|
{"my-bucket", "", info[1], nil, true},
|
||||||
|
{"my-bucket", "us-east-1", info[1], nil, true},
|
||||||
|
{"my-bucket", "eu-central-1", info[1], nil, true},
|
||||||
|
|
||||||
|
{"my-bucket", "", info[2], nil, true},
|
||||||
|
{"my-bucket", "us-east-1", info[2], nil, true},
|
||||||
|
{"my-bucket", "eu-central-1", info[2], nil, true},
|
||||||
|
|
||||||
|
{"my-bucket", "", info[3], nil, true},
|
||||||
|
{"my-bucket", "us-east-1", info[3], nil, true},
|
||||||
|
{"my-bucket", "eu-central-1", info[3], nil, true},
|
||||||
|
|
||||||
|
{"my-bucket", "", info[4], nil, true},
|
||||||
|
{"my-bucket", "us-east-1", info[4], nil, true},
|
||||||
|
{"my-bucket", "eu-central-1", info[4], nil, true},
|
||||||
|
|
||||||
|
{"my-bucket", "", info[5], nil, true},
|
||||||
|
{"my-bucket", "us-east-1", info[5], nil, true},
|
||||||
|
{"my-bucket", "eu-central-1", info[5], nil, true},
|
||||||
|
|
||||||
|
{"my-bucket", "", info[6], nil, true},
|
||||||
|
{"my-bucket", "us-east-1", info[6], nil, true},
|
||||||
|
{"my-bucket", "eu-central-1", info[6], nil, true},
|
||||||
|
|
||||||
|
{"my-bucket", "", info[7], nil, true},
|
||||||
|
{"my-bucket", "us-east-1", info[7], nil, true},
|
||||||
|
{"my-bucket", "eu-central-1", info[7], nil, true},
|
||||||
|
|
||||||
|
{"my-bucket", "", info[8], nil, true},
|
||||||
|
{"my-bucket", "us-east-1", info[8], nil, true},
|
||||||
|
{"my-bucket", "eu-central-1", info[8], nil, true},
|
||||||
|
|
||||||
|
{"my-bucket", "", info[9], nil, true},
|
||||||
|
{"my-bucket", "us-east-1", info[9], nil, true},
|
||||||
|
{"my-bucket", "eu-central-1", info[9], nil, true},
|
||||||
|
|
||||||
|
{"my-bucket", "", info[10], nil, true},
|
||||||
|
{"my-bucket", "us-east-1", info[10], nil, true},
|
||||||
|
{"my-bucket", "eu-central-1", info[10], nil, true},
|
||||||
|
|
||||||
|
{"my-bucket", "", info[11], nil, true},
|
||||||
|
{"my-bucket", "us-east-1", info[11], nil, true},
|
||||||
|
{"my-bucket", "eu-central-1", info[11], nil, true},
|
||||||
|
}
|
||||||
|
|
||||||
|
for i, testCase := range testCases {
|
||||||
|
// cannot create a newclient with empty endPoint value.
|
||||||
|
// validates and creates a new client only if the endPoint value is not empty.
|
||||||
|
client := &Client{}
|
||||||
|
var err error
|
||||||
|
if testCase.info.endPoint != "" {
|
||||||
|
|
||||||
|
client, err = New(testCase.info.endPoint, testCase.info.accessKey, testCase.info.secretKey, testCase.info.enableInsecure)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Test %d: Failed to create new Client: %s", i+1, err.Error())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
actualReq, err := client.makeBucketRequest(testCase.bucketName, testCase.location)
|
||||||
|
if err != nil && testCase.shouldPass {
|
||||||
|
t.Errorf("Test %d: Expected to pass, but failed with: <ERROR> %s", i+1, err.Error())
|
||||||
|
}
|
||||||
|
if err == nil && !testCase.shouldPass {
|
||||||
|
t.Errorf("Test %d: Expected to fail with <ERROR> \"%s\", but passed instead", i+1, testCase.err.Error())
|
||||||
|
}
|
||||||
|
// Failed as expected, but does it fail for the expected reason.
|
||||||
|
if err != nil && !testCase.shouldPass {
|
||||||
|
if err.Error() != testCase.err.Error() {
|
||||||
|
t.Errorf("Test %d: Expected to fail with error \"%s\", but instead failed with error \"%s\" instead", i+1, testCase.err.Error(), err.Error())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Test passes as expected, but the output values are verified for correctness here.
|
||||||
|
if err == nil && testCase.shouldPass {
|
||||||
|
expectedReq := &http.Request{}
|
||||||
|
expectedReq, err = createExpectedRequest(client, testCase.bucketName, testCase.location, expectedReq)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Test %d: Expected request Creation failed", i+1)
|
||||||
|
}
|
||||||
|
if expectedReq.Method != actualReq.Method {
|
||||||
|
t.Errorf("Test %d: The expected Request method doesn't match with the actual one", i+1)
|
||||||
|
}
|
||||||
|
if expectedReq.URL.String() != actualReq.URL.String() {
|
||||||
|
t.Errorf("Test %d: Expected the request URL to be '%s', but instead found '%s'", i+1, expectedReq.URL.String(), actualReq.URL.String())
|
||||||
|
}
|
||||||
|
if expectedReq.ContentLength != actualReq.ContentLength {
|
||||||
|
t.Errorf("Test %d: Expected the request body Content-Length to be '%d', but found '%d' instead", i+1, expectedReq.ContentLength, actualReq.ContentLength)
|
||||||
|
}
|
||||||
|
|
||||||
|
if expectedReq.Header.Get("X-Amz-Content-Sha256") != actualReq.Header.Get("X-Amz-Content-Sha256") {
|
||||||
|
t.Errorf("Test %d: 'X-Amz-Content-Sha256' header of the expected request doesn't match with that of the actual request", i+1)
|
||||||
|
}
|
||||||
|
if expectedReq.Header.Get("User-Agent") != actualReq.Header.Get("User-Agent") {
|
||||||
|
t.Errorf("Test %d: Expected 'User-Agent' header to be \"%s\",but found \"%s\" instead", i+1, expectedReq.Header.Get("User-Agent"), actualReq.Header.Get("User-Agent"))
|
||||||
|
}
|
||||||
|
|
||||||
|
if testCase.location != "us-east-1" && testCase.location != "" {
|
||||||
|
expectedContent, err := getReqBody(expectedReq.Body)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Test %d: Coudln't parse request body", i+1)
|
||||||
|
}
|
||||||
|
actualContent, err := getReqBody(actualReq.Body)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Test %d: Coudln't parse request body", i+1)
|
||||||
|
}
|
||||||
|
if expectedContent != actualContent {
|
||||||
|
t.Errorf("Test %d: Expected request body doesn't match actual content body", i+1)
|
||||||
|
}
|
||||||
|
if expectedReq.Header.Get("Content-Md5") != actualReq.Header.Get("Content-Md5") {
|
||||||
|
t.Errorf("Test %d: Request body Md5 differs from the expected result", i+1)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
@ -19,6 +19,7 @@ package minio
|
|||||||
import (
|
import (
|
||||||
"crypto/md5"
|
"crypto/md5"
|
||||||
"crypto/sha256"
|
"crypto/sha256"
|
||||||
|
"fmt"
|
||||||
"hash"
|
"hash"
|
||||||
"io"
|
"io"
|
||||||
"math"
|
"math"
|
||||||
@ -55,7 +56,7 @@ func shouldUploadPart(objPart objectPart, objectParts map[int]objectPart) bool {
|
|||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
// if md5sum mismatches should upload the part.
|
// if md5sum mismatches should upload the part.
|
||||||
if objPart.ETag == uploadedPart.ETag {
|
if objPart.ETag != uploadedPart.ETag {
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
return false
|
return false
|
||||||
@@ -94,62 +95,13 @@ func optimalPartInfo(objectSize int64) (totalPartsCount int, partSize int64, las
 return totalPartsCount, partSize, lastPartSize, nil
 }

-// Compatibility code for Golang < 1.5.x.
-// copyBuffer is identical to io.CopyBuffer, since such a function is
-// not available/implemented in Golang version < 1.5.x, we use a
-// custom call exactly implementng io.CopyBuffer from Golang > 1.5.x
-// version does.
+// hashCopyBuffer is identical to hashCopyN except that it doesn't take
+// any size argument but takes a buffer argument and reader should be
+// of io.ReaderAt interface.
 //
-// copyBuffer stages through the provided buffer (if one is required)
-// rather than allocating a temporary one. If buf is nil, one is
-// allocated; otherwise if it has zero length, copyBuffer panics.
-//
-// FIXME: Remove this code when distributions move to newer Golang versions.
-func copyBuffer(writer io.Writer, reader io.Reader, buf []byte) (written int64, err error) {
-// If the reader has a WriteTo method, use it to do the copy.
-// Avoids an allocation and a copy.
-if wt, ok := reader.(io.WriterTo); ok {
-return wt.WriteTo(writer)
-}
-// Similarly, if the writer has a ReadFrom method, use it to do
-// the copy.
-if rt, ok := writer.(io.ReaderFrom); ok {
-return rt.ReadFrom(reader)
-}
-if buf == nil {
-buf = make([]byte, 32*1024)
-}
-for {
-nr, er := reader.Read(buf)
-if nr > 0 {
-nw, ew := writer.Write(buf[0:nr])
-if nw > 0 {
-written += int64(nw)
-}
-if ew != nil {
-err = ew
-break
-}
-if nr != nw {
-err = io.ErrShortWrite
-break
-}
-}
-if er == io.EOF {
-break
-}
-if er != nil {
-err = er
-break
-}
-}
-return written, err
-}
-
-// hashCopyBuffer is identical to hashCopyN except that it stages
-// through the provided buffer (if one is required) rather than
-// allocating a temporary one. If buf is nil, one is allocated for 5MiB.
-func (c Client) hashCopyBuffer(writer io.Writer, reader io.Reader, buf []byte) (md5Sum, sha256Sum []byte, size int64, err error) {
+// Stages reads from offsets into the buffer, if buffer is nil it is
+// initialized to optimalBufferSize.
+func (c Client) hashCopyBuffer(writer io.Writer, reader io.ReaderAt, buf []byte) (md5Sum, sha256Sum []byte, size int64, err error) {
 // MD5 and SHA256 hasher.
 var hashMD5, hashSHA256 hash.Hash
 // MD5 and SHA256 hasher.
@@ -160,14 +112,61 @@ func (c Client) hashCopyBuffer(writer io.Writer, reader io.Reader, buf []byte) (
 hashWriter = io.MultiWriter(writer, hashMD5, hashSHA256)
 }

-// Allocate buf if not initialized.
+// Buffer is nil, initialize.
 if buf == nil {
 buf = make([]byte, optimalReadBufferSize)
 }

+// Offset to start reading from.
+var readAtOffset int64
+
+// Following block reads data at an offset from the input
+// reader and copies data to into local temporary file.
+for {
+readAtSize, rerr := reader.ReadAt(buf, readAtOffset)
+if rerr != nil {
+if rerr != io.EOF {
+return nil, nil, 0, rerr
+}
+}
+writeSize, werr := hashWriter.Write(buf[:readAtSize])
+if werr != nil {
+return nil, nil, 0, werr
+}
+if readAtSize != writeSize {
+return nil, nil, 0, fmt.Errorf("Read size was not completely written to writer. wanted %d, got %d - %s", readAtSize, writeSize, reportIssue)
+}
+readAtOffset += int64(writeSize)
+size += int64(writeSize)
+if rerr == io.EOF {
+break
+}
+}
+
+// Finalize md5 sum and sha256 sum.
+md5Sum = hashMD5.Sum(nil)
+if c.signature.isV4() {
+sha256Sum = hashSHA256.Sum(nil)
+}
+return md5Sum, sha256Sum, size, err
+}
+
+// hashCopy is identical to hashCopyN except that it doesn't take
+// any size argument.
+func (c Client) hashCopy(writer io.Writer, reader io.Reader) (md5Sum, sha256Sum []byte, size int64, err error) {
+// MD5 and SHA256 hasher.
+var hashMD5, hashSHA256 hash.Hash
+// MD5 and SHA256 hasher.
+hashMD5 = md5.New()
+hashWriter := io.MultiWriter(writer, hashMD5)
+if c.signature.isV4() {
+hashSHA256 = sha256.New()
+hashWriter = io.MultiWriter(writer, hashMD5, hashSHA256)
+}
+
 // Using copyBuffer to copy in large buffers, default buffer
 // for io.Copy of 32KiB is too small.
-size, err = copyBuffer(hashWriter, reader, buf)
+size, err = io.Copy(hashWriter, reader)
 if err != nil {
 return nil, nil, 0, err
 }
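Not part of the diff above: for readers unfamiliar with the pattern the new hashCopyBuffer adopts, here is a minimal, self-contained sketch of copying from an io.ReaderAt at increasing offsets while feeding MD5 and SHA-256 hashers through an io.MultiWriter. The function and buffer size are illustrative only, not the library's own code.

package main

import (
	"bytes"
	"crypto/md5"
	"crypto/sha256"
	"fmt"
	"io"
	"strings"
)

// copyAndHash reads from r at increasing offsets and writes every chunk to w,
// while an io.MultiWriter feeds the same bytes into MD5 and SHA-256 hashers.
func copyAndHash(w io.Writer, r io.ReaderAt, buf []byte) (md5Sum, sha256Sum []byte, size int64, err error) {
	h1, h2 := md5.New(), sha256.New()
	mw := io.MultiWriter(w, h1, h2)
	var off int64
	for {
		n, rerr := r.ReadAt(buf, off)
		if rerr != nil && rerr != io.EOF {
			return nil, nil, 0, rerr
		}
		if n > 0 {
			if _, werr := mw.Write(buf[:n]); werr != nil {
				return nil, nil, 0, werr
			}
			off += int64(n)
			size += int64(n)
		}
		if rerr == io.EOF {
			break
		}
	}
	return h1.Sum(nil), h2.Sum(nil), size, nil
}

func main() {
	var out bytes.Buffer
	src := strings.NewReader("hello, multipart world")
	md5Sum, shaSum, n, err := copyAndHash(&out, src, make([]byte, 8))
	if err != nil {
		panic(err)
	}
	fmt.Printf("copied %d bytes, md5=%x, sha256=%x\n", n, md5Sum, shaSum)
}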
@@ -244,12 +243,8 @@ func (c Client) getUploadID(bucketName, objectName, contentType string) (uploadI
 return uploadID, isNew, nil
 }

-// computeHashBuffer - Calculates MD5 and SHA256 for an input read
-// Seeker is identical to computeHash except that it stages
-// through the provided buffer (if one is required) rather than
-// allocating a temporary one. If buf is nil, it uses a temporary
-// buffer.
-func (c Client) computeHashBuffer(reader io.ReadSeeker, buf []byte) (md5Sum, sha256Sum []byte, size int64, err error) {
+// computeHash - Calculates MD5 and SHA256 for an input read Seeker.
+func (c Client) computeHash(reader io.ReadSeeker) (md5Sum, sha256Sum []byte, size int64, err error) {
 // MD5 and SHA256 hasher.
 var hashMD5, hashSHA256 hash.Hash
 // MD5 and SHA256 hasher.
@@ -261,17 +256,10 @@ func (c Client) computeHashBuffer(reader io.ReadSeeker, buf []byte) (md5Sum, sha
 }

 // If no buffer is provided, no need to allocate just use io.Copy.
-if buf == nil {
 size, err = io.Copy(hashWriter, reader)
 if err != nil {
 return nil, nil, 0, err
 }
-} else {
-size, err = copyBuffer(hashWriter, reader, buf)
-if err != nil {
-return nil, nil, 0, err
-}
-}

 // Seek back reader to the beginning location.
 if _, err := reader.Seek(0, 0); err != nil {
@@ -285,8 +273,3 @@ func (c Client) computeHashBuffer(reader io.ReadSeeker, buf []byte) (md5Sum, sha
 }
 return md5Sum, sha256Sum, size, nil
 }
-
-// computeHash - Calculates MD5 and SHA256 for an input read Seeker.
-func (c Client) computeHash(reader io.ReadSeeker) (md5Sum, sha256Sum []byte, size int64, err error) {
-return c.computeHashBuffer(reader, nil)
-}
vendor/src/github.com/minio/minio-go/api-put-object-copy.go (new file, vendored) | 68
@@ -0,0 +1,68 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2016 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import "net/http"
+
+// CopyObject - copy a source object into a new object with the provided name in the provided bucket
+func (c Client) CopyObject(bucketName string, objectName string, objectSource string, cpCond CopyConditions) error {
+// Input validation.
+if err := isValidBucketName(bucketName); err != nil {
+return err
+}
+if err := isValidObjectName(objectName); err != nil {
+return err
+}
+if objectSource == "" {
+return ErrInvalidArgument("Object source cannot be empty.")
+}
+
+// customHeaders apply headers.
+customHeaders := make(http.Header)
+for _, cond := range cpCond.conditions {
+customHeaders.Set(cond.key, cond.value)
+}
+
+// Set copy source.
+customHeaders.Set("x-amz-copy-source", objectSource)
+
+// Execute PUT on objectName.
+resp, err := c.executeMethod("PUT", requestMetadata{
+bucketName: bucketName,
+objectName: objectName,
+customHeader: customHeaders,
+})
+defer closeResponse(resp)
+if err != nil {
+return err
+}
+if resp != nil {
+if resp.StatusCode != http.StatusOK {
+return httpRespToErrorResponse(resp, bucketName, objectName)
+}
+}
+
+// Decode copy response on success.
+cpObjRes := copyObjectResult{}
+err = xmlDecoder(resp.Body, &cpObjRes)
+if err != nil {
+return err
+}
+
+// Return nil on success.
+return nil
+}
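Not part of the commit itself: a hedged sketch of how the new CopyObject call might be used from application code. The endpoint, credentials, and bucket/object names are placeholders, and a zero-value CopyConditions is assumed to mean "no copy conditions".

package main

import (
	"log"

	"github.com/minio/minio-go"
)

func main() {
	// Placeholder endpoint and credentials.
	c, err := minio.New("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", false)
	if err != nil {
		log.Fatalln(err)
	}
	// The copy source "sourcebucket/sourceobject" is sent as the
	// x-amz-copy-source header by CopyObject.
	err = c.CopyObject("destbucket", "destobject", "sourcebucket/sourceobject", minio.CopyConditions{})
	if err != nil {
		log.Fatalln(err)
	}
	log.Println("object copied")
}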
@@ -1,5 +1,5 @@
 /*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -21,7 +21,9 @@ import (
 "fmt"
 "io"
 "io/ioutil"
+"mime"
 "os"
+"path/filepath"
 "sort"
 )

@@ -57,6 +59,14 @@ func (c Client) FPutObject(bucketName, objectName, filePath, contentType string)
 return 0, ErrEntityTooLarge(fileSize, maxMultipartPutObjectSize, bucketName, objectName)
 }

+// Set contentType based on filepath extension if not given or default
+// value of "binary/octet-stream" if the extension has no associated type.
+if contentType == "" {
+if contentType = mime.TypeByExtension(filepath.Ext(filePath)); contentType == "" {
+contentType = "application/octet-stream"
+}
+}
+
 // NOTE: Google Cloud Storage multipart Put is not compatible with Amazon S3 APIs.
 // Current implementation will only upload a maximum of 5GiB to Google Cloud Storage servers.
 if isGoogleEndpoint(c.endpointURL) {
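As an aside (not from the diff): the contentType fallback added above relies on the standard library's mime package. A standalone illustration of the same lookup, with made-up file names:

package main

import (
	"fmt"
	"mime"
	"path/filepath"
)

func main() {
	for _, p := range []string{"photo.png", "report.pdf", "archive.unknownext"} {
		ct := mime.TypeByExtension(filepath.Ext(p))
		if ct == "" {
			// Same fallback the new FPutObject code uses.
			ct = "application/octet-stream"
		}
		fmt.Println(p, "->", ct)
	}
}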
@@ -187,7 +197,7 @@ func (c Client) putObjectMultipartFromFile(bucketName, objectName string, fileRe
 }, partsInfo) {
 // Proceed to upload the part.
 var objPart objectPart
-objPart, err = c.uploadPart(bucketName, objectName, uploadID, ioutil.NopCloser(reader), partNumber,
+objPart, err = c.uploadPart(bucketName, objectName, uploadID, reader, partNumber,
 md5Sum, sha256Sum, prtSize)
 if err != nil {
 return totalUploadedSize, err
@@ -1,5 +1,5 @@
 /*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -134,8 +134,7 @@ func (c Client) putObjectMultipartStream(bucketName, objectName string, reader i
 }, partsInfo) {
 // Proceed to upload the part.
 var objPart objectPart
-objPart, err = c.uploadPart(bucketName, objectName, uploadID, ioutil.NopCloser(reader), partNumber,
-md5Sum, sha256Sum, prtSize)
+objPart, err = c.uploadPart(bucketName, objectName, uploadID, reader, partNumber, md5Sum, sha256Sum, prtSize)
 if err != nil {
 // Reset the temporary buffer upon any error.
 tmpBuffer.Reset()
@@ -230,14 +229,8 @@ func (c Client) initiateMultipartUpload(bucketName, objectName, contentType stri
 customHeader: customHeader,
 }

-// Instantiate the request.
-req, err := c.newRequest("POST", reqMetadata)
-if err != nil {
-return initiateMultipartUploadResult{}, err
-}
-
-// Execute the request.
-resp, err := c.do(req)
+// Execute POST on an objectName to initiate multipart upload.
+resp, err := c.executeMethod("POST", reqMetadata)
 defer closeResponse(resp)
 if err != nil {
 return initiateMultipartUploadResult{}, err
@@ -257,7 +250,7 @@ func (c Client) initiateMultipartUpload(bucketName, objectName, contentType stri
 }

 // uploadPart - Uploads a part in a multipart upload.
-func (c Client) uploadPart(bucketName, objectName, uploadID string, reader io.ReadCloser, partNumber int, md5Sum, sha256Sum []byte, size int64) (objectPart, error) {
+func (c Client) uploadPart(bucketName, objectName, uploadID string, reader io.Reader, partNumber int, md5Sum, sha256Sum []byte, size int64) (objectPart, error) {
 // Input validation.
 if err := isValidBucketName(bucketName); err != nil {
 return objectPart{}, err
@@ -295,13 +288,8 @@ func (c Client) uploadPart(bucketName, objectName, uploadID string, reader io.Re
 contentSHA256Bytes: sha256Sum,
 }

-// Instantiate a request.
-req, err := c.newRequest("PUT", reqMetadata)
-if err != nil {
-return objectPart{}, err
-}
-// Execute the request.
-resp, err := c.do(req)
+// Execute PUT on each part.
+resp, err := c.executeMethod("PUT", reqMetadata)
 defer closeResponse(resp)
 if err != nil {
 return objectPart{}, err
@@ -342,24 +330,18 @@ func (c Client) completeMultipartUpload(bucketName, objectName, uploadID string,
 }

 // Instantiate all the complete multipart buffer.
-completeMultipartUploadBuffer := bytes.NewBuffer(completeMultipartUploadBytes)
+completeMultipartUploadBuffer := bytes.NewReader(completeMultipartUploadBytes)
 reqMetadata := requestMetadata{
 bucketName: bucketName,
 objectName: objectName,
 queryValues: urlValues,
-contentBody: ioutil.NopCloser(completeMultipartUploadBuffer),
-contentLength: int64(completeMultipartUploadBuffer.Len()),
-contentSHA256Bytes: sum256(completeMultipartUploadBuffer.Bytes()),
+contentBody: completeMultipartUploadBuffer,
+contentLength: int64(len(completeMultipartUploadBytes)),
+contentSHA256Bytes: sum256(completeMultipartUploadBytes),
 }

-// Instantiate the request.
-req, err := c.newRequest("POST", reqMetadata)
-if err != nil {
-return completeMultipartUploadResult{}, err
-}
-
-// Execute the request.
-resp, err := c.do(req)
+// Execute POST to complete multipart upload for an objectName.
+resp, err := c.executeMethod("POST", reqMetadata)
 defer closeResponse(resp)
 if err != nil {
 return completeMultipartUploadResult{}, err
@@ -91,7 +91,7 @@ func (c Client) PutObjectWithProgress(bucketName, objectName string, reader io.R
 errResp := ToErrorResponse(err)
 // Verify if multipart functionality is not available, if not
 // fall back to single PutObject operation.
-if errResp.Code == "NotImplemented" {
+if errResp.Code == "AccessDenied" && errResp.Message == "Access Denied." {
 // Verify if size of reader is greater than '5GiB'.
 if size > maxSinglePutObjectSize {
 return 0, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName)
@@ -1,5 +1,5 @@
 /*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -97,7 +97,7 @@ func (c Client) putObjectMultipartFromReadAt(bucketName, objectName string, read
 tmpBuffer := new(bytes.Buffer)

 // Read defaults to reading at 5MiB buffer.
-readBuffer := make([]byte, optimalReadBufferSize)
+readAtBuffer := make([]byte, optimalReadBufferSize)

 // Upload all the missing parts.
 for partNumber <= lastPartNumber {
@@ -147,7 +147,7 @@ func (c Client) putObjectMultipartFromReadAt(bucketName, objectName string, read
 // Calculates MD5 and SHA256 sum for a section reader.
 var md5Sum, sha256Sum []byte
 var prtSize int64
-md5Sum, sha256Sum, prtSize, err = c.hashCopyBuffer(tmpBuffer, sectionReader, readBuffer)
+md5Sum, sha256Sum, prtSize, err = c.hashCopyBuffer(tmpBuffer, sectionReader, readAtBuffer)
 if err != nil {
 return 0, err
 }
@@ -159,8 +159,7 @@ func (c Client) putObjectMultipartFromReadAt(bucketName, objectName string, read

 // Proceed to upload the part.
 var objPart objectPart
-objPart, err = c.uploadPart(bucketName, objectName, uploadID, ioutil.NopCloser(reader),
-partNumber, md5Sum, sha256Sum, prtSize)
+objPart, err = c.uploadPart(bucketName, objectName, uploadID, reader, partNumber, md5Sum, sha256Sum, prtSize)
 if err != nil {
 // Reset the buffer upon any error.
 tmpBuffer.Reset()
@@ -1,5 +1,5 @@
 /*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -38,7 +38,7 @@ func getReaderSize(reader io.Reader) (size int64, err error) {
 if lenFn.Kind() == reflect.Func {
 // Call the 'Size' function and save its return value.
 result = lenFn.Call([]reflect.Value{})
-if result != nil && len(result) == 1 {
+if len(result) == 1 {
 lenValue := result[0]
 if lenValue.IsValid() {
 switch lenValue.Kind() {
@@ -146,11 +146,11 @@ func (c Client) putObjectNoChecksum(bucketName, objectName string, reader io.Rea

 // Update progress reader appropriately to the latest offset as we
 // read from the source.
-reader = newHook(reader, progress)
+readSeeker := newHook(reader, progress)

 // This function does not calculate sha256 and md5sum for payload.
 // Execute put object.
-st, err := c.putObjectDo(bucketName, objectName, ioutil.NopCloser(reader), nil, nil, size, contentType)
+st, err := c.putObjectDo(bucketName, objectName, readSeeker, nil, nil, size, contentType)
 if err != nil {
 return 0, err
 }
@@ -178,12 +178,12 @@ func (c Client) putObjectSingle(bucketName, objectName string, reader io.Reader,
 size = maxSinglePutObjectSize
 }
 var md5Sum, sha256Sum []byte
-var readCloser io.ReadCloser
 if size <= minPartSize {
 // Initialize a new temporary buffer.
 tmpBuffer := new(bytes.Buffer)
 md5Sum, sha256Sum, size, err = c.hashCopyN(tmpBuffer, reader, size)
-readCloser = ioutil.NopCloser(tmpBuffer)
+reader = bytes.NewReader(tmpBuffer.Bytes())
+tmpBuffer.Reset()
 } else {
 // Initialize a new temporary file.
 var tmpFile *tempFile
@@ -191,12 +191,13 @@ func (c Client) putObjectSingle(bucketName, objectName string, reader io.Reader,
 if err != nil {
 return 0, err
 }
+defer tmpFile.Close()
 md5Sum, sha256Sum, size, err = c.hashCopyN(tmpFile, reader, size)
 // Seek back to beginning of the temporary file.
 if _, err = tmpFile.Seek(0, 0); err != nil {
 return 0, err
 }
-readCloser = tmpFile
+reader = tmpFile
 }
 // Return error if its not io.EOF.
 if err != nil {
@@ -204,26 +205,26 @@ func (c Client) putObjectSingle(bucketName, objectName string, reader io.Reader,
 return 0, err
 }
 }
-// Progress the reader to the size.
-if progress != nil {
-if _, err = io.CopyN(ioutil.Discard, progress, size); err != nil {
-return size, err
-}
-}
 // Execute put object.
-st, err := c.putObjectDo(bucketName, objectName, readCloser, md5Sum, sha256Sum, size, contentType)
+st, err := c.putObjectDo(bucketName, objectName, reader, md5Sum, sha256Sum, size, contentType)
 if err != nil {
 return 0, err
 }
 if st.Size != size {
 return 0, ErrUnexpectedEOF(st.Size, size, bucketName, objectName)
 }
+// Progress the reader to the size if putObjectDo is successful.
+if progress != nil {
+if _, err = io.CopyN(ioutil.Discard, progress, size); err != nil {
+return size, err
+}
+}
 return size, nil
 }

 // putObjectDo - executes the put object http operation.
 // NOTE: You must have WRITE permissions on a bucket to add an object to it.
-func (c Client) putObjectDo(bucketName, objectName string, reader io.ReadCloser, md5Sum []byte, sha256Sum []byte, size int64, contentType string) (ObjectInfo, error) {
+func (c Client) putObjectDo(bucketName, objectName string, reader io.Reader, md5Sum []byte, sha256Sum []byte, size int64, contentType string) (ObjectInfo, error) {
 // Input validation.
 if err := isValidBucketName(bucketName); err != nil {
 return ObjectInfo{}, err
@@ -258,13 +259,9 @@ func (c Client) putObjectDo(bucketName, objectName string, reader io.ReadCloser,
 contentMD5Bytes: md5Sum,
 contentSHA256Bytes: sha256Sum,
 }
-// Initiate new request.
-req, err := c.newRequest("PUT", reqMetadata)
-if err != nil {
-return ObjectInfo{}, err
-}
-// Execute the request.
-resp, err := c.do(req)
+// Execute PUT an objectName.
+resp, err := c.executeMethod("PUT", reqMetadata)
 defer closeResponse(resp)
 if err != nil {
 return ObjectInfo{}, err
@@ -1,5 +1,5 @@
 /*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -30,15 +30,10 @@ func (c Client) RemoveBucket(bucketName string) error {
 if err := isValidBucketName(bucketName); err != nil {
 return err
 }
-// Instantiate a new request.
-req, err := c.newRequest("DELETE", requestMetadata{
+// Execute DELETE on bucket.
+resp, err := c.executeMethod("DELETE", requestMetadata{
 bucketName: bucketName,
 })
-if err != nil {
-return err
-}
-// Initiate the request.
-resp, err := c.do(req)
 defer closeResponse(resp)
 if err != nil {
 return err
@@ -55,6 +50,54 @@ func (c Client) RemoveBucket(bucketName string) error {
 return nil
 }

+// RemoveBucketPolicy remove a bucket policy on given path.
+func (c Client) RemoveBucketPolicy(bucketName, objectPrefix string) error {
+// Input validation.
+if err := isValidBucketName(bucketName); err != nil {
+return err
+}
+if err := isValidObjectPrefix(objectPrefix); err != nil {
+return err
+}
+policy, err := c.getBucketPolicy(bucketName, objectPrefix)
+if err != nil {
+return err
+}
+// No bucket policy found, nothing to remove return success.
+if policy.Statements == nil {
+return nil
+}
+
+// Save new statements after removing requested bucket policy.
+policy.Statements = removeBucketPolicyStatement(policy.Statements, bucketName, objectPrefix)
+
+// Commit the update policy.
+return c.putBucketPolicy(bucketName, policy)
+}
+
+// Removes all policies on a bucket.
+func (c Client) removeBucketPolicy(bucketName string) error {
+// Input validation.
+if err := isValidBucketName(bucketName); err != nil {
+return err
+}
+// Get resources properly escaped and lined up before
+// using them in http request.
+urlValues := make(url.Values)
+urlValues.Set("policy", "")
+
+// Execute DELETE on objectName.
+resp, err := c.executeMethod("DELETE", requestMetadata{
+bucketName: bucketName,
+queryValues: urlValues,
+})
+defer closeResponse(resp)
+if err != nil {
+return err
+}
+return nil
+}

 // RemoveObject remove an object from a bucket.
 func (c Client) RemoveObject(bucketName, objectName string) error {
 // Input validation.
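Not part of the commit: a short, hedged sketch of calling the new RemoveBucketPolicy API. Endpoint, credentials, bucket name, and prefix below are placeholders.

package main

import (
	"log"

	"github.com/minio/minio-go"
)

func main() {
	c, err := minio.New("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", false)
	if err != nil {
		log.Fatalln(err)
	}
	// Remove any policy previously set on "mybucket" for the "uploads" prefix.
	if err := c.RemoveBucketPolicy("mybucket", "uploads"); err != nil {
		log.Fatalln(err)
	}
	log.Println("bucket policy removed")
}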
@@ -64,16 +107,11 @@ func (c Client) RemoveObject(bucketName, objectName string) error {
 if err := isValidObjectName(objectName); err != nil {
 return err
 }
-// Instantiate the request.
-req, err := c.newRequest("DELETE", requestMetadata{
+// Execute DELETE on objectName.
+resp, err := c.executeMethod("DELETE", requestMetadata{
 bucketName: bucketName,
 objectName: objectName,
 })
-if err != nil {
-return err
-}
-// Initiate the request.
-resp, err := c.do(req)
 defer closeResponse(resp)
 if err != nil {
 return err
@@ -124,18 +162,12 @@ func (c Client) abortMultipartUpload(bucketName, objectName, uploadID string) er
 urlValues := make(url.Values)
 urlValues.Set("uploadId", uploadID)

-// Instantiate a new DELETE request.
-req, err := c.newRequest("DELETE", requestMetadata{
+// Execute DELETE on multipart upload.
+resp, err := c.executeMethod("DELETE", requestMetadata{
 bucketName: bucketName,
 objectName: objectName,
 queryValues: urlValues,
 })
-if err != nil {
-return err
-}
-
-// Initiate the request.
-resp, err := c.do(req)
 defer closeResponse(resp)
 if err != nil {
 return err
@@ -155,7 +187,7 @@ func (c Client) abortMultipartUpload(bucketName, objectName, uploadID string) er
 Key: objectName,
 RequestID: resp.Header.Get("x-amz-request-id"),
 HostID: resp.Header.Get("x-amz-id-2"),
-AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"),
+Region: resp.Header.Get("x-amz-bucket-region"),
 }
 default:
 return httpRespToErrorResponse(resp, bucketName, objectName)
@@ -96,6 +96,12 @@ type initiator struct {
 DisplayName string
 }

+// copyObjectResult container for copy object response.
+type copyObjectResult struct {
+ETag string
+LastModified string // time string format "2006-01-02T15:04:05.000Z"
+}
+
 // objectPart container for particular part of an object.
 type objectPart struct {
 // Part number identifies the part.
@@ -171,27 +177,3 @@ type createBucketConfiguration struct {
 XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CreateBucketConfiguration" json:"-"`
 Location string `xml:"LocationConstraint"`
 }
-
-// grant container for the grantee and his or her permissions.
-type grant struct {
-// grantee container for DisplayName and ID of the person being
-// granted permissions.
-Grantee struct {
-ID string
-DisplayName string
-EmailAddress string
-Type string
-URI string
-}
-Permission string
-}
-
-// accessControlPolicy contains the elements providing ACL permissions
-// for a bucket.
-type accessControlPolicy struct {
-// accessControlList container for ACL information.
-AccessControlList struct {
-Grant []grant
-}
-Owner owner
-}
vendor/src/github.com/minio/minio-go/api-stat.go (vendored) | 26
@@ -1,5 +1,5 @@
 /*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -29,15 +29,11 @@ func (c Client) BucketExists(bucketName string) error {
 if err := isValidBucketName(bucketName); err != nil {
 return err
 }
-// Instantiate a new request.
-req, err := c.newRequest("HEAD", requestMetadata{
+// Execute HEAD on bucketName.
+resp, err := c.executeMethod("HEAD", requestMetadata{
 bucketName: bucketName,
 })
-if err != nil {
-return err
-}
-// Initiate the request.
-resp, err := c.do(req)
 defer closeResponse(resp)
 if err != nil {
 return err
@@ -59,16 +55,12 @@ func (c Client) StatObject(bucketName, objectName string) (ObjectInfo, error) {
 if err := isValidObjectName(objectName); err != nil {
 return ObjectInfo{}, err
 }
-// Instantiate a new request.
-req, err := c.newRequest("HEAD", requestMetadata{
+// Execute HEAD on objectName.
+resp, err := c.executeMethod("HEAD", requestMetadata{
 bucketName: bucketName,
 objectName: objectName,
 })
-if err != nil {
-return ObjectInfo{}, err
-}
-// Initiate the request.
-resp, err := c.do(req)
 defer closeResponse(resp)
 if err != nil {
 return ObjectInfo{}, err
@@ -93,7 +85,7 @@ func (c Client) StatObject(bucketName, objectName string) (ObjectInfo, error) {
 Key: objectName,
 RequestID: resp.Header.Get("x-amz-request-id"),
 HostID: resp.Header.Get("x-amz-id-2"),
-AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"),
+Region: resp.Header.Get("x-amz-bucket-region"),
 }
 }
 // Parse Last-Modified has http time format.
@@ -106,7 +98,7 @@ func (c Client) StatObject(bucketName, objectName string) (ObjectInfo, error) {
 Key: objectName,
 RequestID: resp.Header.Get("x-amz-request-id"),
 HostID: resp.Header.Get("x-amz-id-2"),
-AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"),
+Region: resp.Header.Get("x-amz-bucket-region"),
 }
 }
 // Fetch content type if any present.
vendor/src/github.com/minio/minio-go/api.go (vendored) | 261
@@ -1,5 +1,5 @@
 /*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -22,6 +22,8 @@ import (
 "encoding/hex"
 "fmt"
 "io"
+"io/ioutil"
+"math/rand"
 "net/http"
 "net/http/httputil"
 "net/url"
@@ -29,6 +31,7 @@ import (
 "regexp"
 "runtime"
 "strings"
+"sync"
 "time"
 )

@@ -56,15 +59,18 @@ type Client struct {
 httpClient *http.Client
 bucketLocCache *bucketLocationCache

-// Advanced functionality
+// Advanced functionality.
 isTraceEnabled bool
 traceOutput io.Writer
+
+// Random seed.
+random *rand.Rand
 }

 // Global constants.
 const (
 libraryName = "minio-go"
-libraryVersion = "0.2.5"
+libraryVersion = "1.0.1"
 )

 // User Agent should always following the below style.
@@ -78,7 +84,7 @@ const (

 // NewV2 - instantiate minio client with Amazon S3 signature version
 // '2' compatibility.
-func NewV2(endpoint string, accessKeyID, secretAccessKey string, insecure bool) (CloudStorageClient, error) {
+func NewV2(endpoint string, accessKeyID, secretAccessKey string, insecure bool) (*Client, error) {
 clnt, err := privateNew(endpoint, accessKeyID, secretAccessKey, insecure)
 if err != nil {
 return nil, err
@@ -90,7 +96,7 @@ func NewV2(endpoint string, accessKeyID, secretAccessKey string, insecure bool)

 // NewV4 - instantiate minio client with Amazon S3 signature version
 // '4' compatibility.
-func NewV4(endpoint string, accessKeyID, secretAccessKey string, insecure bool) (CloudStorageClient, error) {
+func NewV4(endpoint string, accessKeyID, secretAccessKey string, insecure bool) (*Client, error) {
 clnt, err := privateNew(endpoint, accessKeyID, secretAccessKey, insecure)
 if err != nil {
 return nil, err
@@ -102,7 +108,7 @@ func NewV4(endpoint string, accessKeyID, secretAccessKey string, insecure bool)

 // New - instantiate minio client Client, adds automatic verification
 // of signature.
-func New(endpoint string, accessKeyID, secretAccessKey string, insecure bool) (CloudStorageClient, error) {
+func New(endpoint string, accessKeyID, secretAccessKey string, insecure bool) (*Client, error) {
 clnt, err := privateNew(endpoint, accessKeyID, secretAccessKey, insecure)
 if err != nil {
 return nil, err
@@ -112,13 +118,36 @@ func New(endpoint string, accessKeyID, secretAccessKey string, insecure bool) (C
 if isGoogleEndpoint(clnt.endpointURL) {
 clnt.signature = SignatureV2
 }
-// If Amazon S3 set to signature v2.
+// If Amazon S3 set to signature v2.n
 if isAmazonEndpoint(clnt.endpointURL) {
 clnt.signature = SignatureV4
 }
 return clnt, nil
 }
+
+// lockedRandSource provides protected rand source, implements rand.Source interface.
+type lockedRandSource struct {
+lk sync.Mutex
+src rand.Source
+}
+
+// Int63 returns a non-negative pseudo-random 63-bit integer as an
+// int64.
+func (r *lockedRandSource) Int63() (n int64) {
+r.lk.Lock()
+n = r.src.Int63()
+r.lk.Unlock()
+return
+}
+
+// Seed uses the provided seed value to initialize the generator to a
+// deterministic state.
+func (r *lockedRandSource) Seed(seed int64) {
+r.lk.Lock()
+r.src.Seed(seed)
+r.lk.Unlock()
+}

 func privateNew(endpoint, accessKeyID, secretAccessKey string, insecure bool) (*Client, error) {
 // construct endpoint.
 endpointURL, err := getEndpointURL(endpoint, insecure)
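Not part of the diff: the mutex in lockedRandSource above exists because math/rand sources are not safe for concurrent use. A standalone sketch of the same pattern, shared by several goroutines:

package main

import (
	"fmt"
	"math/rand"
	"sync"
	"time"
)

// lockedSource serialises access to a rand.Source so that a single *rand.Rand
// can safely be shared by many goroutines.
type lockedSource struct {
	mu  sync.Mutex
	src rand.Source
}

func (s *lockedSource) Int63() int64 {
	s.mu.Lock()
	defer s.mu.Unlock()
	return s.src.Int63()
}

func (s *lockedSource) Seed(seed int64) {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.src.Seed(seed)
}

func main() {
	r := rand.New(&lockedSource{src: rand.NewSource(time.Now().UnixNano())})
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			fmt.Println(r.Int63()) // safe: every call goes through the mutex
		}()
	}
	wg.Wait()
}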
@@ -138,9 +167,20 @@ func privateNew(endpoint, accessKeyID, secretAccessKey string, insecure bool) (*
 clnt.endpointURL = endpointURL

 // Instantiate http client and bucket location cache.
-clnt.httpClient = &http.Client{}
+clnt.httpClient = &http.Client{
+// Setting a sensible time out of 2minutes to wait for response
+// headers. Request is pro-actively cancelled after 2minutes
+// if no response was received from server.
+Timeout: 2 * time.Minute,
+Transport: http.DefaultTransport,
+}
+
+// Instantiae bucket location cache.
 clnt.bucketLocCache = newBucketLocationCache()
+
+// Introduce a new locked random seed.
+clnt.random = rand.New(&lockedRandSource{src: rand.NewSource(time.Now().UTC().UnixNano())})
+
 // Return.
 return clnt, nil
 }
@@ -180,6 +220,13 @@ func (c *Client) SetCustomTransport(customHTTPTransport http.RoundTripper) {
 }
 }

+// SetClientTimeout - set http client timeout.
+func (c *Client) SetClientTimeout(timeout time.Duration) {
+if c.httpClient != nil {
+c.httpClient.Timeout = timeout
+}
+}
+
 // TraceOn - enable HTTP tracing.
 func (c *Client) TraceOn(outputStream io.Writer) {
 // if outputStream is nil then default to os.Stdout.
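Not from the commit: a minimal usage sketch of the new SetClientTimeout hook, overriding the two-minute default set in privateNew. Endpoint and credentials are placeholders.

package main

import (
	"log"
	"time"

	"github.com/minio/minio-go"
)

func main() {
	c, err := minio.New("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", false)
	if err != nil {
		log.Fatalln(err)
	}
	// Allow very slow links more time before the client cancels a request.
	c.SetClientTimeout(10 * time.Minute)
}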
@@ -214,7 +261,7 @@ type requestMetadata struct {

 // Generated by our internal code.
 bucketLocation string
-contentBody io.ReadCloser
+contentBody io.Reader
 contentLength int64
 contentSHA256Bytes []byte
 contentMD5Bytes []byte
@@ -292,7 +339,7 @@ func (c Client) dumpHTTP(req *http.Request, resp *http.Response) error {
 // to zero. Keep this workaround until the above bug is fixed.
 if resp.ContentLength == 0 {
 var buffer bytes.Buffer
-if err := resp.Header.Write(&buffer); err != nil {
+if err = resp.Header.Write(&buffer); err != nil {
 return err
 }
 respTrace = buffer.Bytes()
@@ -322,11 +369,28 @@ func (c Client) dumpHTTP(req *http.Request, resp *http.Response) error {

 // do - execute http request.
 func (c Client) do(req *http.Request) (*http.Response, error) {
-// execute the request.
+// do the request.
 resp, err := c.httpClient.Do(req)
 if err != nil {
-return resp, err
+// Handle this specifically for now until future Golang
+// versions fix this issue properly.
+urlErr, ok := err.(*url.Error)
+if ok && strings.Contains(urlErr.Err.Error(), "EOF") {
+return nil, &url.Error{
+Op: urlErr.Op,
+URL: urlErr.URL,
+Err: fmt.Errorf("Connection closed by foreign host %s. Retry again.", urlErr.URL),
 }
+}
+return nil, err
+}
+
+// Response cannot be non-nil, report if its the case.
+if resp == nil {
+msg := "Response is empty. " + reportIssue
+return nil, ErrInvalidArgument(msg)
+}
+
 // If trace is enabled, dump http request and response.
 if c.isTraceEnabled {
 err = c.dumpHTTP(req, resp)
@@ -337,6 +401,113 @@ func (c Client) do(req *http.Request) (*http.Response, error) {
 return resp, nil
 }

+// List of success status.
+var successStatus = []int{
+http.StatusOK,
+http.StatusNoContent,
+http.StatusPartialContent,
+}
+
+// executeMethod - instantiates a given method, and retries the
+// request upon any error up to maxRetries attempts in a binomially
+// delayed manner using a standard back off algorithm.
+func (c Client) executeMethod(method string, metadata requestMetadata) (res *http.Response, err error) {
+var isRetryable bool // Indicates if request can be retried.
+var bodySeeker io.Seeker // Extracted seeker from io.Reader.
+if metadata.contentBody != nil {
+// Check if body is seekable then it is retryable.
+bodySeeker, isRetryable = metadata.contentBody.(io.Seeker)
+}
+
+// Create a done channel to control 'ListObjects' go routine.
+doneCh := make(chan struct{}, 1)
+
+// Indicate to our routine to exit cleanly upon return.
+defer close(doneCh)
+
+// Blank indentifier is kept here on purpose since 'range' without
+// blank identifiers is only supported since go1.4
+// https://golang.org/doc/go1.4#forrange.
+for _ = range c.newRetryTimer(MaxRetry, time.Second, time.Second*30, MaxJitter, doneCh) {
+// Retry executes the following function body if request has an
+// error until maxRetries have been exhausted, retry attempts are
+// performed after waiting for a given period of time in a
+// binomial fashion.
+if isRetryable {
+// Seek back to beginning for each attempt.
+if _, err = bodySeeker.Seek(0, 0); err != nil {
+// If seek failed, no need to retry.
+return nil, err
+}
+}
+
+// Instantiate a new request.
+var req *http.Request
+req, err = c.newRequest(method, metadata)
+if err != nil {
+errResponse := ToErrorResponse(err)
+if isS3CodeRetryable(errResponse.Code) {
+continue // Retry.
+}
+return nil, err
+}
+
+// Initiate the request.
+res, err = c.do(req)
+if err != nil {
+// For supported network errors verify.
+if isNetErrorRetryable(err) {
+continue // Retry.
+}
+// For other errors, return here no need to retry.
+return nil, err
+}
+
+// For any known successful http status, return quickly.
+for _, httpStatus := range successStatus {
+if httpStatus == res.StatusCode {
+return res, nil
+}
+}
+
+// Read the body to be saved later.
+errBodyBytes, err := ioutil.ReadAll(res.Body)
+if err != nil {
+return nil, err
+}
+// Save the body.
+errBodySeeker := bytes.NewReader(errBodyBytes)
+res.Body = ioutil.NopCloser(errBodySeeker)
+
+// For errors verify if its retryable otherwise fail quickly.
+errResponse := ToErrorResponse(httpRespToErrorResponse(res, metadata.bucketName, metadata.objectName))
+// Bucket region if set in error response, we can retry the
+// request with the new region.
+if errResponse.Region != "" {
+c.bucketLocCache.Set(metadata.bucketName, errResponse.Region)
+continue // Retry.
+}
+
+// Verify if error response code is retryable.
+if isS3CodeRetryable(errResponse.Code) {
+continue // Retry.
+}
+
+// Verify if http status code is retryable.
+if isHTTPStatusRetryable(res.StatusCode) {
+continue // Retry.
+}
+
+// Save the body back again.
+errBodySeeker.Seek(0, 0) // Seek back to starting point.
+res.Body = ioutil.NopCloser(errBodySeeker)
+
+// For all other cases break out of the retry loop.
+break
+}
+return res, err
+}
+
 // newRequest - instantiate a new HTTP request for a given method.
 func (c Client) newRequest(method string, metadata requestMetadata) (req *http.Request, err error) {
 // If no method is supplied default to 'POST'.
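Not the library code itself: a simplified, self-contained sketch of the retry idea executeMethod introduces above, namely rewinding a seekable request body before every attempt and retrying transient failures with a growing delay. The helper name and delay policy are made up for illustration.

package main

import (
	"errors"
	"fmt"
	"io"
	"strings"
	"time"
)

// doWithRetry rewinds body (when it is seekable) before every attempt and
// retries the operation with a simple growing delay. The attempt function
// stands in for building and executing one HTTP request.
func doWithRetry(body io.Reader, maxRetries int, attempt func(io.Reader) error) error {
	seeker, rewindable := body.(io.Seeker)
	var err error
	for i := 0; i < maxRetries; i++ {
		if rewindable {
			if _, err = seeker.Seek(0, 0); err != nil {
				return err // cannot rewind the body, no point retrying
			}
		}
		if err = attempt(body); err == nil {
			return nil
		}
		time.Sleep(time.Duration(i+1) * 100 * time.Millisecond)
	}
	return err
}

func main() {
	calls := 0
	err := doWithRetry(strings.NewReader("payload"), 5, func(r io.Reader) error {
		calls++
		if calls < 3 {
			return errors.New("transient error")
		}
		return nil
	})
	fmt.Println("calls:", calls, "err:", err)
}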
@@ -344,8 +515,17 @@ func (c Client) newRequest(method string, metadata requestMetadata) (req *http.R
 method = "POST"
 }

+// Default all requests to "us-east-1" or "cn-north-1" (china region)
+location := "us-east-1"
+if isAmazonChinaEndpoint(c.endpointURL) {
+// For china specifically we need to set everything to
+// cn-north-1 for now, there is no easier way until AWS S3
+// provides a cleaner compatible API across "us-east-1" and
+// China region.
+location = "cn-north-1"
+}
+
 // Gather location only if bucketName is present.
-location := "us-east-1" // Default all other requests to "us-east-1".
 if metadata.bucketName != "" {
 location, err = c.getBucketLocation(metadata.bucketName)
 if err != nil {
@@ -385,10 +565,13 @@ func (c Client) newRequest(method string, metadata requestMetadata) (req *http.R

 // Set content body if available.
 if metadata.contentBody != nil {
-req.Body = metadata.contentBody
+req.Body = ioutil.NopCloser(metadata.contentBody)
 }

-// set UserAgent for the request.
+// set 'Expect' header for the request.
+req.Header.Set("Expect", "100-continue")
+
+// set 'User-Agent' header for the request.
 c.setUserAgent(req)

 // Set all headers.
@@ -415,7 +598,7 @@ func (c Client) newRequest(method string, metadata requestMetadata) (req *http.R

 // set md5Sum for content protection.
 if metadata.contentMD5Bytes != nil {
-req.Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(metadata.contentMD5Bytes))
+req.Header.Set("Content-Md5", base64.StdEncoding.EncodeToString(metadata.contentMD5Bytes))
 }

 // Sign the request for all authenticated requests.
@ -478,55 +661,11 @@ func (c Client) makeTargetURL(bucketName, objectName, bucketLocation string, que
|
|||||||
}
|
}
|
||||||
// If there are any query values, add them to the end.
|
// If there are any query values, add them to the end.
|
||||||
if len(queryValues) > 0 {
|
if len(queryValues) > 0 {
|
||||||
urlStr = urlStr + "?" + queryValues.Encode()
|
urlStr = urlStr + "?" + queryEncode(queryValues)
|
||||||
}
|
}
|
||||||
u, err := url.Parse(urlStr)
|
u, err := url.Parse(urlStr)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
return u, nil
|
return u, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// CloudStorageClient - Cloud Storage Client interface.
|
|
||||||
type CloudStorageClient interface {
|
|
||||||
// Bucket Read/Write/Stat operations.
|
|
||||||
MakeBucket(bucketName string, cannedACL BucketACL, location string) error
|
|
||||||
BucketExists(bucketName string) error
|
|
||||||
RemoveBucket(bucketName string) error
|
|
||||||
SetBucketACL(bucketName string, cannedACL BucketACL) error
|
|
||||||
GetBucketACL(bucketName string) (BucketACL, error)
|
|
||||||
|
|
||||||
ListBuckets() ([]BucketInfo, error)
|
|
||||||
ListObjects(bucket, prefix string, recursive bool, doneCh <-chan struct{}) <-chan ObjectInfo
|
|
||||||
ListIncompleteUploads(bucket, prefix string, recursive bool, doneCh <-chan struct{}) <-chan ObjectMultipartInfo
|
|
||||||
|
|
||||||
// Object Read/Write/Stat operations.
|
|
||||||
GetObject(bucketName, objectName string) (reader *Object, err error)
|
|
||||||
PutObject(bucketName, objectName string, reader io.Reader, contentType string) (n int64, err error)
|
|
||||||
StatObject(bucketName, objectName string) (ObjectInfo, error)
|
|
||||||
RemoveObject(bucketName, objectName string) error
|
|
||||||
RemoveIncompleteUpload(bucketName, objectName string) error
|
|
||||||
|
|
||||||
// File to Object API.
|
|
||||||
FPutObject(bucketName, objectName, filePath, contentType string) (n int64, err error)
|
|
||||||
FGetObject(bucketName, objectName, filePath string) error
|
|
||||||
|
|
||||||
// PutObjectWithProgress for progress.
|
|
||||||
PutObjectWithProgress(bucketName, objectName string, reader io.Reader, contentType string, progress io.Reader) (n int64, err error)
|
|
||||||
|
|
||||||
// Presigned operations.
|
|
||||||
PresignedGetObject(bucketName, objectName string, expires time.Duration) (presignedURL string, err error)
|
|
||||||
PresignedPutObject(bucketName, objectName string, expires time.Duration) (presignedURL string, err error)
|
|
||||||
PresignedPostPolicy(*PostPolicy) (formData map[string]string, err error)
|
|
||||||
|
|
||||||
// Application info.
|
|
||||||
SetAppInfo(appName, appVersion string)
|
|
||||||
|
|
||||||
// Set custom transport.
|
|
||||||
SetCustomTransport(customTransport http.RoundTripper)
|
|
||||||
|
|
||||||
// HTTP tracing methods.
|
|
||||||
TraceOn(traceOutput io.Writer)
|
|
||||||
TraceOff()
|
|
||||||
}
|
|
||||||
|
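The hunks above drop the CloudStorageClient interface and, as the test changes below show, remove the canned-ACL parameter from bucket creation. As a hedged illustration only (not part of the diff), caller code against the updated minio-go API looks roughly like this; the bucket name is a placeholder and the endpoint/credential handling simply mirrors what the functional tests in this diff use:

package main

import (
	"log"
	"os"

	"github.com/minio/minio-go"
)

func main() {
	// NewV4 and the two-argument MakeBucket are the forms exercised by the
	// updated functional tests further down in this diff.
	c, err := minio.NewV4("s3.amazonaws.com", os.Getenv("ACCESS_KEY"), os.Getenv("SECRET_KEY"), false)
	if err != nil {
		log.Fatalln(err)
	}
	// The canned-ACL argument is gone; only bucket name and region remain.
	if err := c.MakeBucket("my-testbucket", "us-east-1"); err != nil {
		log.Fatalln(err)
	}
	log.Println("bucket created")
}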
@@ -24,6 +24,7 @@ import (
 "io/ioutil"
 "math/rand"
 "net/http"
+"net/url"
 "os"
 "testing"
 "time"
@@ -61,10 +62,10 @@ func TestMakeBucketErrorV2(t *testing.T) {
 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()))

 // Make a new bucket in 'eu-west-1'.
-if err = c.MakeBucket(bucketName, "private", "eu-west-1"); err != nil {
+if err = c.MakeBucket(bucketName, "eu-west-1"); err != nil {
 t.Fatal("Error:", err, bucketName)
 }
-if err = c.MakeBucket(bucketName, "private", "eu-west-1"); err == nil {
+if err = c.MakeBucket(bucketName, "eu-west-1"); err == nil {
 t.Fatal("Error: make bucket should should fail for", bucketName)
 }
 // Verify valid error response from server.
@@ -107,7 +108,7 @@ func TestGetObjectClosedTwiceV2(t *testing.T) {
 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()))

 // Make a new bucket.
-err = c.MakeBucket(bucketName, "private", "us-east-1")
+err = c.MakeBucket(bucketName, "us-east-1")
 if err != nil {
 t.Fatal("Error:", err, bucketName)
 }
@@ -192,7 +193,7 @@ func TestRemovePartiallyUploadedV2(t *testing.T) {
 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()))

 // make a new bucket.
-err = c.MakeBucket(bucketName, "private", "us-east-1")
+err = c.MakeBucket(bucketName, "us-east-1")
 if err != nil {
 t.Fatal("Error:", err, bucketName)
 }
@@ -229,7 +230,7 @@ func TestRemovePartiallyUploadedV2(t *testing.T) {
 }

 // Tests resumable put object cloud to cloud.
-func TestResumbalePutObjectV2(t *testing.T) {
+func TestResumablePutObjectV2(t *testing.T) {
 // By passing 'go test -short' skips these tests.
 if testing.Short() {
 t.Skip("skipping functional tests for the short runs")
@@ -259,7 +260,7 @@ func TestResumbalePutObjectV2(t *testing.T) {
 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()))

 // Make a new bucket.
-err = c.MakeBucket(bucketName, "private", "us-east-1")
+err = c.MakeBucket(bucketName, "us-east-1")
 if err != nil {
 t.Fatal("Error:", err, bucketName)
 }
@@ -340,6 +341,154 @@ func TestResumbalePutObjectV2(t *testing.T) {

 }

+// Tests FPutObject hidden contentType setting
+func TestFPutObjectV2(t *testing.T) {
+if testing.Short() {
+t.Skip("skipping functional tests for short runs")
+}
+
+// Seed random based on current time.
+rand.Seed(time.Now().Unix())
+
+// Instantiate new minio client object.
+c, err := minio.NewV2(
+"s3.amazonaws.com",
+os.Getenv("ACCESS_KEY"),
+os.Getenv("SECRET_KEY"),
+false,
+)
+if err != nil {
+t.Fatal("Error:", err)
+}
+
+// Enable tracing, write to stderr.
+// c.TraceOn(os.Stderr)
+
+// Set user agent.
+c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+// Generate a new random bucket name.
+bucketName := randString(60, rand.NewSource(time.Now().UnixNano()))
+
+// Make a new bucket.
+err = c.MakeBucket(bucketName, "us-east-1")
+if err != nil {
+t.Fatal("Error:", err, bucketName)
+}
+
+// Make a temp file with 11*1024*1024 bytes of data.
+file, err := ioutil.TempFile(os.TempDir(), "FPutObjectTest")
+if err != nil {
+t.Fatal("Error:", err)
+}
+
+n, err := io.CopyN(file, crand.Reader, 11*1024*1024)
+if err != nil {
+t.Fatal("Error:", err)
+}
+if n != int64(11*1024*1024) {
+t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", 11*1024*1024, n)
+}
+
+// Close the file pro-actively for windows.
+err = file.Close()
+if err != nil {
+t.Fatal("Error:", err)
+}
+
+// Set base object name
+objectName := bucketName + "FPutObject"
+
+// Perform standard FPutObject with contentType provided (Expecting application/octet-stream)
+n, err = c.FPutObject(bucketName, objectName+"-standard", file.Name(), "application/octet-stream")
+if err != nil {
+t.Fatal("Error:", err)
+}
+if n != int64(11*1024*1024) {
+t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", 11*1024*1024, n)
+}
+
+// Perform FPutObject with no contentType provided (Expecting application/octet-stream)
+n, err = c.FPutObject(bucketName, objectName+"-Octet", file.Name(), "")
+if err != nil {
+t.Fatal("Error:", err)
+}
+if n != int64(11*1024*1024) {
+t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", 11*1024*1024, n)
+}
+
+// Add extension to temp file name
+fileName := file.Name()
+err = os.Rename(file.Name(), fileName+".gtar")
+if err != nil {
+t.Fatal("Error:", err)
+}
+
+// Perform FPutObject with no contentType provided (Expecting application/x-gtar)
+n, err = c.FPutObject(bucketName, objectName+"-GTar", fileName+".gtar", "")
+if err != nil {
+t.Fatal("Error:", err)
+}
+if n != int64(11*1024*1024) {
+t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", 11*1024*1024, n)
+}
+
+// Check headers
+rStandard, err := c.StatObject(bucketName, objectName+"-standard")
+if err != nil {
+t.Fatal("Error:", err, bucketName, objectName+"-standard")
+}
+if rStandard.ContentType != "application/octet-stream" {
+t.Fatalf("Error: Content-Type headers mismatched, want %v, got %v\n",
+"application/octet-stream", rStandard.ContentType)
+}
+
+rOctet, err := c.StatObject(bucketName, objectName+"-Octet")
+if err != nil {
+t.Fatal("Error:", err, bucketName, objectName+"-Octet")
+}
+if rOctet.ContentType != "application/octet-stream" {
+t.Fatalf("Error: Content-Type headers mismatched, want %v, got %v\n",
+"application/octet-stream", rStandard.ContentType)
+}
+
+rGTar, err := c.StatObject(bucketName, objectName+"-GTar")
+if err != nil {
+t.Fatal("Error:", err, bucketName, objectName+"-GTar")
+}
+if rGTar.ContentType != "application/x-gtar" {
+t.Fatalf("Error: Content-Type headers mismatched, want %v, got %v\n",
+"application/x-gtar", rStandard.ContentType)
+}
+
+// Remove all objects and bucket and temp file
+err = c.RemoveObject(bucketName, objectName+"-standard")
+if err != nil {
+t.Fatal("Error: ", err)
+}
+
+err = c.RemoveObject(bucketName, objectName+"-Octet")
+if err != nil {
+t.Fatal("Error: ", err)
+}
+
+err = c.RemoveObject(bucketName, objectName+"-GTar")
+if err != nil {
+t.Fatal("Error: ", err)
+}
+
+err = c.RemoveBucket(bucketName)
+if err != nil {
+t.Fatal("Error:", err)
+}
+
+err = os.Remove(fileName + ".gtar")
+if err != nil {
+t.Fatal("Error:", err)
+}
+
+}
+
 // Tests resumable file based put object multipart upload.
 func TestResumableFPutObjectV2(t *testing.T) {
 if testing.Short() {
@@ -370,7 +519,7 @@ func TestResumableFPutObjectV2(t *testing.T) {
 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()))

 // make a new bucket.
-err = c.MakeBucket(bucketName, "private", "us-east-1")
+err = c.MakeBucket(bucketName, "us-east-1")
 if err != nil {
 t.Fatal("Error:", err, bucketName)
 }
@@ -447,7 +596,7 @@ func TestMakeBucketRegionsV2(t *testing.T) {
 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()))

 // Make a new bucket in 'eu-central-1'.
-if err = c.MakeBucket(bucketName, "private", "eu-west-1"); err != nil {
+if err = c.MakeBucket(bucketName, "eu-west-1"); err != nil {
 t.Fatal("Error:", err, bucketName)
 }

@@ -458,7 +607,7 @@ func TestMakeBucketRegionsV2(t *testing.T) {
 // Make a new bucket with '.' in its name, in 'us-west-2'. This
 // request is internally staged into a path style instead of
 // virtual host style.
-if err = c.MakeBucket(bucketName+".withperiod", "private", "us-west-2"); err != nil {
+if err = c.MakeBucket(bucketName+".withperiod", "us-west-2"); err != nil {
 t.Fatal("Error:", err, bucketName+".withperiod")
 }

@@ -468,70 +617,6 @@ func TestMakeBucketRegionsV2(t *testing.T) {
 }
 }

-// Tests resumable put object multipart upload.
-func TestResumablePutObjectV2(t *testing.T) {
-if testing.Short() {
-t.Skip("skipping functional tests for the short runs")
-}
-
-// Seed random based on current time.
-rand.Seed(time.Now().Unix())
-
-// Instantiate new minio client object.
-c, err := minio.NewV2(
-"s3.amazonaws.com",
-os.Getenv("ACCESS_KEY"),
-os.Getenv("SECRET_KEY"),
-false,
-)
-if err != nil {
-t.Fatal("Error:", err)
-}
-
-// Enable tracing, write to stderr.
-// c.TraceOn(os.Stderr)
-
-// Set user agent.
-c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
-// Generate a new random bucket name.
-bucketName := randString(60, rand.NewSource(time.Now().UnixNano()))
-
-// make a new bucket.
-err = c.MakeBucket(bucketName, "private", "us-east-1")
-if err != nil {
-t.Fatal("Error:", err, bucketName)
-}
-
-// generate 11MB
-buf := make([]byte, 11*1024*1024)
-
-_, err = io.ReadFull(crand.Reader, buf)
-if err != nil {
-t.Fatal("Error:", err)
-}
-
-objectName := bucketName + "-resumable"
-reader := bytes.NewReader(buf)
-n, err := c.PutObject(bucketName, objectName, reader, "application/octet-stream")
-if err != nil {
-t.Fatal("Error:", err, bucketName, objectName)
-}
-if n != int64(len(buf)) {
-t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", len(buf), n)
-}
-
-err = c.RemoveObject(bucketName, objectName)
-if err != nil {
-t.Fatal("Error: ", err)
-}
-
-err = c.RemoveBucket(bucketName)
-if err != nil {
-t.Fatal("Error:", err)
-}
-}
-
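The new TestFPutObjectV2 above relies on FPutObject falling back to extension-based content-type detection when an empty contentType is passed. A minimal hedged sketch of that behaviour, with a placeholder bucket, object and local path that are not taken from the diff:

package main

import (
	"log"
	"os"

	"github.com/minio/minio-go"
)

func main() {
	// Hypothetical endpoint/credentials; only the FPutObject call itself mirrors the diff.
	c, err := minio.NewV4("s3.amazonaws.com", os.Getenv("ACCESS_KEY"), os.Getenv("SECRET_KEY"), false)
	if err != nil {
		log.Fatalln(err)
	}
	// Empty contentType: minio-go derives it from the ".gtar" extension,
	// which is what the new test asserts (application/x-gtar).
	n, err := c.FPutObject("my-bucket", "backup.gtar", "/tmp/backup.gtar", "")
	if err != nil {
		log.Fatalln(err)
	}
	log.Printf("uploaded %d bytes", n)
}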
 // Tests get object ReaderSeeker interface methods.
 func TestGetObjectReadSeekFunctionalV2(t *testing.T) {
 if testing.Short() {
@@ -562,7 +647,7 @@ func TestGetObjectReadSeekFunctionalV2(t *testing.T) {
 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()))

 // Make a new bucket.
-err = c.MakeBucket(bucketName, "private", "us-east-1")
+err = c.MakeBucket(bucketName, "us-east-1")
 if err != nil {
 t.Fatal("Error:", err, bucketName)
 }
@@ -629,13 +714,37 @@ func TestGetObjectReadSeekFunctionalV2(t *testing.T) {
 if n != 0 {
 t.Fatalf("Error: number of bytes seeked back does not match, want 0, got %v\n", n)
 }
-var buffer bytes.Buffer
-if _, err = io.CopyN(&buffer, r, st.Size); err != nil {
+var buffer1 bytes.Buffer
+if n, err = io.CopyN(&buffer1, r, st.Size); err != nil {
+if err != io.EOF {
 t.Fatal("Error:", err)
 }
-if !bytes.Equal(buf, buffer.Bytes()) {
+}
+if !bytes.Equal(buf, buffer1.Bytes()) {
 t.Fatal("Error: Incorrect read bytes v/s original buffer.")
 }
+
+// Seek again and read again.
+n, err = r.Seek(offset-1, 0)
+if err != nil {
+t.Fatal("Error:", err)
+}
+if n != (offset - 1) {
+t.Fatalf("Error: number of bytes seeked back does not match, want %v, got %v\n", offset-1, n)
+}
+
+var buffer2 bytes.Buffer
+if _, err = io.CopyN(&buffer2, r, st.Size); err != nil {
+if err != io.EOF {
+t.Fatal("Error:", err)
+}
+}
+// Verify now lesser bytes.
+if !bytes.Equal(buf[2047:], buffer2.Bytes()) {
+t.Fatal("Error: Incorrect read bytes v/s original buffer.")
+}
+
 err = c.RemoveObject(bucketName, objectName)
 if err != nil {
 t.Fatal("Error: ", err)
@@ -676,7 +785,7 @@ func TestGetObjectReadAtFunctionalV2(t *testing.T) {
 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()))

 // Make a new bucket.
-err = c.MakeBucket(bucketName, "private", "us-east-1")
+err = c.MakeBucket(bucketName, "us-east-1")
 if err != nil {
 t.Fatal("Error:", err, bucketName)
 }
@@ -788,6 +897,132 @@ func TestGetObjectReadAtFunctionalV2(t *testing.T) {
 }
 }

+// Tests copy object
+func TestCopyObjectV2(t *testing.T) {
+if testing.Short() {
+t.Skip("Skipping functional tests for short runs")
+}
+// Seed random based on current time.
+rand.Seed(time.Now().Unix())
+
+// Instantiate new minio client object
+c, err := minio.NewV2(
+"s3.amazonaws.com",
+os.Getenv("ACCESS_KEY"),
+os.Getenv("SECRET_KEY"),
+false,
+)
+if err != nil {
+t.Fatal("Error:", err)
+}
+
+// Enable tracing, write to stderr.
+// c.TraceOn(os.Stderr)
+
+// Set user agent.
+c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+// Generate a new random bucket name.
+bucketName := randString(60, rand.NewSource(time.Now().UnixNano()))
+
+// Make a new bucket in 'us-east-1' (source bucket).
+err = c.MakeBucket(bucketName, "us-east-1")
+if err != nil {
+t.Fatal("Error:", err, bucketName)
+}
+
+// Make a new bucket in 'us-east-1' (destination bucket).
+err = c.MakeBucket(bucketName+"-copy", "us-east-1")
+if err != nil {
+t.Fatal("Error:", err, bucketName+"-copy")
+}
+
+// Generate data more than 32K
+buf := make([]byte, rand.Intn(1<<20)+32*1024)
+
+_, err = io.ReadFull(crand.Reader, buf)
+if err != nil {
+t.Fatal("Error:", err)
+}
+
+// Save the data
+objectName := randString(60, rand.NewSource(time.Now().UnixNano()))
+n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), "binary/octet-stream")
+if err != nil {
+t.Fatal("Error:", err, bucketName, objectName)
+}
+
+if n != int64(len(buf)) {
+t.Fatalf("Error: number of bytes does not match want %v, got %v",
+len(buf), n)
+}
+
+// Set copy conditions.
+copyConds := minio.NewCopyConditions()
+err = copyConds.SetModified(time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC))
+if err != nil {
+t.Fatal("Error:", err)
+}
+
+// Copy source.
+copySource := bucketName + "/" + objectName
+
+// Perform the Copy
+err = c.CopyObject(bucketName+"-copy", objectName+"-copy", copySource, copyConds)
+if err != nil {
+t.Fatal("Error:", err, bucketName+"-copy", objectName+"-copy")
+}
+
+// Source object
+reader, err := c.GetObject(bucketName, objectName)
+if err != nil {
+t.Fatal("Error:", err)
+}
+// Destination object
+readerCopy, err := c.GetObject(bucketName+"-copy", objectName+"-copy")
+if err != nil {
+t.Fatal("Error:", err)
+}
+// Check the various fields of source object against destination object.
+objInfo, err := reader.Stat()
+if err != nil {
+t.Fatal("Error:", err)
+}
+objInfoCopy, err := readerCopy.Stat()
+if err != nil {
+t.Fatal("Error:", err)
+}
+if objInfo.Size != objInfoCopy.Size {
+t.Fatalf("Error: number of bytes does not match, want %v, got %v\n",
+objInfo.Size, objInfoCopy.Size)
+}
+if objInfo.ETag != objInfoCopy.ETag {
+t.Fatalf("Error: ETags do not match, want %v, got %v\n",
+objInfoCopy.ETag, objInfo.ETag)
+}
+
+// Remove all objects and buckets
+err = c.RemoveObject(bucketName, objectName)
+if err != nil {
+t.Fatal("Error:", err)
+}
+
+err = c.RemoveObject(bucketName+"-copy", objectName+"-copy")
+if err != nil {
+t.Fatal("Error:", err)
+}
+
+err = c.RemoveBucket(bucketName)
+if err != nil {
+t.Fatal("Error:", err)
+}
+
+err = c.RemoveBucket(bucketName + "-copy")
+if err != nil {
+t.Fatal("Error:", err)
+}
+}
+
 // Tests comprehensive list of all methods.
 func TestFunctionalV2(t *testing.T) {
 if testing.Short() {
@@ -817,7 +1052,7 @@ func TestFunctionalV2(t *testing.T) {
 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()))

 // Make a new bucket.
-err = c.MakeBucket(bucketName, "private", "us-east-1")
+err = c.MakeBucket(bucketName, "us-east-1")
 if err != nil {
 t.Fatal("Error:", err, bucketName)
 }
@@ -844,22 +1079,11 @@ func TestFunctionalV2(t *testing.T) {
 }

 // Make the bucket 'public read/write'.
-err = c.SetBucketACL(bucketName, "public-read-write")
+err = c.SetBucketPolicy(bucketName, "", minio.BucketPolicyReadWrite)
 if err != nil {
 t.Fatal("Error:", err)
 }

-// Get the previously set acl.
-acl, err := c.GetBucketACL(bucketName)
-if err != nil {
-t.Fatal("Error:", err)
-}
-
-// ACL must be 'public read/write'.
-if acl != minio.BucketACL("public-read-write") {
-t.Fatal("Error:", acl)
-}
-
 // List all buckets.
 buckets, err := c.ListBuckets()
 if len(buckets) == 0 {
@@ -954,11 +1178,12 @@ func TestFunctionalV2(t *testing.T) {
 t.Fatal("Error: ", err)
 }

-presignedGetURL, err := c.PresignedGetObject(bucketName, objectName, 3600*time.Second)
+// Generate presigned GET object url.
+presignedGetURL, err := c.PresignedGetObject(bucketName, objectName, 3600*time.Second, nil)
 if err != nil {
 t.Fatal("Error: ", err)
 }
+// Verify if presigned url works.
 resp, err := http.Get(presignedGetURL)
 if err != nil {
 t.Fatal("Error: ", err)
@@ -974,6 +1199,34 @@ func TestFunctionalV2(t *testing.T) {
 t.Fatal("Error: bytes mismatch.")
 }

+// Set request parameters.
+reqParams := make(url.Values)
+reqParams.Set("response-content-disposition", "attachment; filename=\"test.txt\"")
+// Generate presigned GET object url.
+presignedGetURL, err = c.PresignedGetObject(bucketName, objectName, 3600*time.Second, reqParams)
+if err != nil {
+t.Fatal("Error: ", err)
+}
+// Verify if presigned url works.
+resp, err = http.Get(presignedGetURL)
+if err != nil {
+t.Fatal("Error: ", err)
+}
+if resp.StatusCode != http.StatusOK {
+t.Fatal("Error: ", resp.Status)
+}
+newPresignedBytes, err = ioutil.ReadAll(resp.Body)
+if err != nil {
+t.Fatal("Error: ", err)
+}
+if !bytes.Equal(newPresignedBytes, buf) {
+t.Fatal("Error: bytes mismatch for presigned GET url.")
+}
+// Verify content disposition.
+if resp.Header.Get("Content-Disposition") != "attachment; filename=\"test.txt\"" {
+t.Fatalf("Error: wrong Content-Disposition received %s", resp.Header.Get("Content-Disposition"))
+}
+
 presignedPutURL, err := c.PresignedPutObject(bucketName, objectName+"-presigned", 3600*time.Second)
 if err != nil {
 t.Fatal("Error: ", err)
@@ -987,7 +1240,13 @@ func TestFunctionalV2(t *testing.T) {
 if err != nil {
 t.Fatal("Error: ", err)
 }
-httpClient := &http.Client{}
+httpClient := &http.Client{
+// Setting a sensible time out of 30secs to wait for response
+// headers. Request is pro-actively cancelled after 30secs
+// with no response.
+Timeout: 30 * time.Second,
+Transport: http.DefaultTransport,
+}
 resp, err = httpClient.Do(req)
 if err != nil {
 t.Fatal("Error: ", err)
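The presigned-URL changes above show that PresignedGetObject now takes a fourth argument for request parameters such as response-content-disposition. A hedged standalone sketch of that call; the endpoint/credential handling mirrors the tests, while the bucket, object and file name are placeholders:

package main

import (
	"log"
	"net/url"
	"os"
	"time"

	"github.com/minio/minio-go"
)

func main() {
	c, err := minio.NewV4("s3.amazonaws.com", os.Getenv("ACCESS_KEY"), os.Getenv("SECRET_KEY"), false)
	if err != nil {
		log.Fatalln(err)
	}
	// Ask S3 to serve the object as a download with a fixed file name.
	reqParams := make(url.Values)
	reqParams.Set("response-content-disposition", "attachment; filename=\"report.txt\"")
	u, err := c.PresignedGetObject("my-bucket", "my-object", time.Hour, reqParams)
	if err != nil {
		log.Fatalln(err)
	}
	log.Println("presigned GET URL:", u)
}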
@@ -24,6 +24,7 @@ import (
 "io/ioutil"
 "math/rand"
 "net/http"
+"net/url"
 "os"
 "testing"
 "time"
@@ -85,10 +86,10 @@ func TestMakeBucketError(t *testing.T) {
 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()))

 // Make a new bucket in 'eu-central-1'.
-if err = c.MakeBucket(bucketName, "private", "eu-central-1"); err != nil {
+if err = c.MakeBucket(bucketName, "eu-central-1"); err != nil {
 t.Fatal("Error:", err, bucketName)
 }
-if err = c.MakeBucket(bucketName, "private", "eu-central-1"); err == nil {
+if err = c.MakeBucket(bucketName, "eu-central-1"); err == nil {
 t.Fatal("Error: make bucket should should fail for", bucketName)
 }
 // Verify valid error response from server.
@@ -131,7 +132,7 @@ func TestMakeBucketRegions(t *testing.T) {
 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()))

 // Make a new bucket in 'eu-central-1'.
-if err = c.MakeBucket(bucketName, "private", "eu-central-1"); err != nil {
+if err = c.MakeBucket(bucketName, "eu-central-1"); err != nil {
 t.Fatal("Error:", err, bucketName)
 }

@@ -142,7 +143,7 @@ func TestMakeBucketRegions(t *testing.T) {
 // Make a new bucket with '.' in its name, in 'us-west-2'. This
 // request is internally staged into a path style instead of
 // virtual host style.
-if err = c.MakeBucket(bucketName+".withperiod", "private", "us-west-2"); err != nil {
+if err = c.MakeBucket(bucketName+".withperiod", "us-west-2"); err != nil {
 t.Fatal("Error:", err, bucketName+".withperiod")
 }

@@ -182,7 +183,7 @@ func TestGetObjectClosedTwice(t *testing.T) {
 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()))

 // Make a new bucket.
-err = c.MakeBucket(bucketName, "private", "us-east-1")
+err = c.MakeBucket(bucketName, "us-east-1")
 if err != nil {
 t.Fatal("Error:", err, bucketName)
 }
@@ -267,7 +268,7 @@ func TestRemovePartiallyUploaded(t *testing.T) {
 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()))

 // Make a new bucket.
-err = c.MakeBucket(bucketName, "private", "us-east-1")
+err = c.MakeBucket(bucketName, "us-east-1")
 if err != nil {
 t.Fatal("Error:", err, bucketName)
 }
@@ -307,7 +308,7 @@ func TestRemovePartiallyUploaded(t *testing.T) {
 }

 // Tests resumable put object cloud to cloud.
-func TestResumbalePutObject(t *testing.T) {
+func TestResumablePutObject(t *testing.T) {
 // By passing 'go test -short' skips these tests.
 if testing.Short() {
 t.Skip("skipping functional tests for the short runs")
@@ -337,7 +338,7 @@ func TestResumbalePutObject(t *testing.T) {
 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()))

 // Make a new bucket.
-err = c.MakeBucket(bucketName, "private", "us-east-1")
+err = c.MakeBucket(bucketName, "us-east-1")
 if err != nil {
 t.Fatal("Error:", err, bucketName)
 }
@@ -447,7 +448,7 @@ func TestResumableFPutObject(t *testing.T) {
 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()))

 // Make a new bucket.
-err = c.MakeBucket(bucketName, "private", "us-east-1")
+err = c.MakeBucket(bucketName, "us-east-1")
 if err != nil {
 t.Fatal("Error:", err, bucketName)
 }
@@ -497,10 +498,10 @@ func TestResumableFPutObject(t *testing.T) {
 }
 }

-// Tests resumable put object multipart upload.
-func TestResumablePutObject(t *testing.T) {
+// Tests FPutObject hidden contentType setting
+func TestFPutObject(t *testing.T) {
 if testing.Short() {
-t.Skip("skipping functional tests for the short runs")
+t.Skip("skipping functional tests for short runs")
 }

 // Seed random based on current time.
@@ -527,30 +528,108 @@ func TestResumablePutObject(t *testing.T) {
 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()))

 // Make a new bucket.
-err = c.MakeBucket(bucketName, "private", "us-east-1")
+err = c.MakeBucket(bucketName, "us-east-1")
 if err != nil {
 t.Fatal("Error:", err, bucketName)
 }

-// Generate 11MB
-buf := make([]byte, 11*1024*1024)
+// Make a temp file with 11*1024*1024 bytes of data.
+file, err := ioutil.TempFile(os.TempDir(), "FPutObjectTest")

-_, err = io.ReadFull(crand.Reader, buf)
 if err != nil {
 t.Fatal("Error:", err)
 }

-objectName := bucketName + "-resumable"
-reader := bytes.NewReader(buf)
-n, err := c.PutObject(bucketName, objectName, reader, "application/octet-stream")
+n, err := io.CopyN(file, crand.Reader, 11*1024*1024)
 if err != nil {
-t.Fatal("Error:", err, bucketName, objectName)
+t.Fatal("Error:", err)
 }
-if n != int64(len(buf)) {
-t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", len(buf), n)
+if n != int64(11*1024*1024) {
+t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", 11*1024*1024, n)
 }

-err = c.RemoveObject(bucketName, objectName)
+// Close the file pro-actively for windows.
+err = file.Close()
+if err != nil {
+t.Fatal("Error:", err)
+}
+
+// Set base object name
+objectName := bucketName + "FPutObject"
+
+// Perform standard FPutObject with contentType provided (Expecting application/octet-stream)
+n, err = c.FPutObject(bucketName, objectName+"-standard", file.Name(), "application/octet-stream")
+if err != nil {
+t.Fatal("Error:", err)
+}
+if n != int64(11*1024*1024) {
+t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", 11*1024*1024, n)
+}
+
+// Perform FPutObject with no contentType provided (Expecting application/octet-stream)
+n, err = c.FPutObject(bucketName, objectName+"-Octet", file.Name(), "")
+if err != nil {
+t.Fatal("Error:", err)
+}
+if n != int64(11*1024*1024) {
+t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", 11*1024*1024, n)
+}
+
+// Add extension to temp file name
+fileName := file.Name()
+err = os.Rename(file.Name(), fileName+".gtar")
+if err != nil {
+t.Fatal("Error:", err)
+}
+
+// Perform FPutObject with no contentType provided (Expecting application/x-gtar)
+n, err = c.FPutObject(bucketName, objectName+"-GTar", fileName+".gtar", "")
+if err != nil {
+t.Fatal("Error:", err)
+}
+if n != int64(11*1024*1024) {
+t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", 11*1024*1024, n)
+}
+
+// Check headers
+rStandard, err := c.StatObject(bucketName, objectName+"-standard")
+if err != nil {
+t.Fatal("Error:", err, bucketName, objectName+"-standard")
+}
+if rStandard.ContentType != "application/octet-stream" {
+t.Fatalf("Error: Content-Type headers mismatched, want %v, got %v\n",
+"application/octet-stream", rStandard.ContentType)
+}
+
+rOctet, err := c.StatObject(bucketName, objectName+"-Octet")
+if err != nil {
+t.Fatal("Error:", err, bucketName, objectName+"-Octet")
+}
+if rOctet.ContentType != "application/octet-stream" {
+t.Fatalf("Error: Content-Type headers mismatched, want %v, got %v\n",
+"application/octet-stream", rStandard.ContentType)
+}
+
+rGTar, err := c.StatObject(bucketName, objectName+"-GTar")
+if err != nil {
+t.Fatal("Error:", err, bucketName, objectName+"-GTar")
+}
+if rGTar.ContentType != "application/x-gtar" {
+t.Fatalf("Error: Content-Type headers mismatched, want %v, got %v\n",
+"application/x-gtar", rStandard.ContentType)
+}
+
+// Remove all objects and bucket and temp file
+err = c.RemoveObject(bucketName, objectName+"-standard")
+if err != nil {
+t.Fatal("Error: ", err)
+}
+
+err = c.RemoveObject(bucketName, objectName+"-Octet")
+if err != nil {
+t.Fatal("Error: ", err)
+}
+
+err = c.RemoveObject(bucketName, objectName+"-GTar")
 if err != nil {
 t.Fatal("Error: ", err)
 }
@@ -559,6 +638,12 @@ func TestResumablePutObject(t *testing.T) {
 if err != nil {
 t.Fatal("Error:", err)
 }
+
+err = os.Remove(fileName + ".gtar")
+if err != nil {
+t.Fatal("Error:", err)
+}
+
 }

 // Tests get object ReaderSeeker interface methods.
@@ -591,7 +676,7 @@ func TestGetObjectReadSeekFunctional(t *testing.T) {
 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()))

 // Make a new bucket.
-err = c.MakeBucket(bucketName, "private", "us-east-1")
+err = c.MakeBucket(bucketName, "us-east-1")
 if err != nil {
 t.Fatal("Error:", err, bucketName)
 }
@@ -658,13 +743,37 @@ func TestGetObjectReadSeekFunctional(t *testing.T) {
 if n != 0 {
 t.Fatalf("Error: number of bytes seeked back does not match, want 0, got %v\n", n)
 }
-var buffer bytes.Buffer
-if _, err = io.CopyN(&buffer, r, st.Size); err != nil {
+var buffer1 bytes.Buffer
+if n, err = io.CopyN(&buffer1, r, st.Size); err != nil {
+if err != io.EOF {
 t.Fatal("Error:", err)
 }
-if !bytes.Equal(buf, buffer.Bytes()) {
+}
+if !bytes.Equal(buf, buffer1.Bytes()) {
 t.Fatal("Error: Incorrect read bytes v/s original buffer.")
 }
+
+// Seek again and read again.
+n, err = r.Seek(offset-1, 0)
+if err != nil {
+t.Fatal("Error:", err)
+}
+if n != (offset - 1) {
+t.Fatalf("Error: number of bytes seeked back does not match, want %v, got %v\n", offset-1, n)
+}
+
+var buffer2 bytes.Buffer
+if _, err = io.CopyN(&buffer2, r, st.Size); err != nil {
+if err != io.EOF {
+t.Fatal("Error:", err)
+}
+}
+// Verify now lesser bytes.
+if !bytes.Equal(buf[2047:], buffer2.Bytes()) {
+t.Fatal("Error: Incorrect read bytes v/s original buffer.")
+}
+
 err = c.RemoveObject(bucketName, objectName)
 if err != nil {
 t.Fatal("Error: ", err)
@@ -705,7 +814,7 @@ func TestGetObjectReadAtFunctional(t *testing.T) {
 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()))

 // Make a new bucket.
-err = c.MakeBucket(bucketName, "private", "us-east-1")
+err = c.MakeBucket(bucketName, "us-east-1")
 if err != nil {
 t.Fatal("Error:", err, bucketName)
 }
@@ -817,6 +926,132 @@ func TestGetObjectReadAtFunctional(t *testing.T) {
 }
 }

+// Tests copy object
+func TestCopyObject(t *testing.T) {
+if testing.Short() {
+t.Skip("Skipping functional tests for short runs")
+}
+// Seed random based on current time.
+rand.Seed(time.Now().Unix())
+
+// Instantiate new minio client object
+c, err := minio.NewV4(
+"s3.amazonaws.com",
+os.Getenv("ACCESS_KEY"),
+os.Getenv("SECRET_KEY"),
+false,
+)
+if err != nil {
+t.Fatal("Error:", err)
+}
+
+// Enable tracing, write to stderr.
+// c.TraceOn(os.Stderr)
+
+// Set user agent.
+c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+// Generate a new random bucket name.
+bucketName := randString(60, rand.NewSource(time.Now().UnixNano()))
+
+// Make a new bucket in 'us-east-1' (source bucket).
+err = c.MakeBucket(bucketName, "us-east-1")
+if err != nil {
+t.Fatal("Error:", err, bucketName)
+}
+
+// Make a new bucket in 'us-east-1' (destination bucket).
+err = c.MakeBucket(bucketName+"-copy", "us-east-1")
+if err != nil {
+t.Fatal("Error:", err, bucketName+"-copy")
+}
+
+// Generate data more than 32K
+buf := make([]byte, rand.Intn(1<<20)+32*1024)
+
+_, err = io.ReadFull(crand.Reader, buf)
+if err != nil {
+t.Fatal("Error:", err)
+}
+
+// Save the data
+objectName := randString(60, rand.NewSource(time.Now().UnixNano()))
+n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), "binary/octet-stream")
+if err != nil {
+t.Fatal("Error:", err, bucketName, objectName)
+}
+
+if n != int64(len(buf)) {
+t.Fatalf("Error: number of bytes does not match want %v, got %v",
+len(buf), n)
+}
+
+// Set copy conditions.
+copyConds := minio.NewCopyConditions()
+err = copyConds.SetModified(time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC))
+if err != nil {
+t.Fatal("Error:", err)
+}
+
+// Copy source.
+copySource := bucketName + "/" + objectName
+
+// Perform the Copy
+err = c.CopyObject(bucketName+"-copy", objectName+"-copy", copySource, copyConds)
+if err != nil {
+t.Fatal("Error:", err, bucketName+"-copy", objectName+"-copy")
+}
+
+// Source object
+reader, err := c.GetObject(bucketName, objectName)
+if err != nil {
+t.Fatal("Error:", err)
+}
+// Destination object
+readerCopy, err := c.GetObject(bucketName+"-copy", objectName+"-copy")
+if err != nil {
+t.Fatal("Error:", err)
+}
+// Check the various fields of source object against destination object.
+objInfo, err := reader.Stat()
+if err != nil {
+t.Fatal("Error:", err)
+}
+objInfoCopy, err := readerCopy.Stat()
+if err != nil {
+t.Fatal("Error:", err)
+}
+if objInfo.Size != objInfoCopy.Size {
+t.Fatalf("Error: number of bytes does not match, want %v, got %v\n",
+objInfo.Size, objInfoCopy.Size)
+}
+if objInfo.ETag != objInfoCopy.ETag {
+t.Fatalf("Error: ETags do not match, want %v, got %v\n",
+objInfoCopy.ETag, objInfo.ETag)
+}
+
+// Remove all objects and buckets
+err = c.RemoveObject(bucketName, objectName)
+if err != nil {
+t.Fatal("Error:", err)
+}
+
+err = c.RemoveObject(bucketName+"-copy", objectName+"-copy")
+if err != nil {
+t.Fatal("Error:", err)
+}
+
+err = c.RemoveBucket(bucketName)
+if err != nil {
+t.Fatal("Error:", err)
+}
+
+err = c.RemoveBucket(bucketName + "-copy")
+if err != nil {
+t.Fatal("Error:", err)
+}
+}
+
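The server-side copy test added above introduces CopyObject together with CopyConditions. A hedged sketch of the call sequence it exercises; the bucket and object names are placeholders, and only the function names and argument order come from the diff:

package main

import (
	"log"
	"os"
	"time"

	"github.com/minio/minio-go"
)

func main() {
	c, err := minio.NewV4("s3.amazonaws.com", os.Getenv("ACCESS_KEY"), os.Getenv("SECRET_KEY"), false)
	if err != nil {
		log.Fatalln(err)
	}
	// Only copy when the source was modified after the given time.
	conds := minio.NewCopyConditions()
	if err := conds.SetModified(time.Date(2014, time.April, 1, 0, 0, 0, 0, time.UTC)); err != nil {
		log.Fatalln(err)
	}
	// Destination bucket and object first, then "sourcebucket/sourceobject".
	if err := c.CopyObject("dst-bucket", "dst-object", "src-bucket/src-object", conds); err != nil {
		log.Fatalln(err)
	}
	log.Println("copy complete")
}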
 // Tests comprehensive list of all methods.
 func TestFunctional(t *testing.T) {
 if testing.Short() {
@@ -846,7 +1081,7 @@ func TestFunctional(t *testing.T) {
 bucketName := randString(60, rand.NewSource(time.Now().UnixNano()))

 // Make a new bucket.
-err = c.MakeBucket(bucketName, "private", "us-east-1")
+err = c.MakeBucket(bucketName, "us-east-1")
 if err != nil {
 t.Fatal("Error:", err, bucketName)
 }
@@ -872,23 +1107,54 @@ func TestFunctional(t *testing.T) {
 t.Fatal("Error:", err, bucketName)
 }

+// Asserting the default bucket policy.
+policy, err := c.GetBucketPolicy(bucketName, "")
+if err != nil {
+t.Fatal("Error:", err)
+}
+if policy != "none" {
+t.Fatalf("Default bucket policy incorrect")
+}
+// Set the bucket policy to 'public readonly'.
+err = c.SetBucketPolicy(bucketName, "", minio.BucketPolicyReadOnly)
+if err != nil {
+t.Fatal("Error:", err)
+}
+// should return policy `readonly`.
+policy, err = c.GetBucketPolicy(bucketName, "")
+if err != nil {
+t.Fatal("Error:", err)
+}
+if policy != "readonly" {
+t.Fatalf("Expected bucket policy to be readonly")
+}
+
+// Make the bucket 'public writeonly'.
+err = c.SetBucketPolicy(bucketName, "", minio.BucketPolicyWriteOnly)
+if err != nil {
+t.Fatal("Error:", err)
+}
+// should return policy `writeonly`.
+policy, err = c.GetBucketPolicy(bucketName, "")
+if err != nil {
+t.Fatal("Error:", err)
+}
+if policy != "writeonly" {
+t.Fatalf("Expected bucket policy to be writeonly")
+}
 // Make the bucket 'public read/write'.
-err = c.SetBucketACL(bucketName, "public-read-write")
+err = c.SetBucketPolicy(bucketName, "", minio.BucketPolicyReadWrite)
 if err != nil {
 t.Fatal("Error:", err)
 }
-
-// Get the previously set acl.
-acl, err := c.GetBucketACL(bucketName)
+// should return policy `readwrite`.
+policy, err = c.GetBucketPolicy(bucketName, "")
 if err != nil {
 t.Fatal("Error:", err)
 }
-
-// ACL must be 'public read/write'.
-if acl != minio.BucketACL("public-read-write") {
-t.Fatal("Error:", acl)
+if policy != "readwrite" {
+t.Fatalf("Expected bucket policy to be readwrite")
 }

 // List all buckets.
 buckets, err := c.ListBuckets()
 if len(buckets) == 0 {
@@ -983,11 +1249,13 @@ func TestFunctional(t *testing.T) {
 t.Fatal("Error: ", err)
 }

-presignedGetURL, err := c.PresignedGetObject(bucketName, objectName, 3600*time.Second)
+// Generate presigned GET object url.
+presignedGetURL, err := c.PresignedGetObject(bucketName, objectName, 3600*time.Second, nil)
 if err != nil {
 t.Fatal("Error: ", err)
 }
+
+// Verify if presigned url works.
 resp, err := http.Get(presignedGetURL)
 if err != nil {
 t.Fatal("Error: ", err)
@@ -1003,6 +1271,32 @@ func TestFunctional(t *testing.T) {
 t.Fatal("Error: bytes mismatch.")
 }

+// Set request parameters.
+reqParams := make(url.Values)
+reqParams.Set("response-content-disposition", "attachment; filename=\"test.txt\"")
+presignedGetURL, err = c.PresignedGetObject(bucketName, objectName, 3600*time.Second, reqParams)
+if err != nil {
+t.Fatal("Error: ", err)
+}
+// Verify if presigned url works.
+resp, err = http.Get(presignedGetURL)
+if err != nil {
+t.Fatal("Error: ", err)
+}
+if resp.StatusCode != http.StatusOK {
+t.Fatal("Error: ", resp.Status)
+}
+newPresignedBytes, err = ioutil.ReadAll(resp.Body)
+if err != nil {
+t.Fatal("Error: ", err)
+}
+if !bytes.Equal(newPresignedBytes, buf) {
+t.Fatal("Error: bytes mismatch for presigned GET URL.")
+}
+if resp.Header.Get("Content-Disposition") != "attachment; filename=\"test.txt\"" {
+t.Fatalf("Error: wrong Content-Disposition received %s", resp.Header.Get("Content-Disposition"))
+}
+
 presignedPutURL, err := c.PresignedPutObject(bucketName, objectName+"-presigned", 3600*time.Second)
 if err != nil {
 t.Fatal("Error: ", err)
@@ -1016,7 +1310,13 @@ func TestFunctional(t *testing.T) {
 if err != nil {
 t.Fatal("Error: ", err)
 }
-httpClient := &http.Client{}
+httpClient := &http.Client{
+// Setting a sensible time out of 30secs to wait for response
+// headers. Request is pro-actively cancelled after 30secs
+// with no response.
+Timeout: 30 * time.Second,
+Transport: http.DefaultTransport,
+}
 resp, err = httpClient.Do(req)
 if err != nil {
 t.Fatal("Error: ", err)
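The policy assertions added above replace the old canned-ACL calls: a bucket now carries one of the policies none, readonly, writeonly or readwrite. A hedged sketch of setting a policy and reading it back; the bucket name is a placeholder and only the functions and constants shown in the diff are used:

package main

import (
	"log"
	"os"

	"github.com/minio/minio-go"
)

func main() {
	c, err := minio.NewV4("s3.amazonaws.com", os.Getenv("ACCESS_KEY"), os.Getenv("SECRET_KEY"), false)
	if err != nil {
		log.Fatalln(err)
	}
	// Grant anonymous read access to objects under the bucket.
	if err := c.SetBucketPolicy("my-bucket", "", minio.BucketPolicyReadOnly); err != nil {
		log.Fatalln(err)
	}
	// Reading it back should report "readonly", as the test above asserts.
	policy, err := c.GetBucketPolicy("my-bucket", "")
	if err != nil {
		log.Fatalln(err)
	}
	log.Println("bucket policy:", policy)
}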
@@ -160,31 +160,6 @@ func TestValidBucketLocation(t *testing.T) {
 }
 }

-// Tests valid bucket names.
-func TestBucketNames(t *testing.T) {
-buckets := []struct {
-name string
-valid error
-}{
-{".mybucket", ErrInvalidBucketName("Bucket name cannot start or end with a '.' dot.")},
-{"mybucket.", ErrInvalidBucketName("Bucket name cannot start or end with a '.' dot.")},
-{"mybucket-", ErrInvalidBucketName("Bucket name contains invalid characters.")},
-{"my", ErrInvalidBucketName("Bucket name cannot be smaller than 3 characters.")},
-{"", ErrInvalidBucketName("Bucket name cannot be empty.")},
-{"my..bucket", ErrInvalidBucketName("Bucket name cannot have successive periods.")},
-{"my.bucket.com", nil},
-{"my-bucket", nil},
-{"123my-bucket", nil},
-}
-
-for _, b := range buckets {
-err := isValidBucketName(b.name)
-if err != b.valid {
-t.Fatal("Error:", err)
-}
-}
-}
-
 // Tests temp file.
 func TestTempFile(t *testing.T) {
 tmpFile, err := newTempFile("testing")
@@ -340,17 +315,17 @@ func TestSignatureType(t *testing.T) {
 }
 }

-// Tests bucket acl types.
-func TestBucketACLTypes(t *testing.T) {
+// Tests bucket policy types.
+func TestBucketPolicyTypes(t *testing.T) {
 want := map[string]bool{
-"private": true,
-"public-read": true,
-"public-read-write": true,
-"authenticated-read": true,
+"none": true,
+"readonly": true,
+"writeonly": true,
+"readwrite": true,
 "invalid": false,
 }
-for acl, ok := range want {
-if BucketACL(acl).isValidBucketACL() != ok {
+for bucketPolicy, ok := range want {
+if BucketPolicy(bucketPolicy).isValidBucketPolicy() != ok {
 t.Fatal("Error")
 }
 }
@@ -396,188 +371,3 @@ func TestPartSize(t *testing.T) {
 t.Fatalf("Error: expecting last part size of 241172480: got %v instead", lastPartSize)
 }
 }
-
-// Tests url encoding.
-func TestURLEncoding(t *testing.T) {
-type urlStrings struct {
-name string
-encodedName string
-}
-
-want := []urlStrings{
-{
-name: "bigfile-1._%",
-encodedName: "bigfile-1._%25",
-},
-{
-name: "本語",
-encodedName: "%E6%9C%AC%E8%AA%9E",
-},
-{
-name: "本語.1",
-encodedName: "%E6%9C%AC%E8%AA%9E.1",
-},
-{
-name: ">123>3123123",
-encodedName: "%3E123%3E3123123",
-},
-{
-name: "test 1 2.txt",
-encodedName: "test%201%202.txt",
-},
-{
-name: "test++ 1.txt",
-encodedName: "test%2B%2B%201.txt",
-},
-}
-
-for _, u := range want {
-if u.encodedName != urlEncodePath(u.name) {
-t.Fatal("Error")
-}
-}
-}
-
-// Tests constructing valid endpoint url.
-func TestGetEndpointURL(t *testing.T) {
-if _, err := getEndpointURL("s3.amazonaws.com", false); err != nil {
-t.Fatal("Error:", err)
-}
-if _, err := getEndpointURL("192.168.1.1", false); err != nil {
-t.Fatal("Error:", err)
-}
-if _, err := getEndpointURL("13333.123123.-", false); err == nil {
-t.Fatal("Error")
-}
-if _, err := getEndpointURL("s3.aamzza.-", false); err == nil {
-t.Fatal("Error")
-}
-if _, err := getEndpointURL("s3.amazonaws.com:443", false); err == nil {
-t.Fatal("Error")
-}
-}
-
-// Tests valid ip address.
-func TestValidIPAddr(t *testing.T) {
-type validIP struct {
-ip string
-valid bool
-}
-
-want := []validIP{
-{
-ip: "192.168.1.1",
-valid: true,
-},
-{
-ip: "192.1.8",
-valid: false,
-},
-{
-ip: "..192.",
-valid: false,
-},
-{
-ip: "192.168.1.1.1",
-valid: false,
-},
-}
-for _, w := range want {
-valid := isValidIP(w.ip)
-if valid != w.valid {
-t.Fatal("Error")
-}
-}
-}
-
-// Tests valid endpoint domain.
-func TestValidEndpointDomain(t *testing.T) {
-type validEndpoint struct {
-endpointDomain string
-valid bool
-}
-
-want := []validEndpoint{
-{
-endpointDomain: "s3.amazonaws.com",
-valid: true,
-},
-{
-endpointDomain: "s3.amazonaws.com_",
-valid: false,
-},
-{
-endpointDomain: "%$$$",
-valid: false,
-},
-{
-endpointDomain: "s3.amz.test.com",
-valid: true,
-},
-{
-endpointDomain: "s3.%%",
-valid: false,
-},
-{
-endpointDomain: "localhost",
-valid: true,
-},
-{
-endpointDomain: "-localhost",
-valid: false,
-},
-{
-endpointDomain: "",
-valid: false,
-},
-{
-endpointDomain: "\n \t",
-valid: false,
-},
-{
-endpointDomain: " ",
-valid: false,
-},
-}
-for _, w := range want {
-valid := isValidDomain(w.endpointDomain)
-if valid != w.valid {
-t.Fatal("Error:", w.endpointDomain)
-}
-}
-}
-
-// Tests valid endpoint url.
-func TestValidEndpointURL(t *testing.T) {
-type validURL struct {
-url string
-valid bool
-}
-want := []validURL{
-{
-url: "https://s3.amazonaws.com",
-valid: true,
-},
-{
-url: "https://s3.amazonaws.com/bucket/object",
-valid: false,
-},
-{
-url: "192.168.1.1",
-valid: false,
-},
-}
-for _, w := range want {
-u, err := url.Parse(w.url)
-if err != nil {
-t.Fatal("Error:", err)
-}
-valid := false
-if err := isValidEndpointURL(u); err == nil {
-valid = true
-}
-if valid != w.valid {
-t.Fatal("Error")
-}
-}
-}
@@ -17,8 +17,8 @@ install:
 - go version
 - go env
 - go get -u github.com/golang/lint/golint
-- go get -u golang.org/x/tools/cmd/vet
 - go get -u github.com/remyoudompheng/go-misc/deadcode
+- go get -u github.com/gordonklaus/ineffassign
 
 # to run your custom scripts instead of automatic MSBuild
 build_script:
@@ -26,6 +26,7 @@ build_script:
 - gofmt -s -l .
 - golint github.com/minio/minio-go...
 - deadcode
+- ineffassign .
 - go test -short -v
 - go test -short -race -v
 
@@ -1,75 +0,0 @@
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package minio
-
-// BucketACL - Bucket level access control.
-type BucketACL string
-
-// Different types of ACL's currently supported for buckets.
-const (
-	bucketPrivate       = BucketACL("private")
-	bucketReadOnly      = BucketACL("public-read")
-	bucketPublic        = BucketACL("public-read-write")
-	bucketAuthenticated = BucketACL("authenticated-read")
-)
-
-// Stringify acl.
-func (b BucketACL) String() string {
-	if string(b) == "" {
-		return "private"
-	}
-	return string(b)
-}
-
-// isValidBucketACL - Is provided acl string supported.
-func (b BucketACL) isValidBucketACL() bool {
-	switch true {
-	case b.isPrivate():
-		fallthrough
-	case b.isReadOnly():
-		fallthrough
-	case b.isPublic():
-		fallthrough
-	case b.isAuthenticated():
-		return true
-	case b.String() == "private":
-		// By default its "private"
-		return true
-	default:
-		return false
-	}
-}
-
-// isPrivate - Is acl Private.
-func (b BucketACL) isPrivate() bool {
-	return b == bucketPrivate
-}
-
-// isPublicRead - Is acl PublicRead.
-func (b BucketACL) isReadOnly() bool {
-	return b == bucketReadOnly
-}
-
-// isPublicReadWrite - Is acl PublicReadWrite.
-func (b BucketACL) isPublic() bool {
-	return b == bucketPublic
-}
-
-// isAuthenticated - Is acl AuthenticatedRead.
-func (b BucketACL) isAuthenticated() bool {
-	return b == bucketAuthenticated
-}
@@ -20,7 +20,8 @@ import (
 	"encoding/hex"
 	"net/http"
 	"net/url"
-	"path/filepath"
+	"path"
+	"strings"
 	"sync"
 )
 
@@ -67,15 +68,18 @@ func (r *bucketLocationCache) Delete(bucketName string) {
 
 // getBucketLocation - Get location for the bucketName from location map cache.
 func (c Client) getBucketLocation(bucketName string) (string, error) {
-	// For anonymous requests, default to "us-east-1" and let other calls
-	// move forward.
-	if c.anonymous {
-		return "us-east-1", nil
-	}
 	if location, ok := c.bucketLocCache.Get(bucketName); ok {
 		return location, nil
 	}
 
+	if isAmazonChinaEndpoint(c.endpointURL) {
+		// For china specifically we need to set everything to
+		// cn-north-1 for now, there is no easier way until AWS S3
+		// provides a cleaner compatible API across "us-east-1" and
+		// China region.
+		return "cn-north-1", nil
+	}
+
 	// Initialize a new request.
 	req, err := c.getBucketLocationRequest(bucketName)
 	if err != nil {
@@ -88,9 +92,27 @@ func (c Client) getBucketLocation(bucketName string) (string, error) {
 	if err != nil {
 		return "", err
 	}
+	location, err := processBucketLocationResponse(resp, bucketName)
+	if err != nil {
+		return "", err
+	}
+	c.bucketLocCache.Set(bucketName, location)
+	return location, nil
+}
+
+// processes the getBucketLocation http response from the server.
+func processBucketLocationResponse(resp *http.Response, bucketName string) (bucketLocation string, err error) {
 	if resp != nil {
 		if resp.StatusCode != http.StatusOK {
-			return "", httpRespToErrorResponse(resp, bucketName, "")
+			err = httpRespToErrorResponse(resp, bucketName, "")
+			errResp := ToErrorResponse(err)
+			// For access denied error, it could be an anonymous
+			// request. Move forward and let the top level callers
+			// succeed if possible based on their policy.
+			if errResp.Code == "AccessDenied" && strings.Contains(errResp.Message, "Access Denied") {
+				return "us-east-1", nil
+			}
+			return "", err
 		}
 	}
 
@@ -113,7 +135,6 @@ func (c Client) getBucketLocation(bucketName string) (string, error) {
 	}
 
 	// Save the location into cache.
-	c.bucketLocCache.Set(bucketName, location)
 
 	// Return.
 	return location, nil
@@ -127,7 +148,7 @@ func (c Client) getBucketLocationRequest(bucketName string) (*http.Request, error) {
 
 	// Set get bucket location always as path style.
 	targetURL := c.endpointURL
-	targetURL.Path = filepath.Join(bucketName, "") + "/"
+	targetURL.Path = path.Join(bucketName, "") + "/"
 	targetURL.RawQuery = urlValues.Encode()
 
 	// Get a new HTTP request for the method.
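Taken together, the hunks above split the location lookup into getBucketLocation plus a new processBucketLocationResponse helper, cache the parsed region, and map an anonymous AccessDenied answer to "us-east-1". A minimal sketch of the resulting flow (illustrative only, not part of the diff; it assumes package minio and an already constructed Client):

// Illustrative sketch only, not part of the vendored code.
func exampleLocationLookup(c Client, bucket string) (string, error) {
	// First call: on a cache miss the GetBucketLocation request is issued, the
	// body is parsed by processBucketLocationResponse, and the region is stored
	// in c.bucketLocCache before being returned.
	if _, err := c.getBucketLocation(bucket); err != nil {
		return "", err
	}
	// Second call: answered straight from bucketLocCache, no HTTP round trip.
	return c.getBucketLocation(bucket)
}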
320 vendor/src/github.com/minio/minio-go/bucket-cache_test.go vendored Normal file
@@ -0,0 +1,320 @@
|
/*
|
||||||
|
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2016, 2016 Minio, Inc.
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package minio
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/hex"
|
||||||
|
"encoding/xml"
|
||||||
|
"io/ioutil"
|
||||||
|
"net/http"
|
||||||
|
"net/url"
|
||||||
|
"path"
|
||||||
|
"reflect"
|
||||||
|
"testing"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Test validates `newBucketLocationCache`.
|
||||||
|
func TestNewBucketLocationCache(t *testing.T) {
|
||||||
|
expectedBucketLocationcache := &bucketLocationCache{
|
||||||
|
items: make(map[string]string),
|
||||||
|
}
|
||||||
|
actualBucketLocationCache := newBucketLocationCache()
|
||||||
|
|
||||||
|
if !reflect.DeepEqual(actualBucketLocationCache, expectedBucketLocationcache) {
|
||||||
|
t.Errorf("Unexpected return value")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Tests validate bucketLocationCache operations.
|
||||||
|
func TestBucketLocationCacheOps(t *testing.T) {
|
||||||
|
testBucketLocationCache := newBucketLocationCache()
|
||||||
|
expectedBucketName := "minio-bucket"
|
||||||
|
expectedLocation := "us-east-1"
|
||||||
|
testBucketLocationCache.Set(expectedBucketName, expectedLocation)
|
||||||
|
actualLocation, ok := testBucketLocationCache.Get(expectedBucketName)
|
||||||
|
if !ok {
|
||||||
|
t.Errorf("Bucket location cache not set")
|
||||||
|
}
|
||||||
|
if expectedLocation != actualLocation {
|
||||||
|
t.Errorf("Bucket location cache not set to expected value")
|
||||||
|
}
|
||||||
|
testBucketLocationCache.Delete(expectedBucketName)
|
||||||
|
_, ok = testBucketLocationCache.Get(expectedBucketName)
|
||||||
|
if ok {
|
||||||
|
t.Errorf("Bucket location cache not deleted as expected")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Tests validate http request generation for 'getBucketLocation'.
|
||||||
|
func TestGetBucketLocationRequest(t *testing.T) {
|
||||||
|
// Generates expected http request for getBucketLocation.
|
||||||
|
// Used for asserting with the actual request generated.
|
||||||
|
createExpectedRequest := func(c *Client, bucketName string, req *http.Request) (*http.Request, error) {
|
||||||
|
// Set location query.
|
||||||
|
urlValues := make(url.Values)
|
||||||
|
urlValues.Set("location", "")
|
||||||
|
|
||||||
|
// Set get bucket location always as path style.
|
||||||
|
targetURL := c.endpointURL
|
||||||
|
targetURL.Path = path.Join(bucketName, "") + "/"
|
||||||
|
targetURL.RawQuery = urlValues.Encode()
|
||||||
|
|
||||||
|
// Get a new HTTP request for the method.
|
||||||
|
req, err := http.NewRequest("GET", targetURL.String(), nil)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set UserAgent for the request.
|
||||||
|
c.setUserAgent(req)
|
||||||
|
|
||||||
|
// Set sha256 sum for signature calculation only with signature version '4'.
|
||||||
|
if c.signature.isV4() {
|
||||||
|
req.Header.Set("X-Amz-Content-Sha256", hex.EncodeToString(sum256([]byte{})))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sign the request.
|
||||||
|
if c.signature.isV4() {
|
||||||
|
req = signV4(*req, c.accessKeyID, c.secretAccessKey, "us-east-1")
|
||||||
|
} else if c.signature.isV2() {
|
||||||
|
req = signV2(*req, c.accessKeyID, c.secretAccessKey)
|
||||||
|
}
|
||||||
|
return req, nil
|
||||||
|
|
||||||
|
}
|
||||||
|
// Info for 'Client' creation.
|
||||||
|
// Will be used as arguments for 'NewClient'.
|
||||||
|
type infoForClient struct {
|
||||||
|
endPoint string
|
||||||
|
accessKey string
|
||||||
|
secretKey string
|
||||||
|
enableInsecure bool
|
||||||
|
}
|
||||||
|
// dataset for 'NewClient' call.
|
||||||
|
info := []infoForClient{
|
||||||
|
// endpoint localhost.
|
||||||
|
// both access-key and secret-key are empty.
|
||||||
|
{"localhost:9000", "", "", false},
|
||||||
|
// both access-key and secret-key exist.
|
||||||
|
{"localhost:9000", "my-access-key", "my-secret-key", false},
|
||||||
|
// one of access-key and secret-key is empty.
|
||||||
|
{"localhost:9000", "", "my-secret-key", false},
|
||||||
|
|
||||||
|
// endpoint amazon s3.
|
||||||
|
{"s3.amazonaws.com", "", "", false},
|
||||||
|
{"s3.amazonaws.com", "my-access-key", "my-secret-key", false},
|
||||||
|
{"s3.amazonaws.com", "my-acess-key", "", false},
|
||||||
|
|
||||||
|
// endpoint google cloud storage.
|
||||||
|
{"storage.googleapis.com", "", "", false},
|
||||||
|
{"storage.googleapis.com", "my-access-key", "my-secret-key", false},
|
||||||
|
{"storage.googleapis.com", "", "my-secret-key", false},
|
||||||
|
|
||||||
|
// endpoint custom domain running Minio server.
|
||||||
|
{"play.minio.io", "", "", false},
|
||||||
|
{"play.minio.io", "my-access-key", "my-secret-key", false},
|
||||||
|
{"play.minio.io", "my-acess-key", "", false},
|
||||||
|
}
|
||||||
|
testCases := []struct {
|
||||||
|
bucketName string
|
||||||
|
// data for new client creation.
|
||||||
|
info infoForClient
|
||||||
|
// error in the output.
|
||||||
|
err error
|
||||||
|
// flag indicating whether tests should pass.
|
||||||
|
shouldPass bool
|
||||||
|
}{
|
||||||
|
// Client is constructed using the info struct.
|
||||||
|
// case with empty location.
|
||||||
|
{"my-bucket", info[0], nil, true},
|
||||||
|
// case with location set to standard 'us-east-1'.
|
||||||
|
{"my-bucket", info[0], nil, true},
|
||||||
|
// case with location set to a value different from 'us-east-1'.
|
||||||
|
{"my-bucket", info[0], nil, true},
|
||||||
|
|
||||||
|
{"my-bucket", info[1], nil, true},
|
||||||
|
{"my-bucket", info[1], nil, true},
|
||||||
|
{"my-bucket", info[1], nil, true},
|
||||||
|
|
||||||
|
{"my-bucket", info[2], nil, true},
|
||||||
|
{"my-bucket", info[2], nil, true},
|
||||||
|
{"my-bucket", info[2], nil, true},
|
||||||
|
|
||||||
|
{"my-bucket", info[3], nil, true},
|
||||||
|
{"my-bucket", info[3], nil, true},
|
||||||
|
{"my-bucket", info[3], nil, true},
|
||||||
|
|
||||||
|
{"my-bucket", info[4], nil, true},
|
||||||
|
{"my-bucket", info[4], nil, true},
|
||||||
|
{"my-bucket", info[4], nil, true},
|
||||||
|
|
||||||
|
{"my-bucket", info[5], nil, true},
|
||||||
|
{"my-bucket", info[5], nil, true},
|
||||||
|
{"my-bucket", info[5], nil, true},
|
||||||
|
|
||||||
|
{"my-bucket", info[6], nil, true},
|
||||||
|
{"my-bucket", info[6], nil, true},
|
||||||
|
{"my-bucket", info[6], nil, true},
|
||||||
|
|
||||||
|
{"my-bucket", info[7], nil, true},
|
||||||
|
{"my-bucket", info[7], nil, true},
|
||||||
|
{"my-bucket", info[7], nil, true},
|
||||||
|
|
||||||
|
{"my-bucket", info[8], nil, true},
|
||||||
|
{"my-bucket", info[8], nil, true},
|
||||||
|
{"my-bucket", info[8], nil, true},
|
||||||
|
|
||||||
|
{"my-bucket", info[9], nil, true},
|
||||||
|
{"my-bucket", info[9], nil, true},
|
||||||
|
{"my-bucket", info[9], nil, true},
|
||||||
|
|
||||||
|
{"my-bucket", info[10], nil, true},
|
||||||
|
{"my-bucket", info[10], nil, true},
|
||||||
|
{"my-bucket", info[10], nil, true},
|
||||||
|
|
||||||
|
{"my-bucket", info[11], nil, true},
|
||||||
|
{"my-bucket", info[11], nil, true},
|
||||||
|
{"my-bucket", info[11], nil, true},
|
||||||
|
}
|
||||||
|
for i, testCase := range testCases {
|
||||||
|
// cannot create a new client with an empty endPoint value.
|
||||||
|
// validates and creates a new client only if the endPoint value is not empty.
|
||||||
|
client := &Client{}
|
||||||
|
var err error
|
||||||
|
if testCase.info.endPoint != "" {
|
||||||
|
|
||||||
|
client, err = New(testCase.info.endPoint, testCase.info.accessKey, testCase.info.secretKey, testCase.info.enableInsecure)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Test %d: Failed to create new Client: %s", i+1, err.Error())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
actualReq, err := client.getBucketLocationRequest(testCase.bucketName)
|
||||||
|
if err != nil && testCase.shouldPass {
|
||||||
|
t.Errorf("Test %d: Expected to pass, but failed with: <ERROR> %s", i+1, err.Error())
|
||||||
|
}
|
||||||
|
if err == nil && !testCase.shouldPass {
|
||||||
|
t.Errorf("Test %d: Expected to fail with <ERROR> \"%s\", but passed instead", i+1, testCase.err.Error())
|
||||||
|
}
|
||||||
|
// Failed as expected, but does it fail for the expected reason.
|
||||||
|
if err != nil && !testCase.shouldPass {
|
||||||
|
if err.Error() != testCase.err.Error() {
|
||||||
|
t.Errorf("Test %d: Expected to fail with error \"%s\", but instead failed with error \"%s\" instead", i+1, testCase.err.Error(), err.Error())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Test passes as expected, but the output values are verified for correctness here.
|
||||||
|
if err == nil && testCase.shouldPass {
|
||||||
|
expectedReq := &http.Request{}
|
||||||
|
expectedReq, err = createExpectedRequest(client, testCase.bucketName, expectedReq)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Test %d: Expected request Creation failed", i+1)
|
||||||
|
}
|
||||||
|
if expectedReq.Method != actualReq.Method {
|
||||||
|
t.Errorf("Test %d: The expected Request method doesn't match with the actual one", i+1)
|
||||||
|
}
|
||||||
|
if expectedReq.URL.String() != actualReq.URL.String() {
|
||||||
|
t.Errorf("Test %d: Expected the request URL to be '%s', but instead found '%s'", i+1, expectedReq.URL.String(), actualReq.URL.String())
|
||||||
|
}
|
||||||
|
if expectedReq.ContentLength != actualReq.ContentLength {
|
||||||
|
t.Errorf("Test %d: Expected the request body Content-Length to be '%d', but found '%d' instead", i+1, expectedReq.ContentLength, actualReq.ContentLength)
|
||||||
|
}
|
||||||
|
|
||||||
|
if expectedReq.Header.Get("X-Amz-Content-Sha256") != actualReq.Header.Get("X-Amz-Content-Sha256") {
|
||||||
|
t.Errorf("Test %d: 'X-Amz-Content-Sha256' header of the expected request doesn't match with that of the actual request", i+1)
|
||||||
|
}
|
||||||
|
if expectedReq.Header.Get("User-Agent") != actualReq.Header.Get("User-Agent") {
|
||||||
|
t.Errorf("Test %d: Expected 'User-Agent' header to be \"%s\",but found \"%s\" instead", i+1, expectedReq.Header.Get("User-Agent"), actualReq.Header.Get("User-Agent"))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// generates http response with bucket location set in the body.
|
||||||
|
func generateLocationResponse(resp *http.Response, bodyContent []byte) (*http.Response, error) {
|
||||||
|
resp.StatusCode = http.StatusOK
|
||||||
|
resp.Body = ioutil.NopCloser(bytes.NewBuffer(bodyContent))
|
||||||
|
return resp, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Tests the processing of GetPolicy response from server.
|
||||||
|
func TestProcessBucketLocationResponse(t *testing.T) {
|
||||||
|
// LocationResponse - format for location response.
|
||||||
|
type LocationResponse struct {
|
||||||
|
XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ LocationConstraint" json:"-"`
|
||||||
|
Location string `xml:",chardata"`
|
||||||
|
}
|
||||||
|
|
||||||
|
APIErrors := []APIError{
|
||||||
|
{
|
||||||
|
Code: "AccessDenied",
|
||||||
|
Description: "Access Denied",
|
||||||
|
HTTPStatusCode: http.StatusUnauthorized,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
testCases := []struct {
|
||||||
|
bucketName string
|
||||||
|
inputLocation string
|
||||||
|
isAPIError bool
|
||||||
|
apiErr APIError
|
||||||
|
// expected results.
|
||||||
|
expectedResult string
|
||||||
|
err error
|
||||||
|
// flag indicating whether tests should pass.
|
||||||
|
shouldPass bool
|
||||||
|
}{
|
||||||
|
{"my-bucket", "", true, APIErrors[0], "us-east-1", nil, true},
|
||||||
|
{"my-bucket", "", false, APIError{}, "us-east-1", nil, true},
|
||||||
|
{"my-bucket", "EU", false, APIError{}, "eu-west-1", nil, true},
|
||||||
|
{"my-bucket", "eu-central-1", false, APIError{}, "eu-central-1", nil, true},
|
||||||
|
{"my-bucket", "us-east-1", false, APIError{}, "us-east-1", nil, true},
|
||||||
|
}
|
||||||
|
|
||||||
|
for i, testCase := range testCases {
|
||||||
|
inputResponse := &http.Response{}
|
||||||
|
var err error
|
||||||
|
if testCase.isAPIError {
|
||||||
|
inputResponse = generateErrorResponse(inputResponse, testCase.apiErr, testCase.bucketName)
|
||||||
|
} else {
|
||||||
|
inputResponse, err = generateLocationResponse(inputResponse, encodeResponse(LocationResponse{
|
||||||
|
Location: testCase.inputLocation,
|
||||||
|
}))
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Test %d: Creation of valid response failed", i+1)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
actualResult, err := processBucketLocationResponse(inputResponse, "my-bucket")
|
||||||
|
if err != nil && testCase.shouldPass {
|
||||||
|
t.Errorf("Test %d: Expected to pass, but failed with: <ERROR> %s", i+1, err.Error())
|
||||||
|
}
|
||||||
|
if err == nil && !testCase.shouldPass {
|
||||||
|
t.Errorf("Test %d: Expected to fail with <ERROR> \"%s\", but passed instead", i+1, testCase.err.Error())
|
||||||
|
}
|
||||||
|
// Failed as expected, but does it fail for the expected reason.
|
||||||
|
if err != nil && !testCase.shouldPass {
|
||||||
|
if err.Error() != testCase.err.Error() {
|
||||||
|
t.Errorf("Test %d: Expected to fail with error \"%s\", but instead failed with error \"%s\" instead", i+1, testCase.err.Error(), err.Error())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if err == nil && testCase.shouldPass {
|
||||||
|
if !reflect.DeepEqual(testCase.expectedResult, actualResult) {
|
||||||
|
t.Errorf("Test %d: The expected BucketPolicy doesnt match the actual BucketPolicy", i+1)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
488 vendor/src/github.com/minio/minio-go/bucket-policy.go vendored Normal file
@@ -0,0 +1,488 @@
|
/*
|
||||||
|
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package minio
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"sort"
|
||||||
|
)
|
||||||
|
|
||||||
|
// maximum supported access policy size.
|
||||||
|
const maxAccessPolicySize = 20 * 1024 * 1024 // 20KiB.
|
||||||
|
|
||||||
|
// Resource prefix for all aws resources.
|
||||||
|
const awsResourcePrefix = "arn:aws:s3:::"
|
||||||
|
|
||||||
|
// BucketPolicy - Bucket level policy.
|
||||||
|
type BucketPolicy string
|
||||||
|
|
||||||
|
// Different types of Policies currently supported for buckets.
|
||||||
|
const (
|
||||||
|
BucketPolicyNone BucketPolicy = "none"
|
||||||
|
BucketPolicyReadOnly = "readonly"
|
||||||
|
BucketPolicyReadWrite = "readwrite"
|
||||||
|
BucketPolicyWriteOnly = "writeonly"
|
||||||
|
)
|
||||||
|
|
||||||
|
// isValidBucketPolicy - Is provided policy value supported.
|
||||||
|
func (p BucketPolicy) isValidBucketPolicy() bool {
|
||||||
|
switch p {
|
||||||
|
case BucketPolicyNone, BucketPolicyReadOnly, BucketPolicyReadWrite, BucketPolicyWriteOnly:
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// User - canonical users list.
|
||||||
|
type User struct {
|
||||||
|
AWS []string
|
||||||
|
}
|
||||||
|
|
||||||
|
// Statement - minio policy statement
|
||||||
|
type Statement struct {
|
||||||
|
Sid string
|
||||||
|
Effect string
|
||||||
|
Principal User `json:"Principal"`
|
||||||
|
Actions []string `json:"Action"`
|
||||||
|
Resources []string `json:"Resource"`
|
||||||
|
Conditions map[string]map[string]string `json:"Condition,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// BucketAccessPolicy - minio policy collection
|
||||||
|
type BucketAccessPolicy struct {
|
||||||
|
Version string // date in 0000-00-00 format
|
||||||
|
Statements []Statement `json:"Statement"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// Read write actions.
|
||||||
|
var (
|
||||||
|
readWriteBucketActions = []string{
|
||||||
|
"s3:GetBucketLocation",
|
||||||
|
"s3:ListBucket",
|
||||||
|
"s3:ListBucketMultipartUploads",
|
||||||
|
// Add more bucket level read-write actions here.
|
||||||
|
}
|
||||||
|
readWriteObjectActions = []string{
|
||||||
|
"s3:AbortMultipartUpload",
|
||||||
|
"s3:DeleteObject",
|
||||||
|
"s3:GetObject",
|
||||||
|
"s3:ListMultipartUploadParts",
|
||||||
|
"s3:PutObject",
|
||||||
|
// Add more object level read-write actions here.
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
// Write only actions.
|
||||||
|
var (
|
||||||
|
writeOnlyBucketActions = []string{
|
||||||
|
"s3:GetBucketLocation",
|
||||||
|
"s3:ListBucketMultipartUploads",
|
||||||
|
// Add more bucket level write actions here.
|
||||||
|
}
|
||||||
|
writeOnlyObjectActions = []string{
|
||||||
|
"s3:AbortMultipartUpload",
|
||||||
|
"s3:DeleteObject",
|
||||||
|
"s3:ListMultipartUploadParts",
|
||||||
|
"s3:PutObject",
|
||||||
|
// Add more object level write actions here.
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
// Read only actions.
|
||||||
|
var (
|
||||||
|
readOnlyBucketActions = []string{
|
||||||
|
"s3:GetBucketLocation",
|
||||||
|
"s3:ListBucket",
|
||||||
|
// Add more bucket level read actions here.
|
||||||
|
}
|
||||||
|
readOnlyObjectActions = []string{
|
||||||
|
"s3:GetObject",
|
||||||
|
// Add more object level read actions here.
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
// subsetActions returns true if the first array is completely
|
||||||
|
// contained in the second array. There must be at least
|
||||||
|
// the same number of duplicate values in second as there
|
||||||
|
// are in first.
|
||||||
|
func subsetActions(first, second []string) bool {
|
||||||
|
set := make(map[string]int)
|
||||||
|
for _, value := range second {
|
||||||
|
set[value]++
|
||||||
|
}
|
||||||
|
for _, value := range first {
|
||||||
|
if count, found := set[value]; !found {
|
||||||
|
return false
|
||||||
|
} else if count < 1 {
|
||||||
|
return false
|
||||||
|
} else {
|
||||||
|
set[value] = count - 1
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
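The comment on subsetActions above states the duplicate-aware containment rule that all of the policy checks rely on. A small sketch of that behaviour (illustrative only, not part of the diff, inside package minio):

// Illustrative sketch only, not part of the vendored code.
func exampleSubsetActions() bool {
	first := []string{"s3:GetObject", "s3:GetObject"}
	second := []string{"s3:GetObject", "s3:ListBucket"}
	// false: "s3:GetObject" occurs twice in first but only once in second.
	a := subsetActions(first, second)
	// true: with a second copy appended, every element of first, duplicates
	// included, is covered by second.
	b := subsetActions(first, append(second, "s3:GetObject"))
	return !a && b
}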
|
// Verifies if we have read/write policy set at bucketName, objectPrefix.
|
||||||
|
func isBucketPolicyReadWrite(statements []Statement, bucketName string, objectPrefix string) bool {
|
||||||
|
var commonActions, readWrite bool
|
||||||
|
sort.Strings(readWriteBucketActions)
|
||||||
|
sort.Strings(readWriteObjectActions)
|
||||||
|
for _, statement := range statements {
|
||||||
|
for _, resource := range statement.Resources {
|
||||||
|
if resource == awsResourcePrefix+bucketName {
|
||||||
|
if subsetActions(readWriteBucketActions, statement.Actions) {
|
||||||
|
commonActions = true
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
} else if resource == awsResourcePrefix+bucketName+"/"+objectPrefix+"*" {
|
||||||
|
if subsetActions(readWriteObjectActions, statement.Actions) {
|
||||||
|
readWrite = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return commonActions && readWrite
|
||||||
|
}
|
||||||
|
|
||||||
|
// Verifies if we have write only policy set at bucketName, objectPrefix.
|
||||||
|
func isBucketPolicyWriteOnly(statements []Statement, bucketName string, objectPrefix string) bool {
|
||||||
|
var commonActions, writeOnly bool
|
||||||
|
sort.Strings(writeOnlyBucketActions)
|
||||||
|
sort.Strings(writeOnlyObjectActions)
|
||||||
|
for _, statement := range statements {
|
||||||
|
for _, resource := range statement.Resources {
|
||||||
|
if resource == awsResourcePrefix+bucketName {
|
||||||
|
if subsetActions(writeOnlyBucketActions, statement.Actions) {
|
||||||
|
commonActions = true
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
} else if resource == awsResourcePrefix+bucketName+"/"+objectPrefix+"*" {
|
||||||
|
if subsetActions(writeOnlyObjectActions, statement.Actions) {
|
||||||
|
writeOnly = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return commonActions && writeOnly
|
||||||
|
}
|
||||||
|
|
||||||
|
// Verifies if we have read only policy set at bucketName, objectPrefix.
|
||||||
|
func isBucketPolicyReadOnly(statements []Statement, bucketName string, objectPrefix string) bool {
|
||||||
|
var commonActions, readOnly bool
|
||||||
|
sort.Strings(readOnlyBucketActions)
|
||||||
|
sort.Strings(readOnlyObjectActions)
|
||||||
|
for _, statement := range statements {
|
||||||
|
for _, resource := range statement.Resources {
|
||||||
|
if resource == awsResourcePrefix+bucketName {
|
||||||
|
if subsetActions(readOnlyBucketActions, statement.Actions) {
|
||||||
|
commonActions = true
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
} else if resource == awsResourcePrefix+bucketName+"/"+objectPrefix+"*" {
|
||||||
|
if subsetActions(readOnlyObjectActions, statement.Actions) {
|
||||||
|
readOnly = true
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return commonActions && readOnly
|
||||||
|
}
|
||||||
|
|
||||||
|
// Removes read write bucket policy if found.
|
||||||
|
func removeBucketPolicyStatementReadWrite(statements []Statement, bucketName string, objectPrefix string) []Statement {
|
||||||
|
var newStatements []Statement
|
||||||
|
for _, statement := range statements {
|
||||||
|
for _, resource := range statement.Resources {
|
||||||
|
if resource == awsResourcePrefix+bucketName {
|
||||||
|
var newActions []string
|
||||||
|
for _, action := range statement.Actions {
|
||||||
|
switch action {
|
||||||
|
case "s3:GetBucketLocation", "s3:ListBucket", "s3:ListBucketMultipartUploads":
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
newActions = append(newActions, action)
|
||||||
|
}
|
||||||
|
statement.Actions = newActions
|
||||||
|
} else if resource == awsResourcePrefix+bucketName+"/"+objectPrefix+"*" {
|
||||||
|
var newActions []string
|
||||||
|
for _, action := range statement.Actions {
|
||||||
|
switch action {
|
||||||
|
case "s3:PutObject", "s3:AbortMultipartUpload", "s3:ListMultipartUploadParts", "s3:DeleteObject", "s3:GetObject":
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
newActions = append(newActions, action)
|
||||||
|
}
|
||||||
|
statement.Actions = newActions
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if len(statement.Actions) != 0 {
|
||||||
|
newStatements = append(newStatements, statement)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return newStatements
|
||||||
|
}
|
||||||
|
|
||||||
|
// Removes write only bucket policy if found.
|
||||||
|
func removeBucketPolicyStatementWriteOnly(statements []Statement, bucketName string, objectPrefix string) []Statement {
|
||||||
|
var newStatements []Statement
|
||||||
|
for _, statement := range statements {
|
||||||
|
for _, resource := range statement.Resources {
|
||||||
|
if resource == awsResourcePrefix+bucketName {
|
||||||
|
var newActions []string
|
||||||
|
for _, action := range statement.Actions {
|
||||||
|
switch action {
|
||||||
|
case "s3:GetBucketLocation", "s3:ListBucketMultipartUploads":
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
newActions = append(newActions, action)
|
||||||
|
}
|
||||||
|
statement.Actions = newActions
|
||||||
|
} else if resource == awsResourcePrefix+bucketName+"/"+objectPrefix+"*" {
|
||||||
|
var newActions []string
|
||||||
|
for _, action := range statement.Actions {
|
||||||
|
switch action {
|
||||||
|
case "s3:PutObject", "s3:AbortMultipartUpload", "s3:ListMultipartUploadParts", "s3:DeleteObject":
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
newActions = append(newActions, action)
|
||||||
|
}
|
||||||
|
statement.Actions = newActions
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if len(statement.Actions) != 0 {
|
||||||
|
newStatements = append(newStatements, statement)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return newStatements
|
||||||
|
}
|
||||||
|
|
||||||
|
// Removes read only bucket policy if found.
|
||||||
|
func removeBucketPolicyStatementReadOnly(statements []Statement, bucketName string, objectPrefix string) []Statement {
|
||||||
|
var newStatements []Statement
|
||||||
|
for _, statement := range statements {
|
||||||
|
for _, resource := range statement.Resources {
|
||||||
|
if resource == awsResourcePrefix+bucketName {
|
||||||
|
var newActions []string
|
||||||
|
for _, action := range statement.Actions {
|
||||||
|
switch action {
|
||||||
|
case "s3:GetBucketLocation", "s3:ListBucket":
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
newActions = append(newActions, action)
|
||||||
|
}
|
||||||
|
statement.Actions = newActions
|
||||||
|
} else if resource == awsResourcePrefix+bucketName+"/"+objectPrefix+"*" {
|
||||||
|
var newActions []string
|
||||||
|
for _, action := range statement.Actions {
|
||||||
|
if action == "s3:GetObject" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
newActions = append(newActions, action)
|
||||||
|
}
|
||||||
|
statement.Actions = newActions
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if len(statement.Actions) != 0 {
|
||||||
|
newStatements = append(newStatements, statement)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return newStatements
|
||||||
|
}
|
||||||
|
|
||||||
|
// Remove bucket policies based on the type.
|
||||||
|
func removeBucketPolicyStatement(statements []Statement, bucketName string, objectPrefix string) []Statement {
|
||||||
|
// Verify type of policy to be removed.
|
||||||
|
if isBucketPolicyReadWrite(statements, bucketName, objectPrefix) {
|
||||||
|
statements = removeBucketPolicyStatementReadWrite(statements, bucketName, objectPrefix)
|
||||||
|
} else if isBucketPolicyWriteOnly(statements, bucketName, objectPrefix) {
|
||||||
|
statements = removeBucketPolicyStatementWriteOnly(statements, bucketName, objectPrefix)
|
||||||
|
} else if isBucketPolicyReadOnly(statements, bucketName, objectPrefix) {
|
||||||
|
statements = removeBucketPolicyStatementReadOnly(statements, bucketName, objectPrefix)
|
||||||
|
}
|
||||||
|
return statements
|
||||||
|
}
|
||||||
|
|
||||||
|
// Unmarshals bucket policy byte array into a structured bucket access policy.
|
||||||
|
func unMarshalBucketPolicy(bucketPolicyBuf []byte) (BucketAccessPolicy, error) {
|
||||||
|
// Untyped lazy JSON struct.
|
||||||
|
type bucketAccessPolicyUntyped struct {
|
||||||
|
Version string
|
||||||
|
Statement []struct {
|
||||||
|
Sid string
|
||||||
|
Effect string
|
||||||
|
Principal struct {
|
||||||
|
AWS json.RawMessage
|
||||||
|
}
|
||||||
|
Action json.RawMessage
|
||||||
|
Resource json.RawMessage
|
||||||
|
Condition map[string]map[string]string
|
||||||
|
}
|
||||||
|
}
|
||||||
|
var policyUntyped = bucketAccessPolicyUntyped{}
|
||||||
|
// Unmarshal incoming policy into an untyped structure, to be
|
||||||
|
// evaluated lazily later.
|
||||||
|
err := json.Unmarshal(bucketPolicyBuf, &policyUntyped)
|
||||||
|
if err != nil {
|
||||||
|
return BucketAccessPolicy{}, err
|
||||||
|
}
|
||||||
|
var policy = BucketAccessPolicy{}
|
||||||
|
policy.Version = policyUntyped.Version
|
||||||
|
for _, stmtUntyped := range policyUntyped.Statement {
|
||||||
|
statement := Statement{}
|
||||||
|
// These are properly typed messages.
|
||||||
|
statement.Sid = stmtUntyped.Sid
|
||||||
|
statement.Effect = stmtUntyped.Effect
|
||||||
|
statement.Conditions = stmtUntyped.Condition
|
||||||
|
|
||||||
|
// AWS user can have two different types, either as []string
|
||||||
|
// and either as regular 'string'. We fall back to doing this
|
||||||
|
// since there is no other easier way to fix this.
|
||||||
|
err = json.Unmarshal(stmtUntyped.Principal.AWS, &statement.Principal.AWS)
|
||||||
|
if err != nil {
|
||||||
|
var awsUser string
|
||||||
|
err = json.Unmarshal(stmtUntyped.Principal.AWS, &awsUser)
|
||||||
|
if err != nil {
|
||||||
|
return BucketAccessPolicy{}, err
|
||||||
|
}
|
||||||
|
statement.Principal.AWS = []string{awsUser}
|
||||||
|
}
|
||||||
|
// Actions can have two different types, either as []string
|
||||||
|
// and either as regular 'string'. We fall back to doing this
|
||||||
|
// since there is no other easier way to fix this.
|
||||||
|
err = json.Unmarshal(stmtUntyped.Action, &statement.Actions)
|
||||||
|
if err != nil {
|
||||||
|
var action string
|
||||||
|
err = json.Unmarshal(stmtUntyped.Action, &action)
|
||||||
|
if err != nil {
|
||||||
|
return BucketAccessPolicy{}, err
|
||||||
|
}
|
||||||
|
statement.Actions = []string{action}
|
||||||
|
}
|
||||||
|
// Resources can have two different types, either as []string
|
||||||
|
// and either as regular 'string'. We fall back to doing this
|
||||||
|
// since there is no other easier way to fix this.
|
||||||
|
err = json.Unmarshal(stmtUntyped.Resource, &statement.Resources)
|
||||||
|
if err != nil {
|
||||||
|
var resource string
|
||||||
|
err = json.Unmarshal(stmtUntyped.Resource, &resource)
|
||||||
|
if err != nil {
|
||||||
|
return BucketAccessPolicy{}, err
|
||||||
|
}
|
||||||
|
statement.Resources = []string{resource}
|
||||||
|
}
|
||||||
|
// Append the typed policy.
|
||||||
|
policy.Statements = append(policy.Statements, statement)
|
||||||
|
}
|
||||||
|
return policy, nil
|
||||||
|
}
|
||||||
|
|
||||||
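As the comments above note, Principal.AWS, Action and Resource may each arrive either as a JSON array or as a bare string, and unMarshalBucketPolicy falls back from one to the other. A hedged sketch of both accepted encodings (illustrative only, not part of the diff, inside package minio; the bucket name is made up):

// Illustrative sketch only, not part of the vendored code.
func exampleUnmarshalForms() error {
	asArray := []byte(`{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Principal":{"AWS":["*"]},"Action":["s3:GetObject"],"Resource":["arn:aws:s3:::my-bucket/*"]}]}`)
	asString := []byte(`{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Principal":{"AWS":"*"},"Action":"s3:GetObject","Resource":"arn:aws:s3:::my-bucket/*"}]}`)
	// Both decode to the same typed BucketAccessPolicy: the lazy unmarshalling
	// first tries []string and then falls back to a single string.
	if _, err := unMarshalBucketPolicy(asArray); err != nil {
		return err
	}
	_, err := unMarshalBucketPolicy(asString)
	return err
}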
|
// Identifies the policy type from policy Statements.
|
||||||
|
func identifyPolicyType(policy BucketAccessPolicy, bucketName, objectPrefix string) (bucketPolicy BucketPolicy) {
|
||||||
|
if policy.Statements == nil {
|
||||||
|
return BucketPolicyNone
|
||||||
|
}
|
||||||
|
if isBucketPolicyReadWrite(policy.Statements, bucketName, objectPrefix) {
|
||||||
|
return BucketPolicyReadWrite
|
||||||
|
} else if isBucketPolicyWriteOnly(policy.Statements, bucketName, objectPrefix) {
|
||||||
|
return BucketPolicyWriteOnly
|
||||||
|
} else if isBucketPolicyReadOnly(policy.Statements, bucketName, objectPrefix) {
|
||||||
|
return BucketPolicyReadOnly
|
||||||
|
}
|
||||||
|
return BucketPolicyNone
|
||||||
|
}
|
||||||
|
|
||||||
|
// Generate policy statements for various bucket policies.
|
||||||
|
// refer to http://docs.aws.amazon.com/AmazonS3/latest/dev/access-policy-language-overview.html
|
||||||
|
// for more details about statement fields.
|
||||||
|
func generatePolicyStatement(bucketPolicy BucketPolicy, bucketName, objectPrefix string) ([]Statement, error) {
|
||||||
|
if !bucketPolicy.isValidBucketPolicy() {
|
||||||
|
return []Statement{}, ErrInvalidArgument(fmt.Sprintf("Invalid bucket policy provided. %s", bucketPolicy))
|
||||||
|
}
|
||||||
|
var statements []Statement
|
||||||
|
if bucketPolicy == BucketPolicyNone {
|
||||||
|
return []Statement{}, nil
|
||||||
|
} else if bucketPolicy == BucketPolicyReadWrite {
|
||||||
|
// Get read-write policy.
|
||||||
|
statements = setReadWriteStatement(bucketName, objectPrefix)
|
||||||
|
} else if bucketPolicy == BucketPolicyReadOnly {
|
||||||
|
// Get read only policy.
|
||||||
|
statements = setReadOnlyStatement(bucketName, objectPrefix)
|
||||||
|
} else if bucketPolicy == BucketPolicyWriteOnly {
|
||||||
|
// Return Write only policy.
|
||||||
|
statements = setWriteOnlyStatement(bucketName, objectPrefix)
|
||||||
|
}
|
||||||
|
return statements, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Obtain statements for read-write BucketPolicy.
|
||||||
|
func setReadWriteStatement(bucketName, objectPrefix string) []Statement {
|
||||||
|
bucketResourceStatement := Statement{}
|
||||||
|
objectResourceStatement := Statement{}
|
||||||
|
statements := []Statement{}
|
||||||
|
|
||||||
|
bucketResourceStatement.Effect = "Allow"
|
||||||
|
bucketResourceStatement.Principal.AWS = []string{"*"}
|
||||||
|
bucketResourceStatement.Resources = []string{fmt.Sprintf("%s%s", awsResourcePrefix, bucketName)}
|
||||||
|
bucketResourceStatement.Actions = readWriteBucketActions
|
||||||
|
objectResourceStatement.Effect = "Allow"
|
||||||
|
objectResourceStatement.Principal.AWS = []string{"*"}
|
||||||
|
objectResourceStatement.Resources = []string{fmt.Sprintf("%s%s", awsResourcePrefix, bucketName+"/"+objectPrefix+"*")}
|
||||||
|
objectResourceStatement.Actions = readWriteObjectActions
|
||||||
|
// Save the read write policy.
|
||||||
|
statements = append(statements, bucketResourceStatement, objectResourceStatement)
|
||||||
|
return statements
|
||||||
|
}
|
||||||
|
|
||||||
|
// Obtain statements for read only BucketPolicy.
|
||||||
|
func setReadOnlyStatement(bucketName, objectPrefix string) []Statement {
|
||||||
|
bucketResourceStatement := Statement{}
|
||||||
|
objectResourceStatement := Statement{}
|
||||||
|
statements := []Statement{}
|
||||||
|
|
||||||
|
bucketResourceStatement.Effect = "Allow"
|
||||||
|
bucketResourceStatement.Principal.AWS = []string{"*"}
|
||||||
|
bucketResourceStatement.Resources = []string{fmt.Sprintf("%s%s", awsResourcePrefix, bucketName)}
|
||||||
|
bucketResourceStatement.Actions = readOnlyBucketActions
|
||||||
|
objectResourceStatement.Effect = "Allow"
|
||||||
|
objectResourceStatement.Principal.AWS = []string{"*"}
|
||||||
|
objectResourceStatement.Resources = []string{fmt.Sprintf("%s%s", awsResourcePrefix, bucketName+"/"+objectPrefix+"*")}
|
||||||
|
objectResourceStatement.Actions = readOnlyObjectActions
|
||||||
|
// Save the read only policy.
|
||||||
|
statements = append(statements, bucketResourceStatement, objectResourceStatement)
|
||||||
|
return statements
|
||||||
|
}
|
||||||
|
|
||||||
|
// Obtain statements for write only BucketPolicy.
|
||||||
|
func setWriteOnlyStatement(bucketName, objectPrefix string) []Statement {
|
||||||
|
bucketResourceStatement := Statement{}
|
||||||
|
objectResourceStatement := Statement{}
|
||||||
|
statements := []Statement{}
|
||||||
|
// Write only policy.
|
||||||
|
bucketResourceStatement.Effect = "Allow"
|
||||||
|
bucketResourceStatement.Principal.AWS = []string{"*"}
|
||||||
|
bucketResourceStatement.Resources = []string{fmt.Sprintf("%s%s", awsResourcePrefix, bucketName)}
|
||||||
|
bucketResourceStatement.Actions = writeOnlyBucketActions
|
||||||
|
objectResourceStatement.Effect = "Allow"
|
||||||
|
objectResourceStatement.Principal.AWS = []string{"*"}
|
||||||
|
objectResourceStatement.Resources = []string{fmt.Sprintf("%s%s", awsResourcePrefix, bucketName+"/"+objectPrefix+"*")}
|
||||||
|
objectResourceStatement.Actions = writeOnlyObjectActions
|
||||||
|
// Save the write only policy.
|
||||||
|
statements = append(statements, bucketResourceStatement, objectResourceStatement)
|
||||||
|
return statements
|
||||||
|
}
|
515 vendor/src/github.com/minio/minio-go/bucket-policy_test.go vendored Normal file
@@ -0,0 +1,515 @@
|
/*
|
||||||
|
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package minio
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"reflect"
|
||||||
|
"testing"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Validates bucket policy string.
|
||||||
|
func TestIsValidBucketPolicy(t *testing.T) {
|
||||||
|
testCases := []struct {
|
||||||
|
inputPolicy BucketPolicy
|
||||||
|
expectedResult bool
|
||||||
|
}{
|
||||||
|
// valid inputs.
|
||||||
|
{BucketPolicy("none"), true},
|
||||||
|
{BucketPolicy("readonly"), true},
|
||||||
|
{BucketPolicy("readwrite"), true},
|
||||||
|
{BucketPolicy("writeonly"), true},
|
||||||
|
// invalid input.
|
||||||
|
{BucketPolicy("readwriteonly"), false},
|
||||||
|
{BucketPolicy("writeread"), false},
|
||||||
|
}
|
||||||
|
|
||||||
|
for i, testCase := range testCases {
|
||||||
|
actualResult := testCase.inputPolicy.isValidBucketPolicy()
|
||||||
|
if testCase.expectedResult != actualResult {
|
||||||
|
t.Errorf("Test %d: Expected IsValidBucket policy to be '%v' for policy \"%s\", but instead found it to be '%v'", i+1, testCase.expectedResult, testCase.inputPolicy, actualResult)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Tests whether the first array is completely contained in the second array.
|
||||||
|
func TestSubsetActions(t *testing.T) {
|
||||||
|
testCases := []struct {
|
||||||
|
firstArray []string
|
||||||
|
secondArray []string
|
||||||
|
|
||||||
|
expectedResult bool
|
||||||
|
}{
|
||||||
|
{[]string{"aaa", "bbb"}, []string{"ccc", "bbb"}, false},
|
||||||
|
{[]string{"aaa", "bbb"}, []string{"aaa", "ccc"}, false},
|
||||||
|
{[]string{"aaa", "bbb"}, []string{"aaa", "bbb"}, true},
|
||||||
|
{[]string{"aaa", "bbb"}, []string{"aaa", "bbb", "ccc"}, true},
|
||||||
|
{[]string{"aaa", "bbb", "aaa"}, []string{"aaa", "bbb", "ccc"}, false},
|
||||||
|
{[]string{"aaa", "bbb", "aaa"}, []string{"aaa", "bbb", "bbb", "aaa"}, true},
|
||||||
|
{[]string{"aaa", "bbb", "aaa"}, []string{"aaa", "bbb"}, false},
|
||||||
|
{[]string{"aaa", "bbb", "aaa"}, []string{"aaa", "bbb", "aaa", "bbb", "ccc"}, true},
|
||||||
|
}
|
||||||
|
for i, testCase := range testCases {
|
||||||
|
actualResult := subsetActions(testCase.firstArray, testCase.secondArray)
|
||||||
|
if testCase.expectedResult != actualResult {
|
||||||
|
t.Errorf("Test %d: First array '%v' is not contained in second array '%v'", i+1, testCase.firstArray, testCase.secondArray)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
// Tests validate Bucket Policy type identifier.
|
||||||
|
func TestIdentifyPolicyType(t *testing.T) {
|
||||||
|
testCases := []struct {
|
||||||
|
inputPolicy BucketAccessPolicy
|
||||||
|
bucketName string
|
||||||
|
objName string
|
||||||
|
|
||||||
|
expectedPolicy BucketPolicy
|
||||||
|
}{
|
||||||
|
{BucketAccessPolicy{Version: "2012-10-17"}, "my-bucket", "", BucketPolicyNone},
|
||||||
|
}
|
||||||
|
for i, testCase := range testCases {
|
||||||
|
actualBucketPolicy := identifyPolicyType(testCase.inputPolicy, testCase.bucketName, testCase.objName)
|
||||||
|
if testCase.expectedPolicy != actualBucketPolicy {
|
||||||
|
t.Errorf("Test %d: Expected bucket policy to be '%v', but instead got '%v'", i+1, testCase.expectedPolicy, actualBucketPolicy)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Test validate Resource Statement Generator.
|
||||||
|
func TestGeneratePolicyStatement(t *testing.T) {
|
||||||
|
|
||||||
|
testCases := []struct {
|
||||||
|
bucketPolicy BucketPolicy
|
||||||
|
bucketName string
|
||||||
|
objectPrefix string
|
||||||
|
expectedStatements []Statement
|
||||||
|
|
||||||
|
shouldPass bool
|
||||||
|
err error
|
||||||
|
}{
|
||||||
|
{BucketPolicy("my-policy"), "my-bucket", "", []Statement{}, false, ErrInvalidArgument(fmt.Sprintf("Invalid bucket policy provided. %s", BucketPolicy("my-policy")))},
|
||||||
|
{BucketPolicyNone, "my-bucket", "", []Statement{}, true, nil},
|
||||||
|
{BucketPolicyReadOnly, "read-only-bucket", "", setReadOnlyStatement("read-only-bucket", ""), true, nil},
|
||||||
|
{BucketPolicyWriteOnly, "write-only-bucket", "", setWriteOnlyStatement("write-only-bucket", ""), true, nil},
|
||||||
|
{BucketPolicyReadWrite, "read-write-bucket", "", setReadWriteStatement("read-write-bucket", ""), true, nil},
|
||||||
|
}
|
||||||
|
for i, testCase := range testCases {
|
||||||
|
actualStatements, err := generatePolicyStatement(testCase.bucketPolicy, testCase.bucketName, testCase.objectPrefix)
|
||||||
|
|
||||||
|
if err != nil && testCase.shouldPass {
|
||||||
|
t.Errorf("Test %d: Expected to pass, but failed with: <ERROR> %s", i+1, err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
if err == nil && !testCase.shouldPass {
|
||||||
|
t.Errorf("Test %d: Expected to fail with <ERROR> \"%s\", but passed instead", i+1, testCase.err.Error())
|
||||||
|
}
|
||||||
|
// Failed as expected, but does it fail for the expected reason.
|
||||||
|
if err != nil && !testCase.shouldPass {
|
||||||
|
if err.Error() != testCase.err.Error() {
|
||||||
|
t.Errorf("Test %d: Expected to fail with error \"%s\", but instead failed with error \"%s\" instead", i+1, testCase.err.Error(), err.Error())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Test passes as expected, but the output values are verified for correctness here.
|
||||||
|
if err == nil && testCase.shouldPass {
|
||||||
|
if !reflect.DeepEqual(testCase.expectedStatements, actualStatements) {
|
||||||
|
t.Errorf("Test %d: The expected statements from resource statement generator doesn't match the actual statements", i+1)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Tests validating read only statement generator.
|
||||||
|
func TestsetReadOnlyStatement(t *testing.T) {
|
||||||
|
|
||||||
|
expectedReadOnlyStatement := func(bucketName, objectPrefix string) []Statement {
|
||||||
|
bucketResourceStatement := &Statement{}
|
||||||
|
objectResourceStatement := &Statement{}
|
||||||
|
statements := []Statement{}
|
||||||
|
|
||||||
|
bucketResourceStatement.Effect = "Allow"
|
||||||
|
bucketResourceStatement.Principal.AWS = []string{"*"}
|
||||||
|
		bucketResourceStatement.Resources = []string{fmt.Sprintf("%s%s", awsResourcePrefix, bucketName)}
		bucketResourceStatement.Actions = readOnlyBucketActions
		objectResourceStatement.Effect = "Allow"
		objectResourceStatement.Principal.AWS = []string{"*"}
		objectResourceStatement.Resources = []string{fmt.Sprintf("%s%s", awsResourcePrefix, bucketName+"/"+objectPrefix+"*")}
		objectResourceStatement.Actions = readOnlyObjectActions
		// Save the read only policy.
		statements = append(statements, *bucketResourceStatement, *objectResourceStatement)
		return statements
	}

	testCases := []struct {
		// inputs.
		bucketName   string
		objectPrefix string
		// expected result.
		expectedStatements []Statement
	}{
		{"my-bucket", "", expectedReadOnlyStatement("my-bucket", "")},
		{"my-bucket", "Asia/", expectedReadOnlyStatement("my-bucket", "Asia/")},
		{"my-bucket", "Asia/India", expectedReadOnlyStatement("my-bucket", "Asia/India")},
	}

	for i, testCase := range testCases {
		actualStatements := setReadOnlyStatement(testCase.bucketName, testCase.objectPrefix)
		if !reflect.DeepEqual(testCase.expectedStatements, actualStatements) {
			t.Errorf("Test %d: The expected statements from resource statement generator doesn't match the actual statements", i+1)
		}
	}
}

// Tests validating write only statement generator.
func TestsetWriteOnlyStatement(t *testing.T) {
	expectedWriteOnlyStatement := func(bucketName, objectPrefix string) []Statement {
		bucketResourceStatement := &Statement{}
		objectResourceStatement := &Statement{}
		statements := []Statement{}
		// Write only policy.
		bucketResourceStatement.Effect = "Allow"
		bucketResourceStatement.Principal.AWS = []string{"*"}
		bucketResourceStatement.Resources = []string{fmt.Sprintf("%s%s", awsResourcePrefix, bucketName)}
		bucketResourceStatement.Actions = writeOnlyBucketActions
		objectResourceStatement.Effect = "Allow"
		objectResourceStatement.Principal.AWS = []string{"*"}
		objectResourceStatement.Resources = []string{fmt.Sprintf("%s%s", awsResourcePrefix, bucketName+"/"+objectPrefix+"*")}
		objectResourceStatement.Actions = writeOnlyObjectActions
		// Save the write only policy.
		statements = append(statements, *bucketResourceStatement, *objectResourceStatement)
		return statements
	}

	testCases := []struct {
		// inputs.
		bucketName   string
		objectPrefix string
		// expected result.
		expectedStatements []Statement
	}{
		{"my-bucket", "", expectedWriteOnlyStatement("my-bucket", "")},
		{"my-bucket", "Asia/", expectedWriteOnlyStatement("my-bucket", "Asia/")},
		{"my-bucket", "Asia/India", expectedWriteOnlyStatement("my-bucket", "Asia/India")},
	}

	for i, testCase := range testCases {
		actualStatements := setWriteOnlyStatement(testCase.bucketName, testCase.objectPrefix)
		if !reflect.DeepEqual(testCase.expectedStatements, actualStatements) {
			t.Errorf("Test %d: The expected statements from resource statement generator doesn't match the actual statements", i+1)
		}
	}
}

// Tests validating read-write statement generator.
func TestsetReadWriteStatement(t *testing.T) {
	// Obtain statements for read-write BucketPolicy.
	expectedReadWriteStatement := func(bucketName, objectPrefix string) []Statement {
		bucketResourceStatement := &Statement{}
		objectResourceStatement := &Statement{}
		statements := []Statement{}

		bucketResourceStatement.Effect = "Allow"
		bucketResourceStatement.Principal.AWS = []string{"*"}
		bucketResourceStatement.Resources = []string{fmt.Sprintf("%s%s", awsResourcePrefix, bucketName)}
		bucketResourceStatement.Actions = readWriteBucketActions
		objectResourceStatement.Effect = "Allow"
		objectResourceStatement.Principal.AWS = []string{"*"}
		objectResourceStatement.Resources = []string{fmt.Sprintf("%s%s", awsResourcePrefix, bucketName+"/"+objectPrefix+"*")}
		objectResourceStatement.Actions = readWriteObjectActions
		// Save the read write policy.
		statements = append(statements, *bucketResourceStatement, *objectResourceStatement)
		return statements
	}

	testCases := []struct {
		// inputs.
		bucketName   string
		objectPrefix string
		// expected result.
		expectedStatements []Statement
	}{
		{"my-bucket", "", expectedReadWriteStatement("my-bucket", "")},
		{"my-bucket", "Asia/", expectedReadWriteStatement("my-bucket", "Asia/")},
		{"my-bucket", "Asia/India", expectedReadWriteStatement("my-bucket", "Asia/India")},
	}

	for i, testCase := range testCases {
		actualStatements := setReadWriteStatement(testCase.bucketName, testCase.objectPrefix)
		if !reflect.DeepEqual(testCase.expectedStatements, actualStatements) {
			t.Errorf("Test %d: The expected statements from resource statement generator doesn't match the actual statements", i+1)
		}
	}
}

// Tests validate Unmarshalling of BucketAccessPolicy.
func TestUnMarshalBucketPolicy(t *testing.T) {
	bucketAccessPolicies := []BucketAccessPolicy{
		{Version: "1.0"},
		{Version: "1.0", Statements: setReadOnlyStatement("minio-bucket", "")},
		{Version: "1.0", Statements: setReadWriteStatement("minio-bucket", "Asia/")},
		{Version: "1.0", Statements: setWriteOnlyStatement("minio-bucket", "Asia/India/")},
	}

	testCases := []struct {
		inputPolicy BucketAccessPolicy
		// expected results.
		expectedPolicy BucketAccessPolicy
		err            error
		// Flag indicating whether the test should pass.
		shouldPass bool
	}{
		{bucketAccessPolicies[0], bucketAccessPolicies[0], nil, true},
		{bucketAccessPolicies[1], bucketAccessPolicies[1], nil, true},
		{bucketAccessPolicies[2], bucketAccessPolicies[2], nil, true},
		{bucketAccessPolicies[3], bucketAccessPolicies[3], nil, true},
	}
	for i, testCase := range testCases {
		inputPolicyBytes, e := json.Marshal(testCase.inputPolicy)
		if e != nil {
			t.Fatalf("Test %d: Couldn't Marshal bucket policy", i+1)
		}
		actualAccessPolicy, err := unMarshalBucketPolicy(inputPolicyBytes)
		if err != nil && testCase.shouldPass {
			t.Errorf("Test %d: Expected to pass, but failed with: <ERROR> %s", i+1, err.Error())
		}
		if err == nil && !testCase.shouldPass {
			t.Errorf("Test %d: Expected to fail with <ERROR> \"%s\", but passed instead", i+1, testCase.err.Error())
		}
		// Failed as expected, but does it fail for the expected reason.
		if err != nil && !testCase.shouldPass {
			if err.Error() != testCase.err.Error() {
				t.Errorf("Test %d: Expected to fail with error \"%s\", but instead failed with error \"%s\"", i+1, testCase.err.Error(), err.Error())
			}
		}
		// Test passes as expected, but the output values are verified for correctness here.
		if err == nil && testCase.shouldPass {
			if !reflect.DeepEqual(testCase.expectedPolicy, actualAccessPolicy) {
				t.Errorf("Test %d: The expected statements from resource statement generator doesn't match the actual statements", i+1)
			}
		}
	}
}

// Statement.Action, Statement.Resource, Statement.Principal.AWS fields could be just string also.
// Setting these values to just a string and testing the unMarshalBucketPolicy.
func TestUnMarshalBucketPolicyUntyped(t *testing.T) {
	obtainRaw := func(v interface{}, t *testing.T) []byte {
		rawData, e := json.Marshal(v)
		if e != nil {
			t.Fatal(e.Error())
		}
		return rawData
	}

	type untypedStatement struct {
		Sid       string
		Effect    string
		Principal struct {
			AWS json.RawMessage
		}
		Action    json.RawMessage
		Resource  json.RawMessage
		Condition map[string]map[string]string
	}

	type bucketAccessPolicyUntyped struct {
		Version   string
		Statement []untypedStatement
	}

	statements := setReadOnlyStatement("my-bucket", "Asia/")
	expectedBucketPolicy := BucketAccessPolicy{Statements: statements}
	accessPolicyUntyped := bucketAccessPolicyUntyped{}
	accessPolicyUntyped.Statement = make([]untypedStatement, 2)

	accessPolicyUntyped.Statement[0].Effect = statements[0].Effect
	accessPolicyUntyped.Statement[0].Principal.AWS = obtainRaw(statements[0].Principal.AWS, t)
	accessPolicyUntyped.Statement[0].Action = obtainRaw(statements[0].Actions, t)
	accessPolicyUntyped.Statement[0].Resource = obtainRaw(statements[0].Resources, t)

	// Setting the values as plain strings.
	accessPolicyUntyped.Statement[1].Effect = statements[1].Effect
	accessPolicyUntyped.Statement[1].Principal.AWS = obtainRaw(statements[1].Principal.AWS[0], t)
	accessPolicyUntyped.Statement[1].Action = obtainRaw(statements[1].Actions[0], t)
	accessPolicyUntyped.Statement[1].Resource = obtainRaw(statements[1].Resources[0], t)

	inputPolicyBytes := obtainRaw(accessPolicyUntyped, t)
	actualAccessPolicy, err := unMarshalBucketPolicy(inputPolicyBytes)
	if err != nil {
		t.Fatal("Unmarshalling bucket policy from untyped statements failed")
	}
	if !reflect.DeepEqual(expectedBucketPolicy, actualAccessPolicy) {
		t.Errorf("Expected BucketPolicy after unmarshalling untyped statements doesn't match the actual one")
	}
}

// Tests validate removal of policy statement from the list of statements.
func TestRemoveBucketPolicyStatement(t *testing.T) {
	testCases := []struct {
		bucketName      string
		objectPrefix    string
		inputStatements []Statement
	}{
		{"my-bucket", "", []Statement{}},
		{"read-only-bucket", "", setReadOnlyStatement("read-only-bucket", "")},
		{"write-only-bucket", "", setWriteOnlyStatement("write-only-bucket", "")},
		{"read-write-bucket", "", setReadWriteStatement("read-write-bucket", "")},
	}
	for i, testCase := range testCases {
		actualStatements := removeBucketPolicyStatement(testCase.inputStatements, testCase.bucketName, testCase.objectPrefix)
		// Empty statement list is expected after the invocation of removeBucketPolicyStatement().
		if len(actualStatements) != 0 {
			t.Errorf("Test %d: The expected statements from resource statement generator doesn't match the actual statements", i+1)
		}
	}
}

// Tests validate removing of read only bucket statement.
func TestRemoveBucketPolicyStatementReadOnly(t *testing.T) {
	var emptyStatement []Statement
	testCases := []struct {
		bucketName         string
		objectPrefix       string
		inputStatements    []Statement
		expectedStatements []Statement
	}{
		{"my-bucket", "", []Statement{}, emptyStatement},
		{"read-only-bucket", "", setReadOnlyStatement("read-only-bucket", ""), emptyStatement},
	}
	for i, testCase := range testCases {
		actualStatements := removeBucketPolicyStatementReadOnly(testCase.inputStatements, testCase.bucketName, testCase.objectPrefix)
		// Empty statement list is expected after the invocation of removeBucketPolicyStatementReadOnly().
		if !reflect.DeepEqual(testCase.expectedStatements, actualStatements) {
			t.Errorf("Test %d: Expected policy statements doesn't match the actual one", i+1)
		}
	}
}

// Tests validate removing of write only bucket statement.
func TestRemoveBucketPolicyStatementWriteOnly(t *testing.T) {
	var emptyStatement []Statement
	testCases := []struct {
		bucketName         string
		objectPrefix       string
		inputStatements    []Statement
		expectedStatements []Statement
	}{
		{"my-bucket", "", []Statement{}, emptyStatement},
		{"write-only-bucket", "", setWriteOnlyStatement("write-only-bucket", ""), emptyStatement},
	}
	for i, testCase := range testCases {
		actualStatements := removeBucketPolicyStatementWriteOnly(testCase.inputStatements, testCase.bucketName, testCase.objectPrefix)
		// Empty statement list is expected after the invocation of removeBucketPolicyStatementWriteOnly().
		if !reflect.DeepEqual(testCase.expectedStatements, actualStatements) {
			t.Errorf("Test %d: Expected policy statements doesn't match the actual one", i+1)
		}
	}
}

// Tests validate removing of read-write bucket statement.
func TestRemoveBucketPolicyStatementReadWrite(t *testing.T) {
	var emptyStatement []Statement
	testCases := []struct {
		bucketName         string
		objectPrefix       string
		inputStatements    []Statement
		expectedStatements []Statement
	}{
		{"my-bucket", "", []Statement{}, emptyStatement},
		{"read-write-bucket", "", setReadWriteStatement("read-write-bucket", ""), emptyStatement},
	}
	for i, testCase := range testCases {
		actualStatements := removeBucketPolicyStatementReadWrite(testCase.inputStatements, testCase.bucketName, testCase.objectPrefix)
		// Empty statement list is expected after the invocation of removeBucketPolicyStatementReadWrite().
		if !reflect.DeepEqual(testCase.expectedStatements, actualStatements) {
			t.Errorf("Test %d: Expected policy statements doesn't match the actual one", i+1)
		}
	}
}

// Tests validate whether the bucket policy is read only.
func TestIsBucketPolicyReadOnly(t *testing.T) {
	testCases := []struct {
		bucketName      string
		objectPrefix    string
		inputStatements []Statement
		// expected result.
		expectedResult bool
	}{
		{"my-bucket", "", []Statement{}, false},
		{"read-only-bucket", "", setReadOnlyStatement("read-only-bucket", ""), true},
		{"write-only-bucket", "", setWriteOnlyStatement("write-only-bucket", ""), false},
		{"read-write-bucket", "", setReadWriteStatement("read-write-bucket", ""), true},
	}
	for i, testCase := range testCases {
		actualResult := isBucketPolicyReadOnly(testCase.inputStatements, testCase.bucketName, testCase.objectPrefix)
		if testCase.expectedResult != actualResult {
			t.Errorf("Test %d: Expected isBucketPolicyReadonly to '%v', but instead found '%v'", i+1, testCase.expectedResult, actualResult)
		}
	}
}

// Tests validate whether the bucket policy is read-write.
func TestIsBucketPolicyReadWrite(t *testing.T) {
	testCases := []struct {
		bucketName      string
		objectPrefix    string
		inputStatements []Statement
		// expected result.
		expectedResult bool
	}{
		{"my-bucket", "", []Statement{}, false},
		{"read-only-bucket", "", setReadOnlyStatement("read-only-bucket", ""), false},
		{"write-only-bucket", "", setWriteOnlyStatement("write-only-bucket", ""), false},
		{"read-write-bucket", "", setReadWriteStatement("read-write-bucket", ""), true},
	}
	for i, testCase := range testCases {
		actualResult := isBucketPolicyReadWrite(testCase.inputStatements, testCase.bucketName, testCase.objectPrefix)
		if testCase.expectedResult != actualResult {
			t.Errorf("Test %d: Expected isBucketPolicyReadonly to '%v', but instead found '%v'", i+1, testCase.expectedResult, actualResult)
		}
	}
}

// Tests validate whether the bucket policy is write only.
func TestIsBucketPolicyWriteOnly(t *testing.T) {
	testCases := []struct {
		bucketName      string
		objectPrefix    string
		inputStatements []Statement
		// expected result.
		expectedResult bool
	}{
		{"my-bucket", "", []Statement{}, false},
		{"read-only-bucket", "", setReadOnlyStatement("read-only-bucket", ""), false},
		{"write-only-bucket", "", setWriteOnlyStatement("write-only-bucket", ""), true},
		{"read-write-bucket", "", setReadWriteStatement("read-write-bucket", ""), true},
	}
	for i, testCase := range testCases {
		actualResult := isBucketPolicyWriteOnly(testCase.inputStatements, testCase.bucketName, testCase.objectPrefix)
		if testCase.expectedResult != actualResult {
			t.Errorf("Test %d: Expected isBucketPolicyReadonly to '%v', but instead found '%v'", i+1, testCase.expectedResult, actualResult)
		}
	}
}
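For orientation (this note is not part of the upstream diff): the statement generators exercised above always build two "Allow" statements, one on the bucket resource and one on the objects under the given prefix. A minimal sketch of rendering such a policy as JSON, assuming only the Version/Statements fields used in these tests and a hypothetical example function name:

	// Sketch only: marshals the read-only policy the tests above construct.
	// Assumes access to the package-private setReadOnlyStatement and the
	// BucketAccessPolicy fields (Version, Statements) used in the tests.
	func exampleReadOnlyPolicyJSON() {
		policy := BucketAccessPolicy{
			Version:    "1.0", // same version string the tests use
			Statements: setReadOnlyStatement("my-bucket", "Asia/"),
		}
		raw, err := json.Marshal(policy)
		if err != nil {
			fmt.Println(err)
			return
		}
		// Expect two "Allow" statements: one on the bucket ARN, one on "Asia/*" objects.
		fmt.Println(string(raw))
	}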
97
vendor/src/github.com/minio/minio-go/copy-conditions.go
vendored
Normal file
@ -0,0 +1,97 @@
/*
 * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2016 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package minio

import (
	"net/http"
	"time"
)

// copyCondition explanation:
// http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectCOPY.html
//
// Example:
//
//   copyCondition {
//       key: "x-amz-copy-if-modified-since",
//       value: "Tue, 15 Nov 1994 12:45:26 GMT",
//   }
//
type copyCondition struct {
	key   string
	value string
}

// CopyConditions - copy conditions.
type CopyConditions struct {
	conditions []copyCondition
}

// NewCopyConditions - Instantiate new list of conditions.
func NewCopyConditions() CopyConditions {
	return CopyConditions{
		conditions: make([]copyCondition, 0),
	}
}

// SetMatchETag - set match etag.
func (c CopyConditions) SetMatchETag(etag string) error {
	if etag == "" {
		return ErrInvalidArgument("ETag cannot be empty.")
	}
	c.conditions = append(c.conditions, copyCondition{
		key:   "x-amz-copy-source-if-match",
		value: etag,
	})
	return nil
}

// SetMatchETagExcept - set match etag except.
func (c CopyConditions) SetMatchETagExcept(etag string) error {
	if etag == "" {
		return ErrInvalidArgument("ETag cannot be empty.")
	}
	c.conditions = append(c.conditions, copyCondition{
		key:   "x-amz-copy-source-if-none-match",
		value: etag,
	})
	return nil
}

// SetUnmodified - set unmodified time since.
func (c CopyConditions) SetUnmodified(modTime time.Time) error {
	if modTime.IsZero() {
		return ErrInvalidArgument("Modified since cannot be empty.")
	}
	c.conditions = append(c.conditions, copyCondition{
		key:   "x-amz-copy-source-if-unmodified-since",
		value: modTime.Format(http.TimeFormat),
	})
	return nil
}

// SetModified - set modified time since.
func (c CopyConditions) SetModified(modTime time.Time) error {
	if modTime.IsZero() {
		return ErrInvalidArgument("Modified since cannot be empty.")
	}
	c.conditions = append(c.conditions, copyCondition{
		key:   "x-amz-copy-source-if-modified-since",
		value: modTime.Format(http.TimeFormat),
	})
	return nil
}
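A short usage sketch (not part of the diff) of the new CopyConditions type; the CopyObject call signature is taken from the copyobject.go example added later in this change, and the endpoint, credentials and bucket/object names are placeholders:

	package main

	import (
		"log"
		"time"

		"github.com/minio/minio-go"
	)

	func main() {
		// Placeholder endpoint and credentials.
		s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", false)
		if err != nil {
			log.Fatalln(err)
		}

		// Only copy the source object if it was modified after the given date.
		copyConds := minio.NewCopyConditions()
		if err := copyConds.SetModified(time.Date(2016, time.January, 1, 0, 0, 0, 0, time.UTC)); err != nil {
			log.Fatalln(err)
		}

		// Destination bucket and object come first, then the "/source-bucket/source-object" path.
		if err := s3Client.CopyObject("my-bucketname", "my-objectname", "/my-sourcebucketname/my-sourceobjectname", copyConds); err != nil {
			log.Fatalln(err)
		}
		log.Println("copy request issued")
	}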
The following hunks delete the old top-level examples. Each deleted file carried the same preamble: a "// +build ignore" tag, the Apache License, Version 2.0 header (C) 2015 Minio, Inc., "package main", an import of "log" (plus "fmt", "os" or "time" where needed) and "github.com/minio/minio-go", a "Note: ... dummy values" comment, and the same client setup:

	s3Client, err := minio.New("play.minio.io:9002", "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", false)
	if err != nil {
		log.Fatalln(err)
	}

Only the body after that setup differed; the distinctive part of each deleted main() is kept below.

@ -1,46 +0,0 @@ (BucketExists example)
	err = s3Client.BucketExists("my-bucketname")
	if err != nil {
		log.Fatalln(err)
	}
	log.Println("Success")

@ -1,44 +0,0 @@ (FGetObject example)
	if err := s3Client.FGetObject("bucket-name", "objectName", "fileName.csv"); err != nil {
		log.Fatalln(err)
	}
	log.Println("Successfully saved fileName.csv")

@ -1,44 +0,0 @@ (FPutObject example)
	if _, err := s3Client.FPutObject("my-bucketname", "my-objectname", "my-filename.csv", "application/csv"); err != nil {
		log.Fatalln(err)
	}
	log.Println("Successfully uploaded my-filename.csv")

@ -1,46 +0,0 @@ (GetBucketACL example)
	acl, err := s3Client.GetBucketACL("my-bucketname")
	if err != nil {
		log.Fatalln(err)
	}
	log.Println(acl)

@ -1,45 +0,0 @@ (ListBuckets example)
	buckets, err := s3Client.ListBuckets()
	if err != nil {
		log.Fatalln(err)
	}
	for _, bucket := range buckets {
		log.Println(bucket)
	}

@ -1,56 +0,0 @@ (ListIncompleteUploads example)
	// Create a done channel to control the listing goroutine and close it on return.
	doneCh := make(chan struct{})
	defer close(doneCh)
	// List all multipart uploads from a bucket-name with a matching prefix.
	for multipartObject := range s3Client.ListIncompleteUploads("my-bucketname", "my-prefixname", true, doneCh) {
		if multipartObject.Err != nil {
			fmt.Println(multipartObject.Err)
			return
		}
		fmt.Println(multipartObject)
	}

@ -1,56 +0,0 @@ (ListObjects example)
	doneCh := make(chan struct{})
	defer close(doneCh)
	// List all objects from a bucket-name with a matching prefix.
	for object := range s3Client.ListObjects("my-bucketname", "my-prefixname", true, doneCh) {
		if object.Err != nil {
			fmt.Println(object.Err)
			return
		}
		fmt.Println(object)
	}

@ -1,45 +0,0 @@ (MakeBucket example)
	err = s3Client.MakeBucket("my-bucketname", minio.BucketACL("private"), "us-east-1")
	if err != nil {
		log.Fatalln(err)
	}
	log.Println("Success")

@ -1,46 +0,0 @@ (PresignedGetObject example)
	presignedURL, err := s3Client.PresignedGetObject("my-bucketname", "my-objectname", time.Duration(1000)*time.Second)
	if err != nil {
		log.Fatalln(err)
	}
	log.Println(presignedURL)

@ -1,56 +0,0 @@ (PresignedPostPolicy example)
	policy := minio.NewPostPolicy()
	policy.SetBucket("my-bucketname")
	policy.SetKey("my-objectname")
	policy.SetExpires(time.Now().UTC().AddDate(0, 0, 10)) // expires in 10 days
	m, err := s3Client.PresignedPostPolicy(policy)
	if err != nil {
		log.Fatalln(err)
	}
	fmt.Printf("curl ")
	for k, v := range m {
		fmt.Printf("-F %s=%s ", k, v)
	}
	fmt.Printf("-F file=@/etc/bashrc ")
	fmt.Printf("https://play.minio.io:9002/my-bucketname\n")

@ -1,46 +0,0 @@ (PresignedPutObject example)
	presignedURL, err := s3Client.PresignedPutObject("my-bucketname", "my-objectname", time.Duration(1000)*time.Second)
	if err != nil {
		log.Fatalln(err)
	}
	log.Println(presignedURL)

@ -1,52 +0,0 @@ (PutObject example)
	object, err := os.Open("my-testfile")
	if err != nil {
		log.Fatalln(err)
	}
	defer object.Close()
	n, err := s3Client.PutObject("my-bucketname", "my-objectname", object, "application/octet-stream")
	if err != nil {
		log.Fatalln(err)
	}
	log.Println("Uploaded", "my-objectname", " of size: ", n, "Successfully.")

@ -1,46 +0,0 @@ (RemoveBucket example)
	// This operation will only work if your bucket is empty.
	err = s3Client.RemoveBucket("my-bucketname")
	if err != nil {
		log.Fatalln(err)
	}
	log.Println("Success")

@ -1,46 +0,0 @@ (RemoveIncompleteUpload example)
	for err := range s3Client.RemoveIncompleteUpload("my-bucketname", "my-objectname") {
		if err != nil {
			log.Fatalln(err)
		}
	}
	log.Println("Success")

@ -1,44 +0,0 @@ (RemoveObject example)
	err = s3Client.RemoveObject("my-bucketname", "my-objectname")
	if err != nil {
		log.Fatalln(err)
	}
	log.Println("Success")

@ -1,45 +0,0 @@ (SetBucketACL example)
	err = s3Client.SetBucketACL("my-bucketname", minio.BucketACL("public-read-write"))
	if err != nil {
		log.Fatalln(err)
	}

@ -1,44 +0,0 @@ (StatObject example)
	stat, err := s3Client.StatObject("my-bucketname", "my-objectname")
	if err != nil {
		log.Fatalln(err)
	}
	log.Println(stat)
67
vendor/src/github.com/minio/minio-go/examples/s3/copyobject.go
vendored
Normal file
@ -0,0 +1,67 @@
// +build ignore

/*
 * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2016 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package main

import (
	"log"
	"time"

	"github.com/minio/minio-go"
)

func main() {
	// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-testfile, my-bucketname and
	// my-objectname are dummy values, please replace them with original values.

	// Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access.
	// This boolean value is the last argument for New().

	// New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
	// determined based on the Endpoint value.
	s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", false)
	if err != nil {
		log.Fatalln(err)
	}

	// Enable trace.
	// s3Client.TraceOn(os.Stderr)

	// All following conditions are allowed and can be combined together.

	// Set copy conditions.
	var copyConds = minio.NewCopyConditions()
	// Set modified condition, copy object modified since 2014 April.
	copyConds.SetModified(time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC))

	// Set unmodified condition, copy object unmodified since 2014 April.
	// copyConds.SetUnmodified(time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC))

	// Set matching ETag condition, copy object which matches the following ETag.
	// copyConds.SetMatchETag("31624deb84149d2f8ef9c385918b653a")

	// Set matching ETag except condition, copy object which does not match the following ETag.
	// copyConds.SetMatchETagExcept("31624deb84149d2f8ef9c385918b653a")

	// Initiate copy object.
	err = s3Client.CopyObject("my-bucketname", "my-objectname", "/my-sourcebucketname/my-sourceobjectname", copyConds)
	if err != nil {
		log.Fatalln(err)
	}
	log.Println("Copied source object /my-sourcebucketname/my-sourceobjectname to destination /my-bucketname/my-objectname Successfully.")
}
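As a hedged follow-up to the example above (not part of the diff), the destination object can be checked after the copy with StatObject, whose two-argument form appears in the removed StatObject example; this snippet assumes it is appended inside the same main() and reuses s3Client:

	// Verify the destination object exists and inspect its metadata (sketch only).
	stat, err := s3Client.StatObject("my-bucketname", "my-objectname")
	if err != nil {
		log.Fatalln(err)
	}
	log.Println("copied object info:", stat)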
@ -1,7 +1,7 @@
|
|||||||
// +build ignore
|
// +build ignore
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
|
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
|
||||||
*
|
*
|
||||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
* you may not use this file except in compliance with the License.
|
* you may not use this file except in compliance with the License.
|
||||||
@ -19,44 +19,35 @@
|
|||||||
package main
|
package main
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"io"
|
|
||||||
"log"
|
"log"
|
||||||
"os"
|
|
||||||
|
|
||||||
"github.com/minio/minio-go"
|
"github.com/minio/minio-go"
|
||||||
)
|
)
|
||||||
|
|
||||||
func main() {
|
func main() {
|
||||||
// Note: my-bucketname, my-objectname and my-testfile are dummy values, please replace them with original values.
|
// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY and my-bucketname are
|
||||||
|
// dummy values, please replace them with original values.
|
||||||
|
|
||||||
// Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access.
|
// Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access.
|
||||||
// This boolean value is the last argument for New().
|
// This boolean value is the last argument for New().
|
||||||
|
|
||||||
// New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
|
// New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
|
||||||
// determined based on the Endpoint value.
|
// determined based on the Endpoint value.
|
||||||
s3Client, err := minio.New("play.minio.io:9002", "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", false)
|
s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", false)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Fatalln(err)
|
log.Fatalln(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
reader, err := s3Client.GetObject("my-bucketname", "my-objectname")
|
// s3Client.TraceOn(os.Stderr)
|
||||||
|
|
||||||
|
policy, err := s3Client.GetBucketPolicy("my-bucketname", "my-objectprefix")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Fatalln(err)
|
log.Fatalln(err)
|
||||||
}
|
}
|
||||||
defer reader.Close()
|
// Description of policy output.
|
||||||
|
// "none" - The specified bucket does not have a bucket policy.
|
||||||
localFile, err := os.Create("my-testfile")
|
// "readonly" - Read only operatoins are allowed.
|
||||||
if err != nil {
|
// "writeonly" - Write only operations are allowed.
|
||||||
log.Fatalln(err)
|
// "readwrite" - both read and write operations are allowed, the bucket is public.
|
||||||
}
|
log.Println("Success - ", policy)
|
||||||
defer localfile.Close()
|
|
||||||
|
|
||||||
stat, err := reader.Stat()
|
|
||||||
if err != nil {
|
|
||||||
log.Fatalln(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if _, err := io.CopyN(localFile, reader, stat.Size); err != nil {
|
|
||||||
log.Fatalln(err)
|
|
||||||
}
|
|
||||||
}
|
}
|
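The new GetBucketPolicy call above returns one of the policy strings described in its comments ("none", "readonly", "writeonly", "readwrite"). A minimal sketch of that call follows; endpoint, credentials and names are placeholders.

// getbucketpolicy_sketch.go - reading back the policy for an object prefix.
package main

import (
	"log"

	"github.com/minio/minio-go"
)

func main() {
	s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", false)
	if err != nil {
		log.Fatalln(err)
	}

	// Returns "none", "readonly", "writeonly" or "readwrite" for objects
	// matching the given prefix.
	policy, err := s3Client.GetBucketPolicy("my-bucketname", "my-objectprefix")
	if err != nil {
		log.Fatalln(err)
	}
	log.Println("bucket policy:", policy)
}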
@ -50,7 +50,7 @@ func main() {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
log.Fatalln(err)
|
log.Fatalln(err)
|
||||||
}
|
}
|
||||||
defer localfile.Close()
|
defer localFile.Close()
|
||||||
|
|
||||||
stat, err := reader.Stat()
|
stat, err := reader.Stat()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
76 vendor/src/github.com/minio/minio-go/examples/s3/listobjects-N.go (vendored, new file)
@ -0,0 +1,76 @@
|
|||||||
|
// +build ignore
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"github.com/minio/minio-go"
|
||||||
|
)
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname and my-prefixname
|
||||||
|
// are dummy values, please replace them with original values.
|
||||||
|
|
||||||
|
// Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access.
|
||||||
|
// This boolean value is the last argument for New().
|
||||||
|
|
||||||
|
// New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
|
||||||
|
// determined based on the Endpoint value.
|
||||||
|
s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", false)
|
||||||
|
if err != nil {
|
||||||
|
fmt.Println(err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// List 'N' number of objects from a bucket-name with a matching prefix.
|
||||||
|
listObjectsN := func(bucket, prefix string, recursive bool, N int) (objsInfo []minio.ObjectInfo, err error) {
|
||||||
|
// Create a done channel to control 'ListObjects' go routine.
|
||||||
|
doneCh := make(chan struct{}, 1)
|
||||||
|
|
||||||
|
// Free the channel upon return.
|
||||||
|
defer close(doneCh)
|
||||||
|
|
||||||
|
i := 1
|
||||||
|
for object := range s3Client.ListObjects(bucket, prefix, recursive, doneCh) {
|
||||||
|
if object.Err != nil {
|
||||||
|
return nil, object.Err
|
||||||
|
}
|
||||||
|
i++
|
||||||
|
// Verify if we have printed N objects.
|
||||||
|
if i == N {
|
||||||
|
// Indicate ListObjects go-routine to exit and stop
|
||||||
|
// feeding the objectInfo channel.
|
||||||
|
doneCh <- struct{}{}
|
||||||
|
}
|
||||||
|
objsInfo = append(objsInfo, object)
|
||||||
|
}
|
||||||
|
return objsInfo, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// List recursively first 100 entries for prefix 'my-prefixname'.
|
||||||
|
recursive := true
|
||||||
|
objsInfo, err := listObjectsN("my-bucketname", "my-prefixname", recursive, 100)
|
||||||
|
if err != nil {
|
||||||
|
fmt.Println(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Print all the entries.
|
||||||
|
fmt.Println(objsInfo)
|
||||||
|
}
|
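The new listobjects-N example above relies on a done channel to stop the ListObjects goroutine early. A shorter sketch of the same done-channel pattern follows, assuming only the ListObjects signature shown in the diff; bucket, prefix and credentials are placeholders.

// listobjects_sketch.go - ranging over ListObjects with a done channel.
package main

import (
	"fmt"

	"github.com/minio/minio-go"
)

func main() {
	s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", false)
	if err != nil {
		fmt.Println(err)
		return
	}

	// Closing doneCh tells the ListObjects goroutine to stop feeding the channel.
	doneCh := make(chan struct{})
	defer close(doneCh)

	recursive := true
	for object := range s3Client.ListObjects("my-bucketname", "my-prefixname", recursive, doneCh) {
		if object.Err != nil {
			fmt.Println(object.Err)
			return
		}
		fmt.Println(object.Key)
	}
}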
@ -38,7 +38,7 @@ func main() {
|
|||||||
log.Fatalln(err)
|
log.Fatalln(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
err = s3Client.MakeBucket("my-bucketname", minio.BucketACL("private"), "us-east-1")
|
err = s3Client.MakeBucket("my-bucketname", "us-east-1")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Fatalln(err)
|
log.Fatalln(err)
|
||||||
}
|
}
|
||||||
|
@ -20,6 +20,7 @@ package main
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"log"
|
"log"
|
||||||
|
"net/url"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/minio/minio-go"
|
"github.com/minio/minio-go"
|
||||||
@ -39,7 +40,12 @@ func main() {
|
|||||||
log.Fatalln(err)
|
log.Fatalln(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
presignedURL, err := s3Client.PresignedGetObject("my-bucketname", "my-objectname", time.Duration(1000)*time.Second)
|
// Set request parameters
|
||||||
|
reqParams := make(url.Values)
|
||||||
|
reqParams.Set("response-content-disposition", "attachment; filename=\"your-filename.txt\"")
|
||||||
|
|
||||||
|
// Generate presigned get object url.
|
||||||
|
presignedURL, err := s3Client.PresignedGetObject("my-bucketname", "my-objectname", time.Duration(1000)*time.Second, reqParams)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Fatalln(err)
|
log.Fatalln(err)
|
||||||
}
|
}
|
||||||
|
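The hunk above extends PresignedGetObject with a url.Values argument for response header overrides. A minimal sketch of the new signature follows; endpoint, credentials, names and the chosen filename are placeholders.

// presignedget_sketch.go - presigned GET URL with request parameters.
package main

import (
	"log"
	"net/url"
	"time"

	"github.com/minio/minio-go"
)

func main() {
	s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", false)
	if err != nil {
		log.Fatalln(err)
	}

	// Ask S3 to serve the object as a download with a fixed filename.
	reqParams := make(url.Values)
	reqParams.Set("response-content-disposition", "attachment; filename=\"your-filename.txt\"")

	// The URL stays valid for 1000 seconds.
	presignedURL, err := s3Client.PresignedGetObject("my-bucketname", "my-objectname", 1000*time.Second, reqParams)
	if err != nil {
		log.Fatalln(err)
	}
	log.Println(presignedURL)
}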
@ -1,7 +1,7 @@
|
|||||||
// +build ignore
|
// +build ignore
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
|
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
|
||||||
*
|
*
|
||||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
* you may not use this file except in compliance with the License.
|
* you may not use this file except in compliance with the License.
|
||||||
@ -38,10 +38,9 @@ func main() {
|
|||||||
log.Fatalln(err)
|
log.Fatalln(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
acl, err := s3Client.GetBucketACL("my-bucketname")
|
err = s3Client.RemoveBucketPolicy("my-bucketname", "my-objectprefix")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Fatalln(err)
|
log.Fatalln(err)
|
||||||
}
|
}
|
||||||
log.Println(acl)
|
log.Println("Success")
|
||||||
|
|
||||||
}
|
}
|
@ -38,10 +38,9 @@ func main() {
|
|||||||
log.Fatalln(err)
|
log.Fatalln(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
for err := range s3Client.RemoveIncompleteUpload("my-bucketname", "my-objectname") {
|
err = s3Client.RemoveIncompleteUpload("my-bucketname", "my-objectname")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Fatalln(err)
|
log.Fatalln(err)
|
||||||
}
|
}
|
||||||
}
|
|
||||||
log.Println("Success")
|
log.Println("Success")
|
||||||
}
|
}
|
||||||
|
@ -1,7 +1,7 @@
|
|||||||
// +build ignore
|
// +build ignore
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
|
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
|
||||||
*
|
*
|
||||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
* you may not use this file except in compliance with the License.
|
* you may not use this file except in compliance with the License.
|
||||||
@ -25,8 +25,8 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
func main() {
|
func main() {
|
||||||
// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY and my-bucketname
|
// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY and my-bucketname are
|
||||||
// are dummy values, please replace them with original values.
|
// dummy values, please replace them with original values.
|
||||||
|
|
||||||
// Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access.
|
// Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access.
|
||||||
// This boolean value is the last argument for New().
|
// This boolean value is the last argument for New().
|
||||||
@ -38,9 +38,9 @@ func main() {
|
|||||||
log.Fatalln(err)
|
log.Fatalln(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
err = s3Client.SetBucketACL("my-bucketname", minio.BucketACL("public-read-write"))
|
err = s3Client.SetBucketPolicy("my-bucketname", "my-objectprefix", minio.BucketPolicyReadWrite)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Fatalln(err)
|
log.Fatalln(err)
|
||||||
}
|
}
|
||||||
|
log.Println("Success")
|
||||||
}
|
}
|
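The ACL-based calls above are replaced by prefix-scoped bucket policies. A sketch combining the three policy calls from this diff (set, read back, remove) follows; names and credentials are placeholders, and the read-back value is only expected, not guaranteed, to be "readwrite".

// bucketpolicy_sketch.go - set, inspect and remove a prefix policy.
package main

import (
	"log"

	"github.com/minio/minio-go"
)

func main() {
	s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", false)
	if err != nil {
		log.Fatalln(err)
	}

	// Make all objects under the prefix publicly readable and writable.
	err = s3Client.SetBucketPolicy("my-bucketname", "my-objectprefix", minio.BucketPolicyReadWrite)
	if err != nil {
		log.Fatalln(err)
	}

	// Read the policy back; expected to report "readwrite" here.
	policy, err := s3Client.GetBucketPolicy("my-bucketname", "my-objectprefix")
	if err != nil {
		log.Fatalln(err)
	}
	log.Println("policy is now:", policy)

	// Drop the policy again.
	if err := s3Client.RemoveBucketPolicy("my-bucketname", "my-objectprefix"); err != nil {
		log.Fatalln(err)
	}
}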
@ -1,5 +1,5 @@
|
|||||||
/*
|
/*
|
||||||
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
|
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
|
||||||
*
|
*
|
||||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
* you may not use this file except in compliance with the License.
|
* you may not use this file except in compliance with the License.
|
||||||
@ -27,6 +27,22 @@ type hookReader struct {
|
|||||||
hook io.Reader
|
hook io.Reader
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Seek implements io.Seeker. Seeks source first, and if necessary
|
||||||
|
// seeks hook if Seek method is appropriately found.
|
||||||
|
func (hr *hookReader) Seek(offset int64, whence int) (n int64, err error) {
|
||||||
|
// Verify for source has embedded Seeker, use it.
|
||||||
|
sourceSeeker, ok := hr.source.(io.Seeker)
|
||||||
|
if ok {
|
||||||
|
return sourceSeeker.Seek(offset, whence)
|
||||||
|
}
|
||||||
|
// Verify if hook has embedded Seeker, use it.
|
||||||
|
hookSeeker, ok := hr.hook.(io.Seeker)
|
||||||
|
if ok {
|
||||||
|
return hookSeeker.Seek(offset, whence)
|
||||||
|
}
|
||||||
|
return n, nil
|
||||||
|
}
|
||||||
|
|
||||||
// Read implements io.Reader. Always reads from the source, the return
|
// Read implements io.Reader. Always reads from the source, the return
|
||||||
// value 'n' number of bytes are reported through the hook. Returns
|
// value 'n' number of bytes are reported through the hook. Returns
|
||||||
// error for all non io.EOF conditions.
|
// error for all non io.EOF conditions.
|
||||||
@ -44,7 +60,7 @@ func (hr *hookReader) Read(b []byte) (n int, err error) {
|
|||||||
return n, err
|
return n, err
|
||||||
}
|
}
|
||||||
|
|
||||||
// newHook returns a io.Reader which implements hookReader that
|
// newHook returns a io.ReadSeeker which implements hookReader that
|
||||||
// reports the data read from the source to the hook.
|
// reports the data read from the source to the hook.
|
||||||
func newHook(source, hook io.Reader) io.Reader {
|
func newHook(source, hook io.Reader) io.Reader {
|
||||||
if hook == nil {
|
if hook == nil {
|
||||||
|
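The hookReader.Seek addition above forwards Seek to the wrapped source only when that source happens to implement io.Seeker. A standalone sketch of that type-assertion pattern follows; passThrough is a made-up type for illustration, not part of minio-go.

// seekhook_sketch.go - seek the source only if it can actually seek.
package main

import (
	"fmt"
	"io"
	"strings"
)

type passThrough struct {
	source io.Reader
}

func (p *passThrough) Read(b []byte) (int, error) {
	return p.source.Read(b)
}

// Seek only works when the wrapped source also implements io.Seeker;
// otherwise it is a no-op, matching the defensive style of hookReader.Seek.
func (p *passThrough) Seek(offset int64, whence int) (int64, error) {
	if s, ok := p.source.(io.Seeker); ok {
		return s.Seek(offset, whence)
	}
	return 0, nil
}

func main() {
	r := &passThrough{source: strings.NewReader("hello world")}
	r.Seek(6, 0) // 0 == seek relative to the start
	buf := make([]byte, 5)
	n, _ := r.Read(buf)
	fmt.Println(string(buf[:n])) // "world"
}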
@ -73,6 +73,11 @@ func preSignV2(req http.Request, accessKeyID, secretAccessKey string, expires in
|
|||||||
|
|
||||||
// Get encoded URL path.
|
// Get encoded URL path.
|
||||||
path := encodeURL2Path(req.URL)
|
path := encodeURL2Path(req.URL)
|
||||||
|
if len(req.URL.Query()) > 0 {
|
||||||
|
// Keep the usual queries unescaped for string to sign.
|
||||||
|
query, _ := url.QueryUnescape(queryEncode(req.URL.Query()))
|
||||||
|
path = path + "?" + query
|
||||||
|
}
|
||||||
|
|
||||||
// Find epoch expires when the request will expire.
|
// Find epoch expires when the request will expire.
|
||||||
epochExpires := d.Unix() + expires
|
epochExpires := d.Unix() + expires
|
||||||
@ -93,12 +98,16 @@ func preSignV2(req http.Request, accessKeyID, secretAccessKey string, expires in
|
|||||||
query.Set("AWSAccessKeyId", accessKeyID)
|
query.Set("AWSAccessKeyId", accessKeyID)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Fill in Expires and Signature for presigned query.
|
// Fill in Expires for presigned query.
|
||||||
query.Set("Expires", strconv.FormatInt(epochExpires, 10))
|
query.Set("Expires", strconv.FormatInt(epochExpires, 10))
|
||||||
query.Set("Signature", signature)
|
|
||||||
|
|
||||||
// Encode query and save.
|
// Encode query and save.
|
||||||
req.URL.RawQuery = query.Encode()
|
req.URL.RawQuery = queryEncode(query)
|
||||||
|
|
||||||
|
// Save signature finally.
|
||||||
|
req.URL.RawQuery += "&Signature=" + urlEncodePath(signature)
|
||||||
|
|
||||||
|
// Return.
|
||||||
return &req
|
return &req
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -115,7 +124,7 @@ func postPresignSignatureV2(policyBase64, secretAccessKey string) string {
|
|||||||
// Signature = Base64( HMAC-SHA1( YourSecretAccessKeyID, UTF-8-Encoding-Of( StringToSign ) ) );
|
// Signature = Base64( HMAC-SHA1( YourSecretAccessKeyID, UTF-8-Encoding-Of( StringToSign ) ) );
|
||||||
//
|
//
|
||||||
// StringToSign = HTTP-Verb + "\n" +
|
// StringToSign = HTTP-Verb + "\n" +
|
||||||
// Content-MD5 + "\n" +
|
// Content-Md5 + "\n" +
|
||||||
// Content-Type + "\n" +
|
// Content-Type + "\n" +
|
||||||
// Date + "\n" +
|
// Date + "\n" +
|
||||||
// CanonicalizedProtocolHeaders +
|
// CanonicalizedProtocolHeaders +
|
||||||
@ -163,7 +172,7 @@ func signV2(req http.Request, accessKeyID, secretAccessKey string) *http.Request
|
|||||||
// From the Amazon docs:
|
// From the Amazon docs:
|
||||||
//
|
//
|
||||||
// StringToSign = HTTP-Verb + "\n" +
|
// StringToSign = HTTP-Verb + "\n" +
|
||||||
// Content-MD5 + "\n" +
|
// Content-Md5 + "\n" +
|
||||||
// Content-Type + "\n" +
|
// Content-Type + "\n" +
|
||||||
// Date + "\n" +
|
// Date + "\n" +
|
||||||
// CanonicalizedProtocolHeaders +
|
// CanonicalizedProtocolHeaders +
|
||||||
@ -183,7 +192,7 @@ func getStringToSignV2(req http.Request) string {
|
|||||||
func writeDefaultHeaders(buf *bytes.Buffer, req http.Request) {
|
func writeDefaultHeaders(buf *bytes.Buffer, req http.Request) {
|
||||||
buf.WriteString(req.Method)
|
buf.WriteString(req.Method)
|
||||||
buf.WriteByte('\n')
|
buf.WriteByte('\n')
|
||||||
buf.WriteString(req.Header.Get("Content-MD5"))
|
buf.WriteString(req.Header.Get("Content-Md5"))
|
||||||
buf.WriteByte('\n')
|
buf.WriteByte('\n')
|
||||||
buf.WriteString(req.Header.Get("Content-Type"))
|
buf.WriteString(req.Header.Get("Content-Type"))
|
||||||
buf.WriteByte('\n')
|
buf.WriteByte('\n')
|
||||||
@ -226,7 +235,8 @@ func writeCanonicalizedHeaders(buf *bytes.Buffer, req http.Request) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Must be sorted:
|
// The following list is already sorted and should always be, otherwise we could
|
||||||
|
// have signature-related issues
|
||||||
var resourceList = []string{
|
var resourceList = []string{
|
||||||
"acl",
|
"acl",
|
||||||
"location",
|
"location",
|
||||||
@ -234,13 +244,13 @@ var resourceList = []string{
|
|||||||
"notification",
|
"notification",
|
||||||
"partNumber",
|
"partNumber",
|
||||||
"policy",
|
"policy",
|
||||||
"response-content-type",
|
"requestPayment",
|
||||||
"response-content-language",
|
|
||||||
"response-expires",
|
|
||||||
"response-cache-control",
|
"response-cache-control",
|
||||||
"response-content-disposition",
|
"response-content-disposition",
|
||||||
"response-content-encoding",
|
"response-content-encoding",
|
||||||
"requestPayment",
|
"response-content-language",
|
||||||
|
"response-content-type",
|
||||||
|
"response-expires",
|
||||||
"torrent",
|
"torrent",
|
||||||
"uploadId",
|
"uploadId",
|
||||||
"uploads",
|
"uploads",
|
||||||
@ -262,7 +272,6 @@ func writeCanonicalizedResource(buf *bytes.Buffer, req http.Request) {
|
|||||||
path := encodeURL2Path(requestURL)
|
path := encodeURL2Path(requestURL)
|
||||||
buf.WriteString(path)
|
buf.WriteString(path)
|
||||||
|
|
||||||
sort.Strings(resourceList)
|
|
||||||
if requestURL.RawQuery != "" {
|
if requestURL.RawQuery != "" {
|
||||||
var n int
|
var n int
|
||||||
vals, _ := url.ParseQuery(requestURL.RawQuery)
|
vals, _ := url.ParseQuery(requestURL.RawQuery)
|
||||||
@ -283,7 +292,7 @@ func writeCanonicalizedResource(buf *bytes.Buffer, req http.Request) {
|
|||||||
// Request parameters
|
// Request parameters
|
||||||
if len(vv[0]) > 0 {
|
if len(vv[0]) > 0 {
|
||||||
buf.WriteByte('=')
|
buf.WriteByte('=')
|
||||||
buf.WriteString(url.QueryEscape(vv[0]))
|
buf.WriteString(strings.Replace(url.QueryEscape(vv[0]), "+", "%20", -1))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
35 vendor/src/github.com/minio/minio-go/request-signature-v2_test.go (vendored, new file)
@ -0,0 +1,35 @@
|
|||||||
|
/*
|
||||||
|
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package minio
|
||||||
|
|
||||||
|
import (
|
||||||
|
"sort"
|
||||||
|
"testing"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Tests for 'func TestResourceListSorting(t *testing.T)'.
|
||||||
|
func TestResourceListSorting(t *testing.T) {
|
||||||
|
sortedResourceList := make([]string, len(resourceList))
|
||||||
|
copy(sortedResourceList, resourceList)
|
||||||
|
sort.Strings(sortedResourceList)
|
||||||
|
for i := 0; i < len(resourceList); i++ {
|
||||||
|
if resourceList[i] != sortedResourceList[i] {
|
||||||
|
t.Errorf("Expected resourceList[%d] = \"%s\", resourceList is not correctly sorted.", i, sortedResourceList[i])
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
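The change above drops the runtime sort.Strings(resourceList) call and instead keeps resourceList pre-sorted, guarded by the new TestResourceListSorting. A tiny sketch of checking that invariant with the standard library follows; the shortened list here is illustrative only.

// sortedcheck_sketch.go - verifying a resource list stays pre-sorted.
package main

import (
	"fmt"
	"sort"
)

func main() {
	resourceList := []string{
		"acl",
		"location",
		"partNumber",
		"policy",
		"requestPayment",
		"uploadId",
		"uploads",
	}

	// sort.StringsAreSorted reports whether the slice is already in ascending
	// order, so the signing code never has to sort it at request time.
	fmt.Println("sorted:", sort.StringsAreSorted(resourceList))
}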
138 vendor/src/github.com/minio/minio-go/retry.go (vendored, new file)
@ -0,0 +1,138 @@
|
|||||||
|
/*
|
||||||
|
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package minio
|
||||||
|
|
||||||
|
import (
|
||||||
|
"net"
|
||||||
|
"net/http"
|
||||||
|
"net/url"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// MaxRetry is the maximum number of retries before stopping.
|
||||||
|
var MaxRetry = 5
|
||||||
|
|
||||||
|
// MaxJitter will randomize over the full exponential backoff time
|
||||||
|
const MaxJitter = 1.0
|
||||||
|
|
||||||
|
// NoJitter disables the use of jitter for randomizing the exponential backoff time
|
||||||
|
const NoJitter = 0.0
|
||||||
|
|
||||||
|
// newRetryTimer creates a timer with exponentially increasing delays
|
||||||
|
// until the maximum retry attempts are reached.
|
||||||
|
func (c Client) newRetryTimer(maxRetry int, unit time.Duration, cap time.Duration, jitter float64, doneCh chan struct{}) <-chan int {
|
||||||
|
attemptCh := make(chan int)
|
||||||
|
|
||||||
|
// computes the exponential backoff duration according to
|
||||||
|
// https://www.awsarchitectureblog.com/2015/03/backoff.html
|
||||||
|
exponentialBackoffWait := func(attempt int) time.Duration {
|
||||||
|
// normalize jitter to the range [0, 1.0]
|
||||||
|
if jitter < NoJitter {
|
||||||
|
jitter = NoJitter
|
||||||
|
}
|
||||||
|
if jitter > MaxJitter {
|
||||||
|
jitter = MaxJitter
|
||||||
|
}
|
||||||
|
|
||||||
|
//sleep = random_between(0, min(cap, base * 2 ** attempt))
|
||||||
|
sleep := unit * time.Duration(1<<uint(attempt))
|
||||||
|
if sleep > cap {
|
||||||
|
sleep = cap
|
||||||
|
}
|
||||||
|
if jitter != NoJitter {
|
||||||
|
sleep -= time.Duration(c.random.Float64() * float64(sleep) * jitter)
|
||||||
|
}
|
||||||
|
return sleep
|
||||||
|
}
|
||||||
|
|
||||||
|
go func() {
|
||||||
|
defer close(attemptCh)
|
||||||
|
for i := 0; i < maxRetry; i++ {
|
||||||
|
select {
|
||||||
|
// Attempts start from 1.
|
||||||
|
case attemptCh <- i + 1:
|
||||||
|
case <-doneCh:
|
||||||
|
// Stop the routine.
|
||||||
|
return
|
||||||
|
}
|
||||||
|
time.Sleep(exponentialBackoffWait(i))
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
return attemptCh
|
||||||
|
}
|
||||||
|
|
||||||
|
// isNetErrorRetryable - is network error retryable.
|
||||||
|
func isNetErrorRetryable(err error) bool {
|
||||||
|
switch err.(type) {
|
||||||
|
case net.Error:
|
||||||
|
switch err.(type) {
|
||||||
|
case *net.DNSError, *net.OpError, net.UnknownNetworkError:
|
||||||
|
return true
|
||||||
|
case *url.Error:
|
||||||
|
// For a URL error, where it replies back "connection closed"
|
||||||
|
// retry again.
|
||||||
|
if strings.Contains(err.Error(), "Connection closed by foreign host") {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
if strings.Contains(err.Error(), "net/http: TLS handshake timeout") {
|
||||||
|
// If error is - tlsHandshakeTimeoutError, retry.
|
||||||
|
return true
|
||||||
|
} else if strings.Contains(err.Error(), "i/o timeout") {
|
||||||
|
// If error is - tcp timeoutError, retry.
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// List of AWS S3 error codes which are retryable.
|
||||||
|
var retryableS3Codes = map[string]struct{}{
|
||||||
|
"RequestError": {},
|
||||||
|
"RequestTimeout": {},
|
||||||
|
"Throttling": {},
|
||||||
|
"ThrottlingException": {},
|
||||||
|
"RequestLimitExceeded": {},
|
||||||
|
"RequestThrottled": {},
|
||||||
|
"InternalError": {},
|
||||||
|
"ExpiredToken": {},
|
||||||
|
"ExpiredTokenException": {},
|
||||||
|
// Add more AWS S3 codes here.
|
||||||
|
}
|
||||||
|
|
||||||
|
// isS3CodeRetryable - is s3 error code retryable.
|
||||||
|
func isS3CodeRetryable(s3Code string) (ok bool) {
|
||||||
|
_, ok = retryableS3Codes[s3Code]
|
||||||
|
return ok
|
||||||
|
}
|
||||||
|
|
||||||
|
// List of HTTP status codes which are retryable.
|
||||||
|
var retryableHTTPStatusCodes = map[int]struct{}{
|
||||||
|
429: {}, // http.StatusTooManyRequests is not part of the Go 1.5 library, yet
|
||||||
|
http.StatusInternalServerError: {},
|
||||||
|
http.StatusBadGateway: {},
|
||||||
|
http.StatusServiceUnavailable: {},
|
||||||
|
// Add more HTTP status codes here.
|
||||||
|
}
|
||||||
|
|
||||||
|
// isHTTPStatusRetryable - is HTTP error code retryable.
|
||||||
|
func isHTTPStatusRetryable(httpStatusCode int) (ok bool) {
|
||||||
|
_, ok = retryableHTTPStatusCodes[httpStatusCode]
|
||||||
|
return ok
|
||||||
|
}
|
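The new retry.go above computes each wait as min(cap, unit * 2^attempt), reduced by a random jitter fraction. A standalone sketch of that backoff formula follows; it is an approximation for illustration, not the library's newRetryTimer method, and the unit/cap values are arbitrary.

// backoff_sketch.go - capped exponential backoff with jitter.
package main

import (
	"fmt"
	"math/rand"
	"time"
)

// backoff returns min(maxWait, unit * 2^attempt), shortened by a random
// fraction of up to jitter (0.0 disables jitter, 1.0 is full jitter).
func backoff(attempt int, unit, maxWait time.Duration, jitter float64, rnd *rand.Rand) time.Duration {
	sleep := unit * time.Duration(1<<uint(attempt))
	if sleep > maxWait {
		sleep = maxWait
	}
	if jitter > 0 {
		sleep -= time.Duration(rnd.Float64() * float64(sleep) * jitter)
	}
	return sleep
}

func main() {
	rnd := rand.New(rand.NewSource(time.Now().UnixNano()))
	for attempt := 0; attempt < 5; attempt++ {
		fmt.Printf("attempt %d: sleep %v\n", attempt+1, backoff(attempt, time.Second, 30*time.Second, 1.0, rnd))
	}
}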
@ -17,6 +17,7 @@
|
|||||||
package minio
|
package minio
|
||||||
|
|
||||||
// awsS3EndpointMap Amazon S3 endpoint map.
|
// awsS3EndpointMap Amazon S3 endpoint map.
|
||||||
|
// "cn-north-1" adds support for AWS China.
|
||||||
var awsS3EndpointMap = map[string]string{
|
var awsS3EndpointMap = map[string]string{
|
||||||
"us-east-1": "s3.amazonaws.com",
|
"us-east-1": "s3.amazonaws.com",
|
||||||
"us-west-2": "s3-us-west-2.amazonaws.com",
|
"us-west-2": "s3-us-west-2.amazonaws.com",
|
||||||
@ -27,6 +28,7 @@ var awsS3EndpointMap = map[string]string{
|
|||||||
"ap-northeast-1": "s3-ap-northeast-1.amazonaws.com",
|
"ap-northeast-1": "s3-ap-northeast-1.amazonaws.com",
|
||||||
"ap-northeast-2": "s3-ap-northeast-2.amazonaws.com",
|
"ap-northeast-2": "s3-ap-northeast-2.amazonaws.com",
|
||||||
"sa-east-1": "s3-sa-east-1.amazonaws.com",
|
"sa-east-1": "s3-sa-east-1.amazonaws.com",
|
||||||
|
"cn-north-1": "s3.cn-north-1.amazonaws.com.cn",
|
||||||
}
|
}
|
||||||
|
|
||||||
// getS3Endpoint get Amazon S3 endpoint based on the bucket location.
|
// getS3Endpoint get Amazon S3 endpoint based on the bucket location.
|
||||||
|
64 vendor/src/github.com/minio/minio-go/test-utils_test.go (vendored, new file)
@ -0,0 +1,64 @@
|
|||||||
|
/*
|
||||||
|
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package minio
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/xml"
|
||||||
|
"io/ioutil"
|
||||||
|
"net/http"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Contains common used utilities for tests.
|
||||||
|
|
||||||
|
// APIError Used for mocking error response from server.
|
||||||
|
type APIError struct {
|
||||||
|
Code string
|
||||||
|
Description string
|
||||||
|
HTTPStatusCode int
|
||||||
|
}
|
||||||
|
|
||||||
|
// Mocks XML error response from the server.
|
||||||
|
func generateErrorResponse(resp *http.Response, APIErr APIError, bucketName string) *http.Response {
|
||||||
|
// generate error response.
|
||||||
|
errorResponse := getAPIErrorResponse(APIErr, bucketName)
|
||||||
|
encodedErrorResponse := encodeResponse(errorResponse)
|
||||||
|
// write Header.
|
||||||
|
resp.StatusCode = APIErr.HTTPStatusCode
|
||||||
|
resp.Body = ioutil.NopCloser(bytes.NewBuffer(encodedErrorResponse))
|
||||||
|
|
||||||
|
return resp
|
||||||
|
}
|
||||||
|
|
||||||
|
// getErrorResponse gets in standard error and resource value and
|
||||||
|
// provides a encodable populated response values.
|
||||||
|
func getAPIErrorResponse(err APIError, bucketName string) ErrorResponse {
|
||||||
|
var errResp = ErrorResponse{}
|
||||||
|
errResp.Code = err.Code
|
||||||
|
errResp.Message = err.Description
|
||||||
|
errResp.BucketName = bucketName
|
||||||
|
return errResp
|
||||||
|
}
|
||||||
|
|
||||||
|
// Encodes the response headers into XML format.
|
||||||
|
func encodeResponse(response interface{}) []byte {
|
||||||
|
var bytesBuffer bytes.Buffer
|
||||||
|
bytesBuffer.WriteString(xml.Header)
|
||||||
|
encode := xml.NewEncoder(&bytesBuffer)
|
||||||
|
encode.Encode(response)
|
||||||
|
return bytesBuffer.Bytes()
|
||||||
|
}
|
56 vendor/src/github.com/minio/minio-go/utils.go (vendored)
@ -17,7 +17,9 @@
|
|||||||
package minio
|
package minio
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"bytes"
|
||||||
"crypto/hmac"
|
"crypto/hmac"
|
||||||
|
"crypto/md5"
|
||||||
"crypto/sha256"
|
"crypto/sha256"
|
||||||
"encoding/hex"
|
"encoding/hex"
|
||||||
"encoding/xml"
|
"encoding/xml"
|
||||||
@ -27,6 +29,7 @@ import (
|
|||||||
"net/http"
|
"net/http"
|
||||||
"net/url"
|
"net/url"
|
||||||
"regexp"
|
"regexp"
|
||||||
|
"sort"
|
||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
"unicode/utf8"
|
"unicode/utf8"
|
||||||
@ -45,6 +48,13 @@ func sum256(data []byte) []byte {
|
|||||||
return hash.Sum(nil)
|
return hash.Sum(nil)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// sumMD5 calculate sumMD5 sum for an input byte array.
|
||||||
|
func sumMD5(data []byte) []byte {
|
||||||
|
hash := md5.New()
|
||||||
|
hash.Write(data)
|
||||||
|
return hash.Sum(nil)
|
||||||
|
}
|
||||||
|
|
||||||
// sumHMAC calculate hmac between two input byte array.
|
// sumHMAC calculate hmac between two input byte array.
|
||||||
func sumHMAC(key []byte, data []byte) []byte {
|
func sumHMAC(key []byte, data []byte) []byte {
|
||||||
hash := hmac.New(sha256.New, key)
|
hash := hmac.New(sha256.New, key)
|
||||||
@ -163,6 +173,23 @@ func isAmazonEndpoint(endpointURL *url.URL) bool {
|
|||||||
if endpointURL.Host == "s3.amazonaws.com" {
|
if endpointURL.Host == "s3.amazonaws.com" {
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
if isAmazonChinaEndpoint(endpointURL) {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// Match if it is exactly Amazon S3 China endpoint.
|
||||||
|
// Customers who wish to use the new Beijing Region are required to sign up for a separate set of account credentials unique to the China (Beijing) Region.
|
||||||
|
// Customers with existing AWS credentials will not be able to access resources in the new Region, and vice versa."
|
||||||
|
// For more info https://aws.amazon.com/about-aws/whats-new/2013/12/18/announcing-the-aws-china-beijing-region/
|
||||||
|
func isAmazonChinaEndpoint(endpointURL *url.URL) bool {
|
||||||
|
if endpointURL == nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if endpointURL.Host == "s3.cn-north-1.amazonaws.com.cn" {
|
||||||
|
return true
|
||||||
|
}
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -183,7 +210,7 @@ func isValidEndpointURL(endpointURL *url.URL) error {
|
|||||||
return ErrInvalidArgument("Endpoint url cannot be empty.")
|
return ErrInvalidArgument("Endpoint url cannot be empty.")
|
||||||
}
|
}
|
||||||
if endpointURL.Path != "/" && endpointURL.Path != "" {
|
if endpointURL.Path != "/" && endpointURL.Path != "" {
|
||||||
return ErrInvalidArgument("Endpoing url cannot have fully qualified paths.")
|
return ErrInvalidArgument("Endpoint url cannot have fully qualified paths.")
|
||||||
}
|
}
|
||||||
if strings.Contains(endpointURL.Host, ".amazonaws.com") {
|
if strings.Contains(endpointURL.Host, ".amazonaws.com") {
|
||||||
if !isAmazonEndpoint(endpointURL) {
|
if !isAmazonEndpoint(endpointURL) {
|
||||||
@ -229,7 +256,7 @@ func isValidBucketName(bucketName string) error {
|
|||||||
if bucketName[0] == '.' || bucketName[len(bucketName)-1] == '.' {
|
if bucketName[0] == '.' || bucketName[len(bucketName)-1] == '.' {
|
||||||
return ErrInvalidBucketName("Bucket name cannot start or end with a '.' dot.")
|
return ErrInvalidBucketName("Bucket name cannot start or end with a '.' dot.")
|
||||||
}
|
}
|
||||||
if match, _ := regexp.MatchString("\\.\\.", bucketName); match == true {
|
if match, _ := regexp.MatchString("\\.\\.", bucketName); match {
|
||||||
return ErrInvalidBucketName("Bucket name cannot have successive periods.")
|
return ErrInvalidBucketName("Bucket name cannot have successive periods.")
|
||||||
}
|
}
|
||||||
if !validBucketName.MatchString(bucketName) {
|
if !validBucketName.MatchString(bucketName) {
|
||||||
@ -264,6 +291,31 @@ func isValidObjectPrefix(objectPrefix string) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// queryEncode - encodes query values in their URL encoded form.
|
||||||
|
func queryEncode(v url.Values) string {
|
||||||
|
if v == nil {
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
var buf bytes.Buffer
|
||||||
|
keys := make([]string, 0, len(v))
|
||||||
|
for k := range v {
|
||||||
|
keys = append(keys, k)
|
||||||
|
}
|
||||||
|
sort.Strings(keys)
|
||||||
|
for _, k := range keys {
|
||||||
|
vs := v[k]
|
||||||
|
prefix := urlEncodePath(k) + "="
|
||||||
|
for _, v := range vs {
|
||||||
|
if buf.Len() > 0 {
|
||||||
|
buf.WriteByte('&')
|
||||||
|
}
|
||||||
|
buf.WriteString(prefix)
|
||||||
|
buf.WriteString(urlEncodePath(v))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return buf.String()
|
||||||
|
}
|
||||||
|
|
||||||
// urlEncodePath encode the strings from UTF-8 byte representations to HTML hex escape sequences
|
// urlEncodePath encode the strings from UTF-8 byte representations to HTML hex escape sequences
|
||||||
//
|
//
|
||||||
// This is necessary since regular url.Parse() and url.Encode() functions do not support UTF-8
|
// This is necessary since regular url.Parse() and url.Encode() functions do not support UTF-8
|
||||||
|
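The queryEncode helper added to utils.go above sorts the query keys and percent-encodes each key=value pair. A standalone approximation of that idea follows, using url.QueryEscape from the standard library; note that the real helper uses urlEncodePath, which differs from QueryEscape for a few characters (for example spaces become %20, not '+').

// queryencode_sketch.go - sorted, percent-encoded query string.
package main

import (
	"fmt"
	"net/url"
	"sort"
	"strings"
)

func encodeSorted(v url.Values) string {
	keys := make([]string, 0, len(v))
	for k := range v {
		keys = append(keys, k)
	}
	sort.Strings(keys)

	var parts []string
	for _, k := range keys {
		for _, val := range v[k] {
			parts = append(parts, url.QueryEscape(k)+"="+url.QueryEscape(val))
		}
	}
	return strings.Join(parts, "&")
}

func main() {
	v := make(url.Values)
	v.Add("prefix", "test@123")
	v.Add("prefix", "test@456")
	// Prints: prefix=test%40123&prefix=test%40456
	fmt.Println(encodeSorted(v))
}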
430 vendor/src/github.com/minio/minio-go/utils_test.go (vendored, new file)
@ -0,0 +1,430 @@
|
|||||||
|
/*
|
||||||
|
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
package minio
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"net/url"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Tests for 'getEndpointURL(endpoint string, inSecure bool)'.
|
||||||
|
func TestGetEndpointURL(t *testing.T) {
|
||||||
|
testCases := []struct {
|
||||||
|
// Inputs.
|
||||||
|
endPoint string
|
||||||
|
inSecure bool
|
||||||
|
|
||||||
|
// Expected result.
|
||||||
|
result string
|
||||||
|
err error
|
||||||
|
// Flag indicating whether the test is expected to pass or not.
|
||||||
|
shouldPass bool
|
||||||
|
}{
|
||||||
|
{"s3.amazonaws.com", false, "https://s3.amazonaws.com", nil, true},
|
||||||
|
{"s3.cn-north-1.amazonaws.com.cn", false, "https://s3.cn-north-1.amazonaws.com.cn", nil, true},
|
||||||
|
{"s3.amazonaws.com", true, "http://s3.amazonaws.com", nil, true},
|
||||||
|
{"s3.cn-north-1.amazonaws.com.cn", true, "http://s3.cn-north-1.amazonaws.com.cn", nil, true},
|
||||||
|
{"192.168.1.1:9000", true, "http://192.168.1.1:9000", nil, true},
|
||||||
|
{"192.168.1.1:9000", false, "https://192.168.1.1:9000", nil, true},
|
||||||
|
{"192.168.1.1::9000", false, "", fmt.Errorf("too many colons in address %s", "192.168.1.1::9000"), false},
|
||||||
|
{"13333.123123.-", false, "", fmt.Errorf("Endpoint: %s does not follow ip address or domain name standards.", "13333.123123.-"), false},
|
||||||
|
{"13333.123123.-", false, "", fmt.Errorf("Endpoint: %s does not follow ip address or domain name standards.", "13333.123123.-"), false},
|
||||||
|
{"s3.amazonaws.com:443", false, "", fmt.Errorf("Amazon S3 endpoint should be 's3.amazonaws.com'."), false},
|
||||||
|
{"storage.googleapis.com:4000", false, "", fmt.Errorf("Google Cloud Storage endpoint should be 'storage.googleapis.com'."), false},
|
||||||
|
{"s3.aamzza.-", false, "", fmt.Errorf("Endpoint: %s does not follow ip address or domain name standards.", "s3.aamzza.-"), false},
|
||||||
|
{"", false, "", fmt.Errorf("Endpoint: does not follow ip address or domain name standards."), false},
|
||||||
|
}
|
||||||
|
|
||||||
|
for i, testCase := range testCases {
|
||||||
|
result, err := getEndpointURL(testCase.endPoint, testCase.inSecure)
|
||||||
|
if err != nil && testCase.shouldPass {
|
||||||
|
t.Errorf("Test %d: Expected to pass, but failed with: <ERROR> %s", i+1, err.Error())
|
||||||
|
}
|
||||||
|
if err == nil && !testCase.shouldPass {
|
||||||
|
t.Errorf("Test %d: Expected to fail with <ERROR> \"%s\", but passed instead", i+1, testCase.err.Error())
|
||||||
|
}
|
||||||
|
// Failed as expected, but does it fail for the expected reason.
|
||||||
|
if err != nil && !testCase.shouldPass {
|
||||||
|
if err.Error() != testCase.err.Error() {
|
||||||
|
t.Errorf("Test %d: Expected to fail with error \"%s\", but instead failed with error \"%s\" instead", i+1, testCase.err.Error(), err.Error())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Test passes as expected, but the output values are verified for correctness here.
|
||||||
|
if err == nil && testCase.shouldPass {
|
||||||
|
if testCase.result != result.String() {
|
||||||
|
t.Errorf("Test %d: Expected the result Url to be \"%s\", but found \"%s\" instead", i+1, testCase.result, result.String())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Tests for 'isValidDomain(host string) bool'.
|
||||||
|
func TestIsValidDomain(t *testing.T) {
|
||||||
|
testCases := []struct {
|
||||||
|
// Input.
|
||||||
|
host string
|
||||||
|
// Expected result.
|
||||||
|
result bool
|
||||||
|
}{
|
||||||
|
{"s3.amazonaws.com", true},
|
||||||
|
{"s3.cn-north-1.amazonaws.com.cn", true},
|
||||||
|
{"s3.amazonaws.com_", false},
|
||||||
|
{"%$$$", false},
|
||||||
|
{"s3.amz.test.com", true},
|
||||||
|
{"s3.%%", false},
|
||||||
|
{"localhost", true},
|
||||||
|
{"-localhost", false},
|
||||||
|
{"", false},
|
||||||
|
{"\n \t", false},
|
||||||
|
{" ", false},
|
||||||
|
}
|
||||||
|
|
||||||
|
for i, testCase := range testCases {
|
||||||
|
result := isValidDomain(testCase.host)
|
||||||
|
if testCase.result != result {
|
||||||
|
t.Errorf("Test %d: Expected isValidDomain test to be '%v', but found '%v' instead", i+1, testCase.result, result)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Tests validate end point validator.
|
||||||
|
func TestIsValidEndpointURL(t *testing.T) {
|
||||||
|
testCases := []struct {
|
||||||
|
url string
|
||||||
|
err error
|
||||||
|
// Flag indicating whether the test is expected to pass or not.
|
||||||
|
shouldPass bool
|
||||||
|
}{
|
||||||
|
{"", nil, true},
|
||||||
|
{"/", nil, true},
|
||||||
|
{"https://s3.amazonaws.com", nil, true},
|
||||||
|
{"https://s3.cn-north-1.amazonaws.com.cn", nil, true},
|
||||||
|
{"https://s3.amazonaws.com/", nil, true},
|
||||||
|
{"https://storage.googleapis.com/", nil, true},
|
||||||
|
{"192.168.1.1", fmt.Errorf("Endpoint url cannot have fully qualified paths."), false},
|
||||||
|
{"https://amazon.googleapis.com/", fmt.Errorf("Google Cloud Storage endpoint should be 'storage.googleapis.com'."), false},
|
||||||
|
{"https://storage.googleapis.com/bucket/", fmt.Errorf("Endpoint url cannot have fully qualified paths."), false},
|
||||||
|
{"https://z3.amazonaws.com", fmt.Errorf("Amazon S3 endpoint should be 's3.amazonaws.com'."), false},
|
||||||
|
{"https://s3.amazonaws.com/bucket/object", fmt.Errorf("Endpoint url cannot have fully qualified paths."), false},
|
||||||
|
}
|
||||||
|
|
||||||
|
for i, testCase := range testCases {
|
||||||
|
endPoint, e := url.Parse(testCase.url)
|
||||||
|
if e != nil {
|
||||||
|
t.Fatalf("Test %d: Fatal err \"%s\"", i+1, e.Error())
|
||||||
|
}
|
||||||
|
err := isValidEndpointURL(endPoint)
|
||||||
|
if err != nil && testCase.shouldPass {
|
||||||
|
t.Errorf("Test %d: Expected to pass, but failed with: <ERROR> %s", i+1, err.Error())
|
||||||
|
}
|
||||||
|
if err == nil && !testCase.shouldPass {
|
||||||
|
t.Errorf("Test %d: Expected to fail with <ERROR> \"%s\", but passed instead", i+1, testCase.err.Error())
|
||||||
|
}
|
||||||
|
// Failed as expected, but does it fail for the expected reason.
|
||||||
|
if err != nil && !testCase.shouldPass {
|
||||||
|
if err.Error() != testCase.err.Error() {
|
||||||
|
t.Errorf("Test %d: Expected to fail with error \"%s\", but instead failed with error \"%s\" instead", i+1, testCase.err.Error(), err.Error())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Tests validate IP address validator.
|
||||||
|
func TestIsValidIP(t *testing.T) {
|
||||||
|
testCases := []struct {
|
||||||
|
// Input.
|
||||||
|
ip string
|
||||||
|
// Expected result.
|
||||||
|
result bool
|
||||||
|
}{
|
||||||
|
{"192.168.1.1", true},
|
||||||
|
{"192.168.1", false},
|
||||||
|
{"192.168.1.1.1", false},
|
||||||
|
{"-192.168.1.1", false},
|
||||||
|
{"260.192.1.1", false},
|
||||||
|
}
|
||||||
|
|
||||||
|
for i, testCase := range testCases {
|
||||||
|
result := isValidIP(testCase.ip)
|
||||||
|
if testCase.result != result {
|
||||||
|
t.Errorf("Test %d: Expected isValidIP to be '%v' for input \"%s\", but found it to be '%v' instead", i+1, testCase.result, testCase.ip, result)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
// Tests validate virtual host validator.
|
||||||
|
func TestIsVirtualHostSupported(t *testing.T) {
|
||||||
|
testCases := []struct {
|
||||||
|
url string
|
||||||
|
bucket string
|
||||||
|
// Expected result.
|
||||||
|
result bool
|
||||||
|
}{
|
||||||
|
{"https://s3.amazonaws.com", "my-bucket", true},
|
||||||
|
{"https://s3.cn-north-1.amazonaws.com.cn", "my-bucket", true},
|
||||||
|
{"https://s3.amazonaws.com", "my-bucket.", false},
|
||||||
|
{"https://amazons3.amazonaws.com", "my-bucket.", false},
|
||||||
|
{"https://storage.googleapis.com/", "my-bucket", true},
|
||||||
|
{"https://mystorage.googleapis.com/", "my-bucket", false},
|
||||||
|
}
|
||||||
|
|
||||||
|
for i, testCase := range testCases {
|
||||||
|
endPoint, e := url.Parse(testCase.url)
|
||||||
|
if e != nil {
|
||||||
|
t.Fatalf("Test %d: Fatal err \"%s\"", i+1, e.Error())
|
||||||
|
}
|
||||||
|
result := isVirtualHostSupported(endPoint, testCase.bucket)
|
||||||
|
if testCase.result != result {
|
||||||
|
t.Errorf("Test %d: Expected isVirtualHostSupported to be '%v' for input url \"%s\" and bucket \"%s\", but found it to be '%v' instead", i+1, testCase.result, testCase.url, testCase.bucket, result)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Tests validate Amazon endpoint validator.
|
||||||
|
func TestIsAmazonEndpoint(t *testing.T) {
|
||||||
|
testCases := []struct {
|
||||||
|
url string
|
||||||
|
// Expected result.
|
||||||
|
result bool
|
||||||
|
}{
|
||||||
|
{"https://192.168.1.1", false},
|
||||||
|
{"192.168.1.1", false},
|
||||||
|
{"http://storage.googleapis.com", false},
|
||||||
|
{"https://storage.googleapis.com", false},
|
||||||
|
{"storage.googleapis.com", false},
|
||||||
|
{"s3.amazonaws.com", false},
|
||||||
|
{"https://amazons3.amazonaws.com", false},
|
||||||
|
{"-192.168.1.1", false},
|
||||||
|
{"260.192.1.1", false},
|
||||||
|
// valid inputs.
|
||||||
|
{"https://s3.amazonaws.com", true},
|
||||||
|
{"https://s3.cn-north-1.amazonaws.com.cn", true},
|
||||||
|
}
|
||||||
|
|
||||||
|
for i, testCase := range testCases {
|
||||||
|
endPoint, e := url.Parse(testCase.url)
|
||||||
|
if e != nil {
|
||||||
|
t.Fatalf("Test %d: Fatal err \"%s\"", i+1, e.Error())
|
||||||
|
}
|
||||||
|
result := isAmazonEndpoint(endPoint)
|
||||||
|
if testCase.result != result {
|
||||||
|
t.Errorf("Test %d: Expected isAmazonEndpoint to be '%v' for input \"%s\", but found it to be '%v' instead", i+1, testCase.result, testCase.url, result)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
// Tests validate Amazon S3 China endpoint validator.
|
||||||
|
func TestIsAmazonChinaEndpoint(t *testing.T) {
|
||||||
|
testCases := []struct {
|
||||||
|
url string
|
||||||
|
// Expected result.
|
||||||
|
result bool
|
||||||
|
}{
|
||||||
|
{"https://192.168.1.1", false},
|
||||||
|
{"192.168.1.1", false},
|
||||||
|
{"http://storage.googleapis.com", false},
|
||||||
|
{"https://storage.googleapis.com", false},
|
||||||
|
{"storage.googleapis.com", false},
|
||||||
|
{"s3.amazonaws.com", false},
|
||||||
|
{"https://amazons3.amazonaws.com", false},
|
||||||
|
{"-192.168.1.1", false},
|
||||||
|
{"260.192.1.1", false},
|
||||||
|
// s3.amazonaws.com is not a valid Amazon S3 China end point.
|
||||||
|
{"https://s3.amazonaws.com", false},
|
||||||
|
// valid input.
|
||||||
|
{"https://s3.cn-north-1.amazonaws.com.cn", true},
|
||||||
|
}
|
||||||
|
|
||||||
|
for i, testCase := range testCases {
|
||||||
|
endPoint, e := url.Parse(testCase.url)
|
||||||
|
if e != nil {
|
||||||
|
t.Fatalf("Test %d: Fatal err \"%s\"", i+1, e.Error())
|
||||||
|
}
|
||||||
|
result := isAmazonChinaEndpoint(endPoint)
|
||||||
|
if testCase.result != result {
|
||||||
|
t.Errorf("Test %d: Expected isAmazonEndpoint to be '%v' for input \"%s\", but found it to be '%v' instead", i+1, testCase.result, testCase.url, result)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
// Tests validate Google Cloud end point validator.
|
||||||
|
func TestIsGoogleEndpoint(t *testing.T) {
|
||||||
|
testCases := []struct {
|
||||||
|
url string
|
||||||
|
// Expected result.
|
||||||
|
result bool
|
||||||
|
}{
|
||||||
|
{"192.168.1.1", false},
|
||||||
|
{"https://192.168.1.1", false},
|
||||||
|
{"s3.amazonaws.com", false},
|
||||||
|
{"http://s3.amazonaws.com", false},
|
||||||
|
{"https://s3.amazonaws.com", false},
|
||||||
|
{"https://s3.cn-north-1.amazonaws.com.cn", false},
|
||||||
|
{"-192.168.1.1", false},
|
||||||
|
{"260.192.1.1", false},
|
||||||
|
// valid inputs.
|
||||||
|
{"http://storage.googleapis.com", true},
|
||||||
|
{"https://storage.googleapis.com", true},
|
||||||
|
}
|
||||||
|
|
||||||
|
for i, testCase := range testCases {
|
||||||
|
endPoint, e := url.Parse(testCase.url)
|
||||||
|
if e != nil {
|
||||||
|
t.Fatalf("Test %d: Fatal err \"%s\"", i+1, e.Error())
|
||||||
|
}
|
||||||
|
result := isGoogleEndpoint(endPoint)
|
||||||
|
if testCase.result != result {
|
||||||
|
t.Errorf("Test %d: Expected isGoogleEndpoint to be '%v' for input \"%s\", but found it to be '%v' instead", i+1, testCase.result, testCase.url, result)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
// Tests validate the expiry time validator.
|
||||||
|
func TestIsValidExpiry(t *testing.T) {
|
||||||
|
testCases := []struct {
|
||||||
|
// Input.
|
||||||
|
duration time.Duration
|
||||||
|
// Expected result.
|
||||||
|
err error
|
||||||
|
// Flag to indicate whether the test should pass.
|
||||||
|
shouldPass bool
|
||||||
|
}{
|
||||||
|
{100 * time.Millisecond, fmt.Errorf("Expires cannot be lesser than 1 second."), false},
|
||||||
|
{604801 * time.Second, fmt.Errorf("Expires cannot be greater than 7 days."), false},
|
||||||
|
{0 * time.Second, fmt.Errorf("Expires cannot be lesser than 1 second."), false},
|
||||||
|
{1 * time.Second, nil, true},
|
||||||
|
{10000 * time.Second, nil, true},
|
||||||
|
{999 * time.Second, nil, true},
|
||||||
|
}
|
||||||
|
|
||||||
|
for i, testCase := range testCases {
|
||||||
|
err := isValidExpiry(testCase.duration)
|
||||||
|
if err != nil && testCase.shouldPass {
|
||||||
|
t.Errorf("Test %d: Expected to pass, but failed with: <ERROR> %s", i+1, err.Error())
|
||||||
|
}
|
||||||
|
if err == nil && !testCase.shouldPass {
|
||||||
|
t.Errorf("Test %d: Expected to fail with <ERROR> \"%s\", but passed instead", i+1, testCase.err.Error())
|
||||||
|
}
|
||||||
|
// Failed as expected, but does it fail for the expected reason.
|
||||||
|
if err != nil && !testCase.shouldPass {
|
||||||
|
if err.Error() != testCase.err.Error() {
|
||||||
|
t.Errorf("Test %d: Expected to fail with error \"%s\", but instead failed with error \"%s\" instead", i+1, testCase.err.Error(), err.Error())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Tests validate the bucket name validator.
|
||||||
|
func TestIsValidBucketName(t *testing.T) {
|
||||||
|
testCases := []struct {
|
||||||
|
// Input.
|
||||||
|
bucketName string
|
||||||
|
// Expected result.
|
||||||
|
err error
|
||||||
|
// Flag to indicate whether test should Pass.
|
||||||
|
shouldPass bool
|
||||||
|
}{
|
||||||
|
{".mybucket", ErrInvalidBucketName("Bucket name cannot start or end with a '.' dot."), false},
|
||||||
|
{"mybucket.", ErrInvalidBucketName("Bucket name cannot start or end with a '.' dot."), false},
|
||||||
|
{"mybucket-", ErrInvalidBucketName("Bucket name contains invalid characters."), false},
|
||||||
|
{"my", ErrInvalidBucketName("Bucket name cannot be smaller than 3 characters."), false},
|
||||||
|
{"", ErrInvalidBucketName("Bucket name cannot be empty."), false},
|
||||||
|
{"my..bucket", ErrInvalidBucketName("Bucket name cannot have successive periods."), false},
|
||||||
|
{"my.bucket.com", nil, true},
|
||||||
|
{"my-bucket", nil, true},
|
||||||
|
{"123my-bucket", nil, true},
|
||||||
|
}
|
||||||
|
|
||||||
|
for i, testCase := range testCases {
|
||||||
|
err := isValidBucketName(testCase.bucketName)
|
||||||
|
if err != nil && testCase.shouldPass {
|
||||||
|
t.Errorf("Test %d: Expected to pass, but failed with: <ERROR> %s", i+1, err.Error())
|
||||||
|
}
|
||||||
|
if err == nil && !testCase.shouldPass {
|
||||||
|
t.Errorf("Test %d: Expected to fail with <ERROR> \"%s\", but passed instead", i+1, testCase.err.Error())
|
||||||
|
}
|
||||||
|
// Failed as expected, but does it fail for the expected reason.
|
||||||
|
if err != nil && !testCase.shouldPass {
|
||||||
|
if err.Error() != testCase.err.Error() {
|
||||||
|
t.Errorf("Test %d: Expected to fail with error \"%s\", but instead failed with error \"%s\" instead", i+1, testCase.err.Error(), err.Error())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
// Tests validate the query encoder.
|
||||||
|
func TestQueryEncode(t *testing.T) {
|
||||||
|
testCases := []struct {
|
||||||
|
queryKey string
|
||||||
|
valueToEncode []string
|
||||||
|
// Expected result.
|
||||||
|
result string
|
||||||
|
}{
|
||||||
|
{"prefix", []string{"test@123", "test@456"}, "prefix=test%40123&prefix=test%40456"},
|
||||||
|
{"@prefix", []string{"test@123"}, "%40prefix=test%40123"},
|
||||||
|
{"prefix", []string{"test#123"}, "prefix=test%23123"},
|
||||||
|
{"prefix#", []string{"test#123"}, "prefix%23=test%23123"},
|
||||||
|
{"prefix", []string{"test123"}, "prefix=test123"},
|
||||||
|
{"prefix", []string{"test本語123", "test123"}, "prefix=test%E6%9C%AC%E8%AA%9E123&prefix=test123"},
|
||||||
|
}
|
||||||
|
|
||||||
|
for i, testCase := range testCases {
|
||||||
|
urlValues := make(url.Values)
|
||||||
|
for _, valueToEncode := range testCase.valueToEncode {
|
||||||
|
urlValues.Add(testCase.queryKey, valueToEncode)
|
||||||
|
}
|
||||||
|
result := queryEncode(urlValues)
|
||||||
|
if testCase.result != result {
|
||||||
|
t.Errorf("Test %d: Expected queryEncode result to be \"%s\", but found it to be \"%s\" instead", i+1, testCase.result, result)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Tests validate the URL path encoder.
|
||||||
|
func TestUrlEncodePath(t *testing.T) {
|
||||||
|
testCases := []struct {
|
||||||
|
// Input.
|
||||||
|
inputStr string
|
||||||
|
// Expected result.
|
||||||
|
result string
|
||||||
|
}{
|
||||||
|
{"thisisthe%url", "thisisthe%25url"},
|
||||||
|
{"本語", "%E6%9C%AC%E8%AA%9E"},
|
||||||
|
{"本語.1", "%E6%9C%AC%E8%AA%9E.1"},
|
||||||
|
{">123", "%3E123"},
|
||||||
|
{"myurl#link", "myurl%23link"},
|
||||||
|
{"space in url", "space%20in%20url"},
|
||||||
|
{"url+path", "url%2Bpath"},
|
||||||
|
}
|
||||||
|
|
||||||
|
for i, testCase := range testCases {
|
||||||
|
result := urlEncodePath(testCase.inputStr)
|
||||||
|
if testCase.result != result {
|
||||||
|
t.Errorf("Test %d: Expected queryEncode result to be \"%s\", but found it to be \"%s\" instead", i+1, testCase.result, result)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|