
Merge pull request #1475 from restic/update-minio-go

Update to minio-go v4
Alexander Neumann 2017-12-09 10:12:05 +01:00
commit 4f46b4f393
136 changed files with 5369 additions and 2415 deletions

Gopkg.lock generated

@@ -100,8 +100,8 @@
 [[projects]]
 name = "github.com/minio/minio-go"
 packages = [".","pkg/credentials","pkg/encrypt","pkg/policy","pkg/s3signer","pkg/s3utils","pkg/set"]
-revision = "4e0f567303d4cc90ceb055a451959fb9fc391fb9"
-version = "3.0.3"
+revision = "57a8ae886b49af6eb0d2c27c2d007ed2f71e1da5"
+version = "4.0.3"
 [[projects]]
 branch = "master"
@@ -214,6 +214,6 @@
 [solve-meta]
 analyzer-name = "dep"
 analyzer-version = 1
-inputs-digest = "c6e2522d1b0c6832101ba15fc062074ad790648e26f481e3419a171d3579bfc4"
+inputs-digest = "f0a207197cb502238ac87ca8e07b2640c02ec380a50b036e09ef87e40e31ca2d"
 solver-name = "gps-cdcl"
 solver-version = 1


@@ -19,7 +19,3 @@
 # [[override]]
 # name = "github.com/x/y"
 # version = "2.4.0"
-[[constraint]]
-name = "github.com/minio/minio-go"
-version = "3.0.0"


@@ -2,8 +2,8 @@ package s3
 import (
 "context"
-"fmt"
 "io"
+"io/ioutil"
 "net/http"
 "os"
 "path"
@@ -248,51 +248,20 @@ func (be *Backend) Path() string {
 return be.cfg.Prefix
 }
-// nopCloserFile wraps *os.File and overwrites the Close() method with method
-// that does nothing. In addition, the method Len() is implemented, which
-// returns the size of the file (filesize - current offset).
-type nopCloserFile struct {
-*os.File
-}
-func (f nopCloserFile) Close() error {
-debug.Log("prevented Close()")
-return nil
-}
-// Len returns the remaining length of the file (filesize - current offset).
-func (f nopCloserFile) Len() int {
-debug.Log("Len() called")
+// lenForFile returns the length of the file.
+func lenForFile(f *os.File) (int64, error) {
 fi, err := f.Stat()
 if err != nil {
-panic(err)
+return 0, errors.Wrap(err, "Stat")
 }
 pos, err := f.Seek(0, io.SeekCurrent)
 if err != nil {
-panic(err)
+return 0, errors.Wrap(err, "Seek")
 }
 size := fi.Size() - pos
-debug.Log("returning file size %v", size)
-return int(size)
-}
-type lenner interface {
-Len() int
-io.Reader
-}
-// nopCloserLenner wraps a lenner and overwrites the Close() method with method
-// that does nothing. In addition, the method Size() is implemented, which
-// returns the size of the file (filesize - current offset).
-type nopCloserLenner struct {
-lenner
-}
-func (f *nopCloserLenner) Close() error {
-debug.Log("prevented Close()")
-return nil
+return size, nil
 }
 // Save stores data in the backend at the handle.
@@ -309,26 +278,33 @@ func (be *Backend) Save(ctx context.Context, h restic.Handle, rd io.Reader) (err
 defer be.sem.ReleaseToken()
 // Check key does not already exist
-_, err = be.client.StatObject(be.cfg.Bucket, objName)
+_, err = be.client.StatObject(be.cfg.Bucket, objName, minio.StatObjectOptions{})
 if err == nil {
 debug.Log("%v already exists", h)
 return errors.New("key already exists")
 }
-// FIXME: This is a workaround once we move to minio-go 4.0.x this can be
-// removed and size can be directly provided.
-if f, ok := rd.(*os.File); ok {
-debug.Log("reader is %#T, using nopCloserFile{}", rd)
-rd = nopCloserFile{f}
-} else if l, ok := rd.(lenner); ok {
-debug.Log("reader is %#T, using nopCloserLenner{}", rd)
-rd = nopCloserLenner{l}
-} else {
-debug.Log("reader is %#T, no specific workaround enabled", rd)
+var size int64 = -1
+type lenner interface {
+Len() int
 }
-debug.Log("PutObject(%v, %v)", be.cfg.Bucket, objName)
-n, err := be.client.PutObject(be.cfg.Bucket, objName, rd, "application/octet-stream")
+// find size for reader
+if f, ok := rd.(*os.File); ok {
+size, err = lenForFile(f)
+if err != nil {
+return err
+}
+} else if l, ok := rd.(lenner); ok {
+size = int64(l.Len())
+}
+opts := minio.PutObjectOptions{}
+opts.ContentType = "application/octet-stream"
+debug.Log("PutObject(%v, %v, %v)", be.cfg.Bucket, objName, size)
+n, err := be.client.PutObjectWithContext(ctx, be.cfg.Bucket, objName, ioutil.NopCloser(rd), size, opts)
 debug.Log("%v -> %v bytes, err %#v: %v", objName, n, err, err)
@@ -365,19 +341,24 @@ func (be *Backend) Load(ctx context.Context, h restic.Handle, length int, offset
 }
 objName := be.Filename(h)
-byteRange := fmt.Sprintf("bytes=%d-", offset)
+opts := minio.GetObjectOptions{}
+var err error
 if length > 0 {
-byteRange = fmt.Sprintf("bytes=%d-%d", offset, offset+int64(length)-1)
+debug.Log("range: %v-%v", offset, offset+int64(length)-1)
+err = opts.SetRange(offset, offset+int64(length)-1)
+} else if offset > 0 {
+debug.Log("range: %v-", offset)
+err = opts.SetRange(offset, 0)
+}
+if err != nil {
+return nil, errors.Wrap(err, "SetRange")
 }
-headers := minio.NewGetReqHeaders()
-headers.Add("Range", byteRange)
 be.sem.GetToken()
-debug.Log("Load(%v) send range %v", h, byteRange)
 coreClient := minio.Core{Client: be.client}
-rd, _, err := coreClient.GetObject(be.cfg.Bucket, objName, headers)
+rd, err := coreClient.GetObjectWithContext(ctx, be.cfg.Bucket, objName, opts)
 if err != nil {
 be.sem.ReleaseToken()
 return nil, err
@@ -401,8 +382,10 @@ func (be *Backend) Stat(ctx context.Context, h restic.Handle) (bi restic.FileInf
 objName := be.Filename(h)
 var obj *minio.Object
+opts := minio.GetObjectOptions{}
 be.sem.GetToken()
-obj, err = be.client.GetObject(be.cfg.Bucket, objName)
+obj, err = be.client.GetObjectWithContext(ctx, be.cfg.Bucket, objName, opts)
 if err != nil {
 debug.Log("GetObject() err %v", err)
 be.sem.ReleaseToken()
@@ -433,7 +416,7 @@ func (be *Backend) Test(ctx context.Context, h restic.Handle) (bool, error) {
 objName := be.Filename(h)
 be.sem.GetToken()
-_, err := be.client.StatObject(be.cfg.Bucket, objName)
+_, err := be.client.StatObject(be.cfg.Bucket, objName, minio.StatObjectOptions{})
 be.sem.ReleaseToken()
 if err == nil {
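The backend changes above boil down to one pattern change: instead of wrapping readers in no-op closers so minio-go could discover the length itself, the caller now computes the size up front and passes it to PutObjectWithContext together with a PutObjectOptions value, and byte ranges go through GetObjectOptions.SetRange rather than a hand-built Range header. Below is a minimal, hedged sketch of that calling pattern against the minio-go v4 API as it appears in this diff; the endpoint, credentials, bucket, key and file path are placeholders, not taken from restic.

```go
package main

import (
	"context"
	"log"
	"os"

	minio "github.com/minio/minio-go"
)

func main() {
	// Placeholder endpoint and credentials.
	client, err := minio.New("play.minio.io:9000", "ACCESS", "SECRET", true)
	if err != nil {
		log.Fatalln(err)
	}
	ctx := context.Background()

	// Upload: the caller supplies the size and a PutObjectOptions value.
	f, err := os.Open("/tmp/blob")
	if err != nil {
		log.Fatalln(err)
	}
	defer f.Close()
	fi, err := f.Stat()
	if err != nil {
		log.Fatalln(err)
	}
	opts := minio.PutObjectOptions{ContentType: "application/octet-stream"}
	if _, err := client.PutObjectWithContext(ctx, "bucket", "key", f, fi.Size(), opts); err != nil {
		log.Fatalln(err)
	}

	// Download a byte range: SetRange replaces the hand-built "Range" header.
	gopts := minio.GetObjectOptions{}
	if err := gopts.SetRange(0, 1023); err != nil {
		log.Fatalln(err)
	}
	rd, err := client.GetObjectWithContext(ctx, "bucket", "key", gopts)
	if err != nil {
		log.Fatalln(err)
	}
	defer rd.Close()
}
```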


@@ -1,2 +1,3 @@
 *~
 *.test
+validator


@@ -9,18 +9,22 @@ env:
 - ARCH=i686
 go:
-- 1.5.3
-- 1.6
 - 1.7.4
-- 1.8
+- 1.8.x
+- 1.9.x
+- tip
+matrix:
+fast_finish: true
+allow_failures:
+- go: tip
+addons:
+apt:
+packages:
+- devscripts
 script:
 - diff -au <(gofmt -d .) <(printf "")
-- go get -u github.com/cheggaaa/pb/...
-- go get -u github.com/sirupsen/logrus/...
-- go get -u github.com/dustin/go-humanize/...
-- go vet ./...
-- SERVER_ENDPOINT=play.minio.io:9000 ACCESS_KEY=Q3AM3UQ867SPQQA43P2F SECRET_KEY=zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG ENABLE_HTTPS=1 go test -race -v ./...
-- SERVER_ENDPOINT=play.minio.io:9000 ACCESS_KEY=Q3AM3UQ867SPQQA43P2F SECRET_KEY=zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG ENABLE_HTTPS=1 go run functional_tests.go
-- mkdir /tmp/examples \
-&& for i in $(echo examples/s3/*); do go build -o /tmp/examples/$(basename ${i:0:-3}) ${i}; done
+- diff -au <(licensecheck --check '.go$' --recursive --lines 0 * | grep -v -w 'Apache (v2.0)') <(printf "")
+- make


@@ -5,24 +5,25 @@
 Please go through this link [Maintainer Responsibility](https://gist.github.com/abperiasamy/f4d9b31d3186bbd26522)
 ### Making new releases
+Edit `libraryVersion` constant in `api.go`. Tag and sign your release commit, additionally this step requires you to have access to Minio's trusted private key.
+```sh
+$ export GNUPGHOME=/media/${USER}/minio/trusted
+$ git tag -s 4.0.0
+$ git push
+$ git push --tags
 ```
+### Update version
+Once release has been made update `libraryVersion` constant in `api.go` to next to be released version.
+```sh
 $ grep libraryVersion api.go
-libraryVersion = "0.3.0"
+libraryVersion = "4.0.1"
 ```
 Commit your changes
 ```
-$ git commit -a -m "Bump to new release 0.3.0" --author "Minio Trusted <trusted@minio.io>"
+$ git commit -a -m "Update version for next release" --author "Minio Trusted <trusted@minio.io>"
-```
-Tag and sign your release commit, additionally this step requires you to have access to Minio's trusted private key.
-```
-$ export GNUPGHOME=/path/to/trusted/key
-$ git tag -s 0.3.0
-$ git push
-$ git push --tags
 ```
 ### Announce
@@ -30,5 +31,5 @@ Announce new release by adding release notes at https://github.com/minio/minio-g
 To generate `changelog`
 ```sh
-git log --no-color --pretty=format:'-%d %s (%cr) <%an>' <latest_release_tag>..<last_release_tag>
+$ git log --no-color --pretty=format:'-%d %s (%cr) <%an>' <last_release_tag>..<latest_release_tag>
 ```

vendor/github.com/minio/minio-go/Makefile (new file, generated, vendored)

@@ -0,0 +1,17 @@
all: checks
checks:
@go get -u github.com/go-ini/ini/...
@go get -u github.com/minio/go-homedir/...
@go get -u github.com/cheggaaa/pb/...
@go get -u github.com/sirupsen/logrus/...
@go get -u github.com/dustin/go-humanize/...
@go vet ./...
@SERVER_ENDPOINT=play.minio.io:9000 ACCESS_KEY=Q3AM3UQ867SPQQA43P2F SECRET_KEY=zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG ENABLE_HTTPS=1 go test -race -v ./...
@SERVER_ENDPOINT=play.minio.io:9000 ACCESS_KEY=Q3AM3UQ867SPQQA43P2F SECRET_KEY=zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG ENABLE_HTTPS=1 go run functional_tests.go
@mkdir -p /tmp/examples && for i in $(echo examples/s3/*); do go build -o /tmp/examples/$(basename ${i:0:-3}) ${i}; done
@go get -u github.com/a8m/mark/...
@go get -u github.com/minio/cli/...
@go get -u golang.org/x/tools/cmd/goimports
@go get -u github.com/gernest/wow/...
@go build docs/validator.go && ./validator -m docs/API.md -t docs/checker.go.tpl

vendor/github.com/minio/minio-go/NOTICE (new file, generated, vendored)

@@ -0,0 +1,2 @@
minio-go
Copyright 2015-2017 Minio, Inc.


@@ -1,19 +1,7 @@
-# Minio Go Client SDK for Amazon S3 Compatible Cloud Storage [![Slack](https://slack.minio.io/slack?type=svg)](https://slack.minio.io) [![Sourcegraph](https://sourcegraph.com/github.com/minio/minio-go/-/badge.svg)](https://sourcegraph.com/github.com/minio/minio-go?badge)
+# Minio Go Client SDK for Amazon S3 Compatible Cloud Storage [![Slack](https://slack.minio.io/slack?type=svg)](https://slack.minio.io) [![Sourcegraph](https://sourcegraph.com/github.com/minio/minio-go/-/badge.svg)](https://sourcegraph.com/github.com/minio/minio-go?badge) [![Apache V2 License](http://img.shields.io/badge/license-Apache%20V2-blue.svg)](https://github.com/minio/minio-go/blob/master/LICENSE)
 The Minio Go Client SDK provides simple APIs to access any Amazon S3 compatible object storage.
-**Supported cloud storage providers:**
-- AWS Signature Version 4
-- Amazon S3
-- Minio
-- AWS Signature Version 2
-- Google Cloud Storage (Compatibility Mode)
-- Openstack Swift + Swift3 middleware
-- Ceph Object Gateway
-- Riak CS
 This quickstart guide will show you how to install the Minio client SDK, connect to Minio, and provide a walkthrough for a simple file uploader. For a complete list of APIs and examples, please take a look at the [Go Client API Reference](https://docs.minio.io/docs/golang-client-api-reference).
 This document assumes that you have a working [Go development environment](https://docs.minio.io/docs/how-to-install-golang).
@@ -55,6 +43,7 @@ func main() {
 }
 log.Printf("%#v\n", minioClient) // minioClient is now setup
+}
 ```
 ## Quick Start Example - File Uploader
@@ -105,7 +94,7 @@ func main() {
 contentType := "application/zip"
 // Upload the zip file with FPutObject
-n, err := minioClient.FPutObject(bucketName, objectName, filePath, contentType)
+n, err := minioClient.FPutObject(bucketName, objectName, filePath, minio.PutObjectOptions{ContentType:contentType})
 if err != nil {
 log.Fatalln(err)
 }
@@ -152,10 +141,14 @@ The full API Reference is available here.
 ### API Reference : File Object Operations
 * [`FPutObject`](https://docs.minio.io/docs/golang-client-api-reference#FPutObject)
 * [`FGetObject`](https://docs.minio.io/docs/golang-client-api-reference#FPutObject)
+* [`FPutObjectWithContext`](https://docs.minio.io/docs/golang-client-api-reference#FPutObjectWithContext)
+* [`FGetObjectWithContext`](https://docs.minio.io/docs/golang-client-api-reference#FGetObjectWithContext)
 ### API Reference : Object Operations
 * [`GetObject`](https://docs.minio.io/docs/golang-client-api-reference#GetObject)
 * [`PutObject`](https://docs.minio.io/docs/golang-client-api-reference#PutObject)
+* [`GetObjectWithContext`](https://docs.minio.io/docs/golang-client-api-reference#GetObjectWithContext)
+* [`PutObjectWithContext`](https://docs.minio.io/docs/golang-client-api-reference#PutObjectWithContext)
 * [`PutObjectStreaming`](https://docs.minio.io/docs/golang-client-api-reference#PutObjectStreaming)
 * [`StatObject`](https://docs.minio.io/docs/golang-client-api-reference#StatObject)
 * [`CopyObject`](https://docs.minio.io/docs/golang-client-api-reference#CopyObject)
@@ -204,10 +197,14 @@ The full API Reference is available here.
 ### Full Examples : File Object Operations
 * [fputobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/fputobject.go)
 * [fgetobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/fgetobject.go)
+* [fputobject-context.go](https://github.com/minio/minio-go/blob/master/examples/s3/fputobject-context.go)
+* [fgetobject-context.go](https://github.com/minio/minio-go/blob/master/examples/s3/fgetobject-context.go)
 ### Full Examples : Object Operations
 * [putobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/putobject.go)
 * [getobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/getobject.go)
+* [putobject-context.go](https://github.com/minio/minio-go/blob/master/examples/s3/putobject-context.go)
+* [getobject-context.go](https://github.com/minio/minio-go/blob/master/examples/s3/getobject-context.go)
 * [statobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/statobject.go)
 * [copyobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/copyobject.go)
 * [removeobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeobject.go)
@@ -217,6 +214,7 @@ The full API Reference is available here.
 ### Full Examples : Encrypted Object Operations
 * [put-encrypted-object.go](https://github.com/minio/minio-go/blob/master/examples/s3/put-encrypted-object.go)
 * [get-encrypted-object.go](https://github.com/minio/minio-go/blob/master/examples/s3/get-encrypted-object.go)
+* [fput-encrypted-object.go](https://github.com/minio/minio-go/blob/master/examples/s3/fputencrypted-object.go)
 ### Full Examples : Presigned Operations
 * [presignedgetobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedgetobject.go)
@@ -235,3 +233,5 @@ The full API Reference is available here.
 [![Build Status](https://travis-ci.org/minio/minio-go.svg)](https://travis-ci.org/minio/minio-go)
 [![Build status](https://ci.appveyor.com/api/projects/status/1d05e6nvxcelmrak?svg=true)](https://ci.appveyor.com/project/harshavardhana/minio-go)
+## License
+This SDK is distributed under the [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0), see [LICENSE](./LICENSE) and [NOTICE](./NOTICE) for more information.
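For reference, the quick-start change in the README hunk above is purely a signature change: the plain contentType string argument becomes a PutObjectOptions struct. A compilable sketch of the v4 call follows; the endpoint, credentials, bucket, object and file path are placeholders.

```go
package main

import (
	"log"

	minio "github.com/minio/minio-go"
)

func main() {
	// Placeholder endpoint and credentials.
	minioClient, err := minio.New("play.minio.io:9000", "YOUR-ACCESSKEY", "YOUR-SECRETKEY", true)
	if err != nil {
		log.Fatalln(err)
	}

	// v4: the content type moves into PutObjectOptions.
	n, err := minioClient.FPutObject("mymusic", "golden-oldies.zip", "/tmp/golden-oldies.zip",
		minio.PutObjectOptions{ContentType: "application/zip"})
	if err != nil {
		log.Fatalln(err)
	}
	log.Printf("Successfully uploaded %d bytes", n)
}
```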


@@ -1,5 +1,6 @@
 /*
-* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2017 Minio, Inc.
+* Minio Go Library for Amazon S3 Compatible Cloud Storage
+* Copyright 2017 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -17,6 +18,7 @@
 package minio
 import (
+"context"
 "encoding/base64"
 "fmt"
 "net/http"
@@ -58,7 +60,7 @@ func (s *SSEInfo) getSSEHeaders(isCopySource bool) map[string]string {
 return map[string]string{
 "x-amz-" + cs + "server-side-encryption-customer-algorithm": s.algo,
 "x-amz-" + cs + "server-side-encryption-customer-key": base64.StdEncoding.EncodeToString(s.key),
-"x-amz-" + cs + "server-side-encryption-customer-key-MD5": base64.StdEncoding.EncodeToString(sumMD5(s.key)),
+"x-amz-" + cs + "server-side-encryption-customer-key-MD5": sumMD5Base64(s.key),
 }
 }
@@ -115,7 +117,7 @@ func NewDestinationInfo(bucket, object string, encryptSSEC *SSEInfo,
 k = k[len("x-amz-meta-"):]
 }
 if _, ok := m[k]; ok {
-return d, fmt.Errorf("Cannot add both %s and x-amz-meta-%s keys as custom metadata", k, k)
+return d, ErrInvalidArgument(fmt.Sprintf("Cannot add both %s and x-amz-meta-%s keys as custom metadata", k, k))
 }
 m[k] = v
 }
@@ -243,13 +245,13 @@ func (s *SourceInfo) getProps(c Client) (size int64, etag string, userMeta map[s
 // Get object info - need size and etag here. Also, decryption
 // headers are added to the stat request if given.
 var objInfo ObjectInfo
-rh := NewGetReqHeaders()
+opts := StatObjectOptions{}
 for k, v := range s.decryptKey.getSSEHeaders(false) {
-rh.Set(k, v)
+opts.Set(k, v)
 }
-objInfo, err = c.statObject(s.bucket, s.object, rh)
+objInfo, err = c.statObject(s.bucket, s.object, opts)
 if err != nil {
-err = fmt.Errorf("Could not stat object - %s/%s: %v", s.bucket, s.object, err)
+err = ErrInvalidArgument(fmt.Sprintf("Could not stat object - %s/%s: %v", s.bucket, s.object, err))
 } else {
 size = objInfo.Size
 etag = objInfo.ETag
@@ -265,10 +267,55 @@ func (s *SourceInfo) getProps(c Client) (size int64, etag string, userMeta map[s
 return
 }
// Low level implementation of CopyObject API, supports only upto 5GiB worth of copy.
func (c Client) copyObjectDo(ctx context.Context, srcBucket, srcObject, destBucket, destObject string,
metadata map[string]string) (ObjectInfo, error) {
// Build headers.
headers := make(http.Header)
// Set all the metadata headers.
for k, v := range metadata {
headers.Set(k, v)
}
// Set the source header
headers.Set("x-amz-copy-source", s3utils.EncodePath(srcBucket+"/"+srcObject))
// Send upload-part-copy request
resp, err := c.executeMethod(ctx, "PUT", requestMetadata{
bucketName: destBucket,
objectName: destObject,
customHeader: headers,
})
defer closeResponse(resp)
if err != nil {
return ObjectInfo{}, err
}
// Check if we got an error response.
if resp.StatusCode != http.StatusOK {
return ObjectInfo{}, httpRespToErrorResponse(resp, srcBucket, srcObject)
}
cpObjRes := copyObjectResult{}
err = xmlDecoder(resp.Body, &cpObjRes)
if err != nil {
return ObjectInfo{}, err
}
objInfo := ObjectInfo{
Key: destObject,
ETag: strings.Trim(cpObjRes.ETag, "\""),
LastModified: cpObjRes.LastModified,
}
return objInfo, nil
}
 // uploadPartCopy - helper function to create a part in a multipart
 // upload via an upload-part-copy request
 // https://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadUploadPartCopy.html
-func (c Client) uploadPartCopy(bucket, object, uploadID string, partNumber int,
+func (c Client) uploadPartCopy(ctx context.Context, bucket, object, uploadID string, partNumber int,
 headers http.Header) (p CompletePart, err error) {
 // Build query parameters
@@ -277,7 +324,7 @@ func (c Client) uploadPartCopy(bucket, object, uploadID string, partNumber int,
 urlValues.Set("uploadId", uploadID)
 // Send upload-part-copy request
-resp, err := c.executeMethod("PUT", requestMetadata{
+resp, err := c.executeMethod(ctx, "PUT", requestMetadata{
 bucketName: bucket,
 objectName: object,
 customHeader: headers,
@@ -311,7 +358,7 @@ func (c Client) ComposeObject(dst DestinationInfo, srcs []SourceInfo) error {
 if len(srcs) < 1 || len(srcs) > maxPartsCount {
 return ErrInvalidArgument("There must be as least one and up to 10000 source objects.")
 }
+ctx := context.Background()
 srcSizes := make([]int64, len(srcs))
 var totalSize, size, totalParts int64
 var srcUserMeta map[string]string
@@ -320,7 +367,7 @@ func (c Client) ComposeObject(dst DestinationInfo, srcs []SourceInfo) error {
 for i, src := range srcs {
 size, etag, srcUserMeta, err = src.getProps(c)
 if err != nil {
-return fmt.Errorf("Could not get source props for %s/%s: %v", src.bucket, src.object, err)
+return err
 }
 // Error out if client side encryption is used in this source object when
@@ -396,7 +443,7 @@ func (c Client) ComposeObject(dst DestinationInfo, srcs []SourceInfo) error {
 }
 // Send copy request
-resp, err := c.executeMethod("PUT", requestMetadata{
+resp, err := c.executeMethod(ctx, "PUT", requestMetadata{
 bucketName: dst.bucket,
 objectName: dst.object,
 customHeader: h,
@@ -426,13 +473,13 @@ func (c Client) ComposeObject(dst DestinationInfo, srcs []SourceInfo) error {
 if len(userMeta) == 0 && len(srcs) == 1 {
 metaMap = srcUserMeta
 }
-metaHeaders := make(map[string][]string)
+metaHeaders := make(map[string]string)
 for k, v := range metaMap {
-metaHeaders[k] = append(metaHeaders[k], v)
+metaHeaders[k] = v
 }
-uploadID, err := c.newUploadID(dst.bucket, dst.object, metaHeaders)
+uploadID, err := c.newUploadID(ctx, dst.bucket, dst.object, PutObjectOptions{UserMetadata: metaHeaders})
 if err != nil {
-return fmt.Errorf("Error creating new upload: %v", err)
+return err
 }
 // 2. Perform copy part uploads
@@ -457,10 +504,10 @@ func (c Client) ComposeObject(dst DestinationInfo, srcs []SourceInfo) error {
 fmt.Sprintf("bytes=%d-%d", start, end))
 // make upload-part-copy request
-complPart, err := c.uploadPartCopy(dst.bucket,
+complPart, err := c.uploadPartCopy(ctx, dst.bucket,
 dst.object, uploadID, partIndex, h)
 if err != nil {
-return fmt.Errorf("Error in upload-part-copy - %v", err)
+return err
 }
 objParts = append(objParts, complPart)
 partIndex++
@@ -468,13 +515,13 @@ func (c Client) ComposeObject(dst DestinationInfo, srcs []SourceInfo) error {
 }
 // 3. Make final complete-multipart request.
-_, err = c.completeMultipartUpload(dst.bucket, dst.object, uploadID,
+_, err = c.completeMultipartUpload(ctx, dst.bucket, dst.object, uploadID,
 completeMultipartUpload{Parts: objParts})
 if err != nil {
-err = fmt.Errorf("Error in complete-multipart request - %v", err)
-}
 return err
 }
+return nil
+}
 // partsRequired is ceiling(size / copyPartSize)
 func partsRequired(size int64) int64 {
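The new copyObjectDo helper and the ctx plumbing above sit underneath the public ComposeObject API, whose caller-facing shape is unchanged by this update. As a hedged illustration (the bucket and object names below are invented, and minio.NewSourceInfo/NewDestinationInfo are used with the signatures visible in this diff), server-side composition is typically driven like this:

```go
package main

import (
	"log"

	minio "github.com/minio/minio-go"
)

func main() {
	// Placeholder endpoint and credentials.
	client, err := minio.New("play.minio.io:9000", "ACCESS", "SECRET", true)
	if err != nil {
		log.Fatalln(err)
	}

	// Each source becomes one upload-part-copy request internally.
	srcs := []minio.SourceInfo{
		minio.NewSourceInfo("src-bucket", "part-1", nil),
		minio.NewSourceInfo("src-bucket", "part-2", nil),
	}

	// nil SSE-C key and nil metadata map: no encryption, no extra user metadata.
	dst, err := minio.NewDestinationInfo("dst-bucket", "joined-object", nil, nil)
	if err != nil {
		log.Fatalln(err)
	}

	if err := client.ComposeObject(dst, srcs); err != nil {
		log.Fatalln(err)
	}
}
```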


@@ -1,5 +1,6 @@
 /*
-* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2017 Minio, Inc.
+* Minio Go Library for Amazon S3 Compatible Cloud Storage
+* Copyright 2017 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.


@@ -1,5 +1,6 @@
 /*
-* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+* Minio Go Library for Amazon S3 Compatible Cloud Storage
+* Copyright 2015-2017 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.


@@ -1,5 +1,6 @@
 /*
-* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016, 2017 Minio, Inc.
+* Minio Go Library for Amazon S3 Compatible Cloud Storage
+* Copyright 2015-2017 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -20,7 +21,6 @@ import (
 "encoding/xml"
 "fmt"
 "net/http"
-"strconv"
 )
 /* **** SAMPLE ERROR RESPONSE ****
@@ -49,6 +49,9 @@ type ErrorResponse struct {
 // only in HEAD bucket and ListObjects response.
 Region string
+// Underlying HTTP status code for the returned error
+StatusCode int `xml:"-" json:"-"`
 // Headers of the returned S3 XML error
 Headers http.Header `xml:"-" json:"-"`
 }
@@ -100,7 +103,10 @@ func httpRespToErrorResponse(resp *http.Response, bucketName, objectName string)
 msg := "Response is empty. " + reportIssue
 return ErrInvalidArgument(msg)
 }
-var errResp ErrorResponse
+errResp := ErrorResponse{
+StatusCode: resp.StatusCode,
+}
 err := xmlDecoder(resp.Body, &errResp)
 // Xml decoding failed with no body, fall back to HTTP headers.
@@ -109,12 +115,14 @@ func httpRespToErrorResponse(resp *http.Response, bucketName, objectName string)
 case http.StatusNotFound:
 if objectName == "" {
 errResp = ErrorResponse{
+StatusCode: resp.StatusCode,
 Code: "NoSuchBucket",
 Message: "The specified bucket does not exist.",
 BucketName: bucketName,
 }
 } else {
 errResp = ErrorResponse{
+StatusCode: resp.StatusCode,
 Code: "NoSuchKey",
 Message: "The specified key does not exist.",
 BucketName: bucketName,
@@ -123,6 +131,7 @@ func httpRespToErrorResponse(resp *http.Response, bucketName, objectName string)
 }
 case http.StatusForbidden:
 errResp = ErrorResponse{
+StatusCode: resp.StatusCode,
 Code: "AccessDenied",
 Message: "Access Denied.",
 BucketName: bucketName,
@@ -130,12 +139,14 @@ func httpRespToErrorResponse(resp *http.Response, bucketName, objectName string)
 }
 case http.StatusConflict:
 errResp = ErrorResponse{
+StatusCode: resp.StatusCode,
 Code: "Conflict",
 Message: "Bucket not empty.",
 BucketName: bucketName,
 }
 case http.StatusPreconditionFailed:
 errResp = ErrorResponse{
+StatusCode: resp.StatusCode,
 Code: "PreconditionFailed",
 Message: s3ErrorResponseMap["PreconditionFailed"],
 BucketName: bucketName,
@@ -143,6 +154,7 @@ func httpRespToErrorResponse(resp *http.Response, bucketName, objectName string)
 }
 default:
 errResp = ErrorResponse{
+StatusCode: resp.StatusCode,
 Code: resp.Status,
 Message: resp.Status,
 BucketName: bucketName,
@@ -150,7 +162,7 @@ func httpRespToErrorResponse(resp *http.Response, bucketName, objectName string)
 }
 }
-// Save hodID, requestID and region information
+// Save hostID, requestID and region information
 // from headers if not available through error XML.
 if errResp.RequestID == "" {
 errResp.RequestID = resp.Header.Get("x-amz-request-id")
@@ -162,7 +174,7 @@ func httpRespToErrorResponse(resp *http.Response, bucketName, objectName string)
 errResp.Region = resp.Header.Get("x-amz-bucket-region")
 }
 if errResp.Code == "InvalidRegion" && errResp.Region != "" {
-errResp.Message = fmt.Sprintf("Region does not match, expecting region '%s'.", errResp.Region)
+errResp.Message = fmt.Sprintf("Region does not match, expecting region %s.", errResp.Region)
 }
 // Save headers returned in the API XML error
@@ -173,10 +185,10 @@ func httpRespToErrorResponse(resp *http.Response, bucketName, objectName string)
 // ErrTransferAccelerationBucket - bucket name is invalid to be used with transfer acceleration.
 func ErrTransferAccelerationBucket(bucketName string) error {
-msg := fmt.Sprintf("The name of the bucket used for Transfer Acceleration must be DNS-compliant and must not contain periods (\".\").")
 return ErrorResponse{
+StatusCode: http.StatusBadRequest,
 Code: "InvalidArgument",
-Message: msg,
+Message: "The name of the bucket used for Transfer Acceleration must be DNS-compliant and must not contain periods (\".\").",
 BucketName: bucketName,
 }
 }
@@ -185,6 +197,7 @@ func ErrTransferAccelerationBucket(bucketName string) error {
 func ErrEntityTooLarge(totalSize, maxObjectSize int64, bucketName, objectName string) error {
 msg := fmt.Sprintf("Your proposed upload size %d exceeds the maximum allowed object size %d for single PUT operation.", totalSize, maxObjectSize)
 return ErrorResponse{
+StatusCode: http.StatusBadRequest,
 Code: "EntityTooLarge",
 Message: msg,
 BucketName: bucketName,
@@ -194,9 +207,10 @@ func ErrEntityTooLarge(totalSize, maxObjectSize int64, bucketName, objectName st
 // ErrEntityTooSmall - Input size is smaller than supported minimum.
 func ErrEntityTooSmall(totalSize int64, bucketName, objectName string) error {
-msg := fmt.Sprintf("Your proposed upload size %d is below the minimum allowed object size '0B' for single PUT operation.", totalSize)
+msg := fmt.Sprintf("Your proposed upload size %d is below the minimum allowed object size 0B for single PUT operation.", totalSize)
 return ErrorResponse{
-Code: "EntityTooLarge",
+StatusCode: http.StatusBadRequest,
+Code: "EntityTooSmall",
 Message: msg,
 BucketName: bucketName,
 Key: objectName,
@@ -205,9 +219,9 @@ func ErrEntityTooSmall(totalSize int64, bucketName, objectName string) error {
 // ErrUnexpectedEOF - Unexpected end of file reached.
 func ErrUnexpectedEOF(totalRead, totalSize int64, bucketName, objectName string) error {
-msg := fmt.Sprintf("Data read %s is not equal to the size %s of the input Reader.",
-strconv.FormatInt(totalRead, 10), strconv.FormatInt(totalSize, 10))
+msg := fmt.Sprintf("Data read %d is not equal to the size %d of the input Reader.", totalRead, totalSize)
 return ErrorResponse{
+StatusCode: http.StatusBadRequest,
 Code: "UnexpectedEOF",
 Message: msg,
 BucketName: bucketName,
@@ -218,6 +232,7 @@ func ErrUnexpectedEOF(totalRead, totalSize int64, bucketName, objectName string)
 // ErrInvalidBucketName - Invalid bucket name response.
 func ErrInvalidBucketName(message string) error {
 return ErrorResponse{
+StatusCode: http.StatusBadRequest,
 Code: "InvalidBucketName",
 Message: message,
 RequestID: "minio",
@@ -227,6 +242,7 @@ func ErrInvalidBucketName(message string) error {
 // ErrInvalidObjectName - Invalid object name response.
 func ErrInvalidObjectName(message string) error {
 return ErrorResponse{
+StatusCode: http.StatusNotFound,
 Code: "NoSuchKey",
 Message: message,
 RequestID: "minio",
@@ -240,6 +256,7 @@ var ErrInvalidObjectPrefix = ErrInvalidObjectName
 // ErrInvalidArgument - Invalid argument response.
 func ErrInvalidArgument(message string) error {
 return ErrorResponse{
+StatusCode: http.StatusBadRequest,
 Code: "InvalidArgument",
 Message: message,
 RequestID: "minio",
@@ -250,6 +267,7 @@ func ErrInvalidArgument(message string) error {
 // The specified bucket does not have a bucket policy.
 func ErrNoSuchBucketPolicy(message string) error {
 return ErrorResponse{
+StatusCode: http.StatusNotFound,
 Code: "NoSuchBucketPolicy",
 Message: message,
 RequestID: "minio",
@@ -260,6 +278,7 @@ func ErrNoSuchBucketPolicy(message string) error {
 // The specified API call is not supported
 func ErrAPINotSupported(message string) error {
 return ErrorResponse{
+StatusCode: http.StatusNotImplemented,
 Code: "APINotSupported",
 Message: message,
 RequestID: "minio",
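The StatusCode field added throughout this file is exported on ErrorResponse, so callers can branch on the HTTP status instead of string-matching error codes. A small sketch of that, assuming the SDK's ToErrorResponse helper (it is part of this same file but not shown in the hunks above) and placeholder bucket/object names:

```go
package main

import (
	"log"
	"net/http"

	minio "github.com/minio/minio-go"
)

func main() {
	// Placeholder endpoint and credentials.
	client, err := minio.New("play.minio.io:9000", "ACCESS", "SECRET", true)
	if err != nil {
		log.Fatalln(err)
	}

	// StatObject on a missing key returns an ErrorResponse; ToErrorResponse
	// recovers it so the new StatusCode field can be inspected directly.
	_, err = client.StatObject("bucket", "missing-key", minio.StatObjectOptions{})
	if err != nil {
		errResp := minio.ToErrorResponse(err)
		switch errResp.StatusCode {
		case http.StatusNotFound:
			log.Println("object does not exist")
		case http.StatusForbidden:
			log.Println("access denied")
		default:
			log.Printf("unexpected error: %v (HTTP %d)", err, errResp.StatusCode)
		}
	}
}
```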


@@ -1,5 +1,6 @@
 /*
-* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+* Minio Go Library for Amazon S3 Compatible Cloud Storage
+* Copyright 2015-2017 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -7,7 +8,7 @@
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
-* Unless required bZy applicable law or agreed to in writing, software
+* Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
@@ -32,20 +33,23 @@ func TestHttpRespToErrorResponse(t *testing.T) {
 // 'genAPIErrorResponse' generates ErrorResponse for given APIError.
 // provides a encodable populated response values.
 genAPIErrorResponse := func(err APIError, bucketName string) ErrorResponse {
-var errResp = ErrorResponse{}
-errResp.Code = err.Code
-errResp.Message = err.Description
-errResp.BucketName = bucketName
-return errResp
+return ErrorResponse{
+Code: err.Code,
+Message: err.Description,
+BucketName: bucketName,
+}
 }
 // Encodes the response headers into XML format.
-encodeErr := func(response interface{}) []byte {
-var bytesBuffer bytes.Buffer
-bytesBuffer.WriteString(xml.Header)
-encode := xml.NewEncoder(&bytesBuffer)
-encode.Encode(response)
-return bytesBuffer.Bytes()
+encodeErr := func(response ErrorResponse) []byte {
+buf := &bytes.Buffer{}
+buf.WriteString(xml.Header)
+encoder := xml.NewEncoder(buf)
+err := encoder.Encode(response)
+if err != nil {
+t.Fatalf("error encoding response: %v", err)
+}
+return buf.Bytes()
 }
 // `createAPIErrorResponse` Mocks XML error response from the server.
@@ -65,6 +69,7 @@ func TestHttpRespToErrorResponse(t *testing.T) {
 // 'genErrResponse' contructs error response based http Status Code
 genErrResponse := func(resp *http.Response, code, message, bucketName, objectName string) ErrorResponse {
 errResp := ErrorResponse{
+StatusCode: resp.StatusCode,
 Code: code,
 Message: message,
 BucketName: bucketName,
@@ -80,6 +85,7 @@ func TestHttpRespToErrorResponse(t *testing.T) {
 // Generate invalid argument error.
 genInvalidError := func(message string) error {
 errResp := ErrorResponse{
+StatusCode: http.StatusBadRequest,
 Code: "InvalidArgument",
 Message: message,
 RequestID: "minio",
@@ -101,22 +107,22 @@ func TestHttpRespToErrorResponse(t *testing.T) {
 // Set the StatusCode to the argument supplied.
 // Sets common headers.
 genEmptyBodyResponse := func(statusCode int) *http.Response {
-resp := &http.Response{}
-// set empty response body.
-resp.Body = ioutil.NopCloser(bytes.NewBuffer([]byte("")))
-// set headers.
+resp := &http.Response{
+StatusCode: statusCode,
+Body: ioutil.NopCloser(bytes.NewReader(nil)),
+}
 setCommonHeaders(resp)
-// set status code.
-resp.StatusCode = statusCode
 return resp
 }
 // Decode XML error message from the http response body.
-decodeXMLError := func(resp *http.Response, t *testing.T) error {
-var errResp ErrorResponse
+decodeXMLError := func(resp *http.Response) error {
+errResp := ErrorResponse{
+StatusCode: resp.StatusCode,
+}
 err := xmlDecoder(resp.Body, &errResp)
 if err != nil {
-t.Fatal("XML decoding of response body failed")
+t.Fatalf("XML decoding of response body failed: %v", err)
 }
 return errResp
 }
@@ -134,12 +140,12 @@ func TestHttpRespToErrorResponse(t *testing.T) {
 // Used for asserting the actual response.
 expectedErrResponse := []error{
 genInvalidError("Response is empty. " + "Please report this issue at https://github.com/minio/minio-go/issues."),
-decodeXMLError(createAPIErrorResponse(APIErrors[0], "minio-bucket"), t),
+decodeXMLError(createAPIErrorResponse(APIErrors[0], "minio-bucket")),
-genErrResponse(setCommonHeaders(&http.Response{}), "NoSuchBucket", "The specified bucket does not exist.", "minio-bucket", ""),
+genErrResponse(setCommonHeaders(&http.Response{StatusCode: http.StatusNotFound}), "NoSuchBucket", "The specified bucket does not exist.", "minio-bucket", ""),
-genErrResponse(setCommonHeaders(&http.Response{}), "NoSuchKey", "The specified key does not exist.", "minio-bucket", "Asia/"),
+genErrResponse(setCommonHeaders(&http.Response{StatusCode: http.StatusNotFound}), "NoSuchKey", "The specified key does not exist.", "minio-bucket", "Asia/"),
-genErrResponse(setCommonHeaders(&http.Response{}), "AccessDenied", "Access Denied.", "minio-bucket", ""),
+genErrResponse(setCommonHeaders(&http.Response{StatusCode: http.StatusForbidden}), "AccessDenied", "Access Denied.", "minio-bucket", ""),
-genErrResponse(setCommonHeaders(&http.Response{}), "Conflict", "Bucket not empty.", "minio-bucket", ""),
+genErrResponse(setCommonHeaders(&http.Response{StatusCode: http.StatusConflict}), "Conflict", "Bucket not empty.", "minio-bucket", ""),
-genErrResponse(setCommonHeaders(&http.Response{}), "Bad Request", "Bad Request", "minio-bucket", ""),
+genErrResponse(setCommonHeaders(&http.Response{StatusCode: http.StatusBadRequest}), "Bad Request", "Bad Request", "minio-bucket", ""),
 }
 // List of http response to be used as input.
@@ -182,6 +188,7 @@ func TestHttpRespToErrorResponse(t *testing.T) {
 func TestErrEntityTooLarge(t *testing.T) {
 msg := fmt.Sprintf("Your proposed upload size %d exceeds the maximum allowed object size %d for single PUT operation.", 1000000, 99999)
 expectedResult := ErrorResponse{
+StatusCode: http.StatusBadRequest,
 Code: "EntityTooLarge",
 Message: msg,
 BucketName: "minio-bucket",
@@ -189,22 +196,23 @@ func TestErrEntityTooLarge(t *testing.T) {
 }
 actualResult := ErrEntityTooLarge(1000000, 99999, "minio-bucket", "Asia/")
 if !reflect.DeepEqual(expectedResult, actualResult) {
-t.Errorf("Expected result to be '%+v', but instead got '%+v'", expectedResult, actualResult)
+t.Errorf("Expected result to be '%#v', but instead got '%#v'", expectedResult, actualResult)
 }
 }
 // Test validates 'ErrEntityTooSmall' error response.
 func TestErrEntityTooSmall(t *testing.T) {
-msg := fmt.Sprintf("Your proposed upload size %d is below the minimum allowed object size '0B' for single PUT operation.", -1)
+msg := fmt.Sprintf("Your proposed upload size %d is below the minimum allowed object size 0B for single PUT operation.", -1)
 expectedResult := ErrorResponse{
-Code: "EntityTooLarge",
+StatusCode: http.StatusBadRequest,
+Code: "EntityTooSmall",
 Message: msg,
 BucketName: "minio-bucket",
 Key: "Asia/",
 }
 actualResult := ErrEntityTooSmall(-1, "minio-bucket", "Asia/")
 if !reflect.DeepEqual(expectedResult, actualResult) {
-t.Errorf("Expected result to be '%+v', but instead got '%+v'", expectedResult, actualResult)
+t.Errorf("Expected result to be '%#v', but instead got '%#v'", expectedResult, actualResult)
 }
 }
@@ -213,6 +221,7 @@ func TestErrUnexpectedEOF(t *testing.T) {
 msg := fmt.Sprintf("Data read %s is not equal to the size %s of the input Reader.",
 strconv.FormatInt(100, 10), strconv.FormatInt(101, 10))
 expectedResult := ErrorResponse{
+StatusCode: http.StatusBadRequest,
 Code: "UnexpectedEOF",
 Message: msg,
 BucketName: "minio-bucket",
@@ -220,46 +229,49 @@ func TestErrUnexpectedEOF(t *testing.T) {
 }
 actualResult := ErrUnexpectedEOF(100, 101, "minio-bucket", "Asia/")
 if !reflect.DeepEqual(expectedResult, actualResult) {
-t.Errorf("Expected result to be '%+v', but instead got '%+v'", expectedResult, actualResult)
+t.Errorf("Expected result to be '%#v', but instead got '%#v'", expectedResult, actualResult)
 }
 }
 // Test validates 'ErrInvalidBucketName' error response.
 func TestErrInvalidBucketName(t *testing.T) {
 expectedResult := ErrorResponse{
+StatusCode: http.StatusBadRequest,
 Code: "InvalidBucketName",
 Message: "Invalid Bucket name",
 RequestID: "minio",
 }
 actualResult := ErrInvalidBucketName("Invalid Bucket name")
 if !reflect.DeepEqual(expectedResult, actualResult) {
-t.Errorf("Expected result to be '%+v', but instead got '%+v'", expectedResult, actualResult)
+t.Errorf("Expected result to be '%#v', but instead got '%#v'", expectedResult, actualResult)
 }
 }
 // Test validates 'ErrInvalidObjectName' error response.
 func TestErrInvalidObjectName(t *testing.T) {
 expectedResult := ErrorResponse{
+StatusCode: http.StatusNotFound,
 Code: "NoSuchKey",
 Message: "Invalid Object Key",
 RequestID: "minio",
 }
 actualResult := ErrInvalidObjectName("Invalid Object Key")
 if !reflect.DeepEqual(expectedResult, actualResult) {
-t.Errorf("Expected result to be '%+v', but instead got '%+v'", expectedResult, actualResult)
+t.Errorf("Expected result to be '%#v', but instead got '%#v'", expectedResult, actualResult)
 }
 }
 // Test validates 'ErrInvalidArgument' response.
 func TestErrInvalidArgument(t *testing.T) {
 expectedResult := ErrorResponse{
+StatusCode: http.StatusBadRequest,
 Code: "InvalidArgument",
 Message: "Invalid Argument",
 RequestID: "minio",
 }
 actualResult := ErrInvalidArgument("Invalid Argument")
 if !reflect.DeepEqual(expectedResult, actualResult) {
-t.Errorf("Expected result to be '%+v', but instead got '%+v'", expectedResult, actualResult)
+t.Errorf("Expected result to be '%#v', but instead got '%#v'", expectedResult, actualResult)
 }
 }


@@ -1,8 +1,6 @@
-// +build go1.5,!go1.6,!go1.7,!go1.8
 /*
 * Minio Go Library for Amazon S3 Compatible Cloud Storage
-* (C) 2017 Minio, Inc.
+* Copyright 2017 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -19,21 +17,10 @@
 package minio
-import (
-"net/http"
-"time"
-)
-// This default transport is similar to http.DefaultTransport
-// but with additional DisableCompression:
-var defaultMinioTransport http.RoundTripper = &http.Transport{
-Proxy: http.ProxyFromEnvironment,
-TLSHandshakeTimeout: 10 * time.Second,
-// Set this value so that the underlying transport round-tripper
-// doesn't try to auto decode the body of objects with
-// content-encoding set to `gzip`.
-//
-// Refer:
-// https://golang.org/src/net/http/transport.go?h=roundTrip#L1843
-DisableCompression: true,
+import "context"
+// GetObjectWithContext - returns an seekable, readable object.
+// The options can be used to specify the GET request further.
+func (c Client) GetObjectWithContext(ctx context.Context, bucketName, objectName string, opts GetObjectOptions) (*Object, error) {
+return c.getObjectWithContext(ctx, bucketName, objectName, opts)
 }
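GetObjectWithContext, introduced above, is what lets a caller bound a download with a deadline or cancel it. A minimal sketch of that pattern, with placeholder endpoint, credentials, bucket and key:

```go
package main

import (
	"context"
	"io"
	"log"
	"os"
	"time"

	minio "github.com/minio/minio-go"
)

func main() {
	// Placeholder endpoint and credentials.
	client, err := minio.New("play.minio.io:9000", "ACCESS", "SECRET", true)
	if err != nil {
		log.Fatalln(err)
	}

	// Cancel the GET automatically if it takes longer than 30 seconds.
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	obj, err := client.GetObjectWithContext(ctx, "bucket", "key", minio.GetObjectOptions{})
	if err != nil {
		log.Fatalln(err)
	}
	defer obj.Close()

	if _, err := io.Copy(os.Stdout, obj); err != nil {
		log.Fatalln(err)
	}
}
```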


@ -1,5 +1,6 @@
/* /*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. * Minio Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2017 Minio, Inc.
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.
@ -21,11 +22,34 @@ import (
"os" "os"
"path/filepath" "path/filepath"
"github.com/minio/minio-go/pkg/encrypt"
"context"
"github.com/minio/minio-go/pkg/s3utils" "github.com/minio/minio-go/pkg/s3utils"
) )
// FGetObjectWithContext - download contents of an object to a local file.
// The options can be used to specify the GET request further.
func (c Client) FGetObjectWithContext(ctx context.Context, bucketName, objectName, filePath string, opts GetObjectOptions) error {
return c.fGetObjectWithContext(ctx, bucketName, objectName, filePath, opts)
}
// FGetObject - download contents of an object to a local file. // FGetObject - download contents of an object to a local file.
func (c Client) FGetObject(bucketName, objectName, filePath string) error { func (c Client) FGetObject(bucketName, objectName, filePath string, opts GetObjectOptions) error {
return c.fGetObjectWithContext(context.Background(), bucketName, objectName, filePath, opts)
}
// FGetEncryptedObject - Decrypt and store an object at filePath.
func (c Client) FGetEncryptedObject(bucketName, objectName, filePath string, materials encrypt.Materials) error {
if materials == nil {
return ErrInvalidArgument("Unable to recognize empty encryption properties")
}
return c.FGetObject(bucketName, objectName, filePath, GetObjectOptions{Materials: materials})
}
// fGetObjectWithContext - fgetObject wrapper function with context
func (c Client) fGetObjectWithContext(ctx context.Context, bucketName, objectName, filePath string, opts GetObjectOptions) error {
// Input validation. // Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil { if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return err return err
@ -60,7 +84,7 @@ func (c Client) FGetObject(bucketName, objectName, filePath string) error {
} }
// Gather md5sum. // Gather md5sum.
objectStat, err := c.StatObject(bucketName, objectName) objectStat, err := c.StatObject(bucketName, objectName, StatObjectOptions{opts})
if err != nil { if err != nil {
return err return err
} }
@ -82,13 +106,12 @@ func (c Client) FGetObject(bucketName, objectName, filePath string) error {
// Initialize get object request headers to set the // Initialize get object request headers to set the
// appropriate range offsets to read from. // appropriate range offsets to read from.
reqHeaders := NewGetReqHeaders()
if st.Size() > 0 { if st.Size() > 0 {
reqHeaders.SetRange(st.Size(), 0) opts.SetRange(st.Size(), 0)
} }
// Seek to current position for incoming reader. // Seek to current position for incoming reader.
objectReader, objectStat, err := c.getObject(bucketName, objectName, reqHeaders) objectReader, objectStat, err := c.getObject(ctx, bucketName, objectName, opts)
if err != nil { if err != nil {
return err return err
} }
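A short sketch of the new context-aware file download (illustrative only; client is an assumed, previously constructed *minio.Client, and the bucket, object and target path are placeholders):

// downloadToFile mirrors FGetObjectWithContext: the GET is cancelled if ctx
// expires before the download completes.
func downloadToFile(ctx context.Context, client *minio.Client) error {
	opts := minio.GetObjectOptions{}
	// Optionally request only part of the object, e.g. the first 1 MiB:
	// opts.SetRange(0, 1<<20-1)
	return client.FGetObjectWithContext(ctx, "my-bucket", "my-object", "/tmp/my-object", opts)
}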

View File

@ -1,5 +1,6 @@
/* /*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016, 2017 Minio, Inc. * Minio Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2017 Minio, Inc.
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.
@ -17,6 +18,7 @@
package minio package minio
import ( import (
"context"
"errors" "errors"
"fmt" "fmt"
"io" "io"
@ -36,27 +38,16 @@ func (c Client) GetEncryptedObject(bucketName, objectName string, encryptMateria
return nil, ErrInvalidArgument("Unable to recognize empty encryption properties") return nil, ErrInvalidArgument("Unable to recognize empty encryption properties")
} }
// Fetch encrypted object return c.GetObject(bucketName, objectName, GetObjectOptions{Materials: encryptMaterials})
encReader, err := c.GetObject(bucketName, objectName)
if err != nil {
return nil, err
}
// Stat object to get its encryption metadata
st, err := encReader.Stat()
if err != nil {
return nil, err
}
// Setup object for decrytion, object is transparently
// decrypted as the consumer starts reading.
encryptMaterials.SetupDecryptMode(encReader, st.Metadata.Get(amzHeaderIV), st.Metadata.Get(amzHeaderKey))
// Success.
return encryptMaterials, nil
} }
// GetObject - returns an seekable, readable object. // GetObject - returns an seekable, readable object.
func (c Client) GetObject(bucketName, objectName string) (*Object, error) { func (c Client) GetObject(bucketName, objectName string, opts GetObjectOptions) (*Object, error) {
return c.getObjectWithContext(context.Background(), bucketName, objectName, opts)
}
// GetObject wrapper function that accepts a request context
func (c Client) getObjectWithContext(ctx context.Context, bucketName, objectName string, opts GetObjectOptions) (*Object, error) {
// Input validation. // Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil { if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return nil, err return nil, err
@ -102,34 +93,26 @@ func (c Client) GetObject(bucketName, objectName string) (*Object, error) {
if req.isFirstReq { if req.isFirstReq {
// First request is a Read/ReadAt. // First request is a Read/ReadAt.
if req.isReadOp { if req.isReadOp {
reqHeaders := NewGetReqHeaders()
// Differentiate between wanting the whole object and just a range. // Differentiate between wanting the whole object and just a range.
if req.isReadAt { if req.isReadAt {
// If this is a ReadAt request only get the specified range. // If this is a ReadAt request only get the specified range.
// Range is set with respect to the offset and length of the buffer requested. // Range is set with respect to the offset and length of the buffer requested.
// Do not set objectInfo from the first readAt request because it will not get // Do not set objectInfo from the first readAt request because it will not get
// the whole object. // the whole object.
reqHeaders.SetRange(req.Offset, req.Offset+int64(len(req.Buffer))-1) opts.SetRange(req.Offset, req.Offset+int64(len(req.Buffer))-1)
httpReader, objectInfo, err = c.getObject(bucketName, objectName, reqHeaders) } else if req.Offset > 0 {
} else { opts.SetRange(req.Offset, 0)
if req.Offset > 0 {
reqHeaders.SetRange(req.Offset, 0)
}
// First request is a Read request.
httpReader, objectInfo, err = c.getObject(bucketName, objectName, reqHeaders)
} }
httpReader, objectInfo, err = c.getObject(ctx, bucketName, objectName, opts)
if err != nil { if err != nil {
resCh <- getResponse{ resCh <- getResponse{Error: err}
Error: err,
}
return return
} }
etag = objectInfo.ETag etag = objectInfo.ETag
// Read at least firstReq.Buffer bytes, if not we have // Read at least firstReq.Buffer bytes, if not we have
// reached our EOF. // reached our EOF.
size, err := io.ReadFull(httpReader, req.Buffer) size, err := io.ReadFull(httpReader, req.Buffer)
if err == io.ErrUnexpectedEOF { if size > 0 && err == io.ErrUnexpectedEOF {
// If an EOF happens after reading some but not // If an EOF happens after reading some but not
// all the bytes ReadFull returns ErrUnexpectedEOF // all the bytes ReadFull returns ErrUnexpectedEOF
err = io.EOF err = io.EOF
@ -144,7 +127,7 @@ func (c Client) GetObject(bucketName, objectName string) (*Object, error) {
} else { } else {
// First request is a Stat or Seek call. // First request is a Stat or Seek call.
// Only need to run a StatObject until an actual Read or ReadAt request comes through. // Only need to run a StatObject until an actual Read or ReadAt request comes through.
objectInfo, err = c.StatObject(bucketName, objectName) objectInfo, err = c.statObject(bucketName, objectName, StatObjectOptions{opts})
if err != nil { if err != nil {
resCh <- getResponse{ resCh <- getResponse{
Error: err, Error: err,
@ -159,11 +142,10 @@ func (c Client) GetObject(bucketName, objectName string) (*Object, error) {
} }
} }
} else if req.settingObjectInfo { // Request is just to get objectInfo. } else if req.settingObjectInfo { // Request is just to get objectInfo.
reqHeaders := NewGetReqHeaders()
if etag != "" { if etag != "" {
reqHeaders.SetMatchETag(etag) opts.SetMatchETag(etag)
} }
objectInfo, err := c.statObject(bucketName, objectName, reqHeaders) objectInfo, err := c.statObject(bucketName, objectName, StatObjectOptions{opts})
if err != nil { if err != nil {
resCh <- getResponse{ resCh <- getResponse{
Error: err, Error: err,
@ -183,9 +165,8 @@ func (c Client) GetObject(bucketName, objectName string) (*Object, error) {
// new ones when they haven't been already. // new ones when they haven't been already.
// All readAt requests are new requests. // All readAt requests are new requests.
if req.DidOffsetChange || !req.beenRead { if req.DidOffsetChange || !req.beenRead {
reqHeaders := NewGetReqHeaders()
if etag != "" { if etag != "" {
reqHeaders.SetMatchETag(etag) opts.SetMatchETag(etag)
} }
if httpReader != nil { if httpReader != nil {
// Close previously opened http reader. // Close previously opened http reader.
@ -194,16 +175,11 @@ func (c Client) GetObject(bucketName, objectName string) (*Object, error) {
// If this request is a readAt only get the specified range. // If this request is a readAt only get the specified range.
if req.isReadAt { if req.isReadAt {
// Range is set with respect to the offset and length of the buffer requested. // Range is set with respect to the offset and length of the buffer requested.
reqHeaders.SetRange(req.Offset, req.Offset+int64(len(req.Buffer))-1) opts.SetRange(req.Offset, req.Offset+int64(len(req.Buffer))-1)
httpReader, _, err = c.getObject(bucketName, objectName, reqHeaders) } else if req.Offset > 0 { // Range is set with respect to the offset.
} else { opts.SetRange(req.Offset, 0)
// Range is set with respect to the offset.
if req.Offset > 0 {
reqHeaders.SetRange(req.Offset, 0)
}
httpReader, objectInfo, err = c.getObject(bucketName, objectName, reqHeaders)
} }
httpReader, objectInfo, err = c.getObject(ctx, bucketName, objectName, opts)
if err != nil { if err != nil {
resCh <- getResponse{ resCh <- getResponse{
Error: err, Error: err,
@ -626,7 +602,7 @@ func newObject(reqCh chan<- getRequest, resCh <-chan getResponse, doneCh chan<-
// //
// For more information about the HTTP Range header. // For more information about the HTTP Range header.
// go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35. // go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35.
func (c Client) getObject(bucketName, objectName string, reqHeaders RequestHeaders) (io.ReadCloser, ObjectInfo, error) { func (c Client) getObject(ctx context.Context, bucketName, objectName string, opts GetObjectOptions) (io.ReadCloser, ObjectInfo, error) {
// Validate input arguments. // Validate input arguments.
if err := s3utils.CheckValidBucketName(bucketName); err != nil { if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return nil, ObjectInfo{}, err return nil, ObjectInfo{}, err
@ -635,18 +611,12 @@ func (c Client) getObject(bucketName, objectName string, reqHeaders RequestHeade
return nil, ObjectInfo{}, err return nil, ObjectInfo{}, err
} }
// Set all the necessary reqHeaders.
customHeader := make(http.Header)
for key, value := range reqHeaders.Header {
customHeader[key] = value
}
// Execute GET on objectName. // Execute GET on objectName.
resp, err := c.executeMethod("GET", requestMetadata{ resp, err := c.executeMethod(ctx, "GET", requestMetadata{
bucketName: bucketName, bucketName: bucketName,
objectName: objectName, objectName: objectName,
customHeader: customHeader, customHeader: opts.Header(),
contentSHA256Bytes: emptySHA256, contentSHA256Hex: emptySHA256Hex,
}) })
if err != nil { if err != nil {
return nil, ObjectInfo{}, err return nil, ObjectInfo{}, err
@ -692,6 +662,15 @@ func (c Client) getObject(bucketName, objectName string, reqHeaders RequestHeade
Metadata: extractObjMetadata(resp.Header), Metadata: extractObjMetadata(resp.Header),
} }
// do not close body here, caller will close reader := resp.Body
return resp.Body, objectStat, nil if opts.Materials != nil {
err = opts.Materials.SetupDecryptMode(reader, objectStat.Metadata.Get(amzHeaderIV), objectStat.Metadata.Get(amzHeaderKey))
if err != nil {
return nil, ObjectInfo{}, err
}
reader = opts.Materials
}
// do not close body here, caller will close
return reader, objectStat, nil
} }
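To illustrate the reworked GetObject signature, a usage sketch follows (assumptions: an existing client *minio.Client, the io, io/ioutil and log imports, and placeholder names):

// fetchObject retrieves an object and its metadata, passing the new
// GetObjectOptions argument in place of the removed RequestHeaders type.
func fetchObject(client *minio.Client) error {
	obj, err := client.GetObject("my-bucket", "my-object", minio.GetObjectOptions{})
	if err != nil {
		return err
	}
	defer obj.Close()

	info, err := obj.Stat() // served by the StatObjectOptions path shown above
	if err != nil {
		return err
	}
	log.Printf("downloading %d bytes (etag %s)", info.Size, info.ETag)

	_, err = io.Copy(ioutil.Discard, obj)
	return err
}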

View File

@ -1,5 +1,6 @@
/* /*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2016-17 Minio, Inc. * Minio Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2017 Minio, Inc.
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.
@ -20,80 +21,94 @@ import (
"fmt" "fmt"
"net/http" "net/http"
"time" "time"
"github.com/minio/minio-go/pkg/encrypt"
) )
// RequestHeaders - implement methods for setting special // GetObjectOptions are used to specify additional headers or options
// request headers for GET, HEAD object operations. // during GET requests.
// http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectGET.html type GetObjectOptions struct {
type RequestHeaders struct { headers map[string]string
http.Header
Materials encrypt.Materials
} }
// NewGetReqHeaders - initializes a new request headers for GET request. // StatObjectOptions are used to specify additional headers or options
func NewGetReqHeaders() RequestHeaders { // during GET info/stat requests.
return RequestHeaders{ type StatObjectOptions struct {
Header: make(http.Header), GetObjectOptions
}
} }
// NewHeadReqHeaders - initializes a new request headers for HEAD request. // Header returns the http.Header representation of the GET options.
func NewHeadReqHeaders() RequestHeaders { func (o GetObjectOptions) Header() http.Header {
return RequestHeaders{ headers := make(http.Header, len(o.headers))
Header: make(http.Header), for k, v := range o.headers {
headers.Set(k, v)
} }
return headers
}
// Set adds a key value pair to the options. The
// key-value pair will be part of the HTTP GET request
// headers.
func (o *GetObjectOptions) Set(key, value string) {
if o.headers == nil {
o.headers = make(map[string]string)
}
o.headers[http.CanonicalHeaderKey(key)] = value
} }
// SetMatchETag - set match etag. // SetMatchETag - set match etag.
func (c RequestHeaders) SetMatchETag(etag string) error { func (o *GetObjectOptions) SetMatchETag(etag string) error {
if etag == "" { if etag == "" {
return ErrInvalidArgument("ETag cannot be empty.") return ErrInvalidArgument("ETag cannot be empty.")
} }
c.Set("If-Match", "\""+etag+"\"") o.Set("If-Match", "\""+etag+"\"")
return nil return nil
} }
// SetMatchETagExcept - set match etag except. // SetMatchETagExcept - set match etag except.
func (c RequestHeaders) SetMatchETagExcept(etag string) error { func (o *GetObjectOptions) SetMatchETagExcept(etag string) error {
if etag == "" { if etag == "" {
return ErrInvalidArgument("ETag cannot be empty.") return ErrInvalidArgument("ETag cannot be empty.")
} }
c.Set("If-None-Match", "\""+etag+"\"") o.Set("If-None-Match", "\""+etag+"\"")
return nil return nil
} }
// SetUnmodified - set unmodified time since. // SetUnmodified - set unmodified time since.
func (c RequestHeaders) SetUnmodified(modTime time.Time) error { func (o *GetObjectOptions) SetUnmodified(modTime time.Time) error {
if modTime.IsZero() { if modTime.IsZero() {
return ErrInvalidArgument("Modified since cannot be empty.") return ErrInvalidArgument("Modified since cannot be empty.")
} }
c.Set("If-Unmodified-Since", modTime.Format(http.TimeFormat)) o.Set("If-Unmodified-Since", modTime.Format(http.TimeFormat))
return nil return nil
} }
// SetModified - set modified time since. // SetModified - set modified time since.
func (c RequestHeaders) SetModified(modTime time.Time) error { func (o *GetObjectOptions) SetModified(modTime time.Time) error {
if modTime.IsZero() { if modTime.IsZero() {
return ErrInvalidArgument("Modified since cannot be empty.") return ErrInvalidArgument("Modified since cannot be empty.")
} }
c.Set("If-Modified-Since", modTime.Format(http.TimeFormat)) o.Set("If-Modified-Since", modTime.Format(http.TimeFormat))
return nil return nil
} }
// SetRange - set the start and end offset of the object to be read. // SetRange - set the start and end offset of the object to be read.
// See https://tools.ietf.org/html/rfc7233#section-3.1 for reference. // See https://tools.ietf.org/html/rfc7233#section-3.1 for reference.
func (c RequestHeaders) SetRange(start, end int64) error { func (o *GetObjectOptions) SetRange(start, end int64) error {
switch { switch {
case start == 0 && end < 0: case start == 0 && end < 0:
// Read last '-end' bytes. `bytes=-N`. // Read last '-end' bytes. `bytes=-N`.
c.Set("Range", fmt.Sprintf("bytes=%d", end)) o.Set("Range", fmt.Sprintf("bytes=%d", end))
case 0 < start && end == 0: case 0 < start && end == 0:
// Read everything starting from offset // Read everything starting from offset
// 'start'. `bytes=N-`. // 'start'. `bytes=N-`.
c.Set("Range", fmt.Sprintf("bytes=%d-", start)) o.Set("Range", fmt.Sprintf("bytes=%d-", start))
case 0 <= start && start <= end: case 0 <= start && start <= end:
// Read everything starting at 'start' till the // Read everything starting at 'start' till the
// 'end'. `bytes=N-M` // 'end'. `bytes=N-M`
c.Set("Range", fmt.Sprintf("bytes=%d-%d", start, end)) o.Set("Range", fmt.Sprintf("bytes=%d-%d", start, end))
default: default:
// All other cases such as // All other cases such as
// bytes=-3- // bytes=-3-
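A sketch of how the new option types compose for metadata requests (assumptions not fully visible in this hunk: a StatObject method that accepts StatObjectOptions, plus a placeholder extra header and names):

// statIfUnchanged sends a conditional HEAD: StatObjectOptions embeds
// GetObjectOptions, so the same setters apply to both GET and HEAD requests.
func statIfUnchanged(client *minio.Client, etag string) (minio.ObjectInfo, error) {
	opts := minio.GetObjectOptions{}
	opts.Set("X-Amz-Request-Payer", "requester") // arbitrary extra header, shown only to demonstrate Set
	if err := opts.SetMatchETag(etag); err != nil {
		return minio.ObjectInfo{}, err
	}
	return client.StatObject("my-bucket", "my-object", minio.StatObjectOptions{GetObjectOptions: opts})
}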

View File

@ -1,5 +1,6 @@
/* /*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc. * Minio Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2017 Minio, Inc.
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.
@ -17,6 +18,7 @@
package minio package minio
import ( import (
"context"
"encoding/json" "encoding/json"
"io/ioutil" "io/ioutil"
"net/http" "net/http"
@ -79,10 +81,10 @@ func (c Client) getBucketPolicy(bucketName string) (policy.BucketAccessPolicy, e
urlValues.Set("policy", "") urlValues.Set("policy", "")
// Execute GET on bucket to list objects. // Execute GET on bucket to list objects.
resp, err := c.executeMethod("GET", requestMetadata{ resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{
bucketName: bucketName, bucketName: bucketName,
queryValues: urlValues, queryValues: urlValues,
contentSHA256Bytes: emptySHA256, contentSHA256Hex: emptySHA256Hex,
}) })
defer closeResponse(resp) defer closeResponse(resp)

View File

@ -1,5 +1,6 @@
/* /*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc. * Minio Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2017 Minio, Inc.
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.
@ -17,6 +18,7 @@
package minio package minio
import ( import (
"context"
"errors" "errors"
"fmt" "fmt"
"net/http" "net/http"
@ -38,7 +40,7 @@ import (
// //
func (c Client) ListBuckets() ([]BucketInfo, error) { func (c Client) ListBuckets() ([]BucketInfo, error) {
// Execute GET on service. // Execute GET on service.
resp, err := c.executeMethod("GET", requestMetadata{contentSHA256Bytes: emptySHA256}) resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{contentSHA256Hex: emptySHA256Hex})
defer closeResponse(resp) defer closeResponse(resp)
if err != nil { if err != nil {
return nil, err return nil, err
@ -215,10 +217,10 @@ func (c Client) listObjectsV2Query(bucketName, objectPrefix, continuationToken s
urlValues.Set("max-keys", fmt.Sprintf("%d", maxkeys)) urlValues.Set("max-keys", fmt.Sprintf("%d", maxkeys))
// Execute GET on bucket to list objects. // Execute GET on bucket to list objects.
resp, err := c.executeMethod("GET", requestMetadata{ resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{
bucketName: bucketName, bucketName: bucketName,
queryValues: urlValues, queryValues: urlValues,
contentSHA256Bytes: emptySHA256, contentSHA256Hex: emptySHA256Hex,
}) })
defer closeResponse(resp) defer closeResponse(resp)
if err != nil { if err != nil {
@ -393,10 +395,10 @@ func (c Client) listObjectsQuery(bucketName, objectPrefix, objectMarker, delimit
urlValues.Set("max-keys", fmt.Sprintf("%d", maxkeys)) urlValues.Set("max-keys", fmt.Sprintf("%d", maxkeys))
// Execute GET on bucket to list objects. // Execute GET on bucket to list objects.
resp, err := c.executeMethod("GET", requestMetadata{ resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{
bucketName: bucketName, bucketName: bucketName,
queryValues: urlValues, queryValues: urlValues,
contentSHA256Bytes: emptySHA256, contentSHA256Hex: emptySHA256Hex,
}) })
defer closeResponse(resp) defer closeResponse(resp)
if err != nil { if err != nil {
@ -572,10 +574,10 @@ func (c Client) listMultipartUploadsQuery(bucketName, keyMarker, uploadIDMarker,
urlValues.Set("max-uploads", fmt.Sprintf("%d", maxUploads)) urlValues.Set("max-uploads", fmt.Sprintf("%d", maxUploads))
// Execute GET on bucketName to list multipart uploads. // Execute GET on bucketName to list multipart uploads.
resp, err := c.executeMethod("GET", requestMetadata{ resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{
bucketName: bucketName, bucketName: bucketName,
queryValues: urlValues, queryValues: urlValues,
contentSHA256Bytes: emptySHA256, contentSHA256Hex: emptySHA256Hex,
}) })
defer closeResponse(resp) defer closeResponse(resp)
if err != nil { if err != nil {
@ -690,11 +692,11 @@ func (c Client) listObjectPartsQuery(bucketName, objectName, uploadID string, pa
urlValues.Set("max-parts", fmt.Sprintf("%d", maxParts)) urlValues.Set("max-parts", fmt.Sprintf("%d", maxParts))
// Execute GET on objectName to get list of parts. // Execute GET on objectName to get list of parts.
resp, err := c.executeMethod("GET", requestMetadata{ resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{
bucketName: bucketName, bucketName: bucketName,
objectName: objectName, objectName: objectName,
queryValues: urlValues, queryValues: urlValues,
contentSHA256Bytes: emptySHA256, contentSHA256Hex: emptySHA256Hex,
}) })
defer closeResponse(resp) defer closeResponse(resp)
if err != nil { if err != nil {

View File

@ -1,5 +1,6 @@
/* /*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2016 Minio, Inc. * Minio Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2017 Minio, Inc.
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.
@ -18,6 +19,7 @@ package minio
import ( import (
"bufio" "bufio"
"context"
"encoding/json" "encoding/json"
"io" "io"
"net/http" "net/http"
@ -46,10 +48,10 @@ func (c Client) getBucketNotification(bucketName string) (BucketNotification, er
urlValues.Set("notification", "") urlValues.Set("notification", "")
// Execute GET on bucket to list objects. // Execute GET on bucket to list objects.
resp, err := c.executeMethod("GET", requestMetadata{ resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{
bucketName: bucketName, bucketName: bucketName,
queryValues: urlValues, queryValues: urlValues,
contentSHA256Bytes: emptySHA256, contentSHA256Hex: emptySHA256Hex,
}) })
defer closeResponse(resp) defer closeResponse(resp)
@ -150,7 +152,7 @@ func (c Client) ListenBucketNotification(bucketName, prefix, suffix string, even
// Check ARN partition to verify if listening bucket is supported // Check ARN partition to verify if listening bucket is supported
if s3utils.IsAmazonEndpoint(c.endpointURL) || s3utils.IsGoogleEndpoint(c.endpointURL) { if s3utils.IsAmazonEndpoint(c.endpointURL) || s3utils.IsGoogleEndpoint(c.endpointURL) {
notificationInfoCh <- NotificationInfo{ notificationInfoCh <- NotificationInfo{
Err: ErrAPINotSupported("Listening bucket notification is specific only to `minio` partitions"), Err: ErrAPINotSupported("Listening for bucket notification is specific only to `minio` server endpoints"),
} }
return return
} }
@ -170,13 +172,16 @@ func (c Client) ListenBucketNotification(bucketName, prefix, suffix string, even
urlValues["events"] = events urlValues["events"] = events
// Execute GET on bucket to list objects. // Execute GET on bucket to list objects.
resp, err := c.executeMethod("GET", requestMetadata{ resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{
bucketName: bucketName, bucketName: bucketName,
queryValues: urlValues, queryValues: urlValues,
contentSHA256Bytes: emptySHA256, contentSHA256Hex: emptySHA256Hex,
}) })
if err != nil { if err != nil {
continue notificationInfoCh <- NotificationInfo{
Err: err,
}
return
} }
// Validate http response, upon error return quickly. // Validate http response, upon error return quickly.
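A consumption sketch for the notification change above (assumptions: the usual five-argument ListenBucketNotification signature, whose declaration is only partially visible here, plus placeholder bucket name and event filters):

// watchBucket drains the notification channel; with this change a failed HTTP
// request surfaces as NotificationInfo.Err instead of being retried silently.
func watchBucket(client *minio.Client) {
	doneCh := make(chan struct{})
	defer close(doneCh)

	events := []string{"s3:ObjectCreated:*", "s3:ObjectRemoved:*"}
	for info := range client.ListenBucketNotification("my-bucket", "", "", events, doneCh) {
		if info.Err != nil {
			log.Println("notification stream ended:", info.Err)
			return
		}
		for _, record := range info.Records {
			log.Println("event:", record.EventName)
		}
	}
}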

View File

@ -1,5 +1,6 @@
/* /*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc. * Minio Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2017 Minio, Inc.
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.
@ -18,6 +19,7 @@ package minio
import ( import (
"errors" "errors"
"net/http"
"net/url" "net/url"
"time" "time"
@ -25,16 +27,6 @@ import (
"github.com/minio/minio-go/pkg/s3utils" "github.com/minio/minio-go/pkg/s3utils"
) )
// supportedGetReqParams - supported request parameters for GET presigned request.
var supportedGetReqParams = map[string]struct{}{
"response-expires": {},
"response-content-type": {},
"response-cache-control": {},
"response-content-language": {},
"response-content-encoding": {},
"response-content-disposition": {},
}
// presignURL - Returns a presigned URL for an input 'method'. // presignURL - Returns a presigned URL for an input 'method'.
// Expires maximum is 7days - ie. 604800 and minimum is 1. // Expires maximum is 7days - ie. 604800 and minimum is 1.
func (c Client) presignURL(method string, bucketName string, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) { func (c Client) presignURL(method string, bucketName string, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) {
@ -42,13 +34,10 @@ func (c Client) presignURL(method string, bucketName string, objectName string,
if method == "" { if method == "" {
return nil, ErrInvalidArgument("method cannot be empty.") return nil, ErrInvalidArgument("method cannot be empty.")
} }
if err := s3utils.CheckValidBucketName(bucketName); err != nil { if err = s3utils.CheckValidBucketName(bucketName); err != nil {
return nil, err return nil, err
} }
if err := s3utils.CheckValidObjectName(objectName); err != nil { if err = isValidExpiry(expires); err != nil {
return nil, err
}
if err := isValidExpiry(expires); err != nil {
return nil, err return nil, err
} }
@ -59,25 +48,13 @@ func (c Client) presignURL(method string, bucketName string, objectName string,
bucketName: bucketName, bucketName: bucketName,
objectName: objectName, objectName: objectName,
expires: expireSeconds, expires: expireSeconds,
} queryValues: reqParams,
// For "GET" we are handling additional request parameters to
// override its response headers.
if method == "GET" {
// Verify if input map has unsupported params, if yes exit.
for k := range reqParams {
if _, ok := supportedGetReqParams[k]; !ok {
return nil, ErrInvalidArgument(k + " unsupported request parameter for presigned GET.")
}
}
// Save the request parameters to be used in presigning for GET request.
reqMetadata.queryValues = reqParams
} }
// Instantiate a new request. // Instantiate a new request.
// Since expires is set newRequest will presign the request. // Since expires is set newRequest will presign the request.
req, err := c.newRequest(method, reqMetadata) var req *http.Request
if err != nil { if req, err = c.newRequest(method, reqMetadata); err != nil {
return nil, err return nil, err
} }
return req.URL, nil return req.URL, nil
@ -88,6 +65,9 @@ func (c Client) presignURL(method string, bucketName string, objectName string,
// upto 7days or a minimum of 1sec. Additionally you can override // upto 7days or a minimum of 1sec. Additionally you can override
// a set of response headers using the query parameters. // a set of response headers using the query parameters.
func (c Client) PresignedGetObject(bucketName string, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) { func (c Client) PresignedGetObject(bucketName string, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) {
if err = s3utils.CheckValidObjectName(objectName); err != nil {
return nil, err
}
return c.presignURL("GET", bucketName, objectName, expires, reqParams) return c.presignURL("GET", bucketName, objectName, expires, reqParams)
} }
@ -96,6 +76,9 @@ func (c Client) PresignedGetObject(bucketName string, objectName string, expires
// upto 7days or a minimum of 1sec. Additionally you can override // upto 7days or a minimum of 1sec. Additionally you can override
// a set of response headers using the query parameters. // a set of response headers using the query parameters.
func (c Client) PresignedHeadObject(bucketName string, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) { func (c Client) PresignedHeadObject(bucketName string, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) {
if err = s3utils.CheckValidObjectName(objectName); err != nil {
return nil, err
}
return c.presignURL("HEAD", bucketName, objectName, expires, reqParams) return c.presignURL("HEAD", bucketName, objectName, expires, reqParams)
} }
@ -103,6 +86,9 @@ func (c Client) PresignedHeadObject(bucketName string, objectName string, expire
// without credentials. URL can have a maximum expiry of upto 7days // without credentials. URL can have a maximum expiry of upto 7days
// or a minimum of 1sec. // or a minimum of 1sec.
func (c Client) PresignedPutObject(bucketName string, objectName string, expires time.Duration) (u *url.URL, err error) { func (c Client) PresignedPutObject(bucketName string, objectName string, expires time.Duration) (u *url.URL, err error) {
if err = s3utils.CheckValidObjectName(objectName); err != nil {
return nil, err
}
return c.presignURL("PUT", bucketName, objectName, expires, nil) return c.presignURL("PUT", bucketName, objectName, expires, nil)
} }
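A sketch of the presign API after the unsupported-parameter check was dropped (placeholder bucket and object names; the reqParams key is a standard S3 response-header override):

// presignDownload returns a time-limited GET URL; response header overrides
// are still passed as query parameters, now forwarded to presignURL unchecked.
func presignDownload(client *minio.Client) (*url.URL, error) {
	reqParams := make(url.Values)
	reqParams.Set("response-content-disposition", `attachment; filename="report.pdf"`)
	return client.PresignedGetObject("my-bucket", "report.pdf", 15*time.Minute, reqParams)
}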

View File

@ -1,6 +1,6 @@
/* /*
* Minio Go Library for Amazon S3 Compatible Cloud Storage * Minio Go Library for Amazon S3 Compatible Cloud Storage
* (C) 2015, 2016, 2017 Minio, Inc. * Copyright 2015-2017 Minio, Inc.
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.
@ -19,6 +19,7 @@ package minio
import ( import (
"bytes" "bytes"
"context"
"encoding/json" "encoding/json"
"encoding/xml" "encoding/xml"
"fmt" "fmt"
@ -75,14 +76,14 @@ func (c Client) MakeBucket(bucketName string, location string) (err error) {
if err != nil { if err != nil {
return err return err
} }
reqMetadata.contentMD5Bytes = sumMD5(createBucketConfigBytes) reqMetadata.contentMD5Base64 = sumMD5Base64(createBucketConfigBytes)
reqMetadata.contentSHA256Bytes = sum256(createBucketConfigBytes) reqMetadata.contentSHA256Hex = sum256Hex(createBucketConfigBytes)
reqMetadata.contentBody = bytes.NewReader(createBucketConfigBytes) reqMetadata.contentBody = bytes.NewReader(createBucketConfigBytes)
reqMetadata.contentLength = int64(len(createBucketConfigBytes)) reqMetadata.contentLength = int64(len(createBucketConfigBytes))
} }
// Execute PUT to create a new bucket. // Execute PUT to create a new bucket.
resp, err := c.executeMethod("PUT", reqMetadata) resp, err := c.executeMethod(context.Background(), "PUT", reqMetadata)
defer closeResponse(resp) defer closeResponse(resp)
if err != nil { if err != nil {
return err return err
@ -165,12 +166,12 @@ func (c Client) putBucketPolicy(bucketName string, policyInfo policy.BucketAcces
queryValues: urlValues, queryValues: urlValues,
contentBody: policyBuffer, contentBody: policyBuffer,
contentLength: int64(len(policyBytes)), contentLength: int64(len(policyBytes)),
contentMD5Bytes: sumMD5(policyBytes), contentMD5Base64: sumMD5Base64(policyBytes),
contentSHA256Bytes: sum256(policyBytes), contentSHA256Hex: sum256Hex(policyBytes),
} }
// Execute PUT to upload a new bucket policy. // Execute PUT to upload a new bucket policy.
resp, err := c.executeMethod("PUT", reqMetadata) resp, err := c.executeMethod(context.Background(), "PUT", reqMetadata)
defer closeResponse(resp) defer closeResponse(resp)
if err != nil { if err != nil {
return err return err
@ -195,10 +196,10 @@ func (c Client) removeBucketPolicy(bucketName string) error {
urlValues.Set("policy", "") urlValues.Set("policy", "")
// Execute DELETE on objectName. // Execute DELETE on objectName.
resp, err := c.executeMethod("DELETE", requestMetadata{ resp, err := c.executeMethod(context.Background(), "DELETE", requestMetadata{
bucketName: bucketName, bucketName: bucketName,
queryValues: urlValues, queryValues: urlValues,
contentSHA256Bytes: emptySHA256, contentSHA256Hex: emptySHA256Hex,
}) })
defer closeResponse(resp) defer closeResponse(resp)
if err != nil { if err != nil {
@ -230,12 +231,12 @@ func (c Client) SetBucketNotification(bucketName string, bucketNotification Buck
queryValues: urlValues, queryValues: urlValues,
contentBody: notifBuffer, contentBody: notifBuffer,
contentLength: int64(len(notifBytes)), contentLength: int64(len(notifBytes)),
contentMD5Bytes: sumMD5(notifBytes), contentMD5Base64: sumMD5Base64(notifBytes),
contentSHA256Bytes: sum256(notifBytes), contentSHA256Hex: sum256Hex(notifBytes),
} }
// Execute PUT to upload a new bucket notification. // Execute PUT to upload a new bucket notification.
resp, err := c.executeMethod("PUT", reqMetadata) resp, err := c.executeMethod(context.Background(), "PUT", reqMetadata)
defer closeResponse(resp) defer closeResponse(resp)
if err != nil { if err != nil {
return err return err

View File

@ -1,5 +1,6 @@
/* /*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. * Minio Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2017 Minio, Inc.
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.
@ -17,6 +18,7 @@
package minio package minio
import ( import (
"context"
"io" "io"
"math" "math"
"os" "os"
@ -24,12 +26,6 @@ import (
"github.com/minio/minio-go/pkg/s3utils" "github.com/minio/minio-go/pkg/s3utils"
) )
// Verify if reader is *os.File
func isFile(reader io.Reader) (ok bool) {
_, ok = reader.(*os.File)
return
}
// Verify if reader is *minio.Object // Verify if reader is *minio.Object
func isObject(reader io.Reader) (ok bool) { func isObject(reader io.Reader) (ok bool) {
_, ok = reader.(*Object) _, ok = reader.(*Object)
@ -39,6 +35,26 @@ func isObject(reader io.Reader) (ok bool) {
// Verify if reader is a generic ReaderAt // Verify if reader is a generic ReaderAt
func isReadAt(reader io.Reader) (ok bool) { func isReadAt(reader io.Reader) (ok bool) {
_, ok = reader.(io.ReaderAt) _, ok = reader.(io.ReaderAt)
if ok {
var v *os.File
v, ok = reader.(*os.File)
if ok {
// Stdin, Stdout and Stderr all have *os.File type
// which happen to also be io.ReaderAt compatible
// we need to add special conditions for them to
// be ignored by this function.
for _, f := range []string{
"/dev/stdin",
"/dev/stdout",
"/dev/stderr",
} {
if f == v.Name() {
ok = false
break
}
}
}
}
return return
} }
@ -77,7 +93,7 @@ func optimalPartInfo(objectSize int64) (totalPartsCount int, partSize int64, las
// getUploadID - fetch upload id if already present for an object name // getUploadID - fetch upload id if already present for an object name
// or initiate a new request to fetch a new upload id. // or initiate a new request to fetch a new upload id.
func (c Client) newUploadID(bucketName, objectName string, metaData map[string][]string) (uploadID string, err error) { func (c Client) newUploadID(ctx context.Context, bucketName, objectName string, opts PutObjectOptions) (uploadID string, err error) {
// Input validation. // Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil { if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return "", err return "", err
@ -87,7 +103,7 @@ func (c Client) newUploadID(bucketName, objectName string, metaData map[string][
} }
// Initiate multipart upload for an object. // Initiate multipart upload for an object.
initMultipartUploadResult, err := c.initiateMultipartUpload(bucketName, objectName, metaData) initMultipartUploadResult, err := c.initiateMultipartUpload(ctx, bucketName, objectName, opts)
if err != nil { if err != nil {
return "", err return "", err
} }

View File

@ -0,0 +1,39 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package minio
import (
"context"
"io"
)
// PutObjectWithContext - Identical to PutObject call, but accepts context to facilitate request cancellation.
func (c Client) PutObjectWithContext(ctx context.Context, bucketName, objectName string, reader io.Reader, objectSize int64,
opts PutObjectOptions) (n int64, err error) {
err = opts.validate()
if err != nil {
return 0, err
}
if opts.EncryptMaterials != nil {
if err = opts.EncryptMaterials.SetupEncryptMode(reader); err != nil {
return 0, err
}
return c.putObjectMultipartStreamNoLength(ctx, bucketName, objectName, opts.EncryptMaterials, opts)
}
return c.putObjectCommon(ctx, bucketName, objectName, reader, objectSize, opts)
}
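A sketch of an upload through the new entry point (assumptions: an existing client, the bytes import, and placeholder names; passing -1 as the size would instead select the unknown-length multipart path used for the encrypted case above):

// uploadWithDeadline streams a buffer with PutObjectOptions replacing the old
// metadata map and progress reader arguments.
func uploadWithDeadline(ctx context.Context, client *minio.Client, payload []byte) (int64, error) {
	opts := minio.PutObjectOptions{
		ContentType:  "application/octet-stream",
		UserMetadata: map[string]string{"origin": "example"}, // surfaced as user metadata on the object
	}
	return client.PutObjectWithContext(ctx, "my-bucket", "my-object",
		bytes.NewReader(payload), int64(len(payload)), opts)
}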

View File

@ -1,5 +1,6 @@
/* /*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2016 Minio, Inc. * Minio Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2017 Minio, Inc.
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.

View File

@ -1,5 +1,6 @@
/* /*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. * Minio Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2017 Minio, Inc.
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.
@ -17,13 +18,14 @@
package minio package minio
import ( import (
"context"
"io" "io"
"github.com/minio/minio-go/pkg/encrypt" "github.com/minio/minio-go/pkg/encrypt"
) )
// PutEncryptedObject - Encrypt and store object. // PutEncryptedObject - Encrypt and store object.
func (c Client) PutEncryptedObject(bucketName, objectName string, reader io.Reader, encryptMaterials encrypt.Materials, metadata map[string][]string, progress io.Reader) (n int64, err error) { func (c Client) PutEncryptedObject(bucketName, objectName string, reader io.Reader, encryptMaterials encrypt.Materials) (n int64, err error) {
if encryptMaterials == nil { if encryptMaterials == nil {
return 0, ErrInvalidArgument("Unable to recognize empty encryption properties") return 0, ErrInvalidArgument("Unable to recognize empty encryption properties")
@ -33,14 +35,10 @@ func (c Client) PutEncryptedObject(bucketName, objectName string, reader io.Read
return 0, err return 0, err
} }
if metadata == nil { return c.PutObjectWithContext(context.Background(), bucketName, objectName, reader, -1, PutObjectOptions{EncryptMaterials: encryptMaterials})
metadata = make(map[string][]string)
} }
// Set the necessary encryption headers, for future decryption. // FPutEncryptedObject - Encrypt and store an object with contents from file at filePath.
metadata[amzHeaderIV] = []string{encryptMaterials.GetIV()} func (c Client) FPutEncryptedObject(bucketName, objectName, filePath string, encryptMaterials encrypt.Materials) (n int64, err error) {
metadata[amzHeaderKey] = []string{encryptMaterials.GetKey()} return c.FPutObjectWithContext(context.Background(), bucketName, objectName, filePath, PutObjectOptions{EncryptMaterials: encryptMaterials})
metadata[amzHeaderMatDesc] = []string{encryptMaterials.GetDesc()}
return c.putObjectMultipartStreamNoLength(bucketName, objectName, encryptMaterials, metadata, progress)
} }
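A client-side encryption sketch for the simplified v4 signature (assumptions not confirmed by this diff: the NewSymmetricKey and NewCBCSecureMaterials helpers from pkg/encrypt, plus placeholder key material and names):

// uploadEncrypted encrypts on the client; the IV/key/material-description
// headers are now attached inside the library rather than by the caller.
func uploadEncrypted(client *minio.Client, r io.Reader) (int64, error) {
	key := encrypt.NewSymmetricKey([]byte("32byteslongsecretkeymustbegiven1"))
	materials, err := encrypt.NewCBCSecureMaterials(key)
	if err != nil {
		return 0, err
	}
	return client.PutEncryptedObject("my-bucket", "my-object", r, materials)
}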

View File

@ -0,0 +1,64 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package minio
import (
"context"
"mime"
"os"
"path/filepath"
"github.com/minio/minio-go/pkg/s3utils"
)
// FPutObjectWithContext - Create an object in a bucket, with contents from file at filePath. Allows request cancellation.
func (c Client) FPutObjectWithContext(ctx context.Context, bucketName, objectName, filePath string, opts PutObjectOptions) (n int64, err error) {
// Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return 0, err
}
if err := s3utils.CheckValidObjectName(objectName); err != nil {
return 0, err
}
// Open the referenced file.
fileReader, err := os.Open(filePath)
// If any error fail quickly here.
if err != nil {
return 0, err
}
defer fileReader.Close()
// Save the file stat.
fileStat, err := fileReader.Stat()
if err != nil {
return 0, err
}
// Save the file size.
fileSize := fileStat.Size()
// Set contentType based on filepath extension if not given or default
// value of "application/octet-stream" if the extension has no associated type.
if opts.ContentType == "" {
if opts.ContentType = mime.TypeByExtension(filepath.Ext(filePath)); opts.ContentType == "" {
opts.ContentType = "application/octet-stream"
}
}
return c.PutObjectWithContext(ctx, bucketName, objectName, fileReader, fileSize, opts)
}

View File

@ -1,5 +1,6 @@
/* /*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc. * Minio Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2017 Minio, Inc.
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.
@ -17,50 +18,10 @@
package minio package minio
import ( import (
"mime" "context"
"os"
"path/filepath"
"github.com/minio/minio-go/pkg/s3utils"
) )
// FPutObject - Create an object in a bucket, with contents from file at filePath. // FPutObject - Create an object in a bucket, with contents from file at filePath
func (c Client) FPutObject(bucketName, objectName, filePath, contentType string) (n int64, err error) { func (c Client) FPutObject(bucketName, objectName, filePath string, opts PutObjectOptions) (n int64, err error) {
// Input validation. return c.FPutObjectWithContext(context.Background(), bucketName, objectName, filePath, opts)
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return 0, err
}
if err := s3utils.CheckValidObjectName(objectName); err != nil {
return 0, err
}
// Open the referenced file.
fileReader, err := os.Open(filePath)
// If any error fail quickly here.
if err != nil {
return 0, err
}
defer fileReader.Close()
// Save the file stat.
fileStat, err := fileReader.Stat()
if err != nil {
return 0, err
}
// Save the file size.
fileSize := fileStat.Size()
objMetadata := make(map[string][]string)
// Set contentType based on filepath extension if not given or default
// value of "binary/octet-stream" if the extension has no associated type.
if contentType == "" {
if contentType = mime.TypeByExtension(filepath.Ext(filePath)); contentType == "" {
contentType = "application/octet-stream"
}
}
objMetadata["Content-Type"] = []string{contentType}
return c.putObjectCommon(bucketName, objectName, fileReader, fileSize, objMetadata, nil)
} }
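For callers migrating from v3, a before/after sketch of the file upload entry point (placeholder names; the v3 call is shown as a comment for contrast):

// v3: n, err := client.FPutObject("my-bucket", "backup.tar.gz", "/tmp/backup.tar.gz", "application/gzip")
// v4: the content type moves into PutObjectOptions; leaving it empty falls back
// to the extension-based detection shown in FPutObjectWithContext above.
func uploadFile(client *minio.Client) (int64, error) {
	return client.FPutObject("my-bucket", "backup.tar.gz", "/tmp/backup.tar.gz",
		minio.PutObjectOptions{ContentType: "application/gzip"})
}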

View File

@ -1,5 +1,6 @@
/* /*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc. * Minio Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2017 Minio, Inc.
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.
@ -18,6 +19,9 @@ package minio
import ( import (
"bytes" "bytes"
"context"
"encoding/base64"
"encoding/hex"
"encoding/xml" "encoding/xml"
"fmt" "fmt"
"io" "io"
@ -32,9 +36,9 @@ import (
"github.com/minio/minio-go/pkg/s3utils" "github.com/minio/minio-go/pkg/s3utils"
) )
func (c Client) putObjectMultipart(bucketName, objectName string, reader io.Reader, size int64, func (c Client) putObjectMultipart(ctx context.Context, bucketName, objectName string, reader io.Reader, size int64,
metadata map[string][]string, progress io.Reader) (n int64, err error) { opts PutObjectOptions) (n int64, err error) {
n, err = c.putObjectMultipartNoStream(bucketName, objectName, reader, metadata, progress) n, err = c.putObjectMultipartNoStream(ctx, bucketName, objectName, reader, opts)
if err != nil { if err != nil {
errResp := ToErrorResponse(err) errResp := ToErrorResponse(err)
// Verify if multipart functionality is not available, if not // Verify if multipart functionality is not available, if not
@ -45,13 +49,13 @@ func (c Client) putObjectMultipart(bucketName, objectName string, reader io.Read
return 0, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName) return 0, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName)
} }
// Fall back to uploading as single PutObject operation. // Fall back to uploading as single PutObject operation.
return c.putObjectNoChecksum(bucketName, objectName, reader, size, metadata, progress) return c.putObjectNoChecksum(ctx, bucketName, objectName, reader, size, opts)
} }
} }
return n, err return n, err
} }
func (c Client) putObjectMultipartNoStream(bucketName, objectName string, reader io.Reader, metadata map[string][]string, progress io.Reader) (n int64, err error) { func (c Client) putObjectMultipartNoStream(ctx context.Context, bucketName, objectName string, reader io.Reader, opts PutObjectOptions) (n int64, err error) {
// Input validation. // Input validation.
if err = s3utils.CheckValidBucketName(bucketName); err != nil { if err = s3utils.CheckValidBucketName(bucketName); err != nil {
return 0, err return 0, err
@ -74,14 +78,14 @@ func (c Client) putObjectMultipartNoStream(bucketName, objectName string, reader
} }
// Initiate a new multipart upload. // Initiate a new multipart upload.
uploadID, err := c.newUploadID(bucketName, objectName, metadata) uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts)
if err != nil { if err != nil {
return 0, err return 0, err
} }
defer func() { defer func() {
if err != nil { if err != nil {
c.abortMultipartUpload(bucketName, objectName, uploadID) c.abortMultipartUpload(ctx, bucketName, objectName, uploadID)
} }
}() }()
@ -117,12 +121,24 @@ func (c Client) putObjectMultipartNoStream(bucketName, objectName string, reader
// Update progress reader appropriately to the latest offset // Update progress reader appropriately to the latest offset
// as we read from the source. // as we read from the source.
rd := newHook(bytes.NewReader(buf[:length]), progress) rd := newHook(bytes.NewReader(buf[:length]), opts.Progress)
// Checksums..
var (
md5Base64 string
sha256Hex string
)
if hashSums["md5"] != nil {
md5Base64 = base64.StdEncoding.EncodeToString(hashSums["md5"])
}
if hashSums["sha256"] != nil {
sha256Hex = hex.EncodeToString(hashSums["sha256"])
}
// Proceed to upload the part. // Proceed to upload the part.
var objPart ObjectPart var objPart ObjectPart
objPart, err = c.uploadPart(bucketName, objectName, uploadID, rd, partNumber, objPart, err = c.uploadPart(ctx, bucketName, objectName, uploadID, rd, partNumber,
hashSums["md5"], hashSums["sha256"], int64(length), metadata) md5Base64, sha256Hex, int64(length), opts.UserMetadata)
if err != nil { if err != nil {
return totalUploadedSize, err return totalUploadedSize, err
} }
@ -158,7 +174,7 @@ func (c Client) putObjectMultipartNoStream(bucketName, objectName string, reader
// Sort all completed parts. // Sort all completed parts.
sort.Sort(completedParts(complMultipartUpload.Parts)) sort.Sort(completedParts(complMultipartUpload.Parts))
if _, err = c.completeMultipartUpload(bucketName, objectName, uploadID, complMultipartUpload); err != nil { if _, err = c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload); err != nil {
return totalUploadedSize, err return totalUploadedSize, err
} }
@ -167,7 +183,7 @@ func (c Client) putObjectMultipartNoStream(bucketName, objectName string, reader
} }
// initiateMultipartUpload - Initiates a multipart upload and returns an upload ID. // initiateMultipartUpload - Initiates a multipart upload and returns an upload ID.
func (c Client) initiateMultipartUpload(bucketName, objectName string, metadata map[string][]string) (initiateMultipartUploadResult, error) { func (c Client) initiateMultipartUpload(ctx context.Context, bucketName, objectName string, opts PutObjectOptions) (initiateMultipartUploadResult, error) {
// Input validation. // Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil { if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return initiateMultipartUploadResult{}, err return initiateMultipartUploadResult{}, err
@ -181,17 +197,7 @@ func (c Client) initiateMultipartUpload(bucketName, objectName string, metadata
urlValues.Set("uploads", "") urlValues.Set("uploads", "")
// Set ContentType header. // Set ContentType header.
customHeader := make(http.Header) customHeader := opts.Header()
for k, v := range metadata {
if len(v) > 0 {
customHeader.Set(k, v[0])
}
}
// Set a default content-type header if the latter is not provided
if v, ok := metadata["Content-Type"]; !ok || len(v) == 0 {
customHeader.Set("Content-Type", "application/octet-stream")
}
reqMetadata := requestMetadata{ reqMetadata := requestMetadata{
bucketName: bucketName, bucketName: bucketName,
@ -201,7 +207,7 @@ func (c Client) initiateMultipartUpload(bucketName, objectName string, metadata
} }
// Execute POST on an objectName to initiate multipart upload. // Execute POST on an objectName to initiate multipart upload.
resp, err := c.executeMethod("POST", reqMetadata) resp, err := c.executeMethod(ctx, "POST", reqMetadata)
defer closeResponse(resp) defer closeResponse(resp)
if err != nil { if err != nil {
return initiateMultipartUploadResult{}, err return initiateMultipartUploadResult{}, err
@ -223,8 +229,8 @@ func (c Client) initiateMultipartUpload(bucketName, objectName string, metadata
const serverEncryptionKeyPrefix = "x-amz-server-side-encryption" const serverEncryptionKeyPrefix = "x-amz-server-side-encryption"
// uploadPart - Uploads a part in a multipart upload. // uploadPart - Uploads a part in a multipart upload.
func (c Client) uploadPart(bucketName, objectName, uploadID string, reader io.Reader, func (c Client) uploadPart(ctx context.Context, bucketName, objectName, uploadID string, reader io.Reader,
partNumber int, md5Sum, sha256Sum []byte, size int64, metadata map[string][]string) (ObjectPart, error) { partNumber int, md5Base64, sha256Hex string, size int64, metadata map[string]string) (ObjectPart, error) {
// Input validation. // Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil { if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return ObjectPart{}, err return ObjectPart{}, err
@ -257,7 +263,7 @@ func (c Client) uploadPart(bucketName, objectName, uploadID string, reader io.Re
for k, v := range metadata { for k, v := range metadata {
if len(v) > 0 { if len(v) > 0 {
if strings.HasPrefix(strings.ToLower(k), serverEncryptionKeyPrefix) { if strings.HasPrefix(strings.ToLower(k), serverEncryptionKeyPrefix) {
customHeader.Set(k, v[0]) customHeader.Set(k, v)
} }
} }
} }
@ -269,12 +275,12 @@ func (c Client) uploadPart(bucketName, objectName, uploadID string, reader io.Re
customHeader: customHeader, customHeader: customHeader,
contentBody: reader, contentBody: reader,
contentLength: size, contentLength: size,
contentMD5Bytes: md5Sum, contentMD5Base64: md5Base64,
contentSHA256Bytes: sha256Sum, contentSHA256Hex: sha256Hex,
} }
// Execute PUT on each part. // Execute PUT on each part.
resp, err := c.executeMethod("PUT", reqMetadata) resp, err := c.executeMethod(ctx, "PUT", reqMetadata)
defer closeResponse(resp) defer closeResponse(resp)
if err != nil { if err != nil {
return ObjectPart{}, err return ObjectPart{}, err
@ -295,7 +301,7 @@ func (c Client) uploadPart(bucketName, objectName, uploadID string, reader io.Re
} }
// completeMultipartUpload - Completes a multipart upload by assembling previously uploaded parts. // completeMultipartUpload - Completes a multipart upload by assembling previously uploaded parts.
func (c Client) completeMultipartUpload(bucketName, objectName, uploadID string, func (c Client) completeMultipartUpload(ctx context.Context, bucketName, objectName, uploadID string,
complete completeMultipartUpload) (completeMultipartUploadResult, error) { complete completeMultipartUpload) (completeMultipartUploadResult, error) {
// Input validation. // Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil { if err := s3utils.CheckValidBucketName(bucketName); err != nil {
@ -308,7 +314,6 @@ func (c Client) completeMultipartUpload(bucketName, objectName, uploadID string,
// Initialize url queries. // Initialize url queries.
urlValues := make(url.Values) urlValues := make(url.Values)
urlValues.Set("uploadId", uploadID) urlValues.Set("uploadId", uploadID)
// Marshal complete multipart body. // Marshal complete multipart body.
completeMultipartUploadBytes, err := xml.Marshal(complete) completeMultipartUploadBytes, err := xml.Marshal(complete)
if err != nil { if err != nil {
@ -323,11 +328,11 @@ func (c Client) completeMultipartUpload(bucketName, objectName, uploadID string,
queryValues: urlValues, queryValues: urlValues,
contentBody: completeMultipartUploadBuffer, contentBody: completeMultipartUploadBuffer,
contentLength: int64(len(completeMultipartUploadBytes)), contentLength: int64(len(completeMultipartUploadBytes)),
contentSHA256Bytes: sum256(completeMultipartUploadBytes), contentSHA256Hex: sum256Hex(completeMultipartUploadBytes),
} }
// Execute POST to complete multipart upload for an objectName. // Execute POST to complete multipart upload for an objectName.
resp, err := c.executeMethod("POST", reqMetadata) resp, err := c.executeMethod(ctx, "POST", reqMetadata)
defer closeResponse(resp) defer closeResponse(resp)
if err != nil { if err != nil {
return completeMultipartUploadResult{}, err return completeMultipartUploadResult{}, err

View File

@ -1,5 +1,6 @@
/* /*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2017 Minio, Inc. * Minio Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2017 Minio, Inc.
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.
@ -17,6 +18,7 @@
package minio package minio
import ( import (
"context"
"fmt" "fmt"
"io" "io"
"net/http" "net/http"
@ -26,33 +28,23 @@ import (
"github.com/minio/minio-go/pkg/s3utils" "github.com/minio/minio-go/pkg/s3utils"
) )
// PutObjectStreaming using AWS streaming signature V4
func (c Client) PutObjectStreaming(bucketName, objectName string, reader io.Reader) (n int64, err error) {
return c.PutObjectWithProgress(bucketName, objectName, reader, nil, nil)
}
// putObjectMultipartStream - upload a large object using // putObjectMultipartStream - upload a large object using
// multipart upload and streaming signature for signing payload. // multipart upload and streaming signature for signing payload.
// Comprehensive put object operation involving multipart uploads. // Comprehensive put object operation involving multipart uploads.
// //
// Following code handles these types of readers. // Following code handles these types of readers.
// //
// - *os.File
// - *minio.Object // - *minio.Object
// - Any reader which has a method 'ReadAt()' // - Any reader which has a method 'ReadAt()'
// //
func (c Client) putObjectMultipartStream(bucketName, objectName string, func (c Client) putObjectMultipartStream(ctx context.Context, bucketName, objectName string,
reader io.Reader, size int64, metadata map[string][]string, progress io.Reader) (n int64, err error) { reader io.Reader, size int64, opts PutObjectOptions) (n int64, err error) {
// Verify if reader is *minio.Object, *os.File or io.ReaderAt. if !isObject(reader) && isReadAt(reader) {
// NOTE: Verification of object is kept for a specific purpose // Verify if the reader implements ReadAt and it is not a *minio.Object then we will use parallel uploader.
// while it is going to be duck typed similar to io.ReaderAt. n, err = c.putObjectMultipartStreamFromReadAt(ctx, bucketName, objectName, reader.(io.ReaderAt), size, opts)
// It is to indicate that *minio.Object implements io.ReaderAt.
// and such a functionality is used in the subsequent code path.
if isFile(reader) || !isObject(reader) && isReadAt(reader) {
n, err = c.putObjectMultipartStreamFromReadAt(bucketName, objectName, reader.(io.ReaderAt), size, metadata, progress)
} else { } else {
n, err = c.putObjectMultipartStreamNoChecksum(bucketName, objectName, reader, size, metadata, progress) n, err = c.putObjectMultipartStreamNoChecksum(ctx, bucketName, objectName, reader, size, opts)
} }
if err != nil { if err != nil {
errResp := ToErrorResponse(err) errResp := ToErrorResponse(err)
@ -64,7 +56,7 @@ func (c Client) putObjectMultipartStream(bucketName, objectName string,
return 0, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName) return 0, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName)
} }
// Fall back to uploading as single PutObject operation. // Fall back to uploading as single PutObject operation.
return c.putObjectNoChecksum(bucketName, objectName, reader, size, metadata, progress) return c.putObjectNoChecksum(ctx, bucketName, objectName, reader, size, opts)
} }
} }
return n, err return n, err
@ -94,8 +86,8 @@ type uploadPartReq struct {
// temporary files for staging all the data, these temporary files are // temporary files for staging all the data, these temporary files are
// cleaned automatically when the caller i.e http client closes the // cleaned automatically when the caller i.e http client closes the
// stream after uploading all the contents successfully. // stream after uploading all the contents successfully.
func (c Client) putObjectMultipartStreamFromReadAt(bucketName, objectName string, func (c Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketName, objectName string,
reader io.ReaderAt, size int64, metadata map[string][]string, progress io.Reader) (n int64, err error) { reader io.ReaderAt, size int64, opts PutObjectOptions) (n int64, err error) {
// Input validation. // Input validation.
if err = s3utils.CheckValidBucketName(bucketName); err != nil { if err = s3utils.CheckValidBucketName(bucketName); err != nil {
return 0, err return 0, err
@ -111,7 +103,7 @@ func (c Client) putObjectMultipartStreamFromReadAt(bucketName, objectName string
} }
// Initiate a new multipart upload. // Initiate a new multipart upload.
uploadID, err := c.newUploadID(bucketName, objectName, metadata) uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts)
if err != nil { if err != nil {
return 0, err return 0, err
} }
@ -122,7 +114,7 @@ func (c Client) putObjectMultipartStreamFromReadAt(bucketName, objectName string
// to relinquish storage space. // to relinquish storage space.
defer func() { defer func() {
if err != nil { if err != nil {
c.abortMultipartUpload(bucketName, objectName, uploadID) c.abortMultipartUpload(ctx, bucketName, objectName, uploadID)
} }
}() }()
@ -150,9 +142,8 @@ func (c Client) putObjectMultipartStreamFromReadAt(bucketName, objectName string
uploadPartsCh <- uploadPartReq{PartNum: p, Part: nil} uploadPartsCh <- uploadPartReq{PartNum: p, Part: nil}
} }
close(uploadPartsCh) close(uploadPartsCh)
// Receive each part number from the channel allowing three parallel uploads. // Receive each part number from the channel allowing three parallel uploads.
for w := 1; w <= totalWorkers; w++ { for w := 1; w <= opts.getNumThreads(); w++ {
go func(partSize int64) { go func(partSize int64) {
// Each worker will draw from the part channel and upload in parallel. // Each worker will draw from the part channel and upload in parallel.
for uploadReq := range uploadPartsCh { for uploadReq := range uploadPartsCh {
@ -170,13 +161,13 @@ func (c Client) putObjectMultipartStreamFromReadAt(bucketName, objectName string
} }
// Get a section reader on a particular offset. // Get a section reader on a particular offset.
sectionReader := newHook(io.NewSectionReader(reader, readOffset, partSize), progress) sectionReader := newHook(io.NewSectionReader(reader, readOffset, partSize), opts.Progress)
// Proceed to upload the part. // Proceed to upload the part.
var objPart ObjectPart var objPart ObjectPart
objPart, err = c.uploadPart(bucketName, objectName, uploadID, objPart, err = c.uploadPart(ctx, bucketName, objectName, uploadID,
sectionReader, uploadReq.PartNum, sectionReader, uploadReq.PartNum,
nil, nil, partSize, metadata) "", "", partSize, opts.UserMetadata)
if err != nil { if err != nil {
uploadedPartsCh <- uploadedPartRes{ uploadedPartsCh <- uploadedPartRes{
Size: 0, Size: 0,
@ -229,7 +220,7 @@ func (c Client) putObjectMultipartStreamFromReadAt(bucketName, objectName string
// Sort all completed parts. // Sort all completed parts.
sort.Sort(completedParts(complMultipartUpload.Parts)) sort.Sort(completedParts(complMultipartUpload.Parts))
_, err = c.completeMultipartUpload(bucketName, objectName, uploadID, complMultipartUpload) _, err = c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload)
if err != nil { if err != nil {
return totalUploadedSize, err return totalUploadedSize, err
} }
@ -238,8 +229,8 @@ func (c Client) putObjectMultipartStreamFromReadAt(bucketName, objectName string
return totalUploadedSize, nil return totalUploadedSize, nil
} }
func (c Client) putObjectMultipartStreamNoChecksum(bucketName, objectName string, func (c Client) putObjectMultipartStreamNoChecksum(ctx context.Context, bucketName, objectName string,
reader io.Reader, size int64, metadata map[string][]string, progress io.Reader) (n int64, err error) { reader io.Reader, size int64, opts PutObjectOptions) (n int64, err error) {
// Input validation. // Input validation.
if err = s3utils.CheckValidBucketName(bucketName); err != nil { if err = s3utils.CheckValidBucketName(bucketName); err != nil {
return 0, err return 0, err
@ -253,9 +244,8 @@ func (c Client) putObjectMultipartStreamNoChecksum(bucketName, objectName string
if err != nil { if err != nil {
return 0, err return 0, err
} }
// Initiates a new multipart request // Initiates a new multipart request
uploadID, err := c.newUploadID(bucketName, objectName, metadata) uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts)
if err != nil { if err != nil {
return 0, err return 0, err
} }
@ -266,7 +256,7 @@ func (c Client) putObjectMultipartStreamNoChecksum(bucketName, objectName string
// storage space. // storage space.
defer func() { defer func() {
if err != nil { if err != nil {
c.abortMultipartUpload(bucketName, objectName, uploadID) c.abortMultipartUpload(ctx, bucketName, objectName, uploadID)
} }
}() }()
@ -281,17 +271,16 @@ func (c Client) putObjectMultipartStreamNoChecksum(bucketName, objectName string
for partNumber = 1; partNumber <= totalPartsCount; partNumber++ { for partNumber = 1; partNumber <= totalPartsCount; partNumber++ {
// Update progress reader appropriately to the latest offset // Update progress reader appropriately to the latest offset
// as we read from the source. // as we read from the source.
hookReader := newHook(reader, progress) hookReader := newHook(reader, opts.Progress)
// Proceed to upload the part. // Proceed to upload the part.
if partNumber == totalPartsCount { if partNumber == totalPartsCount {
partSize = lastPartSize partSize = lastPartSize
} }
var objPart ObjectPart var objPart ObjectPart
objPart, err = c.uploadPart(bucketName, objectName, uploadID, objPart, err = c.uploadPart(ctx, bucketName, objectName, uploadID,
io.LimitReader(hookReader, partSize), io.LimitReader(hookReader, partSize),
partNumber, nil, nil, partSize, metadata) partNumber, "", "", partSize, opts.UserMetadata)
if err != nil { if err != nil {
return totalUploadedSize, err return totalUploadedSize, err
} }
@ -328,7 +317,7 @@ func (c Client) putObjectMultipartStreamNoChecksum(bucketName, objectName string
// Sort all completed parts. // Sort all completed parts.
sort.Sort(completedParts(complMultipartUpload.Parts)) sort.Sort(completedParts(complMultipartUpload.Parts))
_, err = c.completeMultipartUpload(bucketName, objectName, uploadID, complMultipartUpload) _, err = c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload)
if err != nil { if err != nil {
return totalUploadedSize, err return totalUploadedSize, err
} }
@ -339,7 +328,7 @@ func (c Client) putObjectMultipartStreamNoChecksum(bucketName, objectName string
// putObjectNoChecksum special function used Google Cloud Storage. This special function // putObjectNoChecksum special function used Google Cloud Storage. This special function
// is used for Google Cloud Storage since Google's multipart API is not S3 compatible. // is used for Google Cloud Storage since Google's multipart API is not S3 compatible.
func (c Client) putObjectNoChecksum(bucketName, objectName string, reader io.Reader, size int64, metaData map[string][]string, progress io.Reader) (n int64, err error) { func (c Client) putObjectNoChecksum(ctx context.Context, bucketName, objectName string, reader io.Reader, size int64, opts PutObjectOptions) (n int64, err error) {
// Input validation. // Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil { if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return 0, err return 0, err
@ -355,17 +344,22 @@ func (c Client) putObjectNoChecksum(bucketName, objectName string, reader io.Rea
} }
if size > 0 { if size > 0 {
if isReadAt(reader) && !isObject(reader) { if isReadAt(reader) && !isObject(reader) {
reader = io.NewSectionReader(reader.(io.ReaderAt), 0, size) seeker, _ := reader.(io.Seeker)
offset, err := seeker.Seek(0, io.SeekCurrent)
if err != nil {
return 0, ErrInvalidArgument(err.Error())
}
reader = io.NewSectionReader(reader.(io.ReaderAt), offset, size)
} }
} }
// Update progress reader appropriately to the latest offset as we // Update progress reader appropriately to the latest offset as we
// read from the source. // read from the source.
readSeeker := newHook(reader, progress) readSeeker := newHook(reader, opts.Progress)
// This function does not calculate sha256 and md5sum for payload. // This function does not calculate sha256 and md5sum for payload.
// Execute put object. // Execute put object.
st, err := c.putObjectDo(bucketName, objectName, readSeeker, nil, nil, size, metaData) st, err := c.putObjectDo(ctx, bucketName, objectName, readSeeker, "", "", size, opts)
if err != nil { if err != nil {
return 0, err return 0, err
} }
@ -377,7 +371,7 @@ func (c Client) putObjectNoChecksum(bucketName, objectName string, reader io.Rea
// putObjectDo - executes the put object http operation. // putObjectDo - executes the put object http operation.
// NOTE: You must have WRITE permissions on a bucket to add an object to it. // NOTE: You must have WRITE permissions on a bucket to add an object to it.
func (c Client) putObjectDo(bucketName, objectName string, reader io.Reader, md5Sum []byte, sha256Sum []byte, size int64, metaData map[string][]string) (ObjectInfo, error) { func (c Client) putObjectDo(ctx context.Context, bucketName, objectName string, reader io.Reader, md5Base64, sha256Hex string, size int64, opts PutObjectOptions) (ObjectInfo, error) {
// Input validation. // Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil { if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return ObjectInfo{}, err return ObjectInfo{}, err
@ -385,21 +379,8 @@ func (c Client) putObjectDo(bucketName, objectName string, reader io.Reader, md5
if err := s3utils.CheckValidObjectName(objectName); err != nil { if err := s3utils.CheckValidObjectName(objectName); err != nil {
return ObjectInfo{}, err return ObjectInfo{}, err
} }
// Set headers. // Set headers.
customHeader := make(http.Header) customHeader := opts.Header()
// Set metadata to headers
for k, v := range metaData {
if len(v) > 0 {
customHeader.Set(k, v[0])
}
}
// If Content-Type is not provided, set the default application/octet-stream one
if v, ok := metaData["Content-Type"]; !ok || len(v) == 0 {
customHeader.Set("Content-Type", "application/octet-stream")
}
// Populate request metadata. // Populate request metadata.
reqMetadata := requestMetadata{ reqMetadata := requestMetadata{
@ -408,12 +389,12 @@ func (c Client) putObjectDo(bucketName, objectName string, reader io.Reader, md5
customHeader: customHeader, customHeader: customHeader,
contentBody: reader, contentBody: reader,
contentLength: size, contentLength: size,
contentMD5Bytes: md5Sum, contentMD5Base64: md5Base64,
contentSHA256Bytes: sha256Sum, contentSHA256Hex: sha256Hex,
} }
// Execute PUT an objectName. // Execute PUT an objectName.
resp, err := c.executeMethod("PUT", reqMetadata) resp, err := c.executeMethod(ctx, "PUT", reqMetadata)
defer closeResponse(resp) defer closeResponse(resp)
if err != nil { if err != nil {
return ObjectInfo{}, err return ObjectInfo{}, err

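The putObjectNoChecksum hunk above now starts the section reader at the reader's current offset rather than at zero, so bytes the caller has already consumed are not silently re-read. A small sketch of that behaviour with a strings.Reader, which satisfies both io.ReaderAt and io.Seeker:

```go
package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"strings"
)

func main() {
	r := strings.NewReader("0123456789")

	// Simulate a caller that has already consumed the first 4 bytes.
	if _, err := r.Seek(4, io.SeekCurrent); err != nil {
		panic(err)
	}

	// Wrapping from offset 0 would silently rewind and re-read "0123".
	// Starting the section at the current offset keeps only the rest.
	offset, _ := r.Seek(0, io.SeekCurrent)
	section := io.NewSectionReader(r, offset, r.Size()-offset)

	rest, _ := ioutil.ReadAll(section)
	fmt.Printf("%s\n", rest) // 456789
}
```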
View File

@ -1,5 +1,6 @@
/* /*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc. * Minio Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2017 Minio, Inc.
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.
@ -18,119 +19,83 @@ package minio
import ( import (
"bytes" "bytes"
"context"
"fmt" "fmt"
"io" "io"
"os" "net/http"
"reflect"
"runtime"
"runtime/debug" "runtime/debug"
"sort" "sort"
"strings"
"github.com/minio/minio-go/pkg/encrypt"
"github.com/minio/minio-go/pkg/s3utils" "github.com/minio/minio-go/pkg/s3utils"
) )
// toInt - converts go value to its integer representation based // PutObjectOptions represents options specified by user for PutObject call
// on the value kind if it is an integer. type PutObjectOptions struct {
func toInt(value reflect.Value) (size int64) { UserMetadata map[string]string
size = -1 Progress io.Reader
if value.IsValid() { ContentType string
switch value.Kind() { ContentEncoding string
case reflect.Int: ContentDisposition string
fallthrough CacheControl string
case reflect.Int8: EncryptMaterials encrypt.Materials
fallthrough NumThreads uint
case reflect.Int16:
fallthrough
case reflect.Int32:
fallthrough
case reflect.Int64:
size = value.Int()
}
}
return size
} }
// getReaderSize - Determine the size of Reader if available. // getNumThreads - gets the number of threads to be used in the multipart
func getReaderSize(reader io.Reader) (size int64, err error) { // put object operation
size = -1 func (opts PutObjectOptions) getNumThreads() (numThreads int) {
if reader == nil { if opts.NumThreads > 0 {
return -1, nil numThreads = int(opts.NumThreads)
}
// Verify if there is a method by name 'Size'.
sizeFn := reflect.ValueOf(reader).MethodByName("Size")
// Verify if there is a method by name 'Len'.
lenFn := reflect.ValueOf(reader).MethodByName("Len")
if sizeFn.IsValid() {
if sizeFn.Kind() == reflect.Func {
// Call the 'Size' function and save its return value.
result := sizeFn.Call([]reflect.Value{})
if len(result) == 1 {
size = toInt(result[0])
}
}
} else if lenFn.IsValid() {
if lenFn.Kind() == reflect.Func {
// Call the 'Len' function and save its return value.
result := lenFn.Call([]reflect.Value{})
if len(result) == 1 {
size = toInt(result[0])
}
}
} else { } else {
// Fallback to Stat() method, two possible Stat() structs exist. numThreads = totalWorkers
switch v := reader.(type) { }
case *os.File: return
var st os.FileInfo }
st, err = v.Stat()
if err != nil { // Header - constructs the headers from metadata entered by user in
// Handle this case specially for "windows", // PutObjectOptions struct
// certain files for example 'Stdin', 'Stdout' and func (opts PutObjectOptions) Header() (header http.Header) {
// 'Stderr' it is not allowed to fetch file information. header = make(http.Header)
if runtime.GOOS == "windows" {
if strings.Contains(err.Error(), "GetFileInformationByHandle") { if opts.ContentType != "" {
return -1, nil header["Content-Type"] = []string{opts.ContentType}
} else {
header["Content-Type"] = []string{"application/octet-stream"}
}
if opts.ContentEncoding != "" {
header["Content-Encoding"] = []string{opts.ContentEncoding}
}
if opts.ContentDisposition != "" {
header["Content-Disposition"] = []string{opts.ContentDisposition}
}
if opts.CacheControl != "" {
header["Cache-Control"] = []string{opts.CacheControl}
}
if opts.EncryptMaterials != nil {
header[amzHeaderIV] = []string{opts.EncryptMaterials.GetIV()}
header[amzHeaderKey] = []string{opts.EncryptMaterials.GetKey()}
header[amzHeaderMatDesc] = []string{opts.EncryptMaterials.GetDesc()}
}
for k, v := range opts.UserMetadata {
if !isAmzHeader(k) && !isStandardHeader(k) && !isSSEHeader(k) {
header["X-Amz-Meta-"+k] = []string{v}
} else {
header[k] = []string{v}
} }
} }
return return
} }
// Ignore if input is a directory, throw an error.
if st.Mode().IsDir() { // validate() checks if the UserMetadata map has standard headers or client side
return -1, ErrInvalidArgument("Input file cannot be a directory.") // encryption headers and raises an error if so.
} func (opts PutObjectOptions) validate() (err error) {
// Ignore 'Stdin', 'Stdout' and 'Stderr', since they for k := range opts.UserMetadata {
// represent *os.File type but internally do not if isStandardHeader(k) || isCSEHeader(k) {
// implement Seekable calls. Ignore them and treat return ErrInvalidArgument(k + " unsupported request parameter for user defined metadata")
// them like a stream with unknown length.
switch st.Name() {
case "stdin", "stdout", "stderr":
return
// Ignore read/write stream of os.Pipe() which have unknown length too.
case "|0", "|1":
return
}
var pos int64
pos, err = v.Seek(0, 1) // SeekCurrent.
if err != nil {
return -1, err
}
size = st.Size() - pos
case *Object:
var st ObjectInfo
st, err = v.Stat()
if err != nil {
return
}
var pos int64
pos, err = v.Seek(0, 1) // SeekCurrent.
if err != nil {
return -1, err
}
size = st.Size - pos
} }
} }
// Returns the size here. return nil
return size, err
} }
// completedParts is a collection of parts sortable by their part numbers. // completedParts is a collection of parts sortable by their part numbers.
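The PutObjectOptions type above replaces the old map[string][]string metadata argument, and its Header() method is what turns the typed fields plus UserMetadata into request headers. A short sketch of that mapping, assuming the package is imported under its canonical path github.com/minio/minio-go:

```go
package main

import (
	"fmt"

	minio "github.com/minio/minio-go"
)

func main() {
	opts := minio.PutObjectOptions{
		ContentType:  "text/plain",
		CacheControl: "max-age=600",
		UserMetadata: map[string]string{"Restic-Test": "1"},
	}

	h := opts.Header()
	fmt.Println(h.Get("Content-Type"))           // text/plain
	fmt.Println(h.Get("Cache-Control"))          // max-age=600
	fmt.Println(h.Get("X-Amz-Meta-Restic-Test")) // user metadata gets the X-Amz-Meta- prefix
}
```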
@ -152,40 +117,12 @@ func (a completedParts) Less(i, j int) bool { return a[i].PartNumber < a[j].Part
// - For size input as -1 PutObject does a multipart Put operation // - For size input as -1 PutObject does a multipart Put operation
// until input stream reaches EOF. Maximum object size that can // until input stream reaches EOF. Maximum object size that can
// be uploaded through this operation will be 5TiB. // be uploaded through this operation will be 5TiB.
func (c Client) PutObject(bucketName, objectName string, reader io.Reader, contentType string) (n int64, err error) { func (c Client) PutObject(bucketName, objectName string, reader io.Reader, objectSize int64,
return c.PutObjectWithMetadata(bucketName, objectName, reader, map[string][]string{ opts PutObjectOptions) (n int64, err error) {
"Content-Type": []string{contentType}, return c.PutObjectWithContext(context.Background(), bucketName, objectName, reader, objectSize, opts)
}, nil)
} }
// PutObjectWithSize - is a helper PutObject similar in behavior to PutObject() func (c Client) putObjectCommon(ctx context.Context, bucketName, objectName string, reader io.Reader, size int64, opts PutObjectOptions) (n int64, err error) {
// but takes the size argument explicitly, this function avoids doing reflection
// internally to figure out the size of input stream. Also if the input size is
// lesser than 0 this function returns an error.
func (c Client) PutObjectWithSize(bucketName, objectName string, reader io.Reader, readerSize int64, metadata map[string][]string, progress io.Reader) (n int64, err error) {
return c.putObjectCommon(bucketName, objectName, reader, readerSize, metadata, progress)
}
// PutObjectWithMetadata using AWS streaming signature V4
func (c Client) PutObjectWithMetadata(bucketName, objectName string, reader io.Reader, metadata map[string][]string, progress io.Reader) (n int64, err error) {
return c.PutObjectWithProgress(bucketName, objectName, reader, metadata, progress)
}
// PutObjectWithProgress using AWS streaming signature V4
func (c Client) PutObjectWithProgress(bucketName, objectName string, reader io.Reader, metadata map[string][]string, progress io.Reader) (n int64, err error) {
// Size of the object.
var size int64
// Get reader size.
size, err = getReaderSize(reader)
if err != nil {
return 0, err
}
return c.putObjectCommon(bucketName, objectName, reader, size, metadata, progress)
}
func (c Client) putObjectCommon(bucketName, objectName string, reader io.Reader, size int64, metadata map[string][]string, progress io.Reader) (n int64, err error) {
// Check for largest object size allowed. // Check for largest object size allowed.
if size > int64(maxMultipartPutObjectSize) { if size > int64(maxMultipartPutObjectSize) {
return 0, ErrEntityTooLarge(size, maxMultipartPutObjectSize, bucketName, objectName) return 0, ErrEntityTooLarge(size, maxMultipartPutObjectSize, bucketName, objectName)
@ -194,30 +131,27 @@ func (c Client) putObjectCommon(bucketName, objectName string, reader io.Reader,
// NOTE: Streaming signature is not supported by GCS. // NOTE: Streaming signature is not supported by GCS.
if s3utils.IsGoogleEndpoint(c.endpointURL) { if s3utils.IsGoogleEndpoint(c.endpointURL) {
// Do not compute MD5 for Google Cloud Storage. // Do not compute MD5 for Google Cloud Storage.
return c.putObjectNoChecksum(bucketName, objectName, reader, size, metadata, progress) return c.putObjectNoChecksum(ctx, bucketName, objectName, reader, size, opts)
} }
if c.overrideSignerType.IsV2() { if c.overrideSignerType.IsV2() {
if size >= 0 && size < minPartSize { if size >= 0 && size < minPartSize {
return c.putObjectNoChecksum(bucketName, objectName, reader, size, metadata, progress) return c.putObjectNoChecksum(ctx, bucketName, objectName, reader, size, opts)
} }
return c.putObjectMultipart(bucketName, objectName, reader, size, metadata, progress) return c.putObjectMultipart(ctx, bucketName, objectName, reader, size, opts)
} }
if size < 0 { if size < 0 {
return c.putObjectMultipartStreamNoLength(bucketName, objectName, reader, metadata, progress) return c.putObjectMultipartStreamNoLength(ctx, bucketName, objectName, reader, opts)
} }
if size < minPartSize { if size < minPartSize {
return c.putObjectNoChecksum(bucketName, objectName, reader, size, metadata, progress) return c.putObjectNoChecksum(ctx, bucketName, objectName, reader, size, opts)
} }
// For all sizes greater than 64MiB do multipart. // For all sizes greater than 64MiB do multipart.
return c.putObjectMultipartStream(bucketName, objectName, reader, size, metadata, progress) return c.putObjectMultipartStream(ctx, bucketName, objectName, reader, size, opts)
} }
func (c Client) putObjectMultipartStreamNoLength(bucketName, objectName string, reader io.Reader, metadata map[string][]string, func (c Client) putObjectMultipartStreamNoLength(ctx context.Context, bucketName, objectName string, reader io.Reader, opts PutObjectOptions) (n int64, err error) {
progress io.Reader) (n int64, err error) {
// Input validation. // Input validation.
if err = s3utils.CheckValidBucketName(bucketName); err != nil { if err = s3utils.CheckValidBucketName(bucketName); err != nil {
return 0, err return 0, err
@ -238,16 +172,15 @@ func (c Client) putObjectMultipartStreamNoLength(bucketName, objectName string,
if err != nil { if err != nil {
return 0, err return 0, err
} }
// Initiate a new multipart upload. // Initiate a new multipart upload.
uploadID, err := c.newUploadID(bucketName, objectName, metadata) uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts)
if err != nil { if err != nil {
return 0, err return 0, err
} }
defer func() { defer func() {
if err != nil { if err != nil {
c.abortMultipartUpload(bucketName, objectName, uploadID) c.abortMultipartUpload(ctx, bucketName, objectName, uploadID)
} }
}() }()
@ -263,21 +196,20 @@ func (c Client) putObjectMultipartStreamNoLength(bucketName, objectName string,
for partNumber <= totalPartsCount { for partNumber <= totalPartsCount {
length, rErr := io.ReadFull(reader, buf) length, rErr := io.ReadFull(reader, buf)
if rErr == io.EOF { if rErr == io.EOF && partNumber > 1 {
break break
} }
if rErr != nil && rErr != io.ErrUnexpectedEOF { if rErr != nil && rErr != io.ErrUnexpectedEOF {
return 0, rErr return 0, rErr
} }
// Update progress reader appropriately to the latest offset // Update progress reader appropriately to the latest offset
// as we read from the source. // as we read from the source.
rd := newHook(bytes.NewReader(buf[:length]), progress) rd := newHook(bytes.NewReader(buf[:length]), opts.Progress)
// Proceed to upload the part. // Proceed to upload the part.
var objPart ObjectPart var objPart ObjectPart
objPart, err = c.uploadPart(bucketName, objectName, uploadID, rd, partNumber, objPart, err = c.uploadPart(ctx, bucketName, objectName, uploadID, rd, partNumber,
nil, nil, int64(length), metadata) "", "", int64(length), opts.UserMetadata)
if err != nil { if err != nil {
return totalUploadedSize, err return totalUploadedSize, err
} }
@ -313,7 +245,7 @@ func (c Client) putObjectMultipartStreamNoLength(bucketName, objectName string,
// Sort all completed parts. // Sort all completed parts.
sort.Sort(completedParts(complMultipartUpload.Parts)) sort.Sort(completedParts(complMultipartUpload.Parts))
if _, err = c.completeMultipartUpload(bucketName, objectName, uploadID, complMultipartUpload); err != nil { if _, err = c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload); err != nil {
return totalUploadedSize, err return totalUploadedSize, err
} }

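With the reflection-based getReaderSize gone, callers of the public API now pass the object size themselves (or -1 to force the unknown-length multipart path) together with a PutObjectOptions value. A sketch of the new call shape; the endpoint and credentials are placeholders, not part of the change:

```go
package main

import (
	"log"
	"strings"

	minio "github.com/minio/minio-go"
)

func main() {
	// Placeholder endpoint and credentials for illustration only.
	client, err := minio.New("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatal(err)
	}

	body := strings.NewReader("hello world")

	// v4 style: explicit size plus options; passing -1 would switch to the
	// unknown-length multipart path shown above.
	n, err := client.PutObject("mybucket", "hello.txt", body, int64(body.Len()),
		minio.PutObjectOptions{ContentType: "text/plain"})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("uploaded %d bytes", n)
}
```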
View File

@ -0,0 +1,53 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package minio
import (
"testing"
)
func TestPutObjectOptionsValidate(t *testing.T) {
testCases := []struct {
metadata map[string]string
shouldPass bool
}{
{map[string]string{"Content-Type": "custom/content-type"}, false},
{map[string]string{"content-type": "custom/content-type"}, false},
{map[string]string{"Content-Encoding": "gzip"}, false},
{map[string]string{"Cache-Control": "blah"}, false},
{map[string]string{"Content-Disposition": "something"}, false},
{map[string]string{"my-custom-header": "blah"}, true},
{map[string]string{"X-Amz-Iv": "blah"}, false},
{map[string]string{"X-Amz-Key": "blah"}, false},
{map[string]string{"X-Amz-Key-prefixed-header": "blah"}, false},
{map[string]string{"custom-X-Amz-Key-middle": "blah"}, true},
{map[string]string{"my-custom-header-X-Amz-Key": "blah"}, true},
{map[string]string{"X-Amz-Matdesc": "blah"}, false},
{map[string]string{"blah-X-Amz-Matdesc": "blah"}, true},
{map[string]string{"X-Amz-MatDesc-suffix": "blah"}, true},
{map[string]string{"x-amz-meta-X-Amz-Iv": "blah"}, false},
{map[string]string{"x-amz-meta-X-Amz-Key": "blah"}, false},
{map[string]string{"x-amz-meta-X-Amz-Matdesc": "blah"}, false},
}
for i, testCase := range testCases {
err := PutObjectOptions{UserMetadata: testCase.metadata}.validate()
if testCase.shouldPass && err != nil {
t.Errorf("Test %d - output did not match with reference results", i+1)
}
}
}

View File

@ -1,5 +1,6 @@
/* /*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc. * Minio Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2017 Minio, Inc.
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.
@ -18,6 +19,7 @@ package minio
import ( import (
"bytes" "bytes"
"context"
"encoding/xml" "encoding/xml"
"io" "io"
"net/http" "net/http"
@ -36,9 +38,9 @@ func (c Client) RemoveBucket(bucketName string) error {
return err return err
} }
// Execute DELETE on bucket. // Execute DELETE on bucket.
resp, err := c.executeMethod("DELETE", requestMetadata{ resp, err := c.executeMethod(context.Background(), "DELETE", requestMetadata{
bucketName: bucketName, bucketName: bucketName,
contentSHA256Bytes: emptySHA256, contentSHA256Hex: emptySHA256Hex,
}) })
defer closeResponse(resp) defer closeResponse(resp)
if err != nil { if err != nil {
@ -66,10 +68,10 @@ func (c Client) RemoveObject(bucketName, objectName string) error {
return err return err
} }
// Execute DELETE on objectName. // Execute DELETE on objectName.
resp, err := c.executeMethod("DELETE", requestMetadata{ resp, err := c.executeMethod(context.Background(), "DELETE", requestMetadata{
bucketName: bucketName, bucketName: bucketName,
objectName: objectName, objectName: objectName,
contentSHA256Bytes: emptySHA256, contentSHA256Hex: emptySHA256Hex,
}) })
defer closeResponse(resp) defer closeResponse(resp)
if err != nil { if err != nil {
@ -187,13 +189,13 @@ func (c Client) RemoveObjects(bucketName string, objectsCh <-chan string) <-chan
// Generate remove multi objects XML request // Generate remove multi objects XML request
removeBytes := generateRemoveMultiObjectsRequest(batch) removeBytes := generateRemoveMultiObjectsRequest(batch)
// Execute GET on bucket to list objects. // Execute GET on bucket to list objects.
resp, err := c.executeMethod("POST", requestMetadata{ resp, err := c.executeMethod(context.Background(), "POST", requestMetadata{
bucketName: bucketName, bucketName: bucketName,
queryValues: urlValues, queryValues: urlValues,
contentBody: bytes.NewReader(removeBytes), contentBody: bytes.NewReader(removeBytes),
contentLength: int64(len(removeBytes)), contentLength: int64(len(removeBytes)),
contentMD5Bytes: sumMD5(removeBytes), contentMD5Base64: sumMD5Base64(removeBytes),
contentSHA256Bytes: sum256(removeBytes), contentSHA256Hex: sum256Hex(removeBytes),
}) })
if err != nil { if err != nil {
for _, b := range batch { for _, b := range batch {
@ -227,7 +229,7 @@ func (c Client) RemoveIncompleteUpload(bucketName, objectName string) error {
} }
if uploadID != "" { if uploadID != "" {
// Upload id found, abort the incomplete multipart upload. // Upload id found, abort the incomplete multipart upload.
err := c.abortMultipartUpload(bucketName, objectName, uploadID) err := c.abortMultipartUpload(context.Background(), bucketName, objectName, uploadID)
if err != nil { if err != nil {
return err return err
} }
@ -237,7 +239,7 @@ func (c Client) RemoveIncompleteUpload(bucketName, objectName string) error {
// abortMultipartUpload aborts a multipart upload for the given // abortMultipartUpload aborts a multipart upload for the given
// uploadID, all previously uploaded parts are deleted. // uploadID, all previously uploaded parts are deleted.
func (c Client) abortMultipartUpload(bucketName, objectName, uploadID string) error { func (c Client) abortMultipartUpload(ctx context.Context, bucketName, objectName, uploadID string) error {
// Input validation. // Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil { if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return err return err
@ -251,11 +253,11 @@ func (c Client) abortMultipartUpload(bucketName, objectName, uploadID string) er
urlValues.Set("uploadId", uploadID) urlValues.Set("uploadId", uploadID)
// Execute DELETE on multipart upload. // Execute DELETE on multipart upload.
resp, err := c.executeMethod("DELETE", requestMetadata{ resp, err := c.executeMethod(ctx, "DELETE", requestMetadata{
bucketName: bucketName, bucketName: bucketName,
objectName: objectName, objectName: objectName,
queryValues: urlValues, queryValues: urlValues,
contentSHA256Bytes: emptySHA256, contentSHA256Hex: emptySHA256Hex,
}) })
defer closeResponse(resp) defer closeResponse(resp)
if err != nil { if err != nil {

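requestMetadata now carries digests as ready-to-send strings: a base64-encoded MD5 for the Content-Md5 header (sumMD5Base64) and a hex-encoded SHA-256 for X-Amz-Content-Sha256 (sum256Hex). Their implementations are not part of this diff, but they presumably amount to the following standard-library sketch:

```go
package main

import (
	"crypto/md5"
	"crypto/sha256"
	"encoding/base64"
	"encoding/hex"
	"fmt"
)

func main() {
	payload := []byte("<Delete><Object><Key>foo</Key></Object></Delete>")

	// Content-Md5 expects the base64 encoding of the raw MD5 digest...
	md5sum := md5.Sum(payload)
	fmt.Println(base64.StdEncoding.EncodeToString(md5sum[:]))

	// ...while X-Amz-Content-Sha256 expects the hex-encoded SHA-256.
	sha := sha256.Sum256(payload)
	fmt.Println(hex.EncodeToString(sha[:]))
}
```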
View File

@ -1,5 +1,6 @@
/* /*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. * Minio Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2017 Minio, Inc.
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.
@ -128,7 +129,7 @@ type initiator struct {
// copyObjectResult container for copy object response. // copyObjectResult container for copy object response.
type copyObjectResult struct { type copyObjectResult struct {
ETag string ETag string
LastModified string // time string format "2006-01-02T15:04:05.000Z" LastModified time.Time // time string format "2006-01-02T15:04:05.000Z"
} }
// ObjectPart container for particular part of an object. // ObjectPart container for particular part of an object.

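copyObjectResult.LastModified switching from string to time.Time works because encoding/xml parses RFC 3339 character data directly into time.Time fields, so the timestamp no longer needs manual handling. A small sketch against a stand-in struct (the sample payload is illustrative):

```go
package main

import (
	"encoding/xml"
	"fmt"
	"time"
)

// Local stand-in for the vendored copyObjectResult type.
type copyResult struct {
	ETag         string
	LastModified time.Time
}

func main() {
	payload := []byte(`<CopyObjectResult>
  <ETag>"9b2cf535f27731c974343645a3985328"</ETag>
  <LastModified>2017-12-09T10:12:05.000Z</LastModified>
</CopyObjectResult>`)

	var res copyResult
	if err := xml.Unmarshal(payload, &res); err != nil {
		panic(err)
	}
	// encoding/xml parses the RFC 3339 text straight into the time.Time field.
	fmt.Println(res.LastModified.UTC(), res.LastModified.Year() == 2017)
}
```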
View File

@ -1,5 +1,6 @@
/* /*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc. * Minio Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2017 Minio, Inc.
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.
@ -17,6 +18,7 @@
package minio package minio
import ( import (
"context"
"net/http" "net/http"
"strconv" "strconv"
"strings" "strings"
@ -33,9 +35,9 @@ func (c Client) BucketExists(bucketName string) (bool, error) {
} }
// Execute HEAD on bucketName. // Execute HEAD on bucketName.
resp, err := c.executeMethod("HEAD", requestMetadata{ resp, err := c.executeMethod(context.Background(), "HEAD", requestMetadata{
bucketName: bucketName, bucketName: bucketName,
contentSHA256Bytes: emptySHA256, contentSHA256Hex: emptySHA256Hex,
}) })
defer closeResponse(resp) defer closeResponse(resp)
if err != nil { if err != nil {
@ -80,7 +82,7 @@ func extractObjMetadata(header http.Header) http.Header {
} }
// StatObject verifies if object exists and you have permission to access. // StatObject verifies if object exists and you have permission to access.
func (c Client) StatObject(bucketName, objectName string) (ObjectInfo, error) { func (c Client) StatObject(bucketName, objectName string, opts StatObjectOptions) (ObjectInfo, error) {
// Input validation. // Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil { if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return ObjectInfo{}, err return ObjectInfo{}, err
@ -88,12 +90,11 @@ func (c Client) StatObject(bucketName, objectName string) (ObjectInfo, error) {
if err := s3utils.CheckValidObjectName(objectName); err != nil { if err := s3utils.CheckValidObjectName(objectName); err != nil {
return ObjectInfo{}, err return ObjectInfo{}, err
} }
reqHeaders := NewHeadReqHeaders() return c.statObject(bucketName, objectName, opts)
return c.statObject(bucketName, objectName, reqHeaders)
} }
// Lower level API for statObject supporting pre-conditions and range headers. // Lower level API for statObject supporting pre-conditions and range headers.
func (c Client) statObject(bucketName, objectName string, reqHeaders RequestHeaders) (ObjectInfo, error) { func (c Client) statObject(bucketName, objectName string, opts StatObjectOptions) (ObjectInfo, error) {
// Input validation. // Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil { if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return ObjectInfo{}, err return ObjectInfo{}, err
@ -102,17 +103,12 @@ func (c Client) statObject(bucketName, objectName string, reqHeaders RequestHead
return ObjectInfo{}, err return ObjectInfo{}, err
} }
customHeader := make(http.Header)
for k, v := range reqHeaders.Header {
customHeader[k] = v
}
// Execute HEAD on objectName. // Execute HEAD on objectName.
resp, err := c.executeMethod("HEAD", requestMetadata{ resp, err := c.executeMethod(context.Background(), "HEAD", requestMetadata{
bucketName: bucketName, bucketName: bucketName,
objectName: objectName, objectName: objectName,
contentSHA256Bytes: emptySHA256, contentSHA256Hex: emptySHA256Hex,
customHeader: customHeader, customHeader: opts.Header(),
}) })
defer closeResponse(resp) defer closeResponse(resp)
if err != nil { if err != nil {

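StatObject now takes a StatObjectOptions value instead of the removed RequestHeaders helper, and the lower-level statObject simply forwards opts.Header(). A sketch of the updated call in a hypothetical existence check; the helper name and error handling are illustrative, not part of the library:

```go
// A sketch, not a complete program.
package s3check

import minio "github.com/minio/minio-go"

// objectExists illustrates the v4 call shape: StatObject now takes a
// StatObjectOptions value instead of the old RequestHeaders helper.
func objectExists(client *minio.Client, bucket, key string) (bool, error) {
	_, err := client.StatObject(bucket, key, minio.StatObjectOptions{})
	if err != nil {
		// A missing object surfaces as a NoSuchKey error response.
		if minio.ToErrorResponse(err).Code == "NoSuchKey" {
			return false, nil
		}
		return false, err
	}
	return true, nil
}
```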
View File

@ -1,6 +1,6 @@
/* /*
* Minio Go Library for Amazon S3 Compatible Cloud Storage * Minio Go Library for Amazon S3 Compatible Cloud Storage
* (C) 2015, 2016, 2017 Minio, Inc. * Copyright 2015-2017 Minio, Inc.
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.
@ -19,10 +19,9 @@ package minio
import ( import (
"bytes" "bytes"
"context"
"crypto/md5" "crypto/md5"
"crypto/sha256" "crypto/sha256"
"encoding/base64"
"encoding/hex"
"errors" "errors"
"fmt" "fmt"
"hash" "hash"
@ -87,7 +86,7 @@ type Client struct {
// Global constants. // Global constants.
const ( const (
libraryName = "minio-go" libraryName = "minio-go"
libraryVersion = "3.0.3" libraryVersion = "4.0.3"
) )
// User Agent should always following the below style. // User Agent should always following the below style.
@ -178,18 +177,6 @@ func (r *lockedRandSource) Seed(seed int64) {
r.lk.Unlock() r.lk.Unlock()
} }
// redirectHeaders copies all headers when following a redirect URL.
// This won't be needed anymore from go 1.8 (https://github.com/golang/go/issues/4800)
func redirectHeaders(req *http.Request, via []*http.Request) error {
if len(via) == 0 {
return nil
}
for key, val := range via[0].Header {
req.Header[key] = val
}
return nil
}
// getRegionFromURL - parse region from URL if present. // getRegionFromURL - parse region from URL if present.
func getRegionFromURL(u url.URL) (region string) { func getRegionFromURL(u url.URL) (region string) {
region = "" region = ""
@ -237,7 +224,6 @@ func privateNew(endpoint string, creds *credentials.Credentials, secure bool, re
// Instantiate http client and bucket location cache. // Instantiate http client and bucket location cache.
clnt.httpClient = &http.Client{ clnt.httpClient = &http.Client{
Transport: defaultMinioTransport, Transport: defaultMinioTransport,
CheckRedirect: redirectHeaders,
} }
// Sets custom region, if region is empty bucket location cache is used automatically. // Sets custom region, if region is empty bucket location cache is used automatically.
@ -359,8 +345,8 @@ type requestMetadata struct {
bucketLocation string bucketLocation string
contentBody io.Reader contentBody io.Reader
contentLength int64 contentLength int64
contentSHA256Bytes []byte contentMD5Base64 string // carries base64 encoded md5sum
contentMD5Bytes []byte contentSHA256Hex string // carries hex encoded sha256sum
} }
// dumpHTTP - dump HTTP request and response. // dumpHTTP - dump HTTP request and response.
@ -494,9 +480,11 @@ var successStatus = []int{
// executeMethod - instantiates a given method, and retries the // executeMethod - instantiates a given method, and retries the
// request upon any error up to maxRetries attempts in a binomially // request upon any error up to maxRetries attempts in a binomially
// delayed manner using a standard back off algorithm. // delayed manner using a standard back off algorithm.
func (c Client) executeMethod(method string, metadata requestMetadata) (res *http.Response, err error) { func (c Client) executeMethod(ctx context.Context, method string, metadata requestMetadata) (res *http.Response, err error) {
var isRetryable bool // Indicates if request can be retried. var isRetryable bool // Indicates if request can be retried.
var bodySeeker io.Seeker // Extracted seeker from io.Reader. var bodySeeker io.Seeker // Extracted seeker from io.Reader.
var reqRetry = MaxRetry // Indicates how many times we can retry the request
if metadata.contentBody != nil { if metadata.contentBody != nil {
// Check if body is seekable then it is retryable. // Check if body is seekable then it is retryable.
bodySeeker, isRetryable = metadata.contentBody.(io.Seeker) bodySeeker, isRetryable = metadata.contentBody.(io.Seeker)
@ -504,6 +492,11 @@ func (c Client) executeMethod(method string, metadata requestMetadata) (res *htt
case os.Stdin, os.Stdout, os.Stderr: case os.Stdin, os.Stdout, os.Stderr:
isRetryable = false isRetryable = false
} }
// Retry only when reader is seekable
if !isRetryable {
reqRetry = 1
}
// Figure out if the body can be closed - if yes // Figure out if the body can be closed - if yes
// we will definitely close it upon the function // we will definitely close it upon the function
// return. // return.
@ -522,7 +515,7 @@ func (c Client) executeMethod(method string, metadata requestMetadata) (res *htt
// Blank identifier is kept here on purpose since 'range' without // Blank identifier is kept here on purpose since 'range' without
// blank identifiers is only supported since go1.4 // blank identifiers is only supported since go1.4
// https://golang.org/doc/go1.4#forrange. // https://golang.org/doc/go1.4#forrange.
for range c.newRetryTimer(MaxRetry, DefaultRetryUnit, DefaultRetryCap, MaxJitter, doneCh) { for range c.newRetryTimer(reqRetry, DefaultRetryUnit, DefaultRetryCap, MaxJitter, doneCh) {
// Retry executes the following function body if request has an // Retry executes the following function body if request has an
// error until maxRetries have been exhausted, retry attempts are // error until maxRetries have been exhausted, retry attempts are
// performed after waiting for a given period of time in a // performed after waiting for a given period of time in a
@ -545,6 +538,8 @@ func (c Client) executeMethod(method string, metadata requestMetadata) (res *htt
} }
return nil, err return nil, err
} }
// Add context to request
req = req.WithContext(ctx)
// Initiate the request. // Initiate the request.
res, err = c.do(req) res, err = c.do(req)
@ -720,8 +715,8 @@ func (c Client) newRequest(method string, metadata requestMetadata) (req *http.R
} }
// set md5Sum for content protection. // set md5Sum for content protection.
if metadata.contentMD5Bytes != nil { if len(metadata.contentMD5Base64) > 0 {
req.Header.Set("Content-Md5", base64.StdEncoding.EncodeToString(metadata.contentMD5Bytes)) req.Header.Set("Content-Md5", metadata.contentMD5Base64)
} }
// For anonymous requests just return. // For anonymous requests just return.
@ -742,8 +737,8 @@ func (c Client) newRequest(method string, metadata requestMetadata) (req *http.R
default: default:
// Set sha256 sum for signature calculation only with signature version '4'. // Set sha256 sum for signature calculation only with signature version '4'.
shaHeader := unsignedPayload shaHeader := unsignedPayload
if len(metadata.contentSHA256Bytes) > 0 { if metadata.contentSHA256Hex != "" {
shaHeader = hex.EncodeToString(metadata.contentSHA256Bytes) shaHeader = metadata.contentSHA256Hex
} }
req.Header.Set("X-Amz-Content-Sha256", shaHeader) req.Header.Set("X-Amz-Content-Sha256", shaHeader)

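executeMethod now decides how often a request may be retried by asking whether the body is seekable: a body that cannot be rewound gets exactly one attempt (reqRetry = 1), with a further carve-out for the standard streams. A tiny sketch of that io.Seeker check:

```go
package main

import (
	"bytes"
	"fmt"
	"io"
)

func retries(body io.Reader, max int) int {
	// Same idea as executeMethod: only a seekable body can be rewound
	// and therefore safely resent.
	if _, ok := body.(io.Seeker); ok {
		return max
	}
	return 1
}

func main() {
	buffered := bytes.NewReader([]byte("payload")) // *bytes.Reader implements io.Seeker
	pr, _ := io.Pipe()                             // a pipe cannot seek

	fmt.Println(retries(buffered, 5)) // 5
	fmt.Println(retries(pr, 5))       // 1
}
```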
View File

@ -1,6 +1,6 @@
/* /*
* Minio Go Library for Amazon S3 Compatible Cloud Storage * Minio Go Library for Amazon S3 Compatible Cloud Storage
* (C) 2015, 2016, 2017 Minio, Inc. * Copyright 2015-2017 Minio, Inc.
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.
@ -18,13 +18,8 @@
package minio package minio
import ( import (
"bytes"
"io"
"io/ioutil"
"net/http" "net/http"
"net/url" "net/url"
"os"
"strings"
"testing" "testing"
"github.com/minio/minio-go/pkg/credentials" "github.com/minio/minio-go/pkg/credentials"
@ -41,129 +36,6 @@ func (c *customReader) Size() (n int64) {
return 10 return 10
} }
// Tests getReaderSize() for various Reader types.
func TestGetReaderSize(t *testing.T) {
var reader io.Reader
size, err := getReaderSize(reader)
if err != nil {
t.Fatal("Error:", err)
}
if size != -1 {
t.Fatal("Reader shouldn't have any length.")
}
bytesReader := bytes.NewReader([]byte("Hello World"))
size, err = getReaderSize(bytesReader)
if err != nil {
t.Fatal("Error:", err)
}
if size != int64(len("Hello World")) {
t.Fatalf("Reader length doesn't match got: %v, want: %v", size, len("Hello World"))
}
size, err = getReaderSize(new(customReader))
if err != nil {
t.Fatal("Error:", err)
}
if size != int64(10) {
t.Fatalf("Reader length doesn't match got: %v, want: %v", size, 10)
}
stringsReader := strings.NewReader("Hello World")
size, err = getReaderSize(stringsReader)
if err != nil {
t.Fatal("Error:", err)
}
if size != int64(len("Hello World")) {
t.Fatalf("Reader length doesn't match got: %v, want: %v", size, len("Hello World"))
}
// Create request channel.
reqCh := make(chan getRequest, 1)
// Create response channel.
resCh := make(chan getResponse, 1)
// Create done channel.
doneCh := make(chan struct{})
objectInfo := ObjectInfo{Size: 10}
// Create the first request.
firstReq := getRequest{
isReadOp: false, // Perform only a HEAD object to get objectInfo.
isFirstReq: true,
}
// Create the expected response.
firstRes := getResponse{
objectInfo: objectInfo,
}
// Send the expected response.
resCh <- firstRes
// Test setting size on the first request.
objectReaderFirstReq := newObject(reqCh, resCh, doneCh)
defer objectReaderFirstReq.Close()
// Not checking the response here...just that the reader size is correct.
_, err = objectReaderFirstReq.doGetRequest(firstReq)
if err != nil {
t.Fatal("Error:", err)
}
// Validate that the reader size is the objectInfo size.
size, err = getReaderSize(objectReaderFirstReq)
if err != nil {
t.Fatal("Error:", err)
}
if size != int64(10) {
t.Fatalf("Reader length doesn't match got: %d, wanted %d", size, objectInfo.Size)
}
fileReader, err := ioutil.TempFile(os.TempDir(), "prefix")
if err != nil {
t.Fatal("Error:", err)
}
defer fileReader.Close()
defer os.RemoveAll(fileReader.Name())
size, err = getReaderSize(fileReader)
if err != nil {
t.Fatal("Error:", err)
}
if size == -1 {
t.Fatal("Reader length for file cannot be -1.")
}
// Verify for standard input, output and error file descriptors.
size, err = getReaderSize(os.Stdin)
if err != nil {
t.Fatal("Error:", err)
}
if size != -1 {
t.Fatal("Stdin should have length of -1.")
}
size, err = getReaderSize(os.Stdout)
if err != nil {
t.Fatal("Error:", err)
}
if size != -1 {
t.Fatal("Stdout should have length of -1.")
}
size, err = getReaderSize(os.Stderr)
if err != nil {
t.Fatal("Error:", err)
}
if size != -1 {
t.Fatal("Stderr should have length of -1.")
}
file, err := os.Open(os.TempDir())
if err != nil {
t.Fatal("Error:", err)
}
defer file.Close()
_, err = getReaderSize(file)
if err == nil {
t.Fatal("Input file as directory should throw an error.")
}
}
// Tests get region from host URL. // Tests get region from host URL.
func TestGetRegionFromURL(t *testing.T) { func TestGetRegionFromURL(t *testing.T) {
testCases := []struct { testCases := []struct {
@ -352,7 +224,7 @@ func TestMakeTargetURL(t *testing.T) {
// Test 6 // Test 6
{"localhost:9000", false, "mybucket", "myobject", "", nil, url.URL{Host: "localhost:9000", Scheme: "http", Path: "/mybucket/myobject"}, nil}, {"localhost:9000", false, "mybucket", "myobject", "", nil, url.URL{Host: "localhost:9000", Scheme: "http", Path: "/mybucket/myobject"}, nil},
// Test 7, testing with query // Test 7, testing with query
{"localhost:9000", false, "mybucket", "myobject", "", map[string][]string{"param": []string{"val"}}, url.URL{Host: "localhost:9000", Scheme: "http", Path: "/mybucket/myobject", RawQuery: "param=val"}, nil}, {"localhost:9000", false, "mybucket", "myobject", "", map[string][]string{"param": {"val"}}, url.URL{Host: "localhost:9000", Scheme: "http", Path: "/mybucket/myobject", RawQuery: "param=val"}, nil},
// Test 8, testing with port 80 // Test 8, testing with port 80
{"localhost:80", false, "mybucket", "myobject", "", nil, url.URL{Host: "localhost", Scheme: "http", Path: "/mybucket/myobject"}, nil}, {"localhost:80", false, "mybucket", "myobject", "", nil, url.URL{Host: "localhost", Scheme: "http", Path: "/mybucket/myobject"}, nil},
// Test 9, testing with port 443 // Test 9, testing with port 443

View File

@ -1,6 +1,6 @@
/* /*
* Minio Go Library for Amazon S3 Compatible Cloud Storage * Minio Go Library for Amazon S3 Compatible Cloud Storage
* (C) 2015, 2016, 2017 Minio, Inc. * Copyright 2015-2017 Minio, Inc.
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.
@ -18,7 +18,6 @@
package minio package minio
import ( import (
"encoding/hex"
"net/http" "net/http"
"net/url" "net/url"
"path" "path"
@ -209,11 +208,9 @@ func (c Client) getBucketLocationRequest(bucketName string) (*http.Request, erro
} }
// Set sha256 sum for signature calculation only with signature version '4'. // Set sha256 sum for signature calculation only with signature version '4'.
var contentSha256 string contentSha256 := emptySHA256Hex
if c.secure { if c.secure {
contentSha256 = unsignedPayload contentSha256 = unsignedPayload
} else {
contentSha256 = hex.EncodeToString(sum256([]byte{}))
} }
req.Header.Set("X-Amz-Content-Sha256", contentSha256) req.Header.Set("X-Amz-Content-Sha256", contentSha256)

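getBucketLocationRequest now reuses the precomputed emptySHA256Hex constant instead of hashing an empty slice on every call; the constant presumably holds the well-known hex SHA-256 of zero bytes. A quick standard-library check of that value:

```go
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

func main() {
	sum := sha256.Sum256(nil) // SHA-256 of an empty payload
	fmt.Println(hex.EncodeToString(sum[:]))
	// Prints the value used for unsigned, body-less requests:
	// e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855
}
```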
View File

@ -1,6 +1,6 @@
/* /*
* Minio Go Library for Amazon S3 Compatible Cloud Storage * Copyright
* (C) 2015, 2016, 2017 Minio, Inc. * 2015, 2016, 2017 Minio, Inc.
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.
@ -19,7 +19,6 @@ package minio
import ( import (
"bytes" "bytes"
"encoding/hex"
"encoding/xml" "encoding/xml"
"io/ioutil" "io/ioutil"
"net/http" "net/http"
@ -116,11 +115,9 @@ func TestGetBucketLocationRequest(t *testing.T) {
// with signature version '4'. // with signature version '4'.
switch { switch {
case signerType.IsV4(): case signerType.IsV4():
var contentSha256 string contentSha256 := emptySHA256Hex
if c.secure { if c.secure {
contentSha256 = unsignedPayload contentSha256 = unsignedPayload
} else {
contentSha256 = hex.EncodeToString(sum256([]byte{}))
} }
req.Header.Set("X-Amz-Content-Sha256", contentSha256) req.Header.Set("X-Amz-Content-Sha256", contentSha256)
req = s3signer.SignV4(*req, accessKeyID, secretAccessKey, sessionToken, "us-east-1") req = s3signer.SignV4(*req, accessKeyID, secretAccessKey, sessionToken, "us-east-1")

View File

@ -1,5 +1,6 @@
/* /*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2016 Minio, Inc. * Minio Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2017 Minio, Inc.
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.

View File

@ -1,5 +1,6 @@
/* /*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. * Minio Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2017 Minio, Inc.
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.
@ -50,7 +51,7 @@ const maxMultipartPutObjectSize = 1024 * 1024 * 1024 * 1024 * 5
const unsignedPayload = "UNSIGNED-PAYLOAD" const unsignedPayload = "UNSIGNED-PAYLOAD"
// Total number of parallel workers used for multipart operation. // Total number of parallel workers used for multipart operation.
var totalWorkers = 3 const totalWorkers = 4
// Signature related constants. // Signature related constants.
const ( const (

View File

@ -1,5 +1,6 @@
/* /*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2017 Minio, Inc. * Minio Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2017 Minio, Inc.
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.
@ -17,7 +18,9 @@
package minio package minio
import ( import (
"context"
"io" "io"
"strings"
"github.com/minio/minio-go/pkg/policy" "github.com/minio/minio-go/pkg/policy"
) )
@ -52,14 +55,35 @@ func (c Core) ListObjectsV2(bucketName, objectPrefix, continuationToken string,
return c.listObjectsV2Query(bucketName, objectPrefix, continuationToken, fetchOwner, delimiter, maxkeys) return c.listObjectsV2Query(bucketName, objectPrefix, continuationToken, fetchOwner, delimiter, maxkeys)
} }
// PutObject - Upload object. Uploads using single PUT call. // CopyObject - copies an object from source object to destination object on server side.
func (c Core) PutObject(bucket, object string, size int64, data io.Reader, md5Sum, sha256Sum []byte, metadata map[string][]string) (ObjectInfo, error) { func (c Core) CopyObject(sourceBucket, sourceObject, destBucket, destObject string, metadata map[string]string) (ObjectInfo, error) {
return c.putObjectDo(bucket, object, data, md5Sum, sha256Sum, size, metadata) return c.copyObjectDo(context.Background(), sourceBucket, sourceObject, destBucket, destObject, metadata)
} }
// NewMultipartUpload - Initiates new multipart upload and returns the new uploaID. // PutObject - Upload object. Uploads using single PUT call.
func (c Core) NewMultipartUpload(bucket, object string, metadata map[string][]string) (uploadID string, err error) { func (c Core) PutObject(bucket, object string, data io.Reader, size int64, md5Base64, sha256Hex string, metadata map[string]string) (ObjectInfo, error) {
result, err := c.initiateMultipartUpload(bucket, object, metadata) opts := PutObjectOptions{}
m := make(map[string]string)
for k, v := range metadata {
if strings.ToLower(k) == "content-encoding" {
opts.ContentEncoding = v
} else if strings.ToLower(k) == "content-disposition" {
opts.ContentDisposition = v
} else if strings.ToLower(k) == "content-type" {
opts.ContentType = v
} else if strings.ToLower(k) == "cache-control" {
opts.CacheControl = v
} else {
m[k] = metadata[k]
}
}
opts.UserMetadata = m
return c.putObjectDo(context.Background(), bucket, object, data, md5Base64, sha256Hex, size, opts)
}
// NewMultipartUpload - Initiates new multipart upload and returns the new uploadID.
func (c Core) NewMultipartUpload(bucket, object string, opts PutObjectOptions) (uploadID string, err error) {
result, err := c.initiateMultipartUpload(context.Background(), bucket, object, opts)
return result.UploadID, err return result.UploadID, err
} }
@ -69,14 +93,14 @@ func (c Core) ListMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, de
} }
// PutObjectPart - Upload an object part. // PutObjectPart - Upload an object part.
func (c Core) PutObjectPart(bucket, object, uploadID string, partID int, size int64, data io.Reader, md5Sum, sha256Sum []byte) (ObjectPart, error) { func (c Core) PutObjectPart(bucket, object, uploadID string, partID int, data io.Reader, size int64, md5Base64, sha256Hex string) (ObjectPart, error) {
return c.PutObjectPartWithMetadata(bucket, object, uploadID, partID, size, data, md5Sum, sha256Sum, nil) return c.PutObjectPartWithMetadata(bucket, object, uploadID, partID, data, size, md5Base64, sha256Hex, nil)
} }
// PutObjectPartWithMetadata - upload an object part with additional request metadata. // PutObjectPartWithMetadata - upload an object part with additional request metadata.
func (c Core) PutObjectPartWithMetadata(bucket, object, uploadID string, partID int, func (c Core) PutObjectPartWithMetadata(bucket, object, uploadID string, partID int, data io.Reader,
size int64, data io.Reader, md5Sum, sha256Sum []byte, metadata map[string][]string) (ObjectPart, error) { size int64, md5Base64, sha256Hex string, metadata map[string]string) (ObjectPart, error) {
return c.uploadPart(bucket, object, uploadID, data, partID, md5Sum, sha256Sum, size, metadata) return c.uploadPart(context.Background(), bucket, object, uploadID, data, partID, md5Base64, sha256Hex, size, metadata)
} }
// ListObjectParts - List uploaded parts of an incomplete upload. // ListObjectParts - List uploaded parts of an incomplete upload.
@ -86,7 +110,7 @@ func (c Core) ListObjectParts(bucket, object, uploadID string, partNumberMarker
// CompleteMultipartUpload - Concatenate uploaded parts and commit to an object. // CompleteMultipartUpload - Concatenate uploaded parts and commit to an object.
func (c Core) CompleteMultipartUpload(bucket, object, uploadID string, parts []CompletePart) error { func (c Core) CompleteMultipartUpload(bucket, object, uploadID string, parts []CompletePart) error {
_, err := c.completeMultipartUpload(bucket, object, uploadID, completeMultipartUpload{ _, err := c.completeMultipartUpload(context.Background(), bucket, object, uploadID, completeMultipartUpload{
Parts: parts, Parts: parts,
}) })
return err return err
@ -94,7 +118,7 @@ func (c Core) CompleteMultipartUpload(bucket, object, uploadID string, parts []C
// AbortMultipartUpload - Abort an incomplete upload. // AbortMultipartUpload - Abort an incomplete upload.
func (c Core) AbortMultipartUpload(bucket, object, uploadID string) error { func (c Core) AbortMultipartUpload(bucket, object, uploadID string) error {
return c.abortMultipartUpload(bucket, object, uploadID) return c.abortMultipartUpload(context.Background(), bucket, object, uploadID)
} }
// GetBucketPolicy - fetches bucket access policy for a given bucket. // GetBucketPolicy - fetches bucket access policy for a given bucket.
@ -110,12 +134,12 @@ func (c Core) PutBucketPolicy(bucket string, bucketPolicy policy.BucketAccessPol
// GetObject is a lower level API implemented to support reading // GetObject is a lower level API implemented to support reading
// partial objects and also downloading objects with special conditions // partial objects and also downloading objects with special conditions
// matching etag, modtime etc. // matching etag, modtime etc.
func (c Core) GetObject(bucketName, objectName string, reqHeaders RequestHeaders) (io.ReadCloser, ObjectInfo, error) { func (c Core) GetObject(bucketName, objectName string, opts GetObjectOptions) (io.ReadCloser, ObjectInfo, error) {
return c.getObject(bucketName, objectName, reqHeaders) return c.getObject(context.Background(), bucketName, objectName, opts)
} }
// StatObject is a lower level API implemented to support special // StatObject is a lower level API implemented to support special
// conditions matching etag, modtime on a request. // conditions matching etag, modtime on a request.
func (c Core) StatObject(bucketName, objectName string, reqHeaders RequestHeaders) (ObjectInfo, error) { func (c Core) StatObject(bucketName, objectName string, opts StatObjectOptions) (ObjectInfo, error) {
return c.statObject(bucketName, objectName, reqHeaders) return c.statObject(bucketName, objectName, opts)
} }
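With RequestHeaders gone, ranged and conditional reads go through GetObjectOptions on both Core.GetObject and Core.StatObject. A short standalone sketch of the new read path follows; again, the endpoint, credentials and names are placeholders for illustration only.

package main

import (
	"io"
	"log"
	"os"

	minio "github.com/minio/minio-go"
)

func main() {
	core, err := minio.NewCore("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatalln(err)
	}

	// Range and match conditions that used to be set on RequestHeaders now
	// live on GetObjectOptions.
	opts := minio.GetObjectOptions{}
	if err := opts.SetRange(0, 1023); err != nil { // first KiB of the object
		log.Fatalln(err)
	}

	reader, objInfo, err := core.GetObject("my-bucketname", "my-objectname", opts)
	if err != nil {
		log.Fatalln(err)
	}
	defer reader.Close()

	log.Println("object size:", objInfo.Size)
	if _, err := io.Copy(os.Stdout, reader); err != nil {
		log.Fatalln(err)
	}
}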

View File

@ -1,5 +1,6 @@
/* /*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2017 Minio, Inc. * Minio Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2017 Minio, Inc.
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.
@ -25,7 +26,6 @@ import (
"testing" "testing"
"time" "time"
"crypto/md5"
"math/rand" "math/rand"
) )
@ -103,7 +103,9 @@ func TestGetObjectCore(t *testing.T) {
// Save the data // Save the data
objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
n, err := c.Client.PutObject(bucketName, objectName, bytes.NewReader(buf), "binary/octet-stream") n, err := c.Client.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), PutObjectOptions{
ContentType: "binary/octet-stream",
})
if err != nil { if err != nil {
t.Fatal("Error:", err, bucketName, objectName) t.Fatal("Error:", err, bucketName, objectName)
} }
@ -112,8 +114,6 @@ func TestGetObjectCore(t *testing.T) {
t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", len(buf), n) t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", len(buf), n)
} }
reqHeaders := NewGetReqHeaders()
offset := int64(2048) offset := int64(2048)
// read directly // read directly
@ -122,8 +122,9 @@ func TestGetObjectCore(t *testing.T) {
buf3 := make([]byte, n) buf3 := make([]byte, n)
buf4 := make([]byte, 1) buf4 := make([]byte, 1)
reqHeaders.SetRange(offset, offset+int64(len(buf1))-1) opts := GetObjectOptions{}
reader, objectInfo, err := c.GetObject(bucketName, objectName, reqHeaders) opts.SetRange(offset, offset+int64(len(buf1))-1)
reader, objectInfo, err := c.GetObject(bucketName, objectName, opts)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -141,8 +142,8 @@ func TestGetObjectCore(t *testing.T) {
} }
offset += 512 offset += 512
reqHeaders.SetRange(offset, offset+int64(len(buf2))-1) opts.SetRange(offset, offset+int64(len(buf2))-1)
reader, objectInfo, err = c.GetObject(bucketName, objectName, reqHeaders) reader, objectInfo, err = c.GetObject(bucketName, objectName, opts)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -160,8 +161,8 @@ func TestGetObjectCore(t *testing.T) {
t.Fatal("Error: Incorrect read between two GetObject from same offset.") t.Fatal("Error: Incorrect read between two GetObject from same offset.")
} }
reqHeaders.SetRange(0, int64(len(buf3))) opts.SetRange(0, int64(len(buf3)))
reader, objectInfo, err = c.GetObject(bucketName, objectName, reqHeaders) reader, objectInfo, err = c.GetObject(bucketName, objectName, opts)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -180,9 +181,9 @@ func TestGetObjectCore(t *testing.T) {
t.Fatal("Error: Incorrect data read in GetObject, than what was previously upoaded.") t.Fatal("Error: Incorrect data read in GetObject, than what was previously upoaded.")
} }
reqHeaders = NewGetReqHeaders() opts = GetObjectOptions{}
reqHeaders.SetMatchETag("etag") opts.SetMatchETag("etag")
_, _, err = c.GetObject(bucketName, objectName, reqHeaders) _, _, err = c.GetObject(bucketName, objectName, opts)
if err == nil { if err == nil {
t.Fatal("Unexpected GetObject should fail with mismatching etags") t.Fatal("Unexpected GetObject should fail with mismatching etags")
} }
@ -190,9 +191,9 @@ func TestGetObjectCore(t *testing.T) {
t.Fatalf("Expected \"PreconditionFailed\" as code, got %s instead", errResp.Code) t.Fatalf("Expected \"PreconditionFailed\" as code, got %s instead", errResp.Code)
} }
reqHeaders = NewGetReqHeaders() opts = GetObjectOptions{}
reqHeaders.SetMatchETagExcept("etag") opts.SetMatchETagExcept("etag")
reader, objectInfo, err = c.GetObject(bucketName, objectName, reqHeaders) reader, objectInfo, err = c.GetObject(bucketName, objectName, opts)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -210,9 +211,9 @@ func TestGetObjectCore(t *testing.T) {
t.Fatal("Error: Incorrect data read in GetObject, than what was previously upoaded.") t.Fatal("Error: Incorrect data read in GetObject, than what was previously upoaded.")
} }
reqHeaders = NewGetReqHeaders() opts = GetObjectOptions{}
reqHeaders.SetRange(0, 0) opts.SetRange(0, 0)
reader, objectInfo, err = c.GetObject(bucketName, objectName, reqHeaders) reader, objectInfo, err = c.GetObject(bucketName, objectName, opts)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -275,12 +276,12 @@ func TestGetObjectContentEncoding(t *testing.T) {
// Generate data more than 32K // Generate data more than 32K
buf := bytes.Repeat([]byte("3"), rand.Intn(1<<20)+32*1024) buf := bytes.Repeat([]byte("3"), rand.Intn(1<<20)+32*1024)
m := make(map[string][]string)
m["Content-Encoding"] = []string{"gzip"}
// Save the data // Save the data
objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
n, err := c.Client.PutObjectWithMetadata(bucketName, objectName, bytes.NewReader(buf), m, nil) n, err := c.Client.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), PutObjectOptions{
ContentEncoding: "gzip",
})
if err != nil { if err != nil {
t.Fatal("Error:", err, bucketName, objectName) t.Fatal("Error:", err, bucketName, objectName)
} }
@ -289,8 +290,7 @@ func TestGetObjectContentEncoding(t *testing.T) {
t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", len(buf), n) t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", len(buf), n)
} }
reqHeaders := NewGetReqHeaders() rwc, objInfo, err := c.GetObject(bucketName, objectName, GetObjectOptions{})
rwc, objInfo, err := c.GetObject(bucketName, objectName, reqHeaders)
if err != nil { if err != nil {
t.Fatalf("Error: %v", err) t.Fatalf("Error: %v", err)
} }
@ -370,6 +370,120 @@ func TestGetBucketPolicy(t *testing.T) {
} }
} }
// Tests Core CopyObject API implementation.
func TestCoreCopyObject(t *testing.T) {
if testing.Short() {
t.Skip("skipping functional tests for short runs")
}
// Seed random based on current time.
rand.Seed(time.Now().Unix())
// Instantiate new minio client object.
c, err := NewCore(
os.Getenv(serverEndpoint),
os.Getenv(accessKey),
os.Getenv(secretKey),
mustParseBool(os.Getenv(enableSecurity)),
)
if err != nil {
t.Fatal("Error:", err)
}
// Enable tracing, write to stderr.
// c.TraceOn(os.Stderr)
// Set user agent.
c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
// Generate a new random bucket name.
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
// Make a new bucket.
err = c.MakeBucket(bucketName, "us-east-1")
if err != nil {
t.Fatal("Error:", err, bucketName)
}
buf := bytes.Repeat([]byte("a"), 32*1024)
// Save the data
objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
objInfo, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", map[string]string{
"Content-Type": "binary/octet-stream",
})
if err != nil {
t.Fatal("Error:", err, bucketName, objectName)
}
if objInfo.Size != int64(len(buf)) {
t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", len(buf), objInfo.Size)
}
destBucketName := bucketName
destObjectName := objectName + "-dest"
cobjInfo, err := c.CopyObject(bucketName, objectName, destBucketName, destObjectName, map[string]string{
"X-Amz-Metadata-Directive": "REPLACE",
"Content-Type": "application/javascript",
})
if err != nil {
t.Fatal("Error:", err, bucketName, objectName, destBucketName, destObjectName)
}
if cobjInfo.ETag != objInfo.ETag {
t.Fatalf("Error: expected etag to be same as source object %s, but found different etag :%s", objInfo.ETag, cobjInfo.ETag)
}
// Attempt to read from destBucketName and object name.
r, err := c.Client.GetObject(destBucketName, destObjectName, GetObjectOptions{})
if err != nil {
t.Fatal("Error:", err, bucketName, objectName)
}
st, err := r.Stat()
if err != nil {
t.Fatal("Error:", err, bucketName, objectName)
}
if st.Size != int64(len(buf)) {
t.Fatalf("Error: number of bytes in stat does not match, want %v, got %v\n",
len(buf), st.Size)
}
if st.ContentType != "application/javascript" {
t.Fatalf("Error: Content types don't match, expected: application/javascript, found: %+v\n", st.ContentType)
}
if st.ETag != objInfo.ETag {
t.Fatalf("Error: expected etag to be same as source object %s, but found different etag :%s", objInfo.ETag, st.ETag)
}
if err := r.Close(); err != nil {
t.Fatal("Error:", err)
}
if err := r.Close(); err == nil {
t.Fatal("Error: object is already closed, should return error")
}
err = c.RemoveObject(bucketName, objectName)
if err != nil {
t.Fatal("Error: ", err)
}
err = c.RemoveObject(destBucketName, destObjectName)
if err != nil {
t.Fatal("Error: ", err)
}
err = c.RemoveBucket(bucketName)
if err != nil {
t.Fatal("Error:", err)
}
// No need to remove destBucketName, it is the same as bucketName.
}
// Test Core PutObject. // Test Core PutObject.
func TestCorePutObject(t *testing.T) { func TestCorePutObject(t *testing.T) {
if testing.Short() { if testing.Short() {
@ -405,21 +519,21 @@ func TestCorePutObject(t *testing.T) {
t.Fatal("Error:", err, bucketName) t.Fatal("Error:", err, bucketName)
} }
buf := bytes.Repeat([]byte("a"), minPartSize) buf := bytes.Repeat([]byte("a"), 32*1024)
// Save the data // Save the data
objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
// Object content type // Object content type
objectContentType := "binary/octet-stream" objectContentType := "binary/octet-stream"
metadata := make(map[string][]string) metadata := make(map[string]string)
metadata["Content-Type"] = []string{objectContentType} metadata["Content-Type"] = objectContentType
objInfo, err := c.PutObject(bucketName, objectName, int64(len(buf)), bytes.NewReader(buf), md5.New().Sum(nil), nil, metadata) objInfo, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "1B2M2Y8AsgTpgAmY7PhCfg==", "", metadata)
if err == nil { if err == nil {
t.Fatal("Error expected: nil, got: ", err) t.Fatal("Error expected: error, got: nil(success)")
} }
objInfo, err = c.PutObject(bucketName, objectName, int64(len(buf)), bytes.NewReader(buf), nil, nil, metadata) objInfo, err = c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", metadata)
if err != nil { if err != nil {
t.Fatal("Error:", err, bucketName, objectName) t.Fatal("Error:", err, bucketName, objectName)
} }
@ -429,7 +543,7 @@ func TestCorePutObject(t *testing.T) {
} }
// Read the data back // Read the data back
r, err := c.Client.GetObject(bucketName, objectName) r, err := c.Client.GetObject(bucketName, objectName, GetObjectOptions{})
if err != nil { if err != nil {
t.Fatal("Error:", err, bucketName, objectName) t.Fatal("Error:", err, bucketName, objectName)
} }
@ -490,18 +604,17 @@ func TestCoreGetObjectMetadata(t *testing.T) {
t.Fatal("Error:", err, bucketName) t.Fatal("Error:", err, bucketName)
} }
metadata := map[string][]string{ metadata := map[string]string{
"X-Amz-Meta-Key-1": {"Val-1"}, "X-Amz-Meta-Key-1": "Val-1",
} }
_, err = core.PutObject(bucketName, "my-objectname", 5, _, err = core.PutObject(bucketName, "my-objectname",
bytes.NewReader([]byte("hello")), nil, nil, metadata) bytes.NewReader([]byte("hello")), 5, "", "", metadata)
if err != nil { if err != nil {
log.Fatalln(err) log.Fatalln(err)
} }
reader, objInfo, err := core.GetObject(bucketName, "my-objectname", reader, objInfo, err := core.GetObject(bucketName, "my-objectname", GetObjectOptions{})
RequestHeaders{})
if err != nil { if err != nil {
log.Fatalln(err) log.Fatalln(err)
} }

File diff suppressed because it is too large

View File

@ -0,0 +1,21 @@
package main
import (
"fmt"
"github.com/minio/minio-go"
)
func main() {
// Use a secure connection.
ssl := true
// Initialize minio client object.
minioClient, err := minio.New("play.minio.io:9000", "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", ssl)
if err != nil {
fmt.Println(err)
return
}
{{.Text}}
}

227
vendor/github.com/minio/minio-go/docs/validator.go generated vendored Normal file
View File

@ -0,0 +1,227 @@
// +build ignore
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"fmt"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"strings"
"text/template"
"github.com/a8m/mark"
"github.com/gernest/wow"
"github.com/gernest/wow/spin"
"github.com/minio/cli"
)
func init() {
// Validate go binary.
if _, err := exec.LookPath("go"); err != nil {
panic(err)
}
}
var globalFlags = []cli.Flag{
cli.StringFlag{
Name: "m",
Value: "API.md",
Usage: "Path to markdown api documentation.",
},
cli.StringFlag{
Name: "t",
Value: "checker.go.template",
Usage: "Template used for generating the programs.",
},
cli.IntFlag{
Name: "skip",
Value: 2,
Usage: "Skip entries before validating the code.",
},
}
func runGofmt(path string) (msg string, err error) {
cmdArgs := []string{"-s", "-w", "-l", path}
cmd := exec.Command("gofmt", cmdArgs...)
stdoutStderr, err := cmd.CombinedOutput()
if err != nil {
return "", err
}
return string(stdoutStderr), nil
}
func runGoImports(path string) (msg string, err error) {
cmdArgs := []string{"-w", path}
cmd := exec.Command("goimports", cmdArgs...)
stdoutStderr, err := cmd.CombinedOutput()
if err != nil {
return string(stdoutStderr), err
}
return string(stdoutStderr), nil
}
func runGoBuild(path string) (msg string, err error) {
// Go build the path.
cmdArgs := []string{"build", "-o", "/dev/null", path}
cmd := exec.Command("go", cmdArgs...)
stdoutStderr, err := cmd.CombinedOutput()
if err != nil {
return string(stdoutStderr), err
}
return string(stdoutStderr), nil
}
func validatorAction(ctx *cli.Context) error {
if !ctx.IsSet("m") || !ctx.IsSet("t") {
return nil
}
docPath := ctx.String("m")
var err error
docPath, err = filepath.Abs(docPath)
if err != nil {
return err
}
data, err := ioutil.ReadFile(docPath)
if err != nil {
return err
}
templatePath := ctx.String("t")
templatePath, err = filepath.Abs(templatePath)
if err != nil {
return err
}
skipEntries := ctx.Int("skip")
m := mark.New(string(data), &mark.Options{
Gfm: true, // Github markdown support is enabled by default.
})
t, err := template.ParseFiles(templatePath)
if err != nil {
return err
}
tmpDir, err := ioutil.TempDir("", "md-verifier")
if err != nil {
return err
}
defer os.RemoveAll(tmpDir)
entryN := 1
for i := mark.NodeText; i < mark.NodeCheckbox; i++ {
if mark.NodeCode != mark.NodeType(i) {
m.AddRenderFn(mark.NodeType(i), func(node mark.Node) (s string) {
return ""
})
continue
}
m.AddRenderFn(mark.NodeCode, func(node mark.Node) (s string) {
p, ok := node.(*mark.CodeNode)
if !ok {
return
}
p.Text = strings.NewReplacer("&lt;", "<", "&gt;", ">", "&quot;", `"`, "&amp;", "&").Replace(p.Text)
if skipEntries > 0 {
skipEntries--
return
}
testFilePath := filepath.Join(tmpDir, "example.go")
w, werr := os.Create(testFilePath)
if werr != nil {
panic(werr)
}
t.Execute(w, p)
w.Sync()
w.Close()
entryN++
msg, err := runGofmt(testFilePath)
if err != nil {
fmt.Printf("Failed running gofmt on %s, with (%s):(%s)\n", testFilePath, msg, err)
os.Exit(-1)
}
msg, err = runGoImports(testFilePath)
if err != nil {
fmt.Printf("Failed running gofmt on %s, with (%s):(%s)\n", testFilePath, msg, err)
os.Exit(-1)
}
msg, err = runGoBuild(testFilePath)
if err != nil {
fmt.Printf("Failed running gobuild on %s, with (%s):(%s)\n", testFilePath, msg, err)
fmt.Printf("Code with possible issue in %s:\n%s", docPath, p.Text)
fmt.Printf("To test `go build %s`\n", testFilePath)
os.Exit(-1)
}
// Once successfully built remove the test file
os.Remove(testFilePath)
return
})
}
w := wow.New(os.Stdout, spin.Get(spin.Moon), fmt.Sprintf(" Running validation tests in %s", tmpDir))
w.Start()
// Rendering the markdown executes our checker on each code block.
_ = m.Render()
w.PersistWith(spin.Get(spin.Runner), " Successfully finished tests")
w.Stop()
return nil
}
func main() {
app := cli.NewApp()
app.Action = validatorAction
app.HideVersion = true
app.HideHelpCommand = true
app.Usage = "Validates code block sections inside API.md"
app.Author = "Minio.io"
app.Flags = globalFlags
// Help template for validator
app.CustomAppHelpTemplate = `NAME:
{{.Name}} - {{.Usage}}
USAGE:
{{.Name}} {{if .VisibleFlags}}[FLAGS] {{end}}COMMAND{{if .VisibleFlags}} [COMMAND FLAGS | -h]{{end}} [ARGUMENTS...]
COMMANDS:
{{range .VisibleCommands}}{{join .Names ", "}}{{ "\t" }}{{.Usage}}
{{end}}{{if .VisibleFlags}}
FLAGS:
{{range .VisibleFlags}}{{.}}
{{end}}{{end}}
TEMPLATE:
Validator uses Go's 'text/template' formatting so you need to ensure
your template is formatted correctly, check 'docs/checker.go.template'
USAGE:
go run docs/validator.go -m docs/API.md -t /tmp/mycode.go.template
`
app.Run(os.Args)
}

View File

@ -1,7 +1,8 @@
// +build ignore // +build ignore
/* /*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc. * Minio Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2017 Minio, Inc.
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.

View File

@ -1,7 +1,8 @@
// +build ignore // +build ignore
/* /*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. * Minio Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2017 Minio, Inc.
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.

View File

@ -1,7 +1,8 @@
// +build ignore // +build ignore
/* /*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2016 Minio, Inc. * Minio Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2017 Minio, Inc.
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.

View File

@ -1,7 +1,8 @@
// +build ignore // +build ignore
/* /*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2016 Minio, Inc. * Minio Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2017 Minio, Inc.
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.

View File

@ -0,0 +1,54 @@
// +build ignore
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"log"
"time"
"context"
"github.com/minio/minio-go"
)
func main() {
// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname, my-objectname
// and my-filename.csv are dummy values, please replace them with original values.
// Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
// This boolean value is the last argument for New().
// New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
// determined based on the Endpoint value.
s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
if err != nil {
log.Fatalln(err)
}
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute)
defer cancel()
if err := s3Client.FGetObjectWithContext(ctx, "my-bucketname", "my-objectname", "my-filename.csv", minio.GetObjectOptions{}); err != nil {
log.Fatalln(err)
}
log.Println("Successfully saved my-filename.csv")
}

View File

@ -1,7 +1,8 @@
// +build ignore // +build ignore
/* /*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. * Minio Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2017 Minio, Inc.
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.
@ -38,7 +39,7 @@ func main() {
log.Fatalln(err) log.Fatalln(err)
} }
if err := s3Client.FGetObject("my-bucketname", "my-objectname", "my-filename.csv"); err != nil { if err := s3Client.FGetObject("my-bucketname", "my-objectname", "my-filename.csv", minio.GetObjectOptions{}); err != nil {
log.Fatalln(err) log.Fatalln(err)
} }
log.Println("Successfully saved my-filename.csv") log.Println("Successfully saved my-filename.csv")

View File

@ -0,0 +1,80 @@
// +build ignore
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"log"
"github.com/minio/minio-go"
"github.com/minio/minio-go/pkg/encrypt"
)
func main() {
// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-testfile, my-bucketname and
// my-objectname are dummy values, please replace them with original values.
// Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
// This boolean value is the last argument for New().
// New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
// determined based on the Endpoint value.
s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
if err != nil {
log.Fatalln(err)
}
// Specify a local file that we will upload
filePath := "my-testfile"
//// Build an asymmetric key from private and public files
//
// privateKey, err := ioutil.ReadFile("private.key")
// if err != nil {
// t.Fatal(err)
// }
//
// publicKey, err := ioutil.ReadFile("public.key")
// if err != nil {
// t.Fatal(err)
// }
//
// asymmetricKey, err := NewAsymmetricKey(privateKey, publicKey)
// if err != nil {
// t.Fatal(err)
// }
////
// Build a symmetric key
symmetricKey := encrypt.NewSymmetricKey([]byte("my-secret-key-00"))
// Build encryption materials which will encrypt uploaded data
cbcMaterials, err := encrypt.NewCBCSecureMaterials(symmetricKey)
if err != nil {
log.Fatalln(err)
}
// Encrypt file content and upload to the server
n, err := s3Client.FPutEncryptedObject("my-bucketname", "my-objectname", filePath, cbcMaterials)
if err != nil {
log.Fatalln(err)
}
log.Println("Uploaded", "my-objectname", " of size: ", n, "Successfully.")
}

View File

@ -0,0 +1,53 @@
// +build ignore
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"log"
"time"
"context"
"github.com/minio/minio-go"
)
func main() {
// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname, my-objectname
// and my-filename.csv are dummy values, please replace them with original values.
// Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
// This boolean value is the last argument for New().
// New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
// determined based on the Endpoint value.
s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
if err != nil {
log.Fatalln(err)
}
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute)
defer cancel()
if _, err := s3Client.FPutObjectWithContext(ctx, "my-bucketname", "my-objectname", "my-filename.csv", minio.PutObjectOptions{ContentType: "application/csv"}); err != nil {
log.Fatalln(err)
}
log.Println("Successfully uploaded my-filename.csv")
}

View File

@ -1,7 +1,8 @@
// +build ignore // +build ignore
/* /*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. * Minio Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2017 Minio, Inc.
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.
@ -38,7 +39,9 @@ func main() {
log.Fatalln(err) log.Fatalln(err)
} }
if _, err := s3Client.FPutObject("my-bucketname", "my-objectname", "my-filename.csv", "application/csv"); err != nil { if _, err := s3Client.FPutObject("my-bucketname", "my-objectname", "my-filename.csv", minio.PutObjectOptions{
ContentType: "application/csv",
}); err != nil {
log.Fatalln(err) log.Fatalln(err)
} }
log.Println("Successfully uploaded my-filename.csv") log.Println("Successfully uploaded my-filename.csv")

View File

@ -1,7 +1,8 @@
// +build ignore // +build ignore
/* /*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. * Minio Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2017 Minio, Inc.
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.

View File

@ -1,7 +1,8 @@
// +build ignore // +build ignore
/* /*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc. * Minio Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2017 Minio, Inc.
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.

View File

@ -1,7 +1,8 @@
// +build ignore // +build ignore
/* /*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc. * Minio Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2017 Minio, Inc.
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.

View File

@ -0,0 +1,73 @@
// +build ignore
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"io"
"log"
"os"
"time"
"context"
"github.com/minio/minio-go"
)
func main() {
// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname, my-objectname and
// my-testfile are dummy values, please replace them with original values.
// Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
// This boolean value is the last argument for New().
// New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
// determined based on the Endpoint value.
s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESS-KEY-HERE", "YOUR-SECRET-KEY-HERE", true)
if err != nil {
log.Fatalln(err)
}
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute)
defer cancel()
opts := minio.GetObjectOptions{}
opts.SetModified(time.Now().Round(10 * time.Minute)) // get object if was modified within the last 10 minutes
reader, err := s3Client.GetObjectWithContext(ctx, "my-bucketname", "my-objectname", opts)
if err != nil {
log.Fatalln(err)
}
defer reader.Close()
localFile, err := os.Create("my-testfile")
if err != nil {
log.Fatalln(err)
}
defer localFile.Close()
stat, err := reader.Stat()
if err != nil {
log.Fatalln(err)
}
if _, err := io.CopyN(localFile, reader, stat.Size); err != nil {
log.Fatalln(err)
}
}

View File

@ -1,7 +1,8 @@
// +build ignore // +build ignore
/* /*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. * Minio Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2017 Minio, Inc.
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.
@ -40,7 +41,7 @@ func main() {
log.Fatalln(err) log.Fatalln(err)
} }
reader, err := s3Client.GetObject("my-bucketname", "my-objectname") reader, err := s3Client.GetObject("my-bucketname", "my-objectname", minio.GetObjectOptions{})
if err != nil { if err != nil {
log.Fatalln(err) log.Fatalln(err)
} }

View File

@ -1,7 +1,8 @@
// +build ignore // +build ignore
/* /*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc. * Minio Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2017 Minio, Inc.
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.

View File

@ -1,7 +1,8 @@
// +build ignore // +build ignore
/* /*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. * Minio Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2017 Minio, Inc.
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.

View File

@ -1,7 +1,8 @@
// +build ignore // +build ignore
/* /*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. * Minio Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2017 Minio, Inc.
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.

View File

@ -1,7 +1,8 @@
// +build ignore // +build ignore
/* /*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. * Minio Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2017 Minio, Inc.
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.

View File

@ -1,7 +1,8 @@
// +build ignore // +build ignore
/* /*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. * Minio Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2017 Minio, Inc.
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.

View File

@ -1,7 +1,8 @@
// +build ignore // +build ignore
/* /*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2016 Minio, Inc. * Minio Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2017 Minio, Inc.
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.

View File

@ -1,7 +1,8 @@
// +build ignore // +build ignore
/* /*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. * Minio Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2017 Minio, Inc.
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.

View File

@ -1,7 +1,8 @@
// +build ignore // +build ignore
/* /*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. * Minio Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2017 Minio, Inc.
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.

View File

@ -1,7 +1,8 @@
// +build ignore // +build ignore
/* /*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. * Minio Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2017 Minio, Inc.
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.

View File

@ -1,7 +1,8 @@
// +build ignore // +build ignore
/* /*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. * Minio Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2017 Minio, Inc.
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.

View File

@ -1,7 +1,8 @@
// +build ignore // +build ignore
/* /*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. * Minio Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2017 Minio, Inc.
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.

View File

@ -1,7 +1,8 @@
// +build ignore // +build ignore
/* /*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. * Minio Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2017 Minio, Inc.
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.
@ -75,7 +76,7 @@ func main() {
} }
// Encrypt file content and upload to the server // Encrypt file content and upload to the server
n, err := s3Client.PutEncryptedObject("my-bucketname", "my-objectname", file, cbcMaterials, nil, nil) n, err := s3Client.PutEncryptedObject("my-bucketname", "my-objectname", file, cbcMaterials)
if err != nil { if err != nil {
log.Fatalln(err) log.Fatalln(err)
} }

View File

@ -0,0 +1,68 @@
// +build ignore
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"log"
"os"
"time"
"context"
"github.com/minio/minio-go"
)
func main() {
// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-testfile, my-bucketname and
// my-objectname are dummy values, please replace them with original values.
// Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
// This boolean value is the last argument for New().
// New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
// determined based on the Endpoint value.
s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
if err != nil {
log.Fatalln(err)
}
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute)
defer cancel()
object, err := os.Open("my-testfile")
if err != nil {
log.Fatalln(err)
}
defer object.Close()
objectStat, err := object.Stat()
if err != nil {
log.Fatalln(err)
}
n, err := s3Client.PutObjectWithContext(ctx, "my-bucketname", "my-objectname", object, objectStat.Size(), minio.PutObjectOptions{
ContentType: "application/octet-stream",
})
if err != nil {
log.Fatalln(err)
}
log.Println("Uploaded", "my-objectname", " of size: ", n, "Successfully.")
}

View File

@ -1,7 +1,8 @@
// +build ignore // +build ignore
/* /*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2017 Minio, Inc. * Minio Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2017 Minio, Inc.
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.
@ -24,7 +25,6 @@ import (
"encoding/base64" "encoding/base64"
"io/ioutil" "io/ioutil"
"log" "log"
"net/http"
minio "github.com/minio/minio-go" minio "github.com/minio/minio-go"
) )
@ -54,24 +54,24 @@ func main() {
// of the encryption key or to decrypt the contents of the // of the encryption key or to decrypt the contents of the
// encrypted object. That means, if you lose the encryption // encrypted object. That means, if you lose the encryption
// key, you lose the object. // key, you lose the object.
var metadata = map[string][]string{ var metadata = map[string]string{
"x-amz-server-side-encryption-customer-algorithm": []string{"AES256"}, "x-amz-server-side-encryption-customer-algorithm": "AES256",
"x-amz-server-side-encryption-customer-key": []string{encryptionKey}, "x-amz-server-side-encryption-customer-key": encryptionKey,
"x-amz-server-side-encryption-customer-key-MD5": []string{encryptionKeyMD5}, "x-amz-server-side-encryption-customer-key-MD5": encryptionKeyMD5,
} }
// minioClient.TraceOn(os.Stderr) // Enable to debug. // minioClient.TraceOn(os.Stderr) // Enable to debug.
_, err = minioClient.PutObjectWithMetadata("mybucket", "my-encrypted-object.txt", content, metadata, nil) _, err = minioClient.PutObject("mybucket", "my-encrypted-object.txt", content, 11, minio.PutObjectOptions{UserMetadata: metadata})
if err != nil { if err != nil {
log.Fatalln(err) log.Fatalln(err)
} }
var reqHeaders = minio.RequestHeaders{Header: http.Header{}} opts := minio.GetObjectOptions{}
for k, v := range metadata { for k, v := range metadata {
reqHeaders.Set(k, v[0]) opts.Set(k, v)
} }
coreClient := minio.Core{minioClient} coreClient := minio.Core{minioClient}
reader, _, err := coreClient.GetObject("mybucket", "my-encrypted-object.txt", reqHeaders) reader, _, err := coreClient.GetObject("mybucket", "my-encrypted-object.txt", opts)
if err != nil { if err != nil {
log.Fatalln(err) log.Fatalln(err)
} }

View File

@ -1,7 +1,8 @@
// +build ignore // +build ignore
/* /*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. * Minio Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2017 Minio, Inc.
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.
@ -39,7 +40,7 @@ func main() {
log.Fatalln(err) log.Fatalln(err)
} }
reader, err := s3Client.GetObject("my-bucketname", "my-objectname") reader, err := s3Client.GetObject("my-bucketname", "my-objectname", minio.GetObjectOptions{})
if err != nil { if err != nil {
log.Fatalln(err) log.Fatalln(err)
} }
@ -54,10 +55,8 @@ func main() {
// the Reads inside. // the Reads inside.
progress := pb.New64(objectInfo.Size) progress := pb.New64(objectInfo.Size)
progress.Start() progress.Start()
n, err := s3Client.PutObject("my-bucketname", "my-objectname-progress", reader, objectInfo.Size, minio.PutObjectOptions{ContentType: "application/octet-stream", Progress: progress})
n, err := s3Client.PutObjectWithProgress("my-bucketname", "my-objectname-progress", reader, map[string][]string{
"Content-Type": []string{"application/octet-stream"},
}, progress)
if err != nil { if err != nil {
log.Fatalln(err) log.Fatalln(err)
} }

View File

@ -1,7 +1,8 @@
// +build ignore // +build ignore
/* /*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2016 Minio, Inc. * Minio Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2017 Minio, Inc.
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.
@ -48,7 +49,12 @@ func main() {
} }
defer object.Close() defer object.Close()
n, err := s3Client.PutObject("my-bucketname", "my-objectname", object, "application/octet-stream") objectStat, err := object.Stat()
if err != nil {
log.Fatalln(err)
}
n, err := s3Client.PutObject("my-bucketname", "my-objectname", object, objectStat.Size(), minio.PutObjectOptions{ContentType: "application/octet-stream"})
if err != nil { if err != nil {
log.Fatalln(err) log.Fatalln(err)
} }

View File

@ -1,7 +1,8 @@
// +build ignore // +build ignore
/* /*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2016 Minio, Inc. * Minio Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2017 Minio, Inc.
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.
@ -45,7 +46,7 @@ func main() {
} }
defer object.Close() defer object.Close()
n, err := s3Client.PutObjectStreaming("my-bucketname", "my-objectname", object) n, err := s3Client.PutObject("my-bucketname", "my-objectname", object, -1, minio.PutObjectOptions{})
if err != nil { if err != nil {
log.Fatalln(err) log.Fatalln(err)
} }
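The size argument of -1 in the hunk above is how the v4 API replaces PutObjectStreaming: an unknown length makes the client stream the upload in parts instead of issuing a single PUT. Below is a minimal standalone version of that pattern, using the same placeholder endpoint, credentials and names as the surrounding examples.

package main

import (
	"log"
	"os"

	minio "github.com/minio/minio-go"
)

func main() {
	s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
	if err != nil {
		log.Fatalln(err)
	}

	object, err := os.Open("my-testfile")
	if err != nil {
		log.Fatalln(err)
	}
	defer object.Close()

	// Size -1 marks the length as unknown, so the upload is streamed in parts.
	n, err := s3Client.PutObject("my-bucketname", "my-objectname", object, -1,
		minio.PutObjectOptions{})
	if err != nil {
		log.Fatalln(err)
	}
	log.Println("uploaded", n, "bytes")
}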

View File

@ -1,7 +1,8 @@
// +build ignore // +build ignore
/* /*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. * Minio Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2017 Minio, Inc.
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.
@ -44,8 +45,12 @@ func main() {
log.Fatalln(err) log.Fatalln(err)
} }
defer object.Close() defer object.Close()
objectStat, err := object.Stat()
if err != nil {
log.Fatalln(err)
}
n, err := s3Client.PutObject("my-bucketname", "my-objectname", object, "application/octet-stream") n, err := s3Client.PutObject("my-bucketname", "my-objectname", object, objectStat.Size(), minio.PutObjectOptions{ContentType: "application/octet-stream"})
if err != nil { if err != nil {
log.Fatalln(err) log.Fatalln(err)
} }

View File

@ -1,7 +1,8 @@
// +build ignore // +build ignore
/* /*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc. * Minio Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2017 Minio, Inc.
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.

View File

@ -1,7 +1,8 @@
// +build ignore // +build ignore
/* /*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. * Minio Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2017 Minio, Inc.
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.

View File

@ -1,7 +1,8 @@
// +build ignore // +build ignore
/* /*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. * Minio Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2017 Minio, Inc.
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.

View File

@ -1,7 +1,8 @@
// +build ignore // +build ignore
/* /*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. * Minio Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2017 Minio, Inc.
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.

View File

@ -1,7 +1,8 @@
// +build ignore // +build ignore
/* /*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. * Minio Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2017 Minio, Inc.
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.
@ -20,7 +21,6 @@ package main
import ( import (
"log" "log"
"strconv"
"github.com/minio/minio-go" "github.com/minio/minio-go"
) )
@ -44,8 +44,12 @@ func main() {
// Send object names that are needed to be removed to objectsCh // Send object names that are needed to be removed to objectsCh
go func() { go func() {
defer close(objectsCh) defer close(objectsCh)
for i := 0; i < 10; i++ { // List all objects from a bucket-name with a matching prefix.
objectsCh <- "/path/to/my-objectname" + strconv.Itoa(i) for object := range s3Client.ListObjects("my-bucketname", "my-prefixname", true, doneCh) {
if object.Err != nil {
log.Fatalln(object.Err)
}
objectsCh <- object.Key
} }
}() }()
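The hunk shows only the producer side; for context, here is a hedged sketch of how that goroutine typically pairs with the RemoveObjects consumer, which batches the deletes and reports per-object failures on a channel. The bucket and prefix names are the same placeholders used in the example.

package main

import (
	"log"

	minio "github.com/minio/minio-go"
)

func main() {
	s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
	if err != nil {
		log.Fatalln(err)
	}

	objectsCh := make(chan string)
	doneCh := make(chan struct{})
	defer close(doneCh)

	// Producer: feed every key under the prefix into objectsCh.
	go func() {
		defer close(objectsCh)
		for object := range s3Client.ListObjects("my-bucketname", "my-prefixname", true, doneCh) {
			if object.Err != nil {
				log.Fatalln(object.Err)
			}
			objectsCh <- object.Key
		}
	}()

	// Consumer: RemoveObjects deletes in batches and reports failures per object.
	for rErr := range s3Client.RemoveObjects("my-bucketname", objectsCh) {
		log.Println("failed to remove", rErr.ObjectName, ":", rErr.Err)
	}
}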

View File

@ -1,7 +1,8 @@
// +build ignore // +build ignore
/* /*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc. * Minio Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2017 Minio, Inc.
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.

View File

@ -1,7 +1,8 @@
// +build ignore // +build ignore
/* /*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc. * Minio Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2017 Minio, Inc.
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.

View File

@ -1,7 +1,8 @@
// +build ignore // +build ignore
/* /*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. * Minio Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2017 Minio, Inc.
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.
@ -37,7 +38,7 @@ func main() {
if err != nil { if err != nil {
log.Fatalln(err) log.Fatalln(err)
} }
stat, err := s3Client.StatObject("my-bucketname", "my-objectname") stat, err := s3Client.StatObject("my-bucketname", "my-objectname", minio.StatObjectOptions{})
if err != nil { if err != nil {
log.Fatalln(err) log.Fatalln(err)
} }
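StatObject now takes StatObjectOptions as well. A common follow-up is distinguishing a missing key from a real failure; the sketch below does that with ToErrorResponse. The "NoSuchKey" code and the placeholder names are assumptions for illustration, not something this diff defines.

package main

import (
	"log"

	minio "github.com/minio/minio-go"
)

func main() {
	s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
	if err != nil {
		log.Fatalln(err)
	}

	stat, err := s3Client.StatObject("my-bucketname", "my-objectname", minio.StatObjectOptions{})
	if err != nil {
		// ToErrorResponse unwraps the S3 error response so its code can be checked.
		if minio.ToErrorResponse(err).Code == "NoSuchKey" {
			log.Println("object does not exist")
			return
		}
		log.Fatalln(err)
	}
	log.Println("object size:", stat.Size)
}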

File diff suppressed because it is too large

View File

@ -1,5 +1,6 @@
/* /*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2017 Minio, Inc. * Minio Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2017 Minio, Inc.
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.
@ -40,17 +41,17 @@ func TestSetHeader(t *testing.T) {
{1, -5, fmt.Errorf("Invalid range specified: start=1 end=-5"), ""}, {1, -5, fmt.Errorf("Invalid range specified: start=1 end=-5"), ""},
} }
for i, testCase := range testCases { for i, testCase := range testCases {
rh := NewGetReqHeaders() opts := GetObjectOptions{}
err := rh.SetRange(testCase.start, testCase.end) err := opts.SetRange(testCase.start, testCase.end)
if err == nil && testCase.errVal != nil { if err == nil && testCase.errVal != nil {
t.Errorf("Test %d: Expected to fail with '%v' but it passed", t.Errorf("Test %d: Expected to fail with '%v' but it passed",
i+1, testCase.errVal) i+1, testCase.errVal)
} else if err != nil && testCase.errVal.Error() != err.Error() { } else if err != nil && testCase.errVal.Error() != err.Error() {
t.Errorf("Test %d: Expected error '%v' but got error '%v'", t.Errorf("Test %d: Expected error '%v' but got error '%v'",
i+1, testCase.errVal, err) i+1, testCase.errVal, err)
} else if err == nil && rh.Get("Range") != testCase.expected { } else if err == nil && opts.headers["Range"] != testCase.expected {
t.Errorf("Test %d: Expected range header '%s', but got '%s'", t.Errorf("Test %d: Expected range header '%s', but got '%s'",
i+1, testCase.expected, rh.Get("Range")) i+1, testCase.expected, opts.headers["Range"])
} }
} }
} }

View File

@ -1,5 +1,6 @@
/* /*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc. * Minio Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2017 Minio, Inc.
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.


@@ -1,6 +1,6 @@
 /*
  * Minio Go Library for Amazon S3 Compatible Cloud Storage
- * (C) 2017 Minio, Inc.
+ * Copyright 2017 Minio, Inc.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.


@@ -1,6 +1,6 @@
 /*
  * Minio Go Library for Amazon S3 Compatible Cloud Storage
- * (C) 2017 Minio, Inc.
+ * Copyright 2017 Minio, Inc.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.


@@ -1,6 +1,6 @@
 /*
  * Minio Go Library for Amazon S3 Compatible Cloud Storage
- * (C) 2017 Minio, Inc.
+ * Copyright 2017 Minio, Inc.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.


@@ -1,6 +1,6 @@
 /*
  * Minio Go Library for Amazon S3 Compatible Cloud Storage
- * (C) 2017 Minio, Inc.
+ * Copyright 2017 Minio, Inc.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.


@@ -1,3 +1,20 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 // Package credentials provides credential retrieval and management
 // for S3 compatible object storage.
 //


@@ -1,6 +1,6 @@
 /*
  * Minio Go Library for Amazon S3 Compatible Cloud Storage
- * (C) 2017 Minio, Inc.
+ * Copyright 2017 Minio, Inc.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.


@@ -1,6 +1,6 @@
 /*
  * Minio Go Library for Amazon S3 Compatible Cloud Storage
- * (C) 2017 Minio, Inc.
+ * Copyright 2017 Minio, Inc.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.


@@ -1,6 +1,6 @@
 /*
  * Minio Go Library for Amazon S3 Compatible Cloud Storage
- * (C) 2017 Minio, Inc.
+ * Copyright 2017 Minio, Inc.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.


@@ -1,6 +1,6 @@
 /*
  * Minio Go Library for Amazon S3 Compatible Cloud Storage
- * (C) 2017 Minio, Inc.
+ * Copyright 2017 Minio, Inc.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.


@@ -1,6 +1,6 @@
 /*
  * Minio Go Library for Amazon S3 Compatible Cloud Storage
- * (C) 2017 Minio, Inc.
+ * Copyright 2017 Minio, Inc.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.

Some files were not shown because too many files have changed in this diff.