
Update minio-go

Alexander Neumann 2017-07-17 20:19:04 +02:00
parent d6da9211bc
commit 724b5bf4fe
37 changed files with 5917 additions and 4809 deletions

vendor/manifest vendored

@@ -46,7 +46,7 @@
{
"importpath": "github.com/minio/minio-go",
"repository": "https://github.com/minio/minio-go",
"revision": "fe53a65ebc43b5d22626b29a19a3de81170e42d3",
"revision": "bd8e1d8a93f006a0207e026353bf0644ffcdd320",
"branch": "master"
},
{


@@ -0,0 +1,532 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package minio
import (
"encoding/base64"
"fmt"
"net/http"
"net/url"
"strconv"
"strings"
"time"
"github.com/minio/minio-go/pkg/s3utils"
)
// SSEInfo - represents Server-Side-Encryption parameters specified by
// a user.
type SSEInfo struct {
key []byte
algo string
}
// NewSSEInfo - specifies (binary or un-encoded) encryption key and
// algorithm name. If algo is empty, it defaults to "AES256". Ref:
// https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html
func NewSSEInfo(key []byte, algo string) SSEInfo {
if algo == "" {
algo = "AES256"
}
return SSEInfo{key, algo}
}
// internal method that computes SSE-C headers
func (s *SSEInfo) getSSEHeaders(isCopySource bool) map[string]string {
if s == nil {
return nil
}
cs := ""
if isCopySource {
cs = "copy-source-"
}
return map[string]string{
"x-amz-" + cs + "server-side-encryption-customer-algorithm": s.algo,
"x-amz-" + cs + "server-side-encryption-customer-key": base64.StdEncoding.EncodeToString(s.key),
"x-amz-" + cs + "server-side-encryption-customer-key-MD5": base64.StdEncoding.EncodeToString(sumMD5(s.key)),
}
}
// GetSSEHeaders - computes and returns headers for SSE-C as key-value
// pairs. They can be set as metadata in PutObject* requests (for
// encryption) or be set as request headers in `Core.GetObject` (for
// decryption).
func (s *SSEInfo) GetSSEHeaders() map[string]string {
return s.getSSEHeaders(false)
}
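// A minimal usage sketch (illustrative only, not part of the upstream
// file; the key value and output are hypothetical): for a 32-byte key,
// the computed SSE-C headers look like this:
//
//	key := []byte("32byteslongsecretkeymustprovided")
//	sse := minio.NewSSEInfo(key, "") // algo defaults to "AES256"
//	for k, v := range sse.GetSSEHeaders() {
//		fmt.Println(k, "=", v)
//	}
//	// Output (values abbreviated):
//	// x-amz-server-side-encryption-customer-algorithm = AES256
//	// x-amz-server-side-encryption-customer-key = <base64(key)>
//	// x-amz-server-side-encryption-customer-key-MD5 = <base64(md5(key))>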
// DestinationInfo - type with information about the object to be
// created via server-side copy requests, using the Compose API.
type DestinationInfo struct {
bucket, object string
// key for encrypting destination
encryption *SSEInfo
// if no user-metadata is provided, it is copied from source
// (when there is only one source object in the compose
// request)
userMetadata map[string]string
}
// NewDestinationInfo - creates a compose-object/copy-object
// destination info object.
//
// `encSSEC` is the key info for server-side-encryption with customer
// provided key. If it is nil, no encryption is performed.
//
// `userMeta` is the user-metadata key-value pairs to be set on the
// destination. The keys are automatically prefixed with `x-amz-meta-`
// if needed. If nil is passed, and if only a single source (of any
// size) is provided in the ComposeObject call, then metadata from the
// source is copied to the destination.
func NewDestinationInfo(bucket, object string, encryptSSEC *SSEInfo,
userMeta map[string]string) (d DestinationInfo, err error) {
// Input validation.
if err = s3utils.CheckValidBucketName(bucket); err != nil {
return d, err
}
if err = s3utils.CheckValidObjectName(object); err != nil {
return d, err
}
// Process custom-metadata to remove a `x-amz-meta-` prefix if
// present and validate that keys are distinct (after this
// prefix removal).
m := make(map[string]string)
for k, v := range userMeta {
if strings.HasPrefix(strings.ToLower(k), "x-amz-meta-") {
k = k[len("x-amz-meta-"):]
}
if _, ok := m[k]; ok {
return d, fmt.Errorf("Cannot add both %s and x-amz-meta-%s keys as custom metadata", k, k)
}
m[k] = v
}
return DestinationInfo{
bucket: bucket,
object: object,
encryption: encryptSSEC,
userMetadata: m,
}, nil
}
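// A usage sketch (bucket, object and metadata names are hypothetical).
// Note that user-metadata keys may be given with or without the
// `x-amz-meta-` prefix; the prefix is stripped before the
// duplicate-key check above:
//
//	dst, err := minio.NewDestinationInfo("my-bucket", "my-object", nil,
//		map[string]string{"x-amz-meta-project": "alpha"})
//	if err != nil {
//		log.Fatalln(err)
//	}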
// getUserMetaHeadersMap - constructs the key-value pairs from the
// metadata map to send as headers in a copy-object request. For a
// single-part copy-object (i.e. a non-multipart object), enable
// withCopyDirectiveHeader to set `x-amz-metadata-directive` to
// `REPLACE`, so that metadata headers from the source are not copied
// over.
func (d *DestinationInfo) getUserMetaHeadersMap(withCopyDirectiveHeader bool) map[string]string {
if len(d.userMetadata) == 0 {
return nil
}
r := make(map[string]string)
if withCopyDirectiveHeader {
r["x-amz-metadata-directive"] = "REPLACE"
}
for k, v := range d.userMetadata {
r["x-amz-meta-"+k] = v
}
return r
}
// SourceInfo - represents a source object to be copied, using
// server-side copying APIs.
type SourceInfo struct {
bucket, object string
start, end int64
decryptKey *SSEInfo
// Headers to send with the upload-part-copy request involving
// this source object.
Headers http.Header
}
// NewSourceInfo - create a compose-object/copy-object source info
// object.
//
// `decryptSSEC` is the decryption key using server-side-encryption
// with customer provided key. It may be nil if the source is not
// encrypted.
func NewSourceInfo(bucket, object string, decryptSSEC *SSEInfo) SourceInfo {
r := SourceInfo{
bucket: bucket,
object: object,
start: -1, // range is unspecified by default
decryptKey: decryptSSEC,
Headers: make(http.Header),
}
// Set the source header
r.Headers.Set("x-amz-copy-source", s3utils.EncodePath(bucket+"/"+object))
// Assemble decryption headers for upload-part-copy request
for k, v := range decryptSSEC.getSSEHeaders(true) {
r.Headers.Set(k, v)
}
return r
}
// SetRange - Set the start and end offset of the source object to be
// copied. If this method is not called, the whole source object is
// copied.
func (s *SourceInfo) SetRange(start, end int64) error {
if start > end || start < 0 {
return ErrInvalidArgument("start must be non-negative, and start must be at most end.")
}
// Note that 0 <= start <= end
s.start, s.end = start, end
return nil
}
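// A usage sketch (names are hypothetical): copy only the first 16MiB
// of an unencrypted source object:
//
//	src := minio.NewSourceInfo("src-bucket", "src-object", nil)
//	if err := src.SetRange(0, 16*1024*1024-1); err != nil {
//		log.Fatalln(err)
//	}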
// SetMatchETagCond - Set ETag match condition. The object is copied
// only if the etag of the source matches the value given here.
func (s *SourceInfo) SetMatchETagCond(etag string) error {
if etag == "" {
return ErrInvalidArgument("ETag cannot be empty.")
}
s.Headers.Set("x-amz-copy-source-if-match", etag)
return nil
}
// SetMatchETagExceptCond - Set the ETag match exception
// condition. The object is copied only if the etag of the source is
// not the value given here.
func (s *SourceInfo) SetMatchETagExceptCond(etag string) error {
if etag == "" {
return ErrInvalidArgument("ETag cannot be empty.")
}
s.Headers.Set("x-amz-copy-source-if-none-match", etag)
return nil
}
// SetModifiedSinceCond - Set the modified since condition.
func (s *SourceInfo) SetModifiedSinceCond(modTime time.Time) error {
if modTime.IsZero() {
return ErrInvalidArgument("Input time cannot be 0.")
}
s.Headers.Set("x-amz-copy-source-if-modified-since", modTime.Format(http.TimeFormat))
return nil
}
// SetUnmodifiedSinceCond - Set the unmodified since condition.
func (s *SourceInfo) SetUnmodifiedSinceCond(modTime time.Time) error {
if modTime.IsZero() {
return ErrInvalidArgument("Input time cannot be 0.")
}
s.Headers.Set("x-amz-copy-source-if-unmodified-since", modTime.Format(http.TimeFormat))
return nil
}
// Helper to fetch size, etag and user metadata of an object using a StatObject call.
func (s *SourceInfo) getProps(c Client) (size int64, etag string, userMeta map[string]string, err error) {
// Get object info - need size and etag here. Also, decryption
// headers are added to the stat request if given.
var objInfo ObjectInfo
rh := NewGetReqHeaders()
for k, v := range s.decryptKey.getSSEHeaders(false) {
rh.Set(k, v)
}
objInfo, err = c.statObject(s.bucket, s.object, rh)
if err != nil {
err = fmt.Errorf("Could not stat object - %s/%s: %v", s.bucket, s.object, err)
} else {
size = objInfo.Size
etag = objInfo.ETag
userMeta = make(map[string]string)
for k, v := range objInfo.Metadata {
if strings.HasPrefix(k, "x-amz-meta-") {
if len(v) > 0 {
userMeta[k] = v[0]
}
}
}
}
return
}
// uploadPartCopy - helper function to create a part in a multipart
// upload via an upload-part-copy request
// https://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadUploadPartCopy.html
func (c Client) uploadPartCopy(bucket, object, uploadID string, partNumber int,
headers http.Header) (p CompletePart, err error) {
// Build query parameters
urlValues := make(url.Values)
urlValues.Set("partNumber", strconv.Itoa(partNumber))
urlValues.Set("uploadId", uploadID)
// Send upload-part-copy request
resp, err := c.executeMethod("PUT", requestMetadata{
bucketName: bucket,
objectName: object,
customHeader: headers,
queryValues: urlValues,
})
defer closeResponse(resp)
if err != nil {
return p, err
}
// Check if we got an error response.
if resp.StatusCode != http.StatusOK {
return p, httpRespToErrorResponse(resp, bucket, object)
}
// Decode copy-part response on success.
cpObjRes := copyObjectResult{}
err = xmlDecoder(resp.Body, &cpObjRes)
if err != nil {
return p, err
}
p.PartNumber, p.ETag = partNumber, cpObjRes.ETag
return p, nil
}
// ComposeObject - creates an object using server-side copying of
// existing objects. It takes a list of source objects (with optional
// offsets) and concatenates them into a new object using only
// server-side copying operations.
func (c Client) ComposeObject(dst DestinationInfo, srcs []SourceInfo) error {
if len(srcs) < 1 || len(srcs) > maxPartsCount {
return ErrInvalidArgument("There must be as least one and upto 10000 source objects.")
}
srcSizes := make([]int64, len(srcs))
var totalSize, size, totalParts int64
var srcUserMeta map[string]string
var etag string
var err error
for i, src := range srcs {
size, etag, srcUserMeta, err = src.getProps(c)
if err != nil {
return fmt.Errorf("Could not get source props for %s/%s: %v", src.bucket, src.object, err)
}
// Error out if client side encryption is used in this source object when
// more than one source object is given.
if len(srcs) > 1 && src.Headers.Get("x-amz-meta-x-amz-key") != "" {
return ErrInvalidArgument(
fmt.Sprintf("Client side encryption is used in source object %s/%s", src.bucket, src.object))
}
// Since we did a HEAD to get size, we use the ETag
// value to make sure the object has not changed by
// the time we perform the copy. This is done only if
// the user has not set their own ETag match
// condition.
if src.Headers.Get("x-amz-copy-source-if-match") == "" {
src.SetMatchETagCond(etag)
}
// Check if a segment is specified, and if so, whether the
// segment is within object bounds.
if src.start != -1 {
// Since range is specified,
// 0 <= src.start <= src.end
// so only invalid case to check is:
if src.end >= size {
return ErrInvalidArgument(
fmt.Sprintf("SourceInfo %d has invalid segment-to-copy [%d, %d] (size is %d)",
i, src.start, src.end, size))
}
size = src.end - src.start + 1
}
// Only the last source may be less than `absMinPartSize`
if size < absMinPartSize && i < len(srcs)-1 {
return ErrInvalidArgument(
fmt.Sprintf("SourceInfo %d is too small (%d) and it is not the last part", i, size))
}
// Is data to copy too large?
totalSize += size
if totalSize > maxMultipartPutObjectSize {
return ErrInvalidArgument(fmt.Sprintf("Cannot compose an object of size %d (> 5TiB)", totalSize))
}
// record source size
srcSizes[i] = size
// calculate parts needed for current source
totalParts += partsRequired(size)
// Do we need more parts than we are allowed?
if totalParts > maxPartsCount {
return ErrInvalidArgument(fmt.Sprintf(
"Your proposed compose object requires more than %d parts", maxPartsCount))
}
}
// Single source object case (i.e. only one source is
// involved, it is copied wholly, and it is at most 5GiB
// in size).
if totalParts == 1 && srcs[0].start == -1 && totalSize <= maxPartSize {
h := srcs[0].Headers
// Add destination encryption headers
for k, v := range dst.encryption.getSSEHeaders(false) {
h.Set(k, v)
}
// If no user metadata is specified (and so, the
// for-loop below is not entered), metadata from the
// source is copied to the destination (due to
// single-part copy-object PUT request behaviour).
for k, v := range dst.getUserMetaHeadersMap(true) {
h.Set(k, v)
}
// Send copy request
resp, err := c.executeMethod("PUT", requestMetadata{
bucketName: dst.bucket,
objectName: dst.object,
customHeader: h,
})
defer closeResponse(resp)
if err != nil {
return err
}
// Check if we got an error response.
if resp.StatusCode != http.StatusOK {
return httpRespToErrorResponse(resp, dst.bucket, dst.object)
}
// Return nil on success.
return nil
}
// Now, handle multipart-copy cases.
// 1. Initiate a new multipart upload.
// Set user-metadata on the destination object. If no
// user-metadata is specified and there is only one source,
// then (and only then) metadata from the source is copied.
userMeta := dst.getUserMetaHeadersMap(false)
metaMap := userMeta
if len(userMeta) == 0 && len(srcs) == 1 {
metaMap = srcUserMeta
}
metaHeaders := make(map[string][]string)
for k, v := range metaMap {
metaHeaders[k] = append(metaHeaders[k], v)
}
uploadID, err := c.newUploadID(dst.bucket, dst.object, metaHeaders)
if err != nil {
return fmt.Errorf("Error creating new upload: %v", err)
}
// 2. Perform copy part uploads
objParts := []CompletePart{}
partIndex := 1
for i, src := range srcs {
h := src.Headers
// Add destination encryption headers
for k, v := range dst.encryption.getSSEHeaders(false) {
h.Set(k, v)
}
// calculate start/end indices of parts after
// splitting.
startIdx, endIdx := calculateEvenSplits(srcSizes[i], src)
for j, start := range startIdx {
end := endIdx[j]
// Add (or reset) source range header for
// upload part copy request.
h.Set("x-amz-copy-source-range",
fmt.Sprintf("bytes=%d-%d", start, end))
// make upload-part-copy request
complPart, err := c.uploadPartCopy(dst.bucket,
dst.object, uploadID, partIndex, h)
if err != nil {
return fmt.Errorf("Error in upload-part-copy - %v", err)
}
objParts = append(objParts, complPart)
partIndex++
}
}
// 3. Make final complete-multipart request.
_, err = c.completeMultipartUpload(dst.bucket, dst.object, uploadID,
completeMultipartUpload{Parts: objParts})
if err != nil {
err = fmt.Errorf("Error in complete-multipart request - %v", err)
}
return err
}
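// A usage sketch (endpoint, credentials and names are hypothetical):
// concatenate two objects server-side. Recall from the validation
// above that every source except the last must be at least
// absMinPartSize (5MiB):
//
//	c, err := minio.New("play.minio.io:9000", "ACCESSKEY", "SECRETKEY", true)
//	if err != nil {
//		log.Fatalln(err)
//	}
//	srcs := []minio.SourceInfo{
//		minio.NewSourceInfo("bucket", "part-a", nil),
//		minio.NewSourceInfo("bucket", "part-b", nil),
//	}
//	dst, err := minio.NewDestinationInfo("bucket", "joined", nil, nil)
//	if err != nil {
//		log.Fatalln(err)
//	}
//	if err = c.ComposeObject(dst, srcs); err != nil {
//		log.Fatalln(err)
//	}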
// partsRequired is ceiling(size / copyPartSize)
func partsRequired(size int64) int64 {
r := size / copyPartSize
if size%copyPartSize > 0 {
r++
}
return r
}
// calculateEvenSplits - computes splits for a source and returns
// start and end index slices. Splits happen evenly to be sure that no
// part is less than 5MiB, as that could fail the multipart request if
// it is not the last part.
func calculateEvenSplits(size int64, src SourceInfo) (startIndex, endIndex []int64) {
if size == 0 {
return
}
reqParts := partsRequired(size)
startIndex = make([]int64, reqParts)
endIndex = make([]int64, reqParts)
// Compute number of required parts `k`, as:
//
// k = ceiling(size / copyPartSize)
//
// Now, distribute the `size` bytes in the source into
// k parts as evenly as possible:
//
// r parts sized (q+1) bytes, and
// (k - r) parts sized q bytes, where
//
// size = q * k + r (by simple division of size by k,
// so that 0 <= r < k)
//
start := src.start
if start == -1 {
start = 0
}
quot, rem := size/reqParts, size%reqParts
nextStart := start
for j := int64(0); j < reqParts; j++ {
curPartSize := quot
if j < rem {
curPartSize++
}
cStart := nextStart
cEnd := cStart + curPartSize - 1
nextStart = cEnd + 1
startIndex[j], endIndex[j] = cStart, cEnd
}
return
}
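// A worked example, using the 10GiB+1 case from the tests below (the
// test cases imply copyPartSize is 5GiB): size = 10737418241 gives
// k = partsRequired(size) = 3; then q = size/3 = 3579139413 and
// r = size%3 = 2, so the first two parts get q+1 bytes and the last
// gets q bytes:
//
//	part 1: [0, 3579139413]           (3579139414 bytes)
//	part 2: [3579139414, 7158278827]  (3579139414 bytes)
//	part 3: [7158278828, 10737418240] (3579139413 bytes)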


@@ -0,0 +1,88 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package minio
import (
"reflect"
"testing"
)
const (
gb1 = 1024 * 1024 * 1024
gb5 = 5 * gb1
gb5p1 = gb5 + 1
gb10p1 = 2*gb5 + 1
gb10p2 = 2*gb5 + 2
)
func TestPartsRequired(t *testing.T) {
testCases := []struct {
size, ref int64
}{
{0, 0},
{1, 1},
{gb5, 1},
{2 * gb5, 2},
{gb10p1, 3},
{gb10p2, 3},
}
for i, testCase := range testCases {
res := partsRequired(testCase.size)
if res != testCase.ref {
t.Errorf("Test %d - output did not match with reference results", i+1)
}
}
}
func TestCalculateEvenSplits(t *testing.T) {
testCases := []struct {
// input size and source object
size int64
src SourceInfo
// output part-indexes
starts, ends []int64
}{
{0, SourceInfo{start: -1}, nil, nil},
{1, SourceInfo{start: -1}, []int64{0}, []int64{0}},
{1, SourceInfo{start: 0}, []int64{0}, []int64{0}},
{gb1, SourceInfo{start: -1}, []int64{0}, []int64{gb1 - 1}},
{gb5, SourceInfo{start: -1}, []int64{0}, []int64{gb5 - 1}},
// 2 part splits
{gb5p1, SourceInfo{start: -1}, []int64{0, gb5/2 + 1}, []int64{gb5 / 2, gb5}},
{gb5p1, SourceInfo{start: -1}, []int64{0, gb5/2 + 1}, []int64{gb5 / 2, gb5}},
// 3 part splits
{gb10p1, SourceInfo{start: -1},
[]int64{0, gb10p1/3 + 1, 2*gb10p1/3 + 1},
[]int64{gb10p1 / 3, 2 * gb10p1 / 3, gb10p1 - 1}},
{gb10p2, SourceInfo{start: -1},
[]int64{0, gb10p2 / 3, 2 * gb10p2 / 3},
[]int64{gb10p2/3 - 1, 2*gb10p2/3 - 1, gb10p2 - 1}},
}
for i, testCase := range testCases {
resStart, resEnd := calculateEvenSplits(testCase.size, testCase.src)
if !reflect.DeepEqual(testCase.starts, resStart) || !reflect.DeepEqual(testCase.ends, resEnd) {
t.Errorf("Test %d - output did not match with reference results", i+1)
}
}
}


@@ -679,12 +679,18 @@ func (c Client) getObject(bucketName, objectName string, reqHeaders RequestHeade
if contentType == "" {
contentType = "application/octet-stream"
}
var objectStat ObjectInfo
objectStat.ETag = md5sum
objectStat.Key = objectName
objectStat.Size = resp.ContentLength
objectStat.LastModified = date
objectStat.ContentType = contentType
objectStat := ObjectInfo{
ETag: md5sum,
Key: objectName,
Size: resp.ContentLength,
LastModified: date,
ContentType: contentType,
// Extract only the relevant header keys describing the object.
// The following function filters out a standard set of keys
// which are not part of object metadata.
Metadata: extractObjMetadata(resp.Header),
}
// do not close body here, caller will close
return resp.Body, objectStat, nil


@@ -17,10 +17,8 @@
package minio
import (
"fmt"
"hash"
"io"
"io/ioutil"
"math"
"os"
@@ -78,55 +76,6 @@ func optimalPartInfo(objectSize int64) (totalPartsCount int, partSize int64, las
return totalPartsCount, partSize, lastPartSize, nil
}
// hashCopyBuffer is identical to hashCopyN except that it doesn't take
// a size argument; it takes a buffer argument instead, and the reader
// must implement the io.ReaderAt interface.
//
// It stages reads from offsets into the buffer; if the buffer is nil,
// it is initialized to optimalReadBufferSize.
func hashCopyBuffer(hashAlgorithms map[string]hash.Hash, hashSums map[string][]byte, writer io.Writer, reader io.ReaderAt, buf []byte) (size int64, err error) {
hashWriter := writer
for _, v := range hashAlgorithms {
hashWriter = io.MultiWriter(hashWriter, v)
}
// Buffer is nil, initialize.
if buf == nil {
buf = make([]byte, optimalReadBufferSize)
}
// Offset to start reading from.
var readAtOffset int64
// The following block reads data at an offset from the input
// reader and copies it into the hash writer.
for {
readAtSize, rerr := reader.ReadAt(buf, readAtOffset)
if rerr != nil {
if rerr != io.EOF {
return 0, rerr
}
}
writeSize, werr := hashWriter.Write(buf[:readAtSize])
if werr != nil {
return 0, werr
}
if readAtSize != writeSize {
return 0, fmt.Errorf("Read size was not completely written to writer. wanted %d, got %d - %s", readAtSize, writeSize, reportIssue)
}
readAtOffset += int64(writeSize)
size += int64(writeSize)
if rerr == io.EOF {
break
}
}
for k, v := range hashAlgorithms {
hashSums[k] = v.Sum(nil)
}
return size, err
}
// hashCopyN - Calculates chosen hashes up to partSize amount of bytes.
func hashCopyN(hashAlgorithms map[string]hash.Hash, hashSums map[string][]byte, writer io.Writer, reader io.Reader, partSize int64) (size int64, err error) {
hashWriter := writer
@@ -167,27 +116,3 @@ func (c Client) newUploadID(bucketName, objectName string, metaData map[string][
}
return initMultipartUploadResult.UploadID, nil
}
// computeHash - Calculates hashes for an input io.ReadSeeker.
func computeHash(hashAlgorithms map[string]hash.Hash, hashSums map[string][]byte, reader io.ReadSeeker) (size int64, err error) {
hashWriter := ioutil.Discard
for _, v := range hashAlgorithms {
hashWriter = io.MultiWriter(hashWriter, v)
}
// If no buffer is provided, there is no need to allocate; just use io.Copy.
size, err = io.Copy(hashWriter, reader)
if err != nil {
return 0, err
}
// Seek back reader to the beginning location.
if _, err := reader.Seek(0, 0); err != nil {
return 0, err
}
for k, v := range hashAlgorithms {
hashSums[k] = v.Sum(nil)
}
return size, nil
}


@@ -16,57 +16,7 @@
package minio
import (
"net/http"
"github.com/minio/minio-go/pkg/s3utils"
)
// CopyObject - copy a source object into a new object with the provided name in the provided bucket
func (c Client) CopyObject(bucketName string, objectName string, objectSource string, cpCond CopyConditions) error {
// Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return err
}
if err := s3utils.CheckValidObjectName(objectName); err != nil {
return err
}
if objectSource == "" {
return ErrInvalidArgument("Object source cannot be empty.")
}
// Apply copy conditions as custom headers.
customHeaders := make(http.Header)
for _, cond := range cpCond.conditions {
customHeaders.Set(cond.key, cond.value)
}
// Set copy source.
customHeaders.Set("x-amz-copy-source", s3utils.EncodePath(objectSource))
// Execute PUT on objectName.
resp, err := c.executeMethod("PUT", requestMetadata{
bucketName: bucketName,
objectName: objectName,
customHeader: customHeaders,
})
defer closeResponse(resp)
if err != nil {
return err
}
if resp != nil {
if resp.StatusCode != http.StatusOK {
return httpRespToErrorResponse(resp, bucketName, objectName)
}
}
// Decode copy response on success.
cpObjRes := copyObjectResult{}
err = xmlDecoder(resp.Body, &cpObjRes)
if err != nil {
return err
}
// Return nil on success.
return nil
// CopyObject - copy a source object into a new object
func (c Client) CopyObject(dst DestinationInfo, src SourceInfo) error {
return c.ComposeObject(dst, []SourceInfo{src})
}
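// A usage sketch of the new signature (names are hypothetical; c is an
// initialized minio Client):
//
//	src := minio.NewSourceInfo("bucket", "source-object", nil)
//	dst, err := minio.NewDestinationInfo("bucket", "copied-object", nil, nil)
//	if err != nil {
//		log.Fatalln(err)
//	}
//	if err = c.CopyObject(dst, src); err != nil {
//		log.Fatalln(err)
//	}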


@@ -0,0 +1,46 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package minio
import (
"io"
"github.com/minio/minio-go/pkg/encrypt"
)
// PutEncryptedObject - Encrypt and store object.
func (c Client) PutEncryptedObject(bucketName, objectName string, reader io.Reader, encryptMaterials encrypt.Materials, metadata map[string][]string, progress io.Reader) (n int64, err error) {
if encryptMaterials == nil {
return 0, ErrInvalidArgument("Unable to recognize empty encryption properties")
}
if err := encryptMaterials.SetupEncryptMode(reader); err != nil {
return 0, err
}
if metadata == nil {
metadata = make(map[string][]string)
}
// Set the necessary encryption headers, for future decryption.
metadata[amzHeaderIV] = []string{encryptMaterials.GetIV()}
metadata[amzHeaderKey] = []string{encryptMaterials.GetKey()}
metadata[amzHeaderMatDesc] = []string{encryptMaterials.GetDesc()}
return c.putObjectMultipart(bucketName, objectName, encryptMaterials, -1, metadata, progress)
}
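// A usage sketch (names are hypothetical; c is an initialized minio
// Client, file is an open io.Reader such as an *os.File, and the
// symmetric-key helpers come from github.com/minio/minio-go/pkg/encrypt):
//
//	key := encrypt.NewSymmetricKey([]byte("32byteslongsecretkeymustprovided"))
//	materials, err := encrypt.NewCBCSecureMaterials(key)
//	if err != nil {
//		log.Fatalln(err)
//	}
//	n, err := c.PutEncryptedObject("bucket", "object", file, materials, nil, nil)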


@@ -17,13 +17,9 @@
package minio
import (
"fmt"
"io"
"io/ioutil"
"mime"
"os"
"path/filepath"
"sort"
"github.com/minio/minio-go/pkg/s3utils"
)
@@ -55,11 +51,6 @@ func (c Client) FPutObject(bucketName, objectName, filePath, contentType string)
// Save the file size.
fileSize := fileStat.Size()
// Check for largest object size allowed.
if fileSize > int64(maxMultipartPutObjectSize) {
return 0, ErrEntityTooLarge(fileSize, maxMultipartPutObjectSize, bucketName, objectName)
}
objMetadata := make(map[string][]string)
// Set contentType based on filepath extension if not given or default
@@ -71,195 +62,5 @@ func (c Client) FPutObject(bucketName, objectName, filePath, contentType string)
}
objMetadata["Content-Type"] = []string{contentType}
// NOTE: Google Cloud Storage multipart Put is not compatible with Amazon S3 APIs.
if s3utils.IsGoogleEndpoint(c.endpointURL) {
// Do not compute MD5 for Google Cloud Storage.
return c.putObjectNoChecksum(bucketName, objectName, fileReader, fileSize, objMetadata, nil)
}
// Small object upload is initiated for input data smaller than 5MiB.
if fileSize < minPartSize && fileSize >= 0 {
return c.putObjectSingle(bucketName, objectName, fileReader, fileSize, objMetadata, nil)
}
// Upload all large objects as multipart.
n, err = c.putObjectMultipartFromFile(bucketName, objectName, fileReader, fileSize, objMetadata, nil)
if err != nil {
errResp := ToErrorResponse(err)
// Check whether multipart functionality is unavailable; if so,
// fall back to a single PutObject operation.
if errResp.Code == "NotImplemented" {
// If size of file is greater than '5GiB' fail.
if fileSize > maxSinglePutObjectSize {
return 0, ErrEntityTooLarge(fileSize, maxSinglePutObjectSize, bucketName, objectName)
}
// Fall back to uploading as single PutObject operation.
return c.putObjectSingle(bucketName, objectName, fileReader, fileSize, objMetadata, nil)
}
return n, err
}
return n, nil
}
// putObjectMultipartFromFile - Creates object from contents of *os.File
//
// NOTE: This function is meant to be used for readers backed by a
// local file, such as *os.File. It takes advantage of the file
// system's ability to read from specific sections, avoiding the need
// to create temporary files.
func (c Client) putObjectMultipartFromFile(bucketName, objectName string, fileReader io.ReaderAt, fileSize int64, metaData map[string][]string, progress io.Reader) (int64, error) {
// Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return 0, err
}
if err := s3utils.CheckValidObjectName(objectName); err != nil {
return 0, err
}
// Initiate a new multipart upload.
uploadID, err := c.newUploadID(bucketName, objectName, metaData)
if err != nil {
return 0, err
}
// Total data read and written to server; should be equal to 'size' at the end of the call.
var totalUploadedSize int64
// Complete multipart upload.
var complMultipartUpload completeMultipartUpload
// Calculate the optimal parts info for a given size.
totalPartsCount, partSize, lastPartSize, err := optimalPartInfo(fileSize)
if err != nil {
return 0, err
}
// Create a channel to communicate a part was uploaded.
// Buffer this to 10000, the maximum number of parts allowed by S3.
uploadedPartsCh := make(chan uploadedPartRes, 10000)
// Create a channel to communicate which part to upload.
// Buffer this to 10000, the maximum number of parts allowed by S3.
uploadPartsCh := make(chan uploadPartReq, 10000)
// Just for readability.
lastPartNumber := totalPartsCount
// Initialize parts uploaded map.
partsInfo := make(map[int]ObjectPart)
// Send each part through uploadPartsCh to be uploaded.
for p := 1; p <= totalPartsCount; p++ {
part, ok := partsInfo[p]
if ok {
uploadPartsCh <- uploadPartReq{PartNum: p, Part: &part}
} else {
uploadPartsCh <- uploadPartReq{PartNum: p, Part: nil}
}
}
close(uploadPartsCh)
// Use three 'workers' to upload parts in parallel.
for w := 1; w <= totalWorkers; w++ {
go func() {
// Deal with each part as it comes through the channel.
for uploadReq := range uploadPartsCh {
// Add hash algorithms that need to be calculated by computeHash()
// In case of a non-v4 signature or https connection, sha256 is not needed.
hashAlgos, hashSums := c.hashMaterials()
// If partNumber was not uploaded we calculate the missing
// part offset and size. For all other part numbers we
// calculate offset based on multiples of partSize.
readOffset := int64(uploadReq.PartNum-1) * partSize
missingPartSize := partSize
// As a special case if partNumber is lastPartNumber, we
// calculate the offset based on the last part size.
if uploadReq.PartNum == lastPartNumber {
readOffset = (fileSize - lastPartSize)
missingPartSize = lastPartSize
}
// Get a section reader on a particular offset.
sectionReader := io.NewSectionReader(fileReader, readOffset, missingPartSize)
var prtSize int64
var err error
prtSize, err = computeHash(hashAlgos, hashSums, sectionReader)
if err != nil {
uploadedPartsCh <- uploadedPartRes{
Error: err,
}
// Exit the goroutine.
return
}
// Proceed to upload the part.
var objPart ObjectPart
objPart, err = c.uploadPart(bucketName, objectName, uploadID, sectionReader, uploadReq.PartNum,
hashSums["md5"], hashSums["sha256"], prtSize)
if err != nil {
uploadedPartsCh <- uploadedPartRes{
Error: err,
}
// Exit the goroutine.
return
}
// Save successfully uploaded part metadata.
uploadReq.Part = &objPart
// Return through the channel the part size.
uploadedPartsCh <- uploadedPartRes{
Size: missingPartSize,
PartNum: uploadReq.PartNum,
Part: uploadReq.Part,
Error: nil,
}
}
}()
}
// Retrieve each uploaded part once it is done.
for u := 1; u <= totalPartsCount; u++ {
uploadRes := <-uploadedPartsCh
if uploadRes.Error != nil {
return totalUploadedSize, uploadRes.Error
}
// Retrieve each uploaded part and store it to be completed.
part := uploadRes.Part
if part == nil {
return totalUploadedSize, ErrInvalidArgument(fmt.Sprintf("Missing part number %d", uploadRes.PartNum))
}
// Update the total uploaded size.
totalUploadedSize += uploadRes.Size
// Update the progress bar if there is one.
if progress != nil {
if _, err = io.CopyN(ioutil.Discard, progress, uploadRes.Size); err != nil {
return totalUploadedSize, err
}
}
// Store the part to be completed.
complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{
ETag: part.ETag,
PartNumber: part.PartNumber,
})
}
// Verify if we uploaded all data.
if totalUploadedSize != fileSize {
return totalUploadedSize, ErrUnexpectedEOF(totalUploadedSize, fileSize, bucketName, objectName)
}
// Sort all completed parts.
sort.Sort(completedParts(complMultipartUpload.Parts))
_, err = c.completeMultipartUpload(bucketName, objectName, uploadID, complMultipartUpload)
if err != nil {
return totalUploadedSize, err
}
// Return final size.
return totalUploadedSize, nil
return c.putObjectCommon(bucketName, objectName, fileReader, fileSize, objMetadata, nil)
}


@@ -24,7 +24,6 @@ import (
"io/ioutil"
"net/http"
"net/url"
"os"
"sort"
"strconv"
"strings"
@@ -32,161 +31,60 @@ import (
"github.com/minio/minio-go/pkg/s3utils"
)
// Comprehensive put object operation involving multipart uploads.
//
// The following code handles these types of readers:
//
// - *os.File
// - *minio.Object
// - Any reader which has a method 'ReadAt()'
//
func (c Client) putObjectMultipart(bucketName, objectName string, reader io.Reader, size int64, metaData map[string][]string, progress io.Reader) (n int64, err error) {
if size > 0 && size > minPartSize {
// Verify if reader is *os.File, then use file system functionalities.
if isFile(reader) {
return c.putObjectMultipartFromFile(bucketName, objectName, reader.(*os.File), size, metaData, progress)
}
// Verify if reader is *minio.Object or io.ReaderAt.
// NOTE: Verification of *minio.Object is kept even though it could
// be duck-typed as io.ReaderAt. It makes explicit that *minio.Object
// implements io.ReaderAt, and that functionality is used in the
// subsequent code path.
if isObject(reader) || isReadAt(reader) {
return c.putObjectMultipartFromReadAt(bucketName, objectName, reader.(io.ReaderAt), size, metaData, progress)
func (c Client) putObjectMultipart(bucketName, objectName string, reader io.Reader, size int64,
metadata map[string][]string, progress io.Reader) (n int64, err error) {
n, err = c.putObjectMultipartNoStream(bucketName, objectName, reader, size, metadata, progress)
if err != nil {
errResp := ToErrorResponse(err)
// Check whether multipart functionality is unavailable; if so,
// fall back to a single PutObject operation.
if errResp.Code == "AccessDenied" && strings.Contains(errResp.Message, "Access Denied") {
// Verify if size of reader is greater than '5GiB'.
if size > maxSinglePutObjectSize {
return 0, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName)
}
// Fall back to uploading as single PutObject operation.
return c.putObjectNoChecksum(bucketName, objectName, reader, size, metadata, progress)
}
}
// For any other data size and reader type we do generic multipart
// approach by staging data in temporary files and uploading them.
return c.putObjectMultipartStream(bucketName, objectName, reader, size, metaData, progress)
return n, err
}
// putObjectMultipartStreamNoChecksum - upload a large object using
// multipart upload and streaming signature for signing payload.
func (c Client) putObjectMultipartStreamNoChecksum(bucketName, objectName string,
reader io.Reader, size int64, metadata map[string][]string, progress io.Reader) (int64, error) {
func (c Client) putObjectMultipartNoStream(bucketName, objectName string, reader io.Reader, size int64,
metadata map[string][]string, progress io.Reader) (n int64, err error) {
// Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
if err = s3utils.CheckValidBucketName(bucketName); err != nil {
return 0, err
}
if err := s3utils.CheckValidObjectName(objectName); err != nil {
if err = s3utils.CheckValidObjectName(objectName); err != nil {
return 0, err
}
// Initiates a new multipart request
uploadID, err := c.newUploadID(bucketName, objectName, metadata)
if err != nil {
return 0, err
}
// Calculate the optimal parts info for a given size.
totalPartsCount, partSize, lastPartSize, err := optimalPartInfo(size)
if err != nil {
return 0, err
}
// Total data read and written to server; should be equal to 'size' at the end of the call.
var totalUploadedSize int64
// Initialize parts uploaded map.
partsInfo := make(map[int]ObjectPart)
// Part number always starts with '1'.
var partNumber int
for partNumber = 1; partNumber <= totalPartsCount; partNumber++ {
// Update progress reader appropriately to the latest offset
// as we read from the source.
hookReader := newHook(reader, progress)
// Proceed to upload the part.
if partNumber == totalPartsCount {
partSize = lastPartSize
}
var objPart ObjectPart
objPart, err = c.uploadPart(bucketName, objectName, uploadID,
io.LimitReader(hookReader, partSize), partNumber, nil, nil, partSize)
// For unknown size, we break away on read EOF;
// we do not have to upload up to totalPartsCount.
if err == io.EOF && size < 0 {
break
}
if err != nil {
return totalUploadedSize, err
}
// Save successfully uploaded part metadata.
partsInfo[partNumber] = objPart
// Save successfully uploaded size.
totalUploadedSize += partSize
}
// Verify if we uploaded all the data.
if size > 0 {
if totalUploadedSize != size {
return totalUploadedSize, ErrUnexpectedEOF(totalUploadedSize, size, bucketName, objectName)
}
}
// Complete multipart upload.
var complMultipartUpload completeMultipartUpload
// Loop over total uploaded parts to save them in
// Parts array before completing the multipart request.
for i := 1; i < partNumber; i++ {
part, ok := partsInfo[i]
if !ok {
return 0, ErrInvalidArgument(fmt.Sprintf("Missing part number %d", i))
}
complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{
ETag: part.ETag,
PartNumber: part.PartNumber,
})
}
// Sort all completed parts.
sort.Sort(completedParts(complMultipartUpload.Parts))
_, err = c.completeMultipartUpload(bucketName, objectName, uploadID, complMultipartUpload)
if err != nil {
return totalUploadedSize, err
}
// Return final size.
return totalUploadedSize, nil
}
// putObjectMultipartStream uploads files bigger than 64MiB, and also
// supports the special case where the size is unknown, i.e. '-1'.
func (c Client) putObjectMultipartStream(bucketName, objectName string, reader io.Reader, size int64, metaData map[string][]string, progress io.Reader) (n int64, err error) {
// Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return 0, err
}
if err := s3utils.CheckValidObjectName(objectName); err != nil {
return 0, err
}
// Total data read and written to server; should be equal to 'size' at the end of the call.
// Total data read and written to server; should be equal to
// 'size' at the end of the call.
var totalUploadedSize int64
// Complete multipart upload.
var complMultipartUpload completeMultipartUpload
// Initiate a new multipart upload.
uploadID, err := c.newUploadID(bucketName, objectName, metaData)
if err != nil {
return 0, err
}
// Calculate the optimal parts info for a given size.
totalPartsCount, partSize, _, err := optimalPartInfo(size)
if err != nil {
return 0, err
}
// Initiate a new multipart upload.
uploadID, err := c.newUploadID(bucketName, objectName, metadata)
if err != nil {
return 0, err
}
defer func() {
if err != nil {
c.abortMultipartUpload(bucketName, objectName, uploadID)
}
}()
// Part number always starts with '1'.
partNumber := 1
@@ -197,8 +95,9 @@ func (c Client) putObjectMultipartStream(bucketName, objectName string, reader i
partsInfo := make(map[int]ObjectPart)
for partNumber <= totalPartsCount {
// Choose hash algorithms to be calculated by hashCopyN, avoid sha256
// with non-v4 signature request or HTTPS connection
// Choose hash algorithms to be calculated by hashCopyN,
// avoid sha256 with non-v4 signature request or
// HTTPS connection.
hashAlgos, hashSums := c.hashMaterials()
// Calculates hash sums while copying partSize bytes into tmpBuffer.
@@ -214,7 +113,8 @@ func (c Client) putObjectMultipartStream(bucketName, objectName string, reader i
// Proceed to upload the part.
var objPart ObjectPart
objPart, err = c.uploadPart(bucketName, objectName, uploadID, reader, partNumber, hashSums["md5"], hashSums["sha256"], prtSize)
objPart, err = c.uploadPart(bucketName, objectName, uploadID, reader, partNumber,
hashSums["md5"], hashSums["sha256"], prtSize, metadata)
if err != nil {
// Reset the temporary buffer upon any error.
tmpBuffer.Reset()
@@ -224,13 +124,6 @@ func (c Client) putObjectMultipartStream(bucketName, objectName string, reader i
// Save successfully uploaded part metadata.
partsInfo[partNumber] = objPart
// Update the progress reader for the skipped part.
if progress != nil {
if _, err = io.CopyN(ioutil.Discard, progress, prtSize); err != nil {
return totalUploadedSize, err
}
}
// Reset the temporary buffer.
tmpBuffer.Reset()
@@ -269,8 +162,7 @@ func (c Client) putObjectMultipartStream(bucketName, objectName string, reader i
// Sort all completed parts.
sort.Sort(completedParts(complMultipartUpload.Parts))
_, err = c.completeMultipartUpload(bucketName, objectName, uploadID, complMultipartUpload)
if err != nil {
if _, err = c.completeMultipartUpload(bucketName, objectName, uploadID, complMultipartUpload); err != nil {
return totalUploadedSize, err
}
@@ -279,7 +171,7 @@ func (c Client) putObjectMultipartStream(bucketName, objectName string, reader i
}
// initiateMultipartUpload - Initiates a multipart upload and returns an upload ID.
func (c Client) initiateMultipartUpload(bucketName, objectName string, metaData map[string][]string) (initiateMultipartUploadResult, error) {
func (c Client) initiateMultipartUpload(bucketName, objectName string, metadata map[string][]string) (initiateMultipartUploadResult, error) {
// Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return initiateMultipartUploadResult{}, err
@@ -294,14 +186,14 @@ func (c Client) initiateMultipartUpload(bucketName, objectName string, metaData
// Set ContentType header.
customHeader := make(http.Header)
for k, v := range metaData {
for k, v := range metadata {
if len(v) > 0 {
customHeader.Set(k, v[0])
}
}
// Set a default content-type header if one is not provided
if v, ok := metaData["Content-Type"]; !ok || len(v) == 0 {
if v, ok := metadata["Content-Type"]; !ok || len(v) == 0 {
customHeader.Set("Content-Type", "application/octet-stream")
}
@@ -332,8 +224,11 @@ func (c Client) initiateMultipartUpload(bucketName, objectName string, metaData
return initiateMultipartUploadResult, nil
}
const serverEncryptionKeyPrefix = "x-amz-server-side-encryption"
// uploadPart - Uploads a part in a multipart upload.
func (c Client) uploadPart(bucketName, objectName, uploadID string, reader io.Reader, partNumber int, md5Sum, sha256Sum []byte, size int64) (ObjectPart, error) {
func (c Client) uploadPart(bucketName, objectName, uploadID string, reader io.Reader,
partNumber int, md5Sum, sha256Sum []byte, size int64, metadata map[string][]string) (ObjectPart, error) {
// Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return ObjectPart{}, err
@@ -361,10 +256,21 @@ func (c Client) uploadPart(bucketName, objectName, uploadID string, reader io.Re
// Set upload id.
urlValues.Set("uploadId", uploadID)
// Set encryption headers, if any.
customHeader := make(http.Header)
for k, v := range metadata {
if len(v) > 0 {
if strings.HasPrefix(strings.ToLower(k), serverEncryptionKeyPrefix) {
customHeader.Set(k, v[0])
}
}
}
reqMetadata := requestMetadata{
bucketName: bucketName,
objectName: objectName,
queryValues: urlValues,
customHeader: customHeader,
contentBody: reader,
contentLength: size,
contentMD5Bytes: md5Sum,
@@ -393,7 +299,8 @@ func (c Client) uploadPart(bucketName, objectName, uploadID string, reader io.Re
}
// completeMultipartUpload - Completes a multipart upload by assembling previously uploaded parts.
func (c Client) completeMultipartUpload(bucketName, objectName, uploadID string, complete completeMultipartUpload) (completeMultipartUploadResult, error) {
func (c Client) completeMultipartUpload(bucketName, objectName, uploadID string,
complete completeMultipartUpload) (completeMultipartUploadResult, error) {
// Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return completeMultipartUploadResult{}, err


@@ -1,191 +0,0 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package minio
import (
"io"
"strings"
"github.com/minio/minio-go/pkg/credentials"
"github.com/minio/minio-go/pkg/encrypt"
"github.com/minio/minio-go/pkg/s3utils"
)
// PutObjectWithProgress - with progress.
func (c Client) PutObjectWithProgress(bucketName, objectName string, reader io.Reader, contentType string, progress io.Reader) (n int64, err error) {
metaData := make(map[string][]string)
metaData["Content-Type"] = []string{contentType}
return c.PutObjectWithMetadata(bucketName, objectName, reader, metaData, progress)
}
// PutEncryptedObject - Encrypt and store object.
func (c Client) PutEncryptedObject(bucketName, objectName string, reader io.Reader, encryptMaterials encrypt.Materials, metaData map[string][]string, progress io.Reader) (n int64, err error) {
if encryptMaterials == nil {
return 0, ErrInvalidArgument("Unable to recognize empty encryption properties")
}
if err := encryptMaterials.SetupEncryptMode(reader); err != nil {
return 0, err
}
if metaData == nil {
metaData = make(map[string][]string)
}
// Set the necessary encryption headers, for future decryption.
metaData[amzHeaderIV] = []string{encryptMaterials.GetIV()}
metaData[amzHeaderKey] = []string{encryptMaterials.GetKey()}
metaData[amzHeaderMatDesc] = []string{encryptMaterials.GetDesc()}
return c.PutObjectWithMetadata(bucketName, objectName, encryptMaterials, metaData, progress)
}
// PutObjectWithMetadata - with metadata.
func (c Client) PutObjectWithMetadata(bucketName, objectName string, reader io.Reader, metaData map[string][]string, progress io.Reader) (n int64, err error) {
// Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return 0, err
}
if err := s3utils.CheckValidObjectName(objectName); err != nil {
return 0, err
}
if reader == nil {
return 0, ErrInvalidArgument("Input reader is invalid, cannot be nil.")
}
// Size of the object.
var size int64
// Get reader size.
size, err = getReaderSize(reader)
if err != nil {
return 0, err
}
// Check for largest object size allowed.
if size > int64(maxMultipartPutObjectSize) {
return 0, ErrEntityTooLarge(size, maxMultipartPutObjectSize, bucketName, objectName)
}
// NOTE: Google Cloud Storage does not implement Amazon S3 Compatible multipart PUT.
if s3utils.IsGoogleEndpoint(c.endpointURL) {
// Do not compute MD5 for Google Cloud Storage.
return c.putObjectNoChecksum(bucketName, objectName, reader, size, metaData, progress)
}
// Put small object.
if size < minPartSize && size >= 0 {
return c.putObjectSingle(bucketName, objectName, reader, size, metaData, progress)
}
// For all sizes greater than 5MiB do multipart.
n, err = c.putObjectMultipart(bucketName, objectName, reader, size, metaData, progress)
if err != nil {
errResp := ToErrorResponse(err)
// Check whether multipart functionality is unavailable; if so,
// fall back to a single PutObject operation.
if errResp.Code == "AccessDenied" && strings.Contains(errResp.Message, "Access Denied") {
// Verify if size of reader is greater than '5GiB'.
if size > maxSinglePutObjectSize {
return 0, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName)
}
// Fall back to uploading as single PutObject operation.
return c.putObjectSingle(bucketName, objectName, reader, size, metaData, progress)
}
return n, err
}
return n, nil
}
// PutObjectStreaming using AWS streaming signature V4
func (c Client) PutObjectStreaming(bucketName, objectName string, reader io.Reader) (n int64, err error) {
return c.PutObjectStreamingWithProgress(bucketName, objectName, reader, nil, nil)
}
// PutObjectStreamingWithMetadata using AWS streaming signature V4
func (c Client) PutObjectStreamingWithMetadata(bucketName, objectName string, reader io.Reader, metadata map[string][]string) (n int64, err error) {
return c.PutObjectStreamingWithProgress(bucketName, objectName, reader, metadata, nil)
}
// PutObjectStreamingWithProgress using AWS streaming signature V4
func (c Client) PutObjectStreamingWithProgress(bucketName, objectName string, reader io.Reader, metadata map[string][]string, progress io.Reader) (n int64, err error) {
// NOTE: Streaming signature is not supported by GCS.
if s3utils.IsGoogleEndpoint(c.endpointURL) {
return 0, ErrorResponse{
Code: "NotImplemented",
Message: "AWS streaming signature v4 is not supported with Google Cloud Storage",
Key: objectName,
BucketName: bucketName,
}
}
if c.overrideSignerType.IsV2() {
return 0, ErrorResponse{
Code: "NotImplemented",
Message: "AWS streaming signature v4 is not supported with minio client initialized for AWS signature v2",
Key: objectName,
BucketName: bucketName,
}
}
// Size of the object.
var size int64
// Get reader size.
size, err = getReaderSize(reader)
if err != nil {
return 0, err
}
// Check for largest object size allowed.
if size > int64(maxMultipartPutObjectSize) {
return 0, ErrEntityTooLarge(size, maxMultipartPutObjectSize, bucketName, objectName)
}
// If size cannot be found on a stream, it is not possible
// to upload using streaming signature; fall back to multipart.
if size < 0 {
return c.putObjectMultipartStream(bucketName, objectName, reader, size, metadata, progress)
}
// Set streaming signature.
c.overrideSignerType = credentials.SignatureV4Streaming
if size < minPartSize && size >= 0 {
return c.putObjectNoChecksum(bucketName, objectName, reader, size, metadata, progress)
}
// For all sizes greater than 64MiB do multipart.
n, err = c.putObjectMultipartStreamNoChecksum(bucketName, objectName, reader, size, metadata, progress)
if err != nil {
errResp := ToErrorResponse(err)
// Check whether multipart functionality is unavailable; if so,
// fall back to a single PutObject operation.
if errResp.Code == "AccessDenied" && strings.Contains(errResp.Message, "Access Denied") {
// Verify if size of reader is greater than '5GiB'.
if size > maxSinglePutObjectSize {
return 0, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName)
}
// Fall back to uploading as single PutObject operation.
return c.putObjectNoChecksum(bucketName, objectName, reader, size, metadata, progress)
}
return n, err
}
return n, nil
}


@@ -1,219 +0,0 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package minio
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"sort"
"github.com/minio/minio-go/pkg/s3utils"
)
// uploadedPartRes - the response received from a part upload.
type uploadedPartRes struct {
Error error // Any error encountered while uploading the part.
PartNum int // Number of the part uploaded.
Size int64 // Size of the part uploaded.
Part *ObjectPart
}
type uploadPartReq struct {
PartNum int // Number of the part uploaded.
Part *ObjectPart // Part object, if already uploaded.
}
// putObjectMultipartFromReadAt - Uploads files bigger than 5MiB. Supports readers
// that implement the io.ReaderAt interface (ReadAt method).
//
// NOTE: This function is meant to be used for all readers that
// implement io.ReaderAt, which allows resuming multipart uploads by
// reading at an offset, avoiding re-reading data that was already
// uploaded. Internally this function uses temporary files for staging
// all the data; these temporary files are cleaned up automatically
// when the caller (i.e. the http client) closes the stream after
// uploading all the contents successfully.
func (c Client) putObjectMultipartFromReadAt(bucketName, objectName string, reader io.ReaderAt, size int64, metaData map[string][]string, progress io.Reader) (n int64, err error) {
// Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return 0, err
}
if err := s3utils.CheckValidObjectName(objectName); err != nil {
return 0, err
}
// Initiate a new multipart upload.
uploadID, err := c.newUploadID(bucketName, objectName, metaData)
if err != nil {
return 0, err
}
// Total data read and written to server; should be equal to 'size' at the end of the call.
var totalUploadedSize int64
// Complete multipart upload.
var complMultipartUpload completeMultipartUpload
// Calculate the optimal parts info for a given size.
totalPartsCount, partSize, lastPartSize, err := optimalPartInfo(size)
if err != nil {
return 0, err
}
// Declare a channel that sends the next part number to be uploaded.
// Buffered to 10000 because that's the maximum number of parts allowed
// by S3.
uploadPartsCh := make(chan uploadPartReq, 10000)
// Declare a channel that sends back the response of a part upload.
// Buffered to 10000 because that's the maximum number of parts allowed
// by S3.
uploadedPartsCh := make(chan uploadedPartRes, 10000)
// Used for readability, lastPartNumber is always totalPartsCount.
lastPartNumber := totalPartsCount
// Initialize parts uploaded map.
partsInfo := make(map[int]ObjectPart)
// Send each part number to the channel to be processed.
for p := 1; p <= totalPartsCount; p++ {
part, ok := partsInfo[p]
if ok {
uploadPartsCh <- uploadPartReq{PartNum: p, Part: &part}
} else {
uploadPartsCh <- uploadPartReq{PartNum: p, Part: nil}
}
}
close(uploadPartsCh)
// Receive each part number from the channel allowing three parallel uploads.
for w := 1; w <= totalWorkers; w++ {
go func() {
// Reads default to a 5MiB buffer.
readAtBuffer := make([]byte, optimalReadBufferSize)
// Each worker will draw from the part channel and upload in parallel.
for uploadReq := range uploadPartsCh {
// Declare a new tmpBuffer.
tmpBuffer := new(bytes.Buffer)
// If partNumber was not uploaded we calculate the missing
// part offset and size. For all other part numbers we
// calculate offset based on multiples of partSize.
readOffset := int64(uploadReq.PartNum-1) * partSize
missingPartSize := partSize
// As a special case if partNumber is lastPartNumber, we
// calculate the offset based on the last part size.
if uploadReq.PartNum == lastPartNumber {
readOffset = (size - lastPartSize)
missingPartSize = lastPartSize
}
// Get a section reader on a particular offset.
sectionReader := io.NewSectionReader(reader, readOffset, missingPartSize)
// Choose the needed hash algorithms to be calculated by hashCopyBuffer.
// Sha256 is avoided in non-v4 signature requests or HTTPS connections
hashAlgos, hashSums := c.hashMaterials()
var prtSize int64
var err error
prtSize, err = hashCopyBuffer(hashAlgos, hashSums, tmpBuffer, sectionReader, readAtBuffer)
if err != nil {
// Send the error back through the channel.
uploadedPartsCh <- uploadedPartRes{
Size: 0,
Error: err,
}
// Exit the goroutine.
return
}
// Proceed to upload the part.
var objPart ObjectPart
objPart, err = c.uploadPart(bucketName, objectName, uploadID, tmpBuffer,
uploadReq.PartNum, hashSums["md5"], hashSums["sha256"], prtSize)
if err != nil {
uploadedPartsCh <- uploadedPartRes{
Size: 0,
Error: err,
}
// Exit the goroutine.
return
}
// Save successfully uploaded part metadata.
uploadReq.Part = &objPart
// Send successful part info through the channel.
uploadedPartsCh <- uploadedPartRes{
Size: missingPartSize,
PartNum: uploadReq.PartNum,
Part: uploadReq.Part,
Error: nil,
}
}
}()
}
// Gather the responses as they occur and update any
// progress bar.
for u := 1; u <= totalPartsCount; u++ {
uploadRes := <-uploadedPartsCh
if uploadRes.Error != nil {
return totalUploadedSize, uploadRes.Error
}
// Retrieve each uploaded part and store it to be completed.
// part, ok := partsInfo[uploadRes.PartNum]
part := uploadRes.Part
if part == nil {
return 0, ErrInvalidArgument(fmt.Sprintf("Missing part number %d", uploadRes.PartNum))
}
// Update the totalUploadedSize.
totalUploadedSize += uploadRes.Size
// Update the progress bar if there is one.
if progress != nil {
if _, err = io.CopyN(ioutil.Discard, progress, uploadRes.Size); err != nil {
return totalUploadedSize, err
}
}
// Store the parts to be completed in order.
complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{
ETag: part.ETag,
PartNumber: part.PartNumber,
})
}
// Verify if we uploaded all the data.
if totalUploadedSize != size {
return totalUploadedSize, ErrUnexpectedEOF(totalUploadedSize, size, bucketName, objectName)
}
// Sort all completed parts.
sort.Sort(completedParts(complMultipartUpload.Parts))
_, err = c.completeMultipartUpload(bucketName, objectName, uploadID, complMultipartUpload)
if err != nil {
return totalUploadedSize, err
}
// Return final size.
return totalUploadedSize, nil
}


@@ -0,0 +1,436 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package minio
import (
"fmt"
"io"
"net/http"
"sort"
"strings"
"github.com/minio/minio-go/pkg/s3utils"
)
// PutObjectStreaming using AWS streaming signature V4
func (c Client) PutObjectStreaming(bucketName, objectName string, reader io.Reader) (n int64, err error) {
return c.PutObjectWithProgress(bucketName, objectName, reader, nil, nil)
}
// putObjectMultipartStream - upload a large object using
// multipart upload and streaming signature for signing payload.
// Comprehensive put object operation involving multipart uploads.
//
// Following code handles these types of readers.
//
// - *os.File
// - *minio.Object
// - Any reader which has a method 'ReadAt()'
//
func (c Client) putObjectMultipartStream(bucketName, objectName string,
reader io.Reader, size int64, metadata map[string][]string, progress io.Reader) (n int64, err error) {
// Verify if reader is *minio.Object, *os.File or io.ReaderAt.
// NOTE: Verification of object is kept for a specific purpose
// even though it is going to be duck-typed similar to io.ReaderAt.
// It is to indicate that *minio.Object implements io.ReaderAt,
// and that functionality is used in the subsequent code path.
if isFile(reader) || !isObject(reader) && isReadAt(reader) {
n, err = c.putObjectMultipartStreamFromReadAt(bucketName, objectName, reader.(io.ReaderAt), size, metadata, progress)
} else {
n, err = c.putObjectMultipartStreamNoChecksum(bucketName, objectName, reader, size, metadata, progress)
}
if err != nil {
errResp := ToErrorResponse(err)
// Verify if multipart functionality is not available, if not
// fall back to single PutObject operation.
if errResp.Code == "AccessDenied" && strings.Contains(errResp.Message, "Access Denied") {
// Verify if size of reader is greater than '5GiB'.
if size > maxSinglePutObjectSize {
return 0, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName)
}
// Fall back to uploading as single PutObject operation.
return c.putObjectNoChecksum(bucketName, objectName, reader, size, metadata, progress)
}
}
return n, err
}
// uploadedPartRes - the response received from a part upload.
type uploadedPartRes struct {
Error error // Any error encountered while uploading the part.
PartNum int // Number of the part uploaded.
Size int64 // Size of the part uploaded.
Part *ObjectPart
}
type uploadPartReq struct {
PartNum int // Number of the part uploaded.
Part *ObjectPart // Part info, filled in after a successful upload.
}
// putObjectMultipartStreamFromReadAt - Uploads files bigger than 64MiB.
// Supports all readers which implement the io.ReaderAt interface
// (ReadAt method).
//
// NOTE: This function is meant to be used for all readers which
// implement io.ReaderAt, which allows us to resume multipart
// uploads by reading at an offset, avoiding a re-read of data
// that was already uploaded.
func (c Client) putObjectMultipartStreamFromReadAt(bucketName, objectName string,
reader io.ReaderAt, size int64, metadata map[string][]string, progress io.Reader) (n int64, err error) {
// Input validation.
if err = s3utils.CheckValidBucketName(bucketName); err != nil {
return 0, err
}
if err = s3utils.CheckValidObjectName(objectName); err != nil {
return 0, err
}
// Calculate the optimal parts info for a given size.
totalPartsCount, partSize, lastPartSize, err := optimalPartInfo(size)
if err != nil {
return 0, err
}
// Initiate a new multipart upload.
uploadID, err := c.newUploadID(bucketName, objectName, metadata)
if err != nil {
return 0, err
}
// Aborts the multipart upload in progress, if the
// function returns any error, since we do not resume
// we should purge the parts which have been uploaded
// to relinquish storage space.
defer func() {
if err != nil {
c.abortMultipartUpload(bucketName, objectName, uploadID)
}
}()
// Total data read and written to the server. Should be equal to 'size' at the end of the call.
var totalUploadedSize int64
// Complete multipart upload.
var complMultipartUpload completeMultipartUpload
// Declare a channel that sends the next part number to be uploaded.
// Buffered to 10000 because that's the maximum number of parts allowed
// by S3.
uploadPartsCh := make(chan uploadPartReq, 10000)
// Declare a channel that sends back the response of a part upload.
// Buffered to 10000 because that's the maximum number of parts allowed
// by S3.
uploadedPartsCh := make(chan uploadedPartRes, 10000)
// Used for readability, lastPartNumber is always totalPartsCount.
lastPartNumber := totalPartsCount
// Send each part number to the channel to be processed.
for p := 1; p <= totalPartsCount; p++ {
uploadPartsCh <- uploadPartReq{PartNum: p, Part: nil}
}
close(uploadPartsCh)
// Receive each part number from the channel, allowing totalWorkers parallel uploads.
for w := 1; w <= totalWorkers; w++ {
go func() {
// Each worker will draw from the part channel and upload in parallel.
for uploadReq := range uploadPartsCh {
// If partNumber was not uploaded we calculate the missing
// part offset and size. For all other part numbers we
// calculate offset based on multiples of partSize.
readOffset := int64(uploadReq.PartNum-1) * partSize
// Use a per-part copy of the part size: mutating the shared
// partSize variable from concurrent workers is a data race.
curPartSize := partSize
// As a special case if partNumber is lastPartNumber, we
// calculate the offset based on the last part size.
if uploadReq.PartNum == lastPartNumber {
readOffset = size - lastPartSize
curPartSize = lastPartSize
}
// Get a section reader on a particular offset.
sectionReader := newHook(io.NewSectionReader(reader, readOffset, curPartSize), progress)
// Proceed to upload the part. Use a local error variable so
// that workers do not race on the function's named return.
objPart, err := c.uploadPart(bucketName, objectName, uploadID,
sectionReader, uploadReq.PartNum,
nil, nil, curPartSize, metadata)
if err != nil {
uploadedPartsCh <- uploadedPartRes{
Size: 0,
Error: err,
}
// Exit the goroutine.
return
}
// Save successfully uploaded part metadata.
uploadReq.Part = &objPart
// Send successful part info through the channel.
uploadedPartsCh <- uploadedPartRes{
Size: objPart.Size,
PartNum: uploadReq.PartNum,
Part: uploadReq.Part,
Error: nil,
}
}
}()
}
// Gather the responses as they occur and update any
// progress bar.
for u := 1; u <= totalPartsCount; u++ {
uploadRes := <-uploadedPartsCh
if uploadRes.Error != nil {
return totalUploadedSize, uploadRes.Error
}
// Retrieve each uploaded part and store it to be completed.
part := uploadRes.Part
if part == nil {
return 0, ErrInvalidArgument(fmt.Sprintf("Missing part number %d", uploadRes.PartNum))
}
// Update the totalUploadedSize.
totalUploadedSize += uploadRes.Size
// Store the parts to be completed in order.
complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{
ETag: part.ETag,
PartNumber: part.PartNumber,
})
}
// Verify if we uploaded all the data.
if totalUploadedSize != size {
return totalUploadedSize, ErrUnexpectedEOF(totalUploadedSize, size, bucketName, objectName)
}
// Sort all completed parts.
sort.Sort(completedParts(complMultipartUpload.Parts))
_, err = c.completeMultipartUpload(bucketName, objectName, uploadID, complMultipartUpload)
if err != nil {
return totalUploadedSize, err
}
// Return final size.
return totalUploadedSize, nil
}
func (c Client) putObjectMultipartStreamNoChecksum(bucketName, objectName string,
reader io.Reader, size int64, metadata map[string][]string, progress io.Reader) (n int64, err error) {
// Input validation.
if err = s3utils.CheckValidBucketName(bucketName); err != nil {
return 0, err
}
if err = s3utils.CheckValidObjectName(objectName); err != nil {
return 0, err
}
// Calculate the optimal parts info for a given size.
totalPartsCount, partSize, lastPartSize, err := optimalPartInfo(size)
if err != nil {
return 0, err
}
// Initiates a new multipart request
uploadID, err := c.newUploadID(bucketName, objectName, metadata)
if err != nil {
return 0, err
}
// Aborts the multipart upload if the function returns
// any error, since we do not resume we should purge
// the parts which have been uploaded to relinquish
// storage space.
defer func() {
if err != nil {
c.abortMultipartUpload(bucketName, objectName, uploadID)
}
}()
// Total data read and written to the server. Should be equal to 'size' at the end of the call.
var totalUploadedSize int64
// Initialize parts uploaded map.
partsInfo := make(map[int]ObjectPart)
// Part number always starts with '1'.
var partNumber int
for partNumber = 1; partNumber <= totalPartsCount; partNumber++ {
// Update progress reader appropriately to the latest offset
// as we read from the source.
hookReader := newHook(reader, progress)
// Proceed to upload the part.
if partNumber == totalPartsCount {
partSize = lastPartSize
}
var objPart ObjectPart
objPart, err = c.uploadPart(bucketName, objectName, uploadID,
io.LimitReader(hookReader, partSize),
partNumber, nil, nil, partSize, metadata)
if err != nil {
return totalUploadedSize, err
}
// Save successfully uploaded part metadata.
partsInfo[partNumber] = objPart
// Save successfully uploaded size.
totalUploadedSize += partSize
}
// Verify if we uploaded all the data.
if size > 0 {
if totalUploadedSize != size {
return totalUploadedSize, ErrUnexpectedEOF(totalUploadedSize, size, bucketName, objectName)
}
}
// Complete multipart upload.
var complMultipartUpload completeMultipartUpload
// Loop over total uploaded parts to save them in
// Parts array before completing the multipart request.
for i := 1; i < partNumber; i++ {
part, ok := partsInfo[i]
if !ok {
return 0, ErrInvalidArgument(fmt.Sprintf("Missing part number %d", i))
}
complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{
ETag: part.ETag,
PartNumber: part.PartNumber,
})
}
// Sort all completed parts.
sort.Sort(completedParts(complMultipartUpload.Parts))
_, err = c.completeMultipartUpload(bucketName, objectName, uploadID, complMultipartUpload)
if err != nil {
return totalUploadedSize, err
}
// Return final size.
return totalUploadedSize, nil
}
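Since a plain io.Reader cannot seek, the loop above carves parts sequentially with io.LimitReader. A standalone sketch of that chunking pattern (the sample data and part size are ours):
```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"io/ioutil"
)

func main() {
	// A plain io.Reader with no ReadAt support, as handled by the
	// no-checksum streaming path above.
	src := bytes.NewBufferString("abcdefghij") // 10 bytes
	const partSize = 4

	// Each io.LimitReader wrapper stops after partSize bytes but
	// leaves the underlying reader positioned at the next part,
	// which is why a simple sequential loop can reuse it.
	for part := 1; ; part++ {
		data, err := ioutil.ReadAll(io.LimitReader(src, partSize))
		if err != nil {
			fmt.Println("read error:", err)
			return
		}
		if len(data) == 0 {
			break // source drained
		}
		fmt.Printf("part %d: %q (%d bytes)\n", part, data, len(data))
	}
}
```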
// putObjectNoChecksum - special function used for Google Cloud Storage,
// since Google's multipart API is not S3 compatible.
func (c Client) putObjectNoChecksum(bucketName, objectName string, reader io.Reader, size int64, metaData map[string][]string, progress io.Reader) (n int64, err error) {
// Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return 0, err
}
if err := s3utils.CheckValidObjectName(objectName); err != nil {
return 0, err
}
// Size -1 is only supported on Google Cloud Storage; we error
// out in all other situations.
if size < 0 && !s3utils.IsGoogleEndpoint(c.endpointURL) {
return 0, ErrEntityTooSmall(size, bucketName, objectName)
}
if size > 0 {
if isReadAt(reader) && !isObject(reader) {
reader = io.NewSectionReader(reader.(io.ReaderAt), 0, size)
}
}
// Update progress reader appropriately to the latest offset as we
// read from the source.
readSeeker := newHook(reader, progress)
// This function does not calculate sha256 and md5sum for payload.
// Execute put object.
st, err := c.putObjectDo(bucketName, objectName, readSeeker, nil, nil, size, metaData)
if err != nil {
return 0, err
}
if st.Size != size {
return 0, ErrUnexpectedEOF(st.Size, size, bucketName, objectName)
}
return size, nil
}
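The io.NewSectionReader wrapping above is what bounds the upload body to exactly `size` bytes when the reader supports ReadAt. A tiny standalone illustration (the sample data is ours):
```go
package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"strings"
)

func main() {
	// strings.Reader implements io.ReaderAt, like *os.File.
	ra := strings.NewReader("0123456789")
	// Bound the body to the first 4 bytes, regardless of how much
	// data the underlying reader actually holds.
	sec := io.NewSectionReader(ra, 0, 4)
	data, err := ioutil.ReadAll(sec)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Printf("%q\n", data) // "0123"
}
```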
// putObjectDo - executes the put object http operation.
// NOTE: You must have WRITE permissions on a bucket to add an object to it.
func (c Client) putObjectDo(bucketName, objectName string, reader io.Reader, md5Sum []byte, sha256Sum []byte, size int64, metaData map[string][]string) (ObjectInfo, error) {
// Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return ObjectInfo{}, err
}
if err := s3utils.CheckValidObjectName(objectName); err != nil {
return ObjectInfo{}, err
}
// Set headers.
customHeader := make(http.Header)
// Set metadata to headers
for k, v := range metaData {
if len(v) > 0 {
customHeader.Set(k, v[0])
}
}
// If Content-Type is not provided, set the default application/octet-stream one
if v, ok := metaData["Content-Type"]; !ok || len(v) == 0 {
customHeader.Set("Content-Type", "application/octet-stream")
}
// Populate request metadata.
reqMetadata := requestMetadata{
bucketName: bucketName,
objectName: objectName,
customHeader: customHeader,
contentBody: reader,
contentLength: size,
contentMD5Bytes: md5Sum,
contentSHA256Bytes: sha256Sum,
}
// Execute PUT on objectName.
resp, err := c.executeMethod("PUT", reqMetadata)
defer closeResponse(resp)
if err != nil {
return ObjectInfo{}, err
}
if resp != nil {
if resp.StatusCode != http.StatusOK {
return ObjectInfo{}, httpRespToErrorResponse(resp, bucketName, objectName)
}
}
var objInfo ObjectInfo
// Trim off the odd double quotes from ETag in the beginning and end.
objInfo.ETag = strings.TrimPrefix(resp.Header.Get("ETag"), "\"")
objInfo.ETag = strings.TrimSuffix(objInfo.ETag, "\"")
// A success here means data was written to server successfully.
objInfo.Size = size
// Return here.
return objInfo, nil
}

View File

@ -18,13 +18,12 @@ package minio
import (
"io"
"io/ioutil"
"net/http"
"os"
"reflect"
"runtime"
"strings"
"github.com/minio/minio-go/pkg/credentials"
"github.com/minio/minio-go/pkg/s3utils"
)
@ -143,164 +142,79 @@ func (a completedParts) Less(i, j int) bool { return a[i].PartNumber < a[j].Part
//
// You must have WRITE permissions on a bucket to create an object.
//
// - For size smaller than 64MiB PutObject automatically does a single atomic Put operation.
// - For size larger than 64MiB PutObject automatically does a multipart Put operation.
// - For size input as -1 PutObject does a multipart Put operation until input stream reaches EOF.
// Maximum object size that can be uploaded through this operation will be 5TiB.
//
// NOTE: Google Cloud Storage does not implement Amazon S3 Compatible multipart PUT.
// So we fall back to single PUT operation with the maximum limit of 5GiB.
// - For size smaller than 64MiB PutObject automatically does a
// single atomic Put operation.
// - For size larger than 64MiB PutObject automatically does a
// multipart Put operation.
// - For size input as -1 PutObject does a multipart Put operation
// until input stream reaches EOF. Maximum object size that can
// be uploaded through this operation will be 5TiB.
func (c Client) PutObject(bucketName, objectName string, reader io.Reader, contentType string) (n int64, err error) {
return c.PutObjectWithProgress(bucketName, objectName, reader, contentType, nil)
return c.PutObjectWithMetadata(bucketName, objectName, reader, map[string][]string{
"Content-Type": []string{contentType},
}, nil)
}
// putObjectNoChecksum special function used Google Cloud Storage. This special function
// is used for Google Cloud Storage since Google's multipart API is not S3 compatible.
func (c Client) putObjectNoChecksum(bucketName, objectName string, reader io.Reader, size int64, metaData map[string][]string, progress io.Reader) (n int64, err error) {
// Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return 0, err
}
if err := s3utils.CheckValidObjectName(objectName); err != nil {
return 0, err
}
if size > 0 {
readerAt, ok := reader.(io.ReaderAt)
if ok {
reader = io.NewSectionReader(readerAt, 0, size)
}
}
// PutObjectWithSize - is a helper PutObject similar in behavior to PutObject(),
// but it takes the size argument explicitly. This function avoids doing reflection
// internally to figure out the size of the input stream. If the input size is
// less than 0, this function returns an error.
func (c Client) PutObjectWithSize(bucketName, objectName string, reader io.Reader, readerSize int64, metadata map[string][]string, progress io.Reader) (n int64, err error) {
return c.putObjectCommon(bucketName, objectName, reader, readerSize, metadata, progress)
}
// Update progress reader appropriately to the latest offset as we
// read from the source.
readSeeker := newHook(reader, progress)
// PutObjectWithMetadata using AWS streaming signature V4
func (c Client) PutObjectWithMetadata(bucketName, objectName string, reader io.Reader, metadata map[string][]string, progress io.Reader) (n int64, err error) {
return c.PutObjectWithProgress(bucketName, objectName, reader, metadata, progress)
}
// This function does not calculate sha256 and md5sum for payload.
// Execute put object.
st, err := c.putObjectDo(bucketName, objectName, readSeeker, nil, nil, size, metaData)
// PutObjectWithProgress using AWS streaming signature V4
func (c Client) PutObjectWithProgress(bucketName, objectName string, reader io.Reader, metadata map[string][]string, progress io.Reader) (n int64, err error) {
// Size of the object.
var size int64
// Get reader size.
size, err = getReaderSize(reader)
if err != nil {
return 0, err
}
if st.Size != size {
return 0, ErrUnexpectedEOF(st.Size, size, bucketName, objectName)
}
return size, nil
return c.putObjectCommon(bucketName, objectName, reader, size, metadata, progress)
}
// putObjectSingle is a special function for uploading single put object request.
// This special function is used as a fallback when multipart upload fails.
func (c Client) putObjectSingle(bucketName, objectName string, reader io.Reader, size int64, metaData map[string][]string, progress io.Reader) (n int64, err error) {
// Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return 0, err
}
if err := s3utils.CheckValidObjectName(objectName); err != nil {
return 0, err
}
if size > maxSinglePutObjectSize {
return 0, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName)
}
// If size is a stream, upload up to 5GiB.
if size <= -1 {
size = maxSinglePutObjectSize
func (c Client) putObjectCommon(bucketName, objectName string, reader io.Reader, size int64, metadata map[string][]string, progress io.Reader) (n int64, err error) {
// Check for largest object size allowed.
if size > int64(maxMultipartPutObjectSize) {
return 0, ErrEntityTooLarge(size, maxMultipartPutObjectSize, bucketName, objectName)
}
// Add the appropriate hash algorithms that need to be calculated by hashCopyN
// In case of non-v4 signature request or HTTPS connection, sha256 is not needed.
hashAlgos, hashSums := c.hashMaterials()
// Initialize a new temporary file.
tmpFile, err := newTempFile("single$-putobject-single")
if err != nil {
return 0, err
}
defer tmpFile.Close()
size, err = hashCopyN(hashAlgos, hashSums, tmpFile, reader, size)
// Return error if its not io.EOF.
if err != nil && err != io.EOF {
return 0, err
// NOTE: Streaming signature is not supported by GCS.
if s3utils.IsGoogleEndpoint(c.endpointURL) {
// Do not compute MD5 for Google Cloud Storage.
return c.putObjectNoChecksum(bucketName, objectName, reader, size, metadata, progress)
}
// Seek back to beginning of the temporary file.
if _, err = tmpFile.Seek(0, 0); err != nil {
return 0, err
}
reader = tmpFile
// Execute put object.
st, err := c.putObjectDo(bucketName, objectName, reader, hashSums["md5"], hashSums["sha256"], size, metaData)
if err != nil {
return 0, err
}
if st.Size != size {
return 0, ErrUnexpectedEOF(st.Size, size, bucketName, objectName)
}
// Progress the reader to the size if putObjectDo is successful.
if progress != nil {
if _, err = io.CopyN(ioutil.Discard, progress, size); err != nil {
return size, err
if c.overrideSignerType.IsV2() {
if size >= 0 && size < minPartSize {
return c.putObjectNoChecksum(bucketName, objectName, reader, size, metadata, progress)
}
return c.putObjectMultipart(bucketName, objectName, reader, size, metadata, progress)
}
return size, nil
}
// putObjectDo - executes the put object http operation.
// NOTE: You must have WRITE permissions on a bucket to add an object to it.
func (c Client) putObjectDo(bucketName, objectName string, reader io.Reader, md5Sum []byte, sha256Sum []byte, size int64, metaData map[string][]string) (ObjectInfo, error) {
// Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return ObjectInfo{}, err
}
if err := s3utils.CheckValidObjectName(objectName); err != nil {
return ObjectInfo{}, err
}
// Set headers.
customHeader := make(http.Header)
// Set metadata to headers
for k, v := range metaData {
if len(v) > 0 {
customHeader.Set(k, v[0])
}
}
// If Content-Type is not provided, set the default application/octet-stream one
if v, ok := metaData["Content-Type"]; !ok || len(v) == 0 {
customHeader.Set("Content-Type", "application/octet-stream")
}
// Populate request metadata.
reqMetadata := requestMetadata{
bucketName: bucketName,
objectName: objectName,
customHeader: customHeader,
contentBody: reader,
contentLength: size,
contentMD5Bytes: md5Sum,
contentSHA256Bytes: sha256Sum,
}
// Execute PUT an objectName.
resp, err := c.executeMethod("PUT", reqMetadata)
defer closeResponse(resp)
if err != nil {
return ObjectInfo{}, err
}
if resp != nil {
if resp.StatusCode != http.StatusOK {
return ObjectInfo{}, httpRespToErrorResponse(resp, bucketName, objectName)
}
}
var objInfo ObjectInfo
// Trim off the odd double quotes from ETag in the beginning and end.
objInfo.ETag = strings.TrimPrefix(resp.Header.Get("ETag"), "\"")
objInfo.ETag = strings.TrimSuffix(objInfo.ETag, "\"")
// A success here means data was written to server successfully.
objInfo.Size = size
// Return here.
return objInfo, nil
// If size cannot be found on a stream, it is not possible
// to upload using streaming signature; fall back to multipart.
if size < 0 {
// Set regular signature calculation.
c.overrideSignerType = credentials.SignatureV4
return c.putObjectMultipart(bucketName, objectName, reader, size, metadata, progress)
}
// Set streaming signature.
c.overrideSignerType = credentials.SignatureV4Streaming
if size < minPartSize {
return c.putObjectNoChecksum(bucketName, objectName, reader, size, metadata, progress)
}
// For all sizes greater than 64MiB do multipart.
return c.putObjectMultipartStream(bucketName, objectName, reader, size, metadata, progress)
}
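A hedged caller-side sketch of the routing above via PutObjectWithSize, whose signature appears earlier in this diff; the endpoint, credentials, bucket and file names are placeholders:
```go
package main

import (
	"log"
	"os"

	minio "github.com/minio/minio-go"
)

func main() {
	// Placeholder endpoint and credentials.
	c, err := minio.New("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatalln(err)
	}
	f, err := os.Open("my-testfile")
	if err != nil {
		log.Fatalln(err)
	}
	defer f.Close()
	st, err := f.Stat()
	if err != nil {
		log.Fatalln(err)
	}
	// Passing the size explicitly skips the reflection-based size
	// detection done by PutObjectWithProgress.
	n, err := c.PutObjectWithSize("my-bucketname", "my-objectname", f, st.Size(),
		map[string][]string{"Content-Type": {"application/octet-stream"}}, nil)
	if err != nil {
		log.Fatalln(err)
	}
	log.Println("uploaded", n, "bytes")
}
```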

View File

@ -167,11 +167,6 @@ func (c Client) statObject(bucketName, objectName string, reqHeaders RequestHead
contentType = "application/octet-stream"
}
// Extract only the relevant header keys describing the object.
// following function filters out a list of standard set of keys
// which are not part of object metadata.
metadata := extractObjMetadata(resp.Header)
// Save object metadata info.
return ObjectInfo{
ETag: md5sum,
@ -179,6 +174,9 @@ func (c Client) statObject(bucketName, objectName string, reqHeaders RequestHead
Size: size,
LastModified: date,
ContentType: contentType,
Metadata: metadata,
// Extract only the relevant header keys describing the object.
// following function filters out a list of standard set of keys
// which are not part of object metadata.
Metadata: extractObjMetadata(resp.Header),
}, nil
}

View File

@ -87,7 +87,7 @@ type Client struct {
// Global constants.
const (
libraryName = "minio-go"
libraryVersion = "2.1.0"
libraryVersion = "3.0.0"
)
// User Agent should always follow the below style.
@ -211,7 +211,7 @@ func privateNew(endpoint string, creds *credentials.Credentials, secure bool, re
// Instantiate http client and bucket location cache.
clnt.httpClient = &http.Client{
Transport: http.DefaultTransport,
Transport: defaultMinioTransport,
CheckRedirect: redirectHeaders,
}

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -182,27 +182,6 @@ func TestValidBucketLocation(t *testing.T) {
}
}
// Tests temp file.
func TestTempFile(t *testing.T) {
tmpFile, err := newTempFile("testing")
if err != nil {
t.Fatal("Error:", err)
}
fileName := tmpFile.Name()
// Closing temporary file purges the file.
err = tmpFile.Close()
if err != nil {
t.Fatal("Error:", err)
}
st, err := os.Stat(fileName)
if err != nil && !os.IsNotExist(err) {
t.Fatal("Error:", err)
}
if err == nil && st != nil {
t.Fatal("Error: file should be deleted and should not exist.")
}
}
// Tests error response structure.
func TestErrorResponse(t *testing.T) {
var err error

View File

@ -213,20 +213,24 @@ func (c Client) getBucketLocationRequest(bucketName string) (*http.Request, erro
signerType = credentials.SignatureAnonymous
}
// Set sha256 sum for signature calculation only with signature version '4'.
switch {
case signerType.IsV4():
var contentSha256 string
if c.secure {
contentSha256 = unsignedPayload
} else {
contentSha256 = hex.EncodeToString(sum256([]byte{}))
}
req.Header.Set("X-Amz-Content-Sha256", contentSha256)
req = s3signer.SignV4(*req, accessKeyID, secretAccessKey, sessionToken, "us-east-1")
case signerType.IsV2():
req = s3signer.SignV2(*req, accessKeyID, secretAccessKey)
if signerType.IsAnonymous() {
return req, nil
}
if signerType.IsV2() {
req = s3signer.SignV2(*req, accessKeyID, secretAccessKey)
return req, nil
}
// Set sha256 sum for signature calculation only with signature version '4'.
var contentSha256 string
if c.secure {
contentSha256 = unsignedPayload
} else {
contentSha256 = hex.EncodeToString(sum256([]byte{}))
}
req.Header.Set("X-Amz-Content-Sha256", contentSha256)
req = s3signer.SignV4(*req, accessKeyID, secretAccessKey, sessionToken, "us-east-1")
return req, nil
}

View File

@ -18,10 +18,18 @@ package minio
/// Multipart upload defaults.
// miniPartSize - minimum part size 64MiB per object after which
// absMinPartSize - absolute minimum part size (5 MiB) below which
// a part in a multipart upload may not be uploaded.
const absMinPartSize = 1024 * 1024 * 5
// minPartSize - minimum part size 64MiB per object after which
// putObject behaves internally as multipart.
const minPartSize = 1024 * 1024 * 64
// copyPartSize - default (and maximum) part size to copy in a
// copy-object request (5GiB)
const copyPartSize = 1024 * 1024 * 1024 * 5
// maxPartsCount - maximum number of parts for a single multipart session.
const maxPartsCount = 10000
@ -37,10 +45,6 @@ const maxSinglePutObjectSize = 1024 * 1024 * 1024 * 5
// Multipart operation.
const maxMultipartPutObjectSize = 1024 * 1024 * 1024 * 1024 * 5
// optimalReadBufferSize - optimal buffer 5MiB used for reading
// through Read operation.
const optimalReadBufferSize = 1024 * 1024 * 5
// unsignedPayload - value to be set to X-Amz-Content-Sha256 header when
// we don't want to sign the request payload
const unsignedPayload = "UNSIGNED-PAYLOAD"
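A rough sketch of the arithmetic these constants impose on part sizing; the real optimalPartInfo lives elsewhere in the package and may round differently, so treat `roughPartInfo` as illustrative only:
```go
package main

import "fmt"

const (
	minPartSize   = 1024 * 1024 * 64
	maxPartsCount = 10000
)

// roughPartInfo mirrors the constraint the constants encode: parts
// are at least 64 MiB and there can be at most 10000 of them, so
// very large objects force a larger part size.
func roughPartInfo(size int64) (totalParts int, partSize, lastPartSize int64) {
	partSize = int64(minPartSize)
	// Grow the part size until the object fits in maxPartsCount parts.
	for size/partSize >= int64(maxPartsCount) {
		partSize *= 2
	}
	totalParts = int((size + partSize - 1) / partSize)
	lastPartSize = size - int64(totalParts-1)*partSize
	return totalParts, partSize, lastPartSize
}

func main() {
	size := int64(1024 * 1024 * 1024) // 1 GiB
	n, ps, last := roughPartInfo(size)
	fmt.Println(n, ps, last) // 16 parts of 64 MiB, last part 64 MiB
}
```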

View File

@ -1,99 +0,0 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package minio
import (
"net/http"
"time"
)
// copyCondition explanation:
// http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectCOPY.html
//
// Example:
//
// copyCondition {
// key: "x-amz-copy-if-modified-since",
// value: "Tue, 15 Nov 1994 12:45:26 GMT",
// }
//
type copyCondition struct {
key string
value string
}
// CopyConditions - copy conditions.
type CopyConditions struct {
conditions []copyCondition
}
// NewCopyConditions - Instantiate new list of conditions. This
// function is left behind for backward compatibility. The idiomatic
// way to set an empty set of copy conditions is,
// ``copyConditions := CopyConditions{}``.
//
func NewCopyConditions() CopyConditions {
return CopyConditions{}
}
// SetMatchETag - set match etag.
func (c *CopyConditions) SetMatchETag(etag string) error {
if etag == "" {
return ErrInvalidArgument("ETag cannot be empty.")
}
c.conditions = append(c.conditions, copyCondition{
key: "x-amz-copy-source-if-match",
value: etag,
})
return nil
}
// SetMatchETagExcept - set match etag except.
func (c *CopyConditions) SetMatchETagExcept(etag string) error {
if etag == "" {
return ErrInvalidArgument("ETag cannot be empty.")
}
c.conditions = append(c.conditions, copyCondition{
key: "x-amz-copy-source-if-none-match",
value: etag,
})
return nil
}
// SetUnmodified - set unmodified time since.
func (c *CopyConditions) SetUnmodified(modTime time.Time) error {
if modTime.IsZero() {
return ErrInvalidArgument("Modified since cannot be empty.")
}
c.conditions = append(c.conditions, copyCondition{
key: "x-amz-copy-source-if-unmodified-since",
value: modTime.Format(http.TimeFormat),
})
return nil
}
// SetModified - set modified time since.
func (c *CopyConditions) SetModified(modTime time.Time) error {
if modTime.IsZero() {
return ErrInvalidArgument("Modified since cannot be empty.")
}
c.conditions = append(c.conditions, copyCondition{
key: "x-amz-copy-source-if-modified-since",
value: modTime.Format(http.TimeFormat),
})
return nil
}

View File

@ -70,7 +70,13 @@ func (c Core) ListMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, de
// PutObjectPart - Upload an object part.
func (c Core) PutObjectPart(bucket, object, uploadID string, partID int, size int64, data io.Reader, md5Sum, sha256Sum []byte) (ObjectPart, error) {
return c.uploadPart(bucket, object, uploadID, data, partID, md5Sum, sha256Sum, size)
return c.PutObjectPartWithMetadata(bucket, object, uploadID, partID, size, data, md5Sum, sha256Sum, nil)
}
// PutObjectPartWithMetadata - upload an object part with additional request metadata.
func (c Core) PutObjectPartWithMetadata(bucket, object, uploadID string, partID int,
size int64, data io.Reader, md5Sum, sha256Sum []byte, metadata map[string][]string) (ObjectPart, error) {
return c.uploadPart(bucket, object, uploadID, data, partID, md5Sum, sha256Sum, size, metadata)
}
// ListObjectParts - List uploaded parts of an incomplete upload.
@ -80,7 +86,9 @@ func (c Core) ListObjectParts(bucket, object, uploadID string, partNumberMarker
// CompleteMultipartUpload - Concatenate uploaded parts and commit to an object.
func (c Core) CompleteMultipartUpload(bucket, object, uploadID string, parts []CompletePart) error {
_, err := c.completeMultipartUpload(bucket, object, uploadID, completeMultipartUpload{Parts: parts})
_, err := c.completeMultipartUpload(bucket, object, uploadID, completeMultipartUpload{
Parts: parts,
})
return err
}

View File

@ -18,14 +18,15 @@ package minio
import (
"bytes"
"crypto/md5"
"io"
"math/rand"
"log"
"os"
"reflect"
"testing"
"time"
"crypto/md5"
"math/rand"
)
const (
@ -35,6 +36,33 @@ const (
enableSecurity = "ENABLE_HTTPS"
)
// Minimum part size
const MinPartSize = 1024 * 1024 * 64
const letterBytes = "abcdefghijklmnopqrstuvwxyz01234569"
const (
letterIdxBits = 6 // 6 bits to represent a letter index
letterIdxMask = 1<<letterIdxBits - 1 // All 1-bits, as many as letterIdxBits
letterIdxMax = 63 / letterIdxBits // # of letter indices fitting in 63 bits
)
// randString generates random names and prepends them with a known prefix.
func randString(n int, src rand.Source, prefix string) string {
b := make([]byte, n)
// A rand.Int63() generates 63 random bits, enough for letterIdxMax letters!
for i, cache, remain := n-1, src.Int63(), letterIdxMax; i >= 0; {
if remain == 0 {
cache, remain = src.Int63(), letterIdxMax
}
if idx := int(cache & letterIdxMask); idx < len(letterBytes) {
b[i] = letterBytes[idx]
i--
}
cache >>= letterIdxBits
remain--
}
return prefix + string(b[0:30-len(prefix)])
}
// Tests for Core GetObject() function.
func TestGetObjectCore(t *testing.T) {
if testing.Short() {
@ -209,6 +237,76 @@ func TestGetObjectCore(t *testing.T) {
}
}
// Tests that GetObject returns the Content-Encoding properly set
// and that it overrides any auto decoding.
func TestGetObjectContentEncoding(t *testing.T) {
if testing.Short() {
t.Skip("skipping functional tests for the short runs")
}
// Seed random based on current time.
rand.Seed(time.Now().Unix())
// Instantiate new minio core client object.
c, err := NewCore(
os.Getenv(serverEndpoint),
os.Getenv(accessKey),
os.Getenv(secretKey),
mustParseBool(os.Getenv(enableSecurity)),
)
if err != nil {
t.Fatal("Error:", err)
}
// Enable tracing, write to stderr.
// c.TraceOn(os.Stderr)
// Set user agent.
c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
// Generate a new random bucket name.
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
// Make a new bucket.
err = c.MakeBucket(bucketName, "us-east-1")
if err != nil {
t.Fatal("Error:", err, bucketName)
}
// Generate data more than 32K
buf := bytes.Repeat([]byte("3"), rand.Intn(1<<20)+32*1024)
m := make(map[string][]string)
m["Content-Encoding"] = []string{"gzip"}
// Save the data
objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
n, err := c.Client.PutObjectWithMetadata(bucketName, objectName, bytes.NewReader(buf), m, nil)
if err != nil {
t.Fatal("Error:", err, bucketName, objectName)
}
if n != int64(len(buf)) {
t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", len(buf), n)
}
reqHeaders := NewGetReqHeaders()
rwc, objInfo, err := c.GetObject(bucketName, objectName, reqHeaders)
if err != nil {
t.Fatalf("Error: %v", err)
}
rwc.Close()
if objInfo.Size <= 0 {
t.Fatalf("Unexpected size of the object %v, expected %v", objInfo.Size, n)
}
value, ok := objInfo.Metadata["Content-Encoding"]
if !ok {
t.Fatalf("Expected Content-Encoding metadata to be set.")
}
if value[0] != "gzip" {
t.Fatalf("Unexpected content-encoding found, want gzip, got %v", value)
}
}
// Tests get bucket policy core API.
func TestGetBucketPolicy(t *testing.T) {
if testing.Short() {
@ -373,3 +471,48 @@ func TestCorePutObject(t *testing.T) {
t.Fatal("Error:", err)
}
}
func TestCoreGetObjectMetadata(t *testing.T) {
if testing.Short() {
t.Skip("skipping functional tests for the short runs")
}
core, err := NewCore(
os.Getenv(serverEndpoint),
os.Getenv(accessKey),
os.Getenv(secretKey),
mustParseBool(os.Getenv(enableSecurity)))
if err != nil {
log.Fatalln(err)
}
// Generate a new random bucket name.
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
// Make a new bucket.
err = core.MakeBucket(bucketName, "us-east-1")
if err != nil {
t.Fatal("Error:", err, bucketName)
}
metadata := map[string][]string{
"X-Amz-Meta-Key-1": {"Val-1"},
}
_, err = core.PutObject(bucketName, "my-objectname", 5,
bytes.NewReader([]byte("hello")), nil, nil, metadata)
if err != nil {
log.Fatalln(err)
}
reader, objInfo, err := core.GetObject(bucketName, "my-objectname",
RequestHeaders{})
if err != nil {
log.Fatalln(err)
}
defer reader.Close()
if objInfo.Metadata.Get("X-Amz-Meta-Key-1") != "Val-1" {
log.Fatalln("Expected metadata to be available but wasn't")
}
}

View File

@ -50,17 +50,21 @@ func main() {
}
```
| Bucket operations |Object operations | Encrypted Object operations | Presigned operations | Bucket Policy/Notification Operations | Client custom settings |
|:---|:---|:---|:---|:---|:---|
|[`MakeBucket`](#MakeBucket) |[`GetObject`](#GetObject) | [`NewSymmetricKey`](#NewSymmetricKey) | [`PresignedGetObject`](#PresignedGetObject) |[`SetBucketPolicy`](#SetBucketPolicy) | [`SetAppInfo`](#SetAppInfo) |
|[`ListBuckets`](#ListBuckets) |[`PutObject`](#PutObject) | [`NewAsymmetricKey`](#NewAsymmetricKey) |[`PresignedPutObject`](#PresignedPutObject) | [`GetBucketPolicy`](#GetBucketPolicy) | [`SetCustomTransport`](#SetCustomTransport) |
|[`BucketExists`](#BucketExists) |[`CopyObject`](#CopyObject) | [`GetEncryptedObject`](#GetEncryptedObject) |[`PresignedPostPolicy`](#PresignedPostPolicy) | [`ListBucketPolicies`](#ListBucketPolicies) | [`TraceOn`](#TraceOn) |
| [`RemoveBucket`](#RemoveBucket) |[`StatObject`](#StatObject) | [`PutObjectStreaming`](#PutObjectStreaming) | | [`SetBucketNotification`](#SetBucketNotification) | [`TraceOff`](#TraceOff) |
|[`ListObjects`](#ListObjects) |[`RemoveObject`](#RemoveObject) | [`PutEncryptedObject`](#PutEncryptedObject) | | [`GetBucketNotification`](#GetBucketNotification) | [`SetS3TransferAccelerate`](#SetS3TransferAccelerate) |
|[`ListObjectsV2`](#ListObjectsV2) | [`RemoveObjects`](#RemoveObjects) | | | [`RemoveAllBucketNotification`](#RemoveAllBucketNotification) |
|[`ListIncompleteUploads`](#ListIncompleteUploads) | [`RemoveIncompleteUpload`](#RemoveIncompleteUpload) | | | [`ListenBucketNotification`](#ListenBucketNotification) |
| | [`FPutObject`](#FPutObject) | | | |
| | [`FGetObject`](#FGetObject) | | | |
| Bucket operations | Object operations | Encrypted Object operations | Presigned operations | Bucket Policy/Notification Operations | Client custom settings |
| :--- | :--- | :--- | :--- | :--- | :--- |
| [`MakeBucket`](#MakeBucket) | [`GetObject`](#GetObject) | [`NewSymmetricKey`](#NewSymmetricKey) | [`PresignedGetObject`](#PresignedGetObject) | [`SetBucketPolicy`](#SetBucketPolicy) | [`SetAppInfo`](#SetAppInfo) |
| [`ListBuckets`](#ListBuckets) | [`PutObject`](#PutObject) | [`NewAsymmetricKey`](#NewAsymmetricKey) | [`PresignedPutObject`](#PresignedPutObject) | [`GetBucketPolicy`](#GetBucketPolicy) | [`SetCustomTransport`](#SetCustomTransport) |
| [`BucketExists`](#BucketExists) | [`CopyObject`](#CopyObject) | [`GetEncryptedObject`](#GetEncryptedObject) | [`PresignedPostPolicy`](#PresignedPostPolicy) | [`ListBucketPolicies`](#ListBucketPolicies) | [`TraceOn`](#TraceOn) |
| [`RemoveBucket`](#RemoveBucket) | [`StatObject`](#StatObject) | [`PutObjectStreaming`](#PutObjectStreaming) | | [`SetBucketNotification`](#SetBucketNotification) | [`TraceOff`](#TraceOff) |
| [`ListObjects`](#ListObjects) | [`RemoveObject`](#RemoveObject) | [`PutEncryptedObject`](#PutEncryptedObject) | | [`GetBucketNotification`](#GetBucketNotification) | [`SetS3TransferAccelerate`](#SetS3TransferAccelerate) |
| [`ListObjectsV2`](#ListObjectsV2) | [`RemoveObjects`](#RemoveObjects) | [`NewSSEInfo`](#NewSSEInfo) | | [`RemoveAllBucketNotification`](#RemoveAllBucketNotification) | |
| [`ListIncompleteUploads`](#ListIncompleteUploads) | [`RemoveIncompleteUpload`](#RemoveIncompleteUpload) | | | [`ListenBucketNotification`](#ListenBucketNotification) | |
| | [`FPutObject`](#FPutObject) | | | | |
| | [`FGetObject`](#FGetObject) | | | | |
| | [`ComposeObject`](#ComposeObject) | | | | |
| | [`NewSourceInfo`](#NewSourceInfo) | | | | |
| | [`NewDestinationInfo`](#NewDestinationInfo) | | | | |
## 1. Constructor
<a name="Minio"></a>
@ -502,9 +506,11 @@ if err != nil {
<a name="CopyObject"></a>
### CopyObject(bucketName, objectName, objectSource string, conditions CopyConditions) error
### CopyObject(dst DestinationInfo, src SourceInfo) error
Copy a source object into a new object with the provided name in the provided bucket.
Create or replace an object through server-side copying of an existing object. It supports conditional copying, copying a part of an object and server-side encryption of destination and decryption of source. See the `SourceInfo` and `DestinationInfo` types for further details.
To copy multiple source objects into a single destination object see the `ComposeObject` API.
__Parameters__
@ -512,50 +518,169 @@ __Parameters__
|Param |Type |Description |
|:---|:---| :---|
|`bucketName` | _string_ |Name of the bucket |
|`objectName` | _string_ |Name of the object |
|`objectSource` | _string_ |Name of the source object |
|`conditions` | _CopyConditions_ |Collection of supported CopyObject conditions. [`x-amz-copy-source`, `x-amz-copy-source-if-match`, `x-amz-copy-source-if-none-match`, `x-amz-copy-source-if-unmodified-since`, `x-amz-copy-source-if-modified-since`]|
|`dst` | _DestinationInfo_ |Argument describing the destination object |
|`src` | _SourceInfo_ |Argument describing the source object |
__Example__
```go
// Use-case-1
// To copy an existing object to a new object with _no_ copy conditions.
copyConds := minio.CopyConditions{}
err := minioClient.CopyObject("mybucket", "myobject", "my-sourcebucketname/my-sourceobjectname", copyConds)
// Use-case 1: Simple copy object with no conditions, etc
// Source object
src := minio.NewSourceInfo("my-sourcebucketname", "my-sourceobjectname", nil)
// Destination object
dst, err := minio.NewDestinationInfo("my-bucketname", "my-objectname", nil, nil)
if err != nil {
fmt.Println(err)
return
}
// Use-case-2
// To copy an existing object to a new object with the following copy conditions
// Copy object call
err = s3Client.CopyObject(dst, src)
if err != nil {
fmt.Println(err)
return
}
// Use-case 2: Copy object with copy-conditions, and copying only part of the source object.
// 1. that matches a given ETag
// 2. and modified after 1st April 2014
// 3. but unmodified since 23rd April 2014
// 4. copy only first 1MiB of object.
// Initialize empty copy conditions.
var copyConds = minio.CopyConditions{}
// Source object
src := minio.NewSourceInfo("my-sourcebucketname", "my-sourceobjectname", nil)
// copy object that matches the given ETag.
copyConds.SetMatchETag("31624deb84149d2f8ef9c385918b653a")
// Set matching ETag condition, copy object which matches the following ETag.
src.SetMatchETagCond("31624deb84149d2f8ef9c385918b653a")
// and modified after 1st April 2014
copyConds.SetModified(time.Date(2014, time.April, 1, 0, 0, 0, 0, time.UTC))
// Set modified condition, copy object modified since 2014 April 1.
src.SetModifiedSinceCond(time.Date(2014, time.April, 1, 0, 0, 0, 0, time.UTC))
// but unmodified since 23rd April 2014
copyConds.SetUnmodified(time.Date(2014, time.April, 23, 0, 0, 0, 0, time.UTC))
// Set unmodified condition, copy object unmodified since 2014 April 23.
src.SetUnmodifiedSinceCond(time.Date(2014, time.April, 23, 0, 0, 0, 0, time.UTC))
err := minioClient.CopyObject("mybucket", "myobject", "my-sourcebucketname/my-sourceobjectname", copyConds)
// Set copy-range of only first 1MiB of file.
src.SetRange(0, 1024*1024-1)
// Destination object
dst, err := minio.NewDestinationInfo("my-bucketname", "my-objectname", nil, nil)
if err != nil {
fmt.Println(err)
return
}
// Copy object call
err = s3Client.CopyObject(dst, src)
if err != nil {
fmt.Println(err)
return
}
```
<a name="ComposeObject"></a>
### ComposeObject(dst DestinationInfo, srcs []SourceInfo) error
Create an object by concatenating a list of source objects using
server-side copying.
__Parameters__
|Param |Type |Description |
|:---|:---|:---|
|`dst` | _minio.DestinationInfo_ |Struct with info about the object to be created. |
|`srcs` | _[]minio.SourceInfo_ |Slice of struct with info about source objects to be concatenated in order. |
__Example__
```go
// Prepare source decryption key (here we assume same key to
// decrypt all source objects.)
decKey := minio.NewSSEInfo([]byte{1, 2, 3}, "")
// Source objects to concatenate. We also specify decryption
// key for each
src1 := minio.NewSourceInfo("bucket1", "object1", &decKey)
src1.SetMatchETagCond("31624deb84149d2f8ef9c385918b653a")
src2 := minio.NewSourceInfo("bucket2", "object2", &decKey)
src2.SetMatchETagCond("f8ef9c385918b653a31624deb84149d2")
src3 := minio.NewSourceInfo("bucket3", "object3", &decKey)
src3.SetMatchETagCond("5918b653a31624deb84149d2f8ef9c38")
// Create slice of sources.
srcs := []minio.SourceInfo{src1, src2, src3}
// Prepare destination encryption key
encKey := minio.NewSSEInfo([]byte{8, 9, 0}, "")
// Create destination info
dst, err := minio.NewDestinationInfo("bucket", "object", &encKey, nil)
if err != nil {
log.Println(err)
return
}
err = s3Client.ComposeObject(dst, srcs)
if err != nil {
log.Println(err)
return
}
log.Println("Composed object successfully.")
```
<a name="NewSourceInfo"></a>
### NewSourceInfo(bucket, object string, decryptSSEC *SSEInfo) SourceInfo
Construct a `SourceInfo` object that can be used as the source for server-side copying operations like `CopyObject` and `ComposeObject`. This object can be used to set copy-conditions on the source.
__Parameters__
| Param | Type | Description |
| :--- | :--- | :--- |
| `bucket` | _string_ | Name of the source bucket |
| `object` | _string_ | Name of the source object |
| `decryptSSEC` | _*minio.SSEInfo_ | Decryption info for the source object (`nil` without encryption) |
__Example__
``` go
// No decryption parameter.
src := NewSourceInfo("bucket", "object", nil)
// With decryption parameter.
decKey := NewSSEInfo([]byte{1, 2, 3}, "")
src = NewSourceInfo("bucket", "object", &decKey)
```
<a name="NewDestinationInfo"></a>
### NewDestinationInfo(bucket, object string, encryptSSEC *SSEInfo, userMeta map[string]string) (DestinationInfo, error)
Construct a `DestinationInfo` object that can be used as the destination object for server-side copying operations like `CopyObject` and `ComposeObject`.
__Parameters__
| Param | Type | Description |
| :--- | :--- | :--- |
| `bucket` | _string_ | Name of the destination bucket |
| `object` | _string_ | Name of the destination object |
| `encryptSSEC` | _*minio.SSEInfo_ | Encryption info for the destination object (`nil` without encryption) |
| `userMeta` | _map[string]string_ | User metadata to be set on the destination. If nil, with only one source, user-metadata is copied from source. |
__Example__
``` go
// No encryption parameter.
dst, err := NewDestinationInfo("bucket", "object", nil, nil)
// With encryption parameter.
encKey := NewSSEInfo([]byte{1, 2, 3}, "")
dst, err = NewDestinationInfo("bucket", "object", &encKey, nil)
```
<a name="FPutObject"></a>
### FPutObject(bucketName, objectName, filePath, contentType string) (length int64, err error)
@ -881,6 +1006,26 @@ if err != nil {
}
```
<a name="NewSSEInfo"></a>
### NewSSEInfo(key []byte, algo string) SSEInfo
Create a key object for use as encryption or decryption parameter in operations involving server-side-encryption with customer provided key (SSE-C).
__Parameters__
| Param | Type | Description |
| :--- | :--- | :--- |
| `key` | _[]byte_ | Byte-slice of the raw, un-encoded binary key |
| `algo` | _string_ | Algorithm to use in encryption or decryption with the given key. Can be empty (defaults to `AES256`) |
__Example__
``` go
// Key for use in encryption/decryption
keyInfo := NewSSEInfo([]byte{1,2,3}, "")
```
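Note that `NewSourceInfo` and `NewDestinationInfo` take a pointer to this value (e.g. `&keyInfo`), with `nil` meaning no encryption.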
## 5. Presigned operations
<a name="PresignedGetObject"></a>

View File

@ -0,0 +1,77 @@
// +build ignore
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"log"
minio "github.com/minio/minio-go"
)
func main() {
// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-testfile, my-bucketname and
// my-objectname are dummy values, please replace them with original values.
// Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
// This boolean value is the last argument for New().
// New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
// determined based on the Endpoint value.
s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
if err != nil {
log.Fatalln(err)
}
// Enable trace.
// s3Client.TraceOn(os.Stderr)
// Prepare source decryption key (here we assume same key to
// decrypt all source objects.)
decKey := minio.NewSSEInfo([]byte{1, 2, 3}, "")
// Source objects to concatenate. We also specify decryption
// key for each
src1 := minio.NewSourceInfo("bucket1", "object1", &decKey)
src1.SetMatchETagCond("31624deb84149d2f8ef9c385918b653a")
src2 := minio.NewSourceInfo("bucket2", "object2", &decKey)
src2.SetMatchETagCond("f8ef9c385918b653a31624deb84149d2")
src3 := minio.NewSourceInfo("bucket3", "object3", &decKey)
src3.SetMatchETagCond("5918b653a31624deb84149d2f8ef9c38")
// Create slice of sources.
srcs := []minio.SourceInfo{src1, src2, src3}
// Prepare destination encryption key
encKey := minio.NewSSEInfo([]byte{8, 9, 0}, "")
// Create destination info
dst, err := minio.NewDestinationInfo("bucket", "object", &encKey, nil)
if err != nil {
log.Fatalln(err)
}
err = s3Client.ComposeObject(dst, srcs)
if err != nil {
log.Fatalln(err)
}
log.Println("Composed object successfully.")
}

View File

@ -42,24 +42,31 @@ func main() {
// Enable trace.
// s3Client.TraceOn(os.Stderr)
// Source object
src := minio.NewSourceInfo("my-sourcebucketname", "my-sourceobjectname", nil)
// All following conditions are allowed and can be combined together.
// Set copy conditions.
var copyConds = minio.CopyConditions{}
// Set modified condition, copy object modified since 2014 April.
copyConds.SetModified(time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC))
src.SetModifiedSinceCond(time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC))
// Set unmodified condition, copy object unmodified since 2014 April.
// copyConds.SetUnmodified(time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC))
// src.SetUnmodifiedSinceCond(time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC))
// Set matching ETag condition, copy object which matches the following ETag.
// copyConds.SetMatchETag("31624deb84149d2f8ef9c385918b653a")
// src.SetMatchETagCond("31624deb84149d2f8ef9c385918b653a")
// Set matching ETag except condition, copy object which does not match the following ETag.
// copyConds.SetMatchETagExcept("31624deb84149d2f8ef9c385918b653a")
// src.SetMatchETagExceptCond("31624deb84149d2f8ef9c385918b653a")
// Destination object
dst, err := minio.NewDestinationInfo("my-bucketname", "my-objectname", nil, nil)
if err != nil {
log.Fatalln(err)
}
// Initiate copy object.
err = s3Client.CopyObject("my-bucketname", "my-objectname", "/my-sourcebucketname/my-sourceobjectname", copyConds)
err = s3Client.CopyObject(dst, src)
if err != nil {
log.Fatalln(err)
}

View File

@ -55,7 +55,9 @@ func main() {
progress := pb.New64(objectInfo.Size)
progress.Start()
n, err := s3Client.PutObjectWithProgress("my-bucketname", "my-objectname-progress", reader, "application/octet-stream", progress)
n, err := s3Client.PutObjectWithProgress("my-bucketname", "my-objectname-progress", reader, map[string][]string{
"Content-Type": []string{"application/octet-stream"},
}, progress)
if err != nil {
log.Fatalln(err)
}

View File

@ -40,7 +40,7 @@ func main() {
}
// Enable S3 transfer accelerate endpoint.
s3Client.S3TransferAccelerate("s3-accelerate.amazonaws.com")
s3Client.SetS3TransferAccelerate("s3-accelerate.amazonaws.com")
object, err := os.Open("my-testfile")
if err != nil {

File diff suppressed because it is too large Load Diff

View File

@ -99,7 +99,7 @@ func prepareStreamingRequest(req *http.Request, sessionToken string, dataLen int
if sessionToken != "" {
req.Header.Set("X-Amz-Security-Token", sessionToken)
}
req.Header.Set("Content-Encoding", streamingEncoding)
req.Header.Add("Content-Encoding", streamingEncoding)
req.Header.Set("X-Amz-Date", timestamp.Format(iso8601DateFormat))
// Set content length with streaming signature for each chunk included.
@ -254,7 +254,18 @@ func (s *StreamingReader) Read(buf []byte) (int, error) {
s.chunkBufLen = 0
for {
n1, err := s.baseReadCloser.Read(s.chunkBuf[s.chunkBufLen:])
if err == nil || err == io.ErrUnexpectedEOF {
// Usually we validate `err` first, but in this case
// we are validating n > 0 for the following reasons.
//
// 1. n > 0, err is one of io.EOF, nil (near end of stream)
// A Reader returning a non-zero number of bytes at the end
// of the input stream may return either err == EOF or err == nil
//
// 2. n == 0, err is io.EOF (actual end of stream)
//
// Callers should always process the n > 0 bytes returned
// before considering the error err.
if n1 > 0 {
s.chunkBufLen += n1
s.bytesRead += int64(n1)
@ -265,25 +276,26 @@ func (s *StreamingReader) Read(buf []byte) (int, error) {
s.signChunk(s.chunkBufLen)
break
}
}
if err != nil {
if err == io.EOF {
// No more data left in baseReader - last chunk.
// Done reading the last chunk from baseReader.
s.done = true
} else if err == io.EOF {
// No more data left in baseReader - last chunk.
// Done reading the last chunk from baseReader.
s.done = true
// bytes read from baseReader different than
// content length provided.
if s.bytesRead != s.contentLen {
return 0, io.ErrUnexpectedEOF
}
// bytes read from baseReader different than
// content length provided.
if s.bytesRead != s.contentLen {
return 0, io.ErrUnexpectedEOF
// Sign the chunk and write it to s.buf.
s.signChunk(0)
break
}
// Sign the chunk and write it to s.buf.
s.signChunk(0)
break
} else {
return 0, err
}
}
}
return s.buf.Read(buf)
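The comment above restates the io.Reader contract: process the n > 0 bytes before inspecting err, because a reader may return data and io.EOF from the same call. A minimal loop honoring that contract:
```go
package main

import (
	"fmt"
	"io"
	"strings"
)

func main() {
	r := strings.NewReader("stream payload")
	buf := make([]byte, 4)
	for {
		n, err := r.Read(buf)
		// Consume the n > 0 bytes first, then look at err.
		if n > 0 {
			fmt.Printf("chunk: %q\n", buf[:n])
		}
		if err == io.EOF {
			break // actual end of stream
		}
		if err != nil {
			fmt.Println("read error:", err)
			return
		}
	}
}
```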

View File

@ -1,60 +0,0 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package minio
import (
"io/ioutil"
"os"
"sync"
)
// tempFile - temporary file container.
type tempFile struct {
*os.File
mutex *sync.Mutex
}
// newTempFile returns a new temporary file, once closed it automatically deletes itself.
func newTempFile(prefix string) (*tempFile, error) {
// use platform specific temp directory.
file, err := ioutil.TempFile(os.TempDir(), prefix)
if err != nil {
return nil, err
}
return &tempFile{
File: file,
mutex: &sync.Mutex{},
}, nil
}
// Close - closer wrapper to close and remove temporary file.
func (t *tempFile) Close() error {
t.mutex.Lock()
defer t.mutex.Unlock()
if t.File != nil {
// Close the file.
if err := t.File.Close(); err != nil {
return err
}
// Remove file.
if err := os.Remove(t.File.Name()); err != nil {
return err
}
t.File = nil
}
return nil
}

View File

@ -64,11 +64,11 @@ func encodeResponse(response interface{}) []byte {
return bytesBuffer.Bytes()
}
// Convert string to bool and always return true if any error
// Convert string to bool and always return false if any error
func mustParseBool(str string) bool {
b, err := strconv.ParseBool(str)
if err != nil {
return true
return false
}
return b
}

View File

@ -0,0 +1,48 @@
// +build go1.7 go1.8
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage
* (C) 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package minio
import (
"net"
"net/http"
"time"
)
// This default transport is similar to http.DefaultTransport
// but with additional DisableCompression:
var defaultMinioTransport http.RoundTripper = &http.Transport{
Proxy: http.ProxyFromEnvironment,
DialContext: (&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
DualStack: true,
}).DialContext,
MaxIdleConns: 100,
IdleConnTimeout: 90 * time.Second,
TLSHandshakeTimeout: 10 * time.Second,
ExpectContinueTimeout: 1 * time.Second,
// Set this value so that the underlying transport round-tripper
// doesn't try to auto decode the body of objects with
// content-encoding set to `gzip`.
//
// Refer:
// https://golang.org/src/net/http/transport.go?h=roundTrip#L1843
DisableCompression: true,
}
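Callers who need different behavior can swap this transport in through SetCustomTransport (listed in the API table earlier in this diff); a minimal sketch, assuming its conventional http.RoundTripper argument:
```go
package main

import (
	"log"
	"net/http"
	"time"

	minio "github.com/minio/minio-go"
)

func main() {
	// Placeholder endpoint and credentials.
	c, err := minio.New("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatalln(err)
	}
	// Override the default transport, keeping DisableCompression
	// so gzip-encoded objects are not transparently decoded.
	c.SetCustomTransport(&http.Transport{
		Proxy:               http.ProxyFromEnvironment,
		TLSHandshakeTimeout: 5 * time.Second,
		DisableCompression:  true,
	})
	// ... use c as usual.
}
```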

View File

@ -0,0 +1,39 @@
// +build go1.5,!go1.6,!go1.7,!go1.8
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage
* (C) 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
 */

package minio

import (
	"net/http"
	"time"
)

// This default transport is similar to http.DefaultTransport,
// but additionally disables compression:
var defaultMinioTransport http.RoundTripper = &http.Transport{
	Proxy:               http.ProxyFromEnvironment,
	TLSHandshakeTimeout: 10 * time.Second,
	// Set this value so that the underlying transport round-tripper
	// doesn't try to auto-decode the body of objects with
	// content-encoding set to `gzip`.
	//
	// Refer:
	// https://golang.org/src/net/http/transport.go?h=roundTrip#L1843
	DisableCompression: true,
}

View File

@ -0,0 +1,40 @@
// +build go1.6,!go1.7,!go1.8

/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage
* (C) 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
 */

package minio

import (
	"net/http"
	"time"
)

// This default transport is similar to http.DefaultTransport,
// but additionally disables compression:
var defaultMinioTransport http.RoundTripper = &http.Transport{
	Proxy:                 http.ProxyFromEnvironment,
	TLSHandshakeTimeout:   10 * time.Second,
	ExpectContinueTimeout: 1 * time.Second,
	// Set this value so that the underlying transport round-tripper
	// doesn't try to auto-decode the body of objects with
	// content-encoding set to `gzip`.
	//
	// Refer:
	// https://golang.org/src/net/http/transport.go?h=roundTrip#L1843
	DisableCompression: true,
}

View File

@ -122,7 +122,7 @@ func isValidEndpointURL(endpointURL url.URL) error {
if endpointURL.Path != "/" && endpointURL.Path != "" {
return ErrInvalidArgument("Endpoint url cannot have fully qualified paths.")
}
if strings.Contains(endpointURL.Host, ".amazonaws.com") {
if strings.Contains(endpointURL.Host, ".s3.amazonaws.com") {
if !s3utils.IsAmazonEndpoint(endpointURL) {
return ErrInvalidArgument("Amazon S3 endpoint should be 's3.amazonaws.com'.")
}
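
The tightened substring means the strict Amazon-endpoint check now applies only to hosts under the S3 service domain, so other *.amazonaws.com hosts (for example ELB frontends) validate, as the updated tests below confirm. A standalone sketch of just the predicate:

package main

import (
	"fmt"
	"strings"
)

func main() {
	for _, host := range []string{
		"mybalancer.us-east-1.elb.amazonaws.com", // no longer caught by the S3 check
		"z3.amazonaws.com",                       // no longer caught either
		"bucket.s3.amazonaws.com",                // still subject to the S3 endpoint check
	} {
		fmt.Println(host, strings.Contains(host, ".s3.amazonaws.com"))
	}
}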

View File

@ -84,9 +84,9 @@ func TestGetEndpointURL(t *testing.T) {
{"s3.cn-north-1.amazonaws.com.cn", false, "http://s3.cn-north-1.amazonaws.com.cn", nil, true},
{"192.168.1.1:9000", false, "http://192.168.1.1:9000", nil, true},
{"192.168.1.1:9000", true, "https://192.168.1.1:9000", nil, true},
{"s3.amazonaws.com:443", true, "https://s3.amazonaws.com:443", nil, true},
{"13333.123123.-", true, "", ErrInvalidArgument(fmt.Sprintf("Endpoint: %s does not follow ip address or domain name standards.", "13333.123123.-")), false},
{"13333.123123.-", true, "", ErrInvalidArgument(fmt.Sprintf("Endpoint: %s does not follow ip address or domain name standards.", "13333.123123.-")), false},
{"s3.amazonaws.com:443", true, "", ErrInvalidArgument("Amazon S3 endpoint should be 's3.amazonaws.com'."), false},
{"storage.googleapis.com:4000", true, "", ErrInvalidArgument("Google Cloud Storage endpoint should be 'storage.googleapis.com'."), false},
{"s3.aamzza.-", true, "", ErrInvalidArgument(fmt.Sprintf("Endpoint: %s does not follow ip address or domain name standards.", "s3.aamzza.-")), false},
{"", true, "", ErrInvalidArgument("Endpoint: does not follow ip address or domain name standards."), false},
@ -132,10 +132,11 @@ func TestIsValidEndpointURL(t *testing.T) {
{"https://s3-fips-us-gov-west-1.amazonaws.com", nil, true},
{"https://s3.amazonaws.com/", nil, true},
{"https://storage.googleapis.com/", nil, true},
{"https://z3.amazonaws.com", nil, true},
{"https://mybalancer.us-east-1.elb.amazonaws.com", nil, true},
{"192.168.1.1", ErrInvalidArgument("Endpoint url cannot have fully qualified paths."), false},
{"https://amazon.googleapis.com/", ErrInvalidArgument("Google Cloud Storage endpoint should be 'storage.googleapis.com'."), false},
{"https://storage.googleapis.com/bucket/", ErrInvalidArgument("Endpoint url cannot have fully qualified paths."), false},
{"https://z3.amazonaws.com", ErrInvalidArgument("Amazon S3 endpoint should be 's3.amazonaws.com'."), false},
{"https://s3.amazonaws.com/bucket/object", ErrInvalidArgument("Endpoint url cannot have fully qualified paths."), false},
}