Commit 2b1c6d3cf8 (mirror of https://github.com/octoleo/restic.git)
@@ -91,7 +91,7 @@ func (env *TravisEnvironment) Prepare() error {
         "golang.org/x/tools/cmd/cover",
         "github.com/pierrre/gotestcover",
         "github.com/NebulousLabs/glyphcheck",
-        "github.com/restic/rest-server",
+        "github.com/restic/rest-server/cmd/rest-server",
     }
 
     for _, pkg := range pkgs {
vendor/manifest (vendored, 2 changed lines)

@@ -46,7 +46,7 @@
         {
             "importpath": "github.com/minio/minio-go",
             "repository": "https://github.com/minio/minio-go",
-            "revision": "b752793c53c56d2d3f9002dc971e998e08335fc1",
+            "revision": "fe53a65ebc43b5d22626b29a19a3de81170e42d3",
             "branch": "master"
         },
         {
@@ -20,15 +20,17 @@ import (
     "io"
     "os"
     "path/filepath"
+
+    "github.com/minio/minio-go/pkg/s3utils"
 )
 
 // FGetObject - download contents of an object to a local file.
 func (c Client) FGetObject(bucketName, objectName, filePath string) error {
     // Input validation.
-    if err := isValidBucketName(bucketName); err != nil {
+    if err := s3utils.CheckValidBucketName(bucketName); err != nil {
         return err
     }
-    if err := isValidObjectName(objectName); err != nil {
+    if err := s3utils.CheckValidObjectName(objectName); err != nil {
         return err
     }
 
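Note: the recurring change in this commit swaps minio-go's unexported validators (isValidBucketName, isValidObjectName, isValidObjectPrefix) for the exported helpers in github.com/minio/minio-go/pkg/s3utils, as in the hunk above. A minimal sketch of calling those helpers on their own, assuming only the import path and function names visible in this diff (each returns nil or a descriptive error):

package main

import (
	"fmt"

	"github.com/minio/minio-go/pkg/s3utils"
)

func main() {
	// Both checks mirror the validation pattern used throughout the vendored client.
	if err := s3utils.CheckValidBucketName("restic-backups"); err != nil {
		fmt.Println("bad bucket name:", err)
		return
	}
	if err := s3utils.CheckValidObjectName("snapshots/abc123"); err != nil {
		fmt.Println("bad object name:", err)
		return
	}
	fmt.Println("bucket and object names look valid")
}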
@@ -26,9 +26,10 @@ import (
     "time"
 
     "github.com/minio/minio-go/pkg/encrypt"
+    "github.com/minio/minio-go/pkg/s3utils"
 )
 
-// GetEncryptedObject deciphers and streams data stored in the server after applying a specifed encryption materials,
+// GetEncryptedObject deciphers and streams data stored in the server after applying a specified encryption materials,
 // returned stream should be closed by the caller.
 func (c Client) GetEncryptedObject(bucketName, objectName string, encryptMaterials encrypt.Materials) (io.ReadCloser, error) {
     if encryptMaterials == nil {
@@ -57,10 +58,10 @@ func (c Client) GetEncryptedObject(bucketName, objectName string, encryptMateria
 // GetObject - returns an seekable, readable object.
 func (c Client) GetObject(bucketName, objectName string) (*Object, error) {
     // Input validation.
-    if err := isValidBucketName(bucketName); err != nil {
+    if err := s3utils.CheckValidBucketName(bucketName); err != nil {
         return nil, err
     }
-    if err := isValidObjectName(objectName); err != nil {
+    if err := s3utils.CheckValidObjectName(objectName); err != nil {
         return nil, err
     }
 
@@ -627,10 +628,10 @@ func newObject(reqCh chan<- getRequest, resCh <-chan getResponse, doneCh chan<-
 // go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35.
 func (c Client) getObject(bucketName, objectName string, reqHeaders RequestHeaders) (io.ReadCloser, ObjectInfo, error) {
     // Validate input arguments.
-    if err := isValidBucketName(bucketName); err != nil {
+    if err := s3utils.CheckValidBucketName(bucketName); err != nil {
         return nil, ObjectInfo{}, err
     }
-    if err := isValidObjectName(objectName); err != nil {
+    if err := s3utils.CheckValidObjectName(objectName); err != nil {
         return nil, ObjectInfo{}, err
     }
 
@@ -23,15 +23,16 @@ import (
     "net/url"
 
     "github.com/minio/minio-go/pkg/policy"
+    "github.com/minio/minio-go/pkg/s3utils"
 )
 
 // GetBucketPolicy - get bucket policy at a given path.
 func (c Client) GetBucketPolicy(bucketName, objectPrefix string) (bucketPolicy policy.BucketPolicy, err error) {
     // Input validation.
-    if err := isValidBucketName(bucketName); err != nil {
+    if err := s3utils.CheckValidBucketName(bucketName); err != nil {
         return policy.BucketPolicyNone, err
     }
-    if err := isValidObjectPrefix(objectPrefix); err != nil {
+    if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil {
         return policy.BucketPolicyNone, err
     }
     policyInfo, err := c.getBucketPolicy(bucketName)
@@ -48,10 +49,10 @@ func (c Client) GetBucketPolicy(bucketName, objectPrefix string) (bucketPolicy p
 // ListBucketPolicies - list all policies for a given prefix and all its children.
 func (c Client) ListBucketPolicies(bucketName, objectPrefix string) (bucketPolicies map[string]policy.BucketPolicy, err error) {
     // Input validation.
-    if err := isValidBucketName(bucketName); err != nil {
+    if err := s3utils.CheckValidBucketName(bucketName); err != nil {
         return map[string]policy.BucketPolicy{}, err
     }
-    if err := isValidObjectPrefix(objectPrefix); err != nil {
+    if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil {
         return map[string]policy.BucketPolicy{}, err
     }
     policyInfo, err := c.getBucketPolicy(bucketName)
vendor/src/github.com/minio/minio-go/api-list.go (vendored, 46 changed lines)
@@ -17,10 +17,13 @@
 package minio
 
 import (
+    "errors"
     "fmt"
     "net/http"
     "net/url"
     "strings"
+
+    "github.com/minio/minio-go/pkg/s3utils"
 )
 
 // ListBuckets list all buckets owned by this authenticated user.
@@ -84,18 +87,21 @@ func (c Client) ListObjectsV2(bucketName, objectPrefix string, recursive bool, d
         // If recursive we do not delimit.
         delimiter = ""
     }
+
     // Return object owner information by default
     fetchOwner := true
+
     // Validate bucket name.
-    if err := isValidBucketName(bucketName); err != nil {
+    if err := s3utils.CheckValidBucketName(bucketName); err != nil {
         defer close(objectStatCh)
         objectStatCh <- ObjectInfo{
             Err: err,
         }
         return objectStatCh
     }
+
     // Validate incoming object prefix.
-    if err := isValidObjectPrefix(objectPrefix); err != nil {
+    if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil {
         defer close(objectStatCh)
         objectStatCh <- ObjectInfo{
             Err: err,
@@ -120,7 +126,6 @@ func (c Client) ListObjectsV2(bucketName, objectPrefix string, recursive bool, d
 
             // If contents are available loop through and send over channel.
             for _, object := range result.Contents {
-                // Save the marker.
                 select {
                 // Send object content.
                 case objectStatCh <- object:
@@ -133,12 +138,12 @@ func (c Client) ListObjectsV2(bucketName, objectPrefix string, recursive bool, d
             // Send all common prefixes if any.
             // NOTE: prefixes are only present if the request is delimited.
             for _, obj := range result.CommonPrefixes {
-                object := ObjectInfo{}
-                object.Key = obj.Prefix
-                object.Size = 0
                 select {
                 // Send object prefixes.
-                case objectStatCh <- object:
+                case objectStatCh <- ObjectInfo{
+                    Key:  obj.Prefix,
+                    Size: 0,
+                }:
                 // If receives done from the caller, return here.
                 case <-doneCh:
                     return
@@ -170,11 +175,11 @@ func (c Client) ListObjectsV2(bucketName, objectPrefix string, recursive bool, d
 // ?max-keys - Sets the maximum number of keys returned in the response body.
 func (c Client) listObjectsV2Query(bucketName, objectPrefix, continuationToken string, fetchOwner bool, delimiter string, maxkeys int) (ListBucketV2Result, error) {
     // Validate bucket name.
-    if err := isValidBucketName(bucketName); err != nil {
+    if err := s3utils.CheckValidBucketName(bucketName); err != nil {
         return ListBucketV2Result{}, err
     }
     // Validate object prefix.
-    if err := isValidObjectPrefix(objectPrefix); err != nil {
+    if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil {
         return ListBucketV2Result{}, err
     }
     // Get resources properly escaped and lined up before
@@ -227,10 +232,17 @@ func (c Client) listObjectsV2Query(bucketName, objectPrefix, continuationToken s
 
     // Decode listBuckets XML.
     listBucketResult := ListBucketV2Result{}
-    err = xmlDecoder(resp.Body, &listBucketResult)
-    if err != nil {
+    if err = xmlDecoder(resp.Body, &listBucketResult); err != nil {
         return listBucketResult, err
     }
+
+    // This is an additional verification check to make
+    // sure proper responses are received.
+    if listBucketResult.IsTruncated && listBucketResult.NextContinuationToken == "" {
+        return listBucketResult, errors.New("Truncated response should have continuation token set")
+    }
+
+    // Success.
     return listBucketResult, nil
 }
 
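Note: the hunk above makes listObjectsV2Query fail fast when S3 reports a truncated listing without a continuation token. Below is the shape of that decode-then-verify check with hypothetical local types, since ListBucketV2Result and xmlDecoder are internal to the vendored client:

package main

import (
	"encoding/xml"
	"errors"
	"fmt"
	"strings"
)

// listResult is a stand-in for the relevant fields of the real ListBucketV2Result.
type listResult struct {
	IsTruncated           bool   `xml:"IsTruncated"`
	NextContinuationToken string `xml:"NextContinuationToken"`
}

func decodeList(r *strings.Reader) (listResult, error) {
	var res listResult
	if err := xml.NewDecoder(r).Decode(&res); err != nil {
		return res, err
	}
	// A truncated listing without a continuation token cannot be paged any
	// further, so treat it as a malformed response instead of looping forever.
	if res.IsTruncated && res.NextContinuationToken == "" {
		return res, errors.New("truncated response should have continuation token set")
	}
	return res, nil
}

func main() {
	r := strings.NewReader("<ListBucketResult><IsTruncated>true</IsTruncated></ListBucketResult>")
	_, err := decodeList(r)
	fmt.Println(err)
}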
@@ -266,7 +278,7 @@ func (c Client) ListObjects(bucketName, objectPrefix string, recursive bool, don
         delimiter = ""
     }
     // Validate bucket name.
-    if err := isValidBucketName(bucketName); err != nil {
+    if err := s3utils.CheckValidBucketName(bucketName); err != nil {
         defer close(objectStatCh)
         objectStatCh <- ObjectInfo{
             Err: err,
@@ -274,7 +286,7 @@ func (c Client) ListObjects(bucketName, objectPrefix string, recursive bool, don
         return objectStatCh
     }
     // Validate incoming object prefix.
-    if err := isValidObjectPrefix(objectPrefix); err != nil {
+    if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil {
         defer close(objectStatCh)
         objectStatCh <- ObjectInfo{
             Err: err,
@@ -350,11 +362,11 @@ func (c Client) ListObjects(bucketName, objectPrefix string, recursive bool, don
 // ?max-keys - Sets the maximum number of keys returned in the response body.
 func (c Client) listObjectsQuery(bucketName, objectPrefix, objectMarker, delimiter string, maxkeys int) (ListBucketResult, error) {
     // Validate bucket name.
-    if err := isValidBucketName(bucketName); err != nil {
+    if err := s3utils.CheckValidBucketName(bucketName); err != nil {
         return ListBucketResult{}, err
     }
     // Validate object prefix.
-    if err := isValidObjectPrefix(objectPrefix); err != nil {
+    if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil {
         return ListBucketResult{}, err
     }
     // Get resources properly escaped and lined up before
@@ -442,7 +454,7 @@ func (c Client) listIncompleteUploads(bucketName, objectPrefix string, recursive
         delimiter = ""
     }
     // Validate bucket name.
-    if err := isValidBucketName(bucketName); err != nil {
+    if err := s3utils.CheckValidBucketName(bucketName); err != nil {
         defer close(objectMultipartStatCh)
         objectMultipartStatCh <- ObjectMultipartInfo{
             Err: err,
@@ -450,7 +462,7 @@ func (c Client) listIncompleteUploads(bucketName, objectPrefix string, recursive
         return objectMultipartStatCh
     }
     // Validate incoming object prefix.
-    if err := isValidObjectPrefix(objectPrefix); err != nil {
+    if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil {
         defer close(objectMultipartStatCh)
         objectMultipartStatCh <- ObjectMultipartInfo{
             Err: err,
@@ -30,7 +30,7 @@ import (
 // GetBucketNotification - get bucket notification at a given path.
 func (c Client) GetBucketNotification(bucketName string) (bucketNotification BucketNotification, err error) {
     // Input validation.
-    if err := isValidBucketName(bucketName); err != nil {
+    if err := s3utils.CheckValidBucketName(bucketName); err != nil {
         return BucketNotification{}, err
     }
     notification, err := c.getBucketNotification(bucketName)
@@ -140,7 +140,7 @@ func (c Client) ListenBucketNotification(bucketName, prefix, suffix string, even
     defer close(notificationInfoCh)
 
     // Validate the bucket name.
-    if err := isValidBucketName(bucketName); err != nil {
+    if err := s3utils.CheckValidBucketName(bucketName); err != nil {
         notificationInfoCh <- NotificationInfo{
             Err: err,
         }
@@ -155,7 +155,7 @@ func (c Client) ListenBucketNotification(bucketName, prefix, suffix string, even
         return
     }
 
-    // Continously run and listen on bucket notification.
+    // Continuously run and listen on bucket notification.
     // Create a done channel to control 'ListObjects' go routine.
     retryDoneCh := make(chan struct{}, 1)
 
@@ -42,10 +42,10 @@ func (c Client) presignURL(method string, bucketName string, objectName string,
     if method == "" {
         return nil, ErrInvalidArgument("method cannot be empty.")
     }
-    if err := isValidBucketName(bucketName); err != nil {
+    if err := s3utils.CheckValidBucketName(bucketName); err != nil {
         return nil, err
     }
-    if err := isValidObjectName(objectName); err != nil {
+    if err := s3utils.CheckValidObjectName(objectName); err != nil {
         return nil, err
     }
     if err := isValidExpiry(expires); err != nil {
@@ -26,6 +26,7 @@ import (
     "net/url"
 
     "github.com/minio/minio-go/pkg/policy"
+    "github.com/minio/minio-go/pkg/s3utils"
 )
 
 /// Bucket operations
@@ -46,7 +47,7 @@ func (c Client) MakeBucket(bucketName string, location string) (err error) {
     }()
 
     // Validate the input arguments.
-    if err := isValidBucketName(bucketName); err != nil {
+    if err := s3utils.CheckValidBucketNameStrict(bucketName); err != nil {
         return err
     }
 
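Note: MakeBucket above is the one call site that switches to the stricter validator, CheckValidBucketNameStrict, while read paths keep CheckValidBucketName. A hedged illustration that simply prints what each validator returns for a couple of names; the exact rules live in the vendored s3utils package and are not restated here:

package main

import (
	"fmt"

	"github.com/minio/minio-go/pkg/s3utils"
)

func main() {
	for _, name := range []string{"restic-backups", "Not_A_Valid_Bucket"} {
		// Print whatever each validator reports rather than asserting the outcome.
		fmt.Printf("%-20s permissive: %v\n", name, s3utils.CheckValidBucketName(name))
		fmt.Printf("%-20s strict:     %v\n", name, s3utils.CheckValidBucketNameStrict(name))
	}
}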
@@ -59,17 +60,6 @@ func (c Client) MakeBucket(bucketName string, location string) (err error) {
             location = c.region
         }
     }
-
-    // Try creating bucket with the provided region, in case of
-    // invalid region error let's guess the appropriate region
-    // from S3 API headers
-
-    // Create a done channel to control 'newRetryTimer' go routine.
-    doneCh := make(chan struct{}, 1)
-
-    // Indicate to our routine to exit cleanly upon return.
-    defer close(doneCh)
-
     // PUT bucket request metadata.
     reqMetadata := requestMetadata{
         bucketName: bucketName,
@@ -118,10 +108,10 @@ func (c Client) MakeBucket(bucketName string, location string) (err error) {
 // writeonly - anonymous put/delete access to a given object prefix.
 func (c Client) SetBucketPolicy(bucketName string, objectPrefix string, bucketPolicy policy.BucketPolicy) error {
     // Input validation.
-    if err := isValidBucketName(bucketName); err != nil {
+    if err := s3utils.CheckValidBucketName(bucketName); err != nil {
         return err
     }
-    if err := isValidObjectPrefix(objectPrefix); err != nil {
+    if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil {
         return err
     }
 
@@ -150,7 +140,7 @@ func (c Client) SetBucketPolicy(bucketName string, objectPrefix string, bucketPo
 // Saves a new bucket policy.
 func (c Client) putBucketPolicy(bucketName string, policyInfo policy.BucketAccessPolicy) error {
     // Input validation.
-    if err := isValidBucketName(bucketName); err != nil {
+    if err := s3utils.CheckValidBucketName(bucketName); err != nil {
         return err
     }
 
@@ -196,7 +186,7 @@ func (c Client) putBucketPolicy(bucketName string, policyInfo policy.BucketAcces
 // Removes all policies on a bucket.
 func (c Client) removeBucketPolicy(bucketName string) error {
     // Input validation.
-    if err := isValidBucketName(bucketName); err != nil {
+    if err := s3utils.CheckValidBucketName(bucketName); err != nil {
         return err
     }
     // Get resources properly escaped and lined up before
@@ -220,7 +210,7 @@ func (c Client) removeBucketPolicy(bucketName string) error {
 // SetBucketNotification saves a new bucket notification.
 func (c Client) SetBucketNotification(bucketName string, bucketNotification BucketNotification) error {
     // Input validation.
-    if err := isValidBucketName(bucketName); err != nil {
+    if err := s3utils.CheckValidBucketName(bucketName); err != nil {
         return err
     }
 
@@ -23,6 +23,8 @@ import (
     "io/ioutil"
     "math"
     "os"
+
+    "github.com/minio/minio-go/pkg/s3utils"
 )
 
 // Verify if reader is *os.File
@@ -43,23 +45,6 @@ func isReadAt(reader io.Reader) (ok bool) {
     return
 }
 
-// shouldUploadPart - verify if part should be uploaded.
-func shouldUploadPart(objPart ObjectPart, uploadReq uploadPartReq) bool {
-    // If part not found should upload the part.
-    if uploadReq.Part == nil {
-        return true
-    }
-    // if size mismatches should upload the part.
-    if objPart.Size != uploadReq.Part.Size {
-        return true
-    }
-    // if md5sum mismatches should upload the part.
-    if objPart.ETag != uploadReq.Part.ETag {
-        return true
-    }
-    return false
-}
-
 // optimalPartInfo - calculate the optimal part info for a given
 // object size.
 //
@@ -168,10 +153,10 @@ func hashCopyN(hashAlgorithms map[string]hash.Hash, hashSums map[string][]byte,
 // or initiate a new request to fetch a new upload id.
 func (c Client) newUploadID(bucketName, objectName string, metaData map[string][]string) (uploadID string, err error) {
     // Input validation.
-    if err := isValidBucketName(bucketName); err != nil {
+    if err := s3utils.CheckValidBucketName(bucketName); err != nil {
         return "", err
     }
-    if err := isValidObjectName(objectName); err != nil {
+    if err := s3utils.CheckValidObjectName(objectName); err != nil {
         return "", err
     }
 
@@ -183,49 +168,6 @@ func (c Client) newUploadID(bucketName, objectName string, metaData map[string][
     return initMultipartUploadResult.UploadID, nil
 }
 
-// getMpartUploadSession returns the upload id and the uploaded parts to continue a previous upload session
-// or initiate a new multipart session if no current one found
-func (c Client) getMpartUploadSession(bucketName, objectName string, metaData map[string][]string) (string, map[int]ObjectPart, error) {
-    // A map of all uploaded parts.
-    var partsInfo map[int]ObjectPart
-    var err error
-
-    uploadID, err := c.findUploadID(bucketName, objectName)
-    if err != nil {
-        return "", nil, err
-    }
-
-    if uploadID == "" {
-        // Initiates a new multipart request
-        uploadID, err = c.newUploadID(bucketName, objectName, metaData)
-        if err != nil {
-            return "", nil, err
-        }
-    } else {
-        // Fetch previously upload parts and maximum part size.
-        partsInfo, err = c.listObjectParts(bucketName, objectName, uploadID)
-        if err != nil {
-            // When the server returns NoSuchUpload even if its previouls acknowleged the existance of the upload id,
-            // initiate a new multipart upload
-            if respErr, ok := err.(ErrorResponse); ok && respErr.Code == "NoSuchUpload" {
-                uploadID, err = c.newUploadID(bucketName, objectName, metaData)
-                if err != nil {
-                    return "", nil, err
-                }
-            } else {
-                return "", nil, err
-            }
-        }
-    }
-
-    // Allocate partsInfo if not done yet
-    if partsInfo == nil {
-        partsInfo = make(map[int]ObjectPart)
-    }
-
-    return uploadID, partsInfo, nil
-}
-
 // computeHash - Calculates hashes for an input read Seeker.
 func computeHash(hashAlgorithms map[string]hash.Hash, hashSums map[string][]byte, reader io.ReadSeeker) (size int64, err error) {
     hashWriter := ioutil.Discard
@@ -25,10 +25,10 @@ import (
 // CopyObject - copy a source object into a new object with the provided name in the provided bucket
 func (c Client) CopyObject(bucketName string, objectName string, objectSource string, cpCond CopyConditions) error {
     // Input validation.
-    if err := isValidBucketName(bucketName); err != nil {
+    if err := s3utils.CheckValidBucketName(bucketName); err != nil {
         return err
     }
-    if err := isValidObjectName(objectName); err != nil {
+    if err := s3utils.CheckValidObjectName(objectName); err != nil {
         return err
     }
     if objectSource == "" {
@@ -17,11 +17,7 @@
 package minio
 
 import (
-    "crypto/md5"
-    "crypto/sha256"
-    "encoding/hex"
     "fmt"
-    "hash"
     "io"
     "io/ioutil"
     "mime"
@@ -35,10 +31,10 @@ import (
 // FPutObject - Create an object in a bucket, with contents from file at filePath.
 func (c Client) FPutObject(bucketName, objectName, filePath, contentType string) (n int64, err error) {
     // Input validation.
-    if err := isValidBucketName(bucketName); err != nil {
+    if err := s3utils.CheckValidBucketName(bucketName); err != nil {
         return 0, err
     }
-    if err := isValidObjectName(objectName); err != nil {
+    if err := s3utils.CheckValidObjectName(objectName); err != nil {
         return 0, err
     }
 
@@ -109,22 +105,20 @@ func (c Client) FPutObject(bucketName, objectName, filePath, contentType string)
 // putObjectMultipartFromFile - Creates object from contents of *os.File
 //
 // NOTE: This function is meant to be used for readers with local
-// file as in *os.File. This function resumes by skipping all the
-// necessary parts which were already uploaded by verifying them
-// against MD5SUM of each individual parts. This function also
-// effectively utilizes file system capabilities of reading from
-// specific sections and not having to create temporary files.
+// file as in *os.File. This function effectively utilizes file
+// system capabilities of reading from specific sections and not
+// having to create temporary files.
 func (c Client) putObjectMultipartFromFile(bucketName, objectName string, fileReader io.ReaderAt, fileSize int64, metaData map[string][]string, progress io.Reader) (int64, error) {
     // Input validation.
-    if err := isValidBucketName(bucketName); err != nil {
+    if err := s3utils.CheckValidBucketName(bucketName); err != nil {
         return 0, err
     }
-    if err := isValidObjectName(objectName); err != nil {
+    if err := s3utils.CheckValidObjectName(objectName); err != nil {
         return 0, err
     }
 
-    // Get the upload id of a previously partially uploaded object or initiate a new multipart upload
-    uploadID, partsInfo, err := c.getMpartUploadSession(bucketName, objectName, metaData)
+    // Initiate a new multipart upload.
+    uploadID, err := c.newUploadID(bucketName, objectName, metaData)
     if err != nil {
         return 0, err
     }
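Note: the change above (and the matching ones further down) drops the resume-from-previous-upload path; putObjectMultipartFromFile now always asks newUploadID for a fresh multipart upload instead of calling the removed getMpartUploadSession. From a caller's point of view the signature is unchanged. A hypothetical FPutObject usage sketch, where the endpoint and credentials are placeholders and the four-argument minio.New constructor is assumed to match this vendored client version:

package main

import (
	"log"

	minio "github.com/minio/minio-go"
)

func main() {
	client, err := minio.New("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatal(err)
	}
	// Large files are split into parts and uploaded via a brand-new multipart
	// upload each time; interrupted uploads are no longer resumed part by part.
	n, err := client.FPutObject("restic-demo", "data/archive.bin", "/tmp/archive.bin", "application/octet-stream")
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("uploaded %d bytes", n)
}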
@@ -152,6 +146,9 @@ func (c Client) putObjectMultipartFromFile(bucketName, objectName string, fileRe
     // Just for readability.
     lastPartNumber := totalPartsCount
 
+    // Initialize parts uploaded map.
+    partsInfo := make(map[int]ObjectPart)
+
     // Send each part through the partUploadCh to be uploaded.
     for p := 1; p <= totalPartsCount; p++ {
         part, ok := partsInfo[p]
@@ -170,12 +167,7 @@ func (c Client) putObjectMultipartFromFile(bucketName, objectName string, fileRe
         for uploadReq := range uploadPartsCh {
             // Add hash algorithms that need to be calculated by computeHash()
             // In case of a non-v4 signature or https connection, sha256 is not needed.
-            hashAlgos := make(map[string]hash.Hash)
-            hashSums := make(map[string][]byte)
-            hashAlgos["md5"] = md5.New()
-            if c.overrideSignerType.IsV4() && !c.secure {
-                hashAlgos["sha256"] = sha256.New()
-            }
+            hashAlgos, hashSums := c.hashMaterials()
 
             // If partNumber was not uploaded we calculate the missing
             // part offset and size. For all other part numbers we
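Note: several hunks in this commit (here and in the stream and ReadAt variants below) collapse the inline md5/sha256 setup into a single c.hashMaterials() call. hashMaterials itself is not shown in this diff; the sketch below reconstructs what it presumably centralizes from the inline code it replaces, written as a standalone function that takes the signer/TLS flags as parameters instead of reading unexported Client fields:

package main

import (
	"crypto/md5"
	"crypto/sha256"
	"fmt"
	"hash"
)

// hashMaterials mirrors the removed inline block: md5 is always computed,
// sha256 only for V4-signed requests over plain (non-TLS) connections.
func hashMaterials(signerIsV4, secure bool) (map[string]hash.Hash, map[string][]byte) {
	hashAlgos := map[string]hash.Hash{"md5": md5.New()}
	hashSums := make(map[string][]byte)
	if signerIsV4 && !secure {
		hashAlgos["sha256"] = sha256.New()
	}
	return hashAlgos, hashSums
}

func main() {
	algos, _ := hashMaterials(true, false)
	for name := range algos {
		fmt.Println("will compute:", name)
	}
}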
@@ -204,36 +196,24 @@ func (c Client) putObjectMultipartFromFile(bucketName, objectName string, fileRe
                 return
             }
 
-            // Create the part to be uploaded.
-            verifyObjPart := ObjectPart{
-                ETag:       hex.EncodeToString(hashSums["md5"]),
-                PartNumber: uploadReq.PartNum,
-                Size:       partSize,
-            }
-            // If this is the last part do not give it the full part size.
-            if uploadReq.PartNum == lastPartNumber {
-                verifyObjPart.Size = lastPartSize
-            }
-
-            // Verify if part should be uploaded.
-            if shouldUploadPart(verifyObjPart, uploadReq) {
-                // Proceed to upload the part.
-                var objPart ObjectPart
-                objPart, err = c.uploadPart(bucketName, objectName, uploadID, sectionReader, uploadReq.PartNum, hashSums["md5"], hashSums["sha256"], prtSize)
-                if err != nil {
-                    uploadedPartsCh <- uploadedPartRes{
-                        Error: err,
-                    }
-                    // Exit the goroutine.
-                    return
+            // Proceed to upload the part.
+            var objPart ObjectPart
+            objPart, err = c.uploadPart(bucketName, objectName, uploadID, sectionReader, uploadReq.PartNum,
+                hashSums["md5"], hashSums["sha256"], prtSize)
+            if err != nil {
+                uploadedPartsCh <- uploadedPartRes{
+                    Error: err,
                 }
-                // Save successfully uploaded part metadata.
-                uploadReq.Part = &objPart
+                // Exit the goroutine.
+                return
             }
 
+            // Save successfully uploaded part metadata.
+            uploadReq.Part = &objPart
+
             // Return through the channel the part size.
             uploadedPartsCh <- uploadedPartRes{
-                Size:    verifyObjPart.Size,
+                Size:    missingPartSize,
                 PartNum: uploadReq.PartNum,
                 Part:    uploadReq.Part,
                 Error:   nil,
@@ -18,12 +18,8 @@ package minio
 
 import (
     "bytes"
-    "crypto/md5"
-    "crypto/sha256"
-    "encoding/hex"
     "encoding/xml"
     "fmt"
-    "hash"
     "io"
     "io/ioutil"
     "net/http"
@@ -32,9 +28,11 @@ import (
     "sort"
     "strconv"
     "strings"
+
+    "github.com/minio/minio-go/pkg/s3utils"
 )
 
-// Comprehensive put object operation involving multipart resumable uploads.
+// Comprehensive put object operation involving multipart uploads.
 //
 // Following code handles these types of readers.
 //
@@ -42,9 +40,6 @@
 // - *minio.Object
 // - Any reader which has a method 'ReadAt()'
 //
-// If we exhaust all the known types, code proceeds to use stream as
-// is where each part is re-downloaded, checksummed and verified
-// before upload.
 func (c Client) putObjectMultipart(bucketName, objectName string, reader io.Reader, size int64, metaData map[string][]string, progress io.Reader) (n int64, err error) {
     if size > 0 && size > minPartSize {
         // Verify if reader is *os.File, then use file system functionalities.
@@ -68,31 +63,22 @@ func (c Client) putObjectMultipart(bucketName, objectName string, reader io.Read
 
 // putObjectMultipartStreamNoChecksum - upload a large object using
 // multipart upload and streaming signature for signing payload.
-// N B We don't resume an incomplete multipart upload, we overwrite
-// existing parts of an incomplete upload.
 func (c Client) putObjectMultipartStreamNoChecksum(bucketName, objectName string,
     reader io.Reader, size int64, metadata map[string][]string, progress io.Reader) (int64, error) {
 
     // Input validation.
-    if err := isValidBucketName(bucketName); err != nil {
+    if err := s3utils.CheckValidBucketName(bucketName); err != nil {
         return 0, err
     }
-    if err := isValidObjectName(objectName); err != nil {
+    if err := s3utils.CheckValidObjectName(objectName); err != nil {
         return 0, err
     }
 
-    // Get the upload id of a previously partially uploaded object or initiate a new multipart upload
-    uploadID, err := c.findUploadID(bucketName, objectName)
+    // Initiates a new multipart request
+    uploadID, err := c.newUploadID(bucketName, objectName, metadata)
     if err != nil {
         return 0, err
     }
-    if uploadID == "" {
-        // Initiates a new multipart request
-        uploadID, err = c.newUploadID(bucketName, objectName, metadata)
-        if err != nil {
-            return 0, err
-        }
-    }
 
     // Calculate the optimal parts info for a given size.
     totalPartsCount, partSize, lastPartSize, err := optimalPartInfo(size)
@@ -176,10 +162,10 @@ func (c Client) putObjectMultipartStreamNoChecksum(bucketName, objectName string
 // special case where size is unknown i.e '-1'.
 func (c Client) putObjectMultipartStream(bucketName, objectName string, reader io.Reader, size int64, metaData map[string][]string, progress io.Reader) (n int64, err error) {
     // Input validation.
-    if err := isValidBucketName(bucketName); err != nil {
+    if err := s3utils.CheckValidBucketName(bucketName); err != nil {
         return 0, err
     }
-    if err := isValidObjectName(objectName); err != nil {
+    if err := s3utils.CheckValidObjectName(objectName); err != nil {
         return 0, err
     }
 
@@ -189,8 +175,8 @@ func (c Client) putObjectMultipartStream(bucketName, objectName string, reader i
     // Complete multipart upload.
     var complMultipartUpload completeMultipartUpload
 
-    // Get the upload id of a previously partially uploaded object or initiate a new multipart upload
-    uploadID, partsInfo, err := c.getMpartUploadSession(bucketName, objectName, metaData)
+    // Initiate a new multipart upload.
+    uploadID, err := c.newUploadID(bucketName, objectName, metaData)
     if err != nil {
         return 0, err
     }
@@ -207,15 +193,13 @@ func (c Client) putObjectMultipartStream(bucketName, objectName string, reader i
     // Initialize a temporary buffer.
     tmpBuffer := new(bytes.Buffer)
 
+    // Initialize parts uploaded map.
+    partsInfo := make(map[int]ObjectPart)
+
     for partNumber <= totalPartsCount {
         // Choose hash algorithms to be calculated by hashCopyN, avoid sha256
         // with non-v4 signature request or HTTPS connection
-        hashSums := make(map[string][]byte)
-        hashAlgos := make(map[string]hash.Hash)
-        hashAlgos["md5"] = md5.New()
-        if c.overrideSignerType.IsV4() && !c.secure {
-            hashAlgos["sha256"] = sha256.New()
-        }
+        hashAlgos, hashSums := c.hashMaterials()
 
         // Calculates hash sums while copying partSize bytes into tmpBuffer.
         prtSize, rErr := hashCopyN(hashAlgos, hashSums, tmpBuffer, reader, partSize)
@@ -228,31 +212,23 @@ func (c Client) putObjectMultipartStream(bucketName, objectName string, reader i
         // as we read from the source.
         reader = newHook(tmpBuffer, progress)
 
-        part, ok := partsInfo[partNumber]
+        // Proceed to upload the part.
+        var objPart ObjectPart
+        objPart, err = c.uploadPart(bucketName, objectName, uploadID, reader, partNumber, hashSums["md5"], hashSums["sha256"], prtSize)
+        if err != nil {
+            // Reset the temporary buffer upon any error.
+            tmpBuffer.Reset()
+            return totalUploadedSize, err
+        }
 
-        // Verify if part should be uploaded.
-        if !ok || shouldUploadPart(ObjectPart{
-            ETag:       hex.EncodeToString(hashSums["md5"]),
-            PartNumber: partNumber,
-            Size:       prtSize,
-        }, uploadPartReq{PartNum: partNumber, Part: &part}) {
-            // Proceed to upload the part.
-            var objPart ObjectPart
-            objPart, err = c.uploadPart(bucketName, objectName, uploadID, reader, partNumber, hashSums["md5"], hashSums["sha256"], prtSize)
-            if err != nil {
-                // Reset the temporary buffer upon any error.
-                tmpBuffer.Reset()
+        // Save successfully uploaded part metadata.
+        partsInfo[partNumber] = objPart
+
+        // Update the progress reader for the skipped part.
+        if progress != nil {
+            if _, err = io.CopyN(ioutil.Discard, progress, prtSize); err != nil {
                 return totalUploadedSize, err
             }
-            // Save successfully uploaded part metadata.
-            partsInfo[partNumber] = objPart
-        } else {
-            // Update the progress reader for the skipped part.
-            if progress != nil {
-                if _, err = io.CopyN(ioutil.Discard, progress, prtSize); err != nil {
-                    return totalUploadedSize, err
-                }
-            }
         }
 
         // Reset the temporary buffer.
@@ -305,10 +281,10 @@ func (c Client) putObjectMultipartStream(bucketName, objectName string, reader i
 // initiateMultipartUpload - Initiates a multipart upload and returns an upload ID.
 func (c Client) initiateMultipartUpload(bucketName, objectName string, metaData map[string][]string) (initiateMultipartUploadResult, error) {
     // Input validation.
-    if err := isValidBucketName(bucketName); err != nil {
+    if err := s3utils.CheckValidBucketName(bucketName); err != nil {
         return initiateMultipartUploadResult{}, err
     }
-    if err := isValidObjectName(objectName); err != nil {
+    if err := s3utils.CheckValidObjectName(objectName); err != nil {
         return initiateMultipartUploadResult{}, err
     }
 
@@ -359,10 +335,10 @@ func (c Client) initiateMultipartUpload(bucketName, objectName string, metaData
 // uploadPart - Uploads a part in a multipart upload.
 func (c Client) uploadPart(bucketName, objectName, uploadID string, reader io.Reader, partNumber int, md5Sum, sha256Sum []byte, size int64) (ObjectPart, error) {
     // Input validation.
-    if err := isValidBucketName(bucketName); err != nil {
+    if err := s3utils.CheckValidBucketName(bucketName); err != nil {
         return ObjectPart{}, err
     }
-    if err := isValidObjectName(objectName); err != nil {
+    if err := s3utils.CheckValidObjectName(objectName); err != nil {
         return ObjectPart{}, err
     }
     if size > maxPartSize {
@@ -419,10 +395,10 @@ func (c Client) uploadPart(bucketName, objectName, uploadID string, reader io.Re
 // completeMultipartUpload - Completes a multipart upload by assembling previously uploaded parts.
 func (c Client) completeMultipartUpload(bucketName, objectName, uploadID string, complete completeMultipartUpload) (completeMultipartUploadResult, error) {
     // Input validation.
-    if err := isValidBucketName(bucketName); err != nil {
+    if err := s3utils.CheckValidBucketName(bucketName); err != nil {
         return completeMultipartUploadResult{}, err
     }
-    if err := isValidObjectName(objectName); err != nil {
+    if err := s3utils.CheckValidObjectName(objectName); err != nil {
         return completeMultipartUploadResult{}, err
     }
 
@@ -58,10 +58,10 @@ func (c Client) PutEncryptedObject(bucketName, objectName string, reader io.Read
 // PutObjectWithMetadata - with metadata.
 func (c Client) PutObjectWithMetadata(bucketName, objectName string, reader io.Reader, metaData map[string][]string, progress io.Reader) (n int64, err error) {
     // Input validation.
-    if err := isValidBucketName(bucketName); err != nil {
+    if err := s3utils.CheckValidBucketName(bucketName); err != nil {
         return 0, err
     }
-    if err := isValidObjectName(objectName); err != nil {
+    if err := s3utils.CheckValidObjectName(objectName); err != nil {
         return 0, err
     }
     if reader == nil {
@@ -18,13 +18,12 @@ package minio
 
 import (
     "bytes"
-    "crypto/md5"
-    "crypto/sha256"
     "fmt"
-    "hash"
     "io"
     "io/ioutil"
     "sort"
+
+    "github.com/minio/minio-go/pkg/s3utils"
 )
 
 // uploadedPartRes - the response received from a part upload.
@@ -40,19 +39,6 @@ type uploadPartReq struct {
     Part    *ObjectPart // Size of the part uploaded.
 }
 
-// shouldUploadPartReadAt - verify if part should be uploaded.
-func shouldUploadPartReadAt(objPart ObjectPart, uploadReq uploadPartReq) bool {
-    // If part not found part should be uploaded.
-    if uploadReq.Part == nil {
-        return true
-    }
-    // if size mismatches part should be uploaded.
-    if uploadReq.Part.Size != objPart.Size {
-        return true
-    }
-    return false
-}
-
 // putObjectMultipartFromReadAt - Uploads files bigger than 5MiB. Supports reader
 // of type which implements io.ReaderAt interface (ReadAt method).
 //
@@ -65,15 +51,15 @@ func shouldUploadPartReadAt(objPart ObjectPart, uploadReq uploadPartReq) bool {
 // stream after uploading all the contents successfully.
 func (c Client) putObjectMultipartFromReadAt(bucketName, objectName string, reader io.ReaderAt, size int64, metaData map[string][]string, progress io.Reader) (n int64, err error) {
     // Input validation.
-    if err := isValidBucketName(bucketName); err != nil {
+    if err := s3utils.CheckValidBucketName(bucketName); err != nil {
         return 0, err
     }
-    if err := isValidObjectName(objectName); err != nil {
+    if err := s3utils.CheckValidObjectName(objectName); err != nil {
         return 0, err
     }
 
-    // Get the upload id of a previously partially uploaded object or initiate a new multipart upload
-    uploadID, partsInfo, err := c.getMpartUploadSession(bucketName, objectName, metaData)
+    // Initiate a new multipart upload.
+    uploadID, err := c.newUploadID(bucketName, objectName, metaData)
     if err != nil {
         return 0, err
     }
@@ -90,9 +76,6 @@ func (c Client) putObjectMultipartFromReadAt(bucketName, objectName string, read
         return 0, err
     }
 
-    // Used for readability, lastPartNumber is always totalPartsCount.
-    lastPartNumber := totalPartsCount
-
     // Declare a channel that sends the next part number to be uploaded.
     // Buffered to 10000 because thats the maximum number of parts allowed
     // by S3.
@@ -103,6 +86,12 @@ func (c Client) putObjectMultipartFromReadAt(bucketName, objectName string, read
     // by S3.
     uploadedPartsCh := make(chan uploadedPartRes, 10000)
 
+    // Used for readability, lastPartNumber is always totalPartsCount.
+    lastPartNumber := totalPartsCount
+
+    // Initialize parts uploaded map.
+    partsInfo := make(map[int]ObjectPart)
+
     // Send each part number to the channel to be processed.
     for p := 1; p <= totalPartsCount; p++ {
         part, ok := partsInfo[p]
@@ -143,12 +132,7 @@ func (c Client) putObjectMultipartFromReadAt(bucketName, objectName string, read
 
             // Choose the needed hash algorithms to be calculated by hashCopyBuffer.
             // Sha256 is avoided in non-v4 signature requests or HTTPS connections
-            hashSums := make(map[string][]byte)
-            hashAlgos := make(map[string]hash.Hash)
-            hashAlgos["md5"] = md5.New()
-            if c.overrideSignerType.IsV4() && !c.secure {
-                hashAlgos["sha256"] = sha256.New()
-            }
+            hashAlgos, hashSums := c.hashMaterials()
 
             var prtSize int64
             var err error
@ -163,37 +147,25 @@ func (c Client) putObjectMultipartFromReadAt(bucketName, objectName string, read
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
// Verify object if its uploaded.
|
// Proceed to upload the part.
|
||||||
verifyObjPart := ObjectPart{
|
var objPart ObjectPart
|
||||||
PartNumber: uploadReq.PartNum,
|
objPart, err = c.uploadPart(bucketName, objectName, uploadID, tmpBuffer,
|
||||||
Size: partSize,
|
uploadReq.PartNum, hashSums["md5"], hashSums["sha256"], prtSize)
|
||||||
}
|
if err != nil {
|
||||||
// Special case if we see a last part number, save last part
|
uploadedPartsCh <- uploadedPartRes{
|
||||||
// size as the proper part size.
|
Size: 0,
|
||||||
if uploadReq.PartNum == lastPartNumber {
|
Error: err,
|
||||||
verifyObjPart.Size = lastPartSize
|
}
|
||||||
|
// Exit the goroutine.
|
||||||
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
// Only upload the necessary parts. Otherwise return size through channel
|
// Save successfully uploaded part metadata.
|
||||||
// to update any progress bar.
|
uploadReq.Part = &objPart
|
||||||
if shouldUploadPartReadAt(verifyObjPart, uploadReq) {
|
|
||||||
// Proceed to upload the part.
|
|
||||||
var objPart ObjectPart
|
|
||||||
objPart, err = c.uploadPart(bucketName, objectName, uploadID, tmpBuffer, uploadReq.PartNum, hashSums["md5"], hashSums["sha256"], prtSize)
|
|
||||||
if err != nil {
|
|
||||||
uploadedPartsCh <- uploadedPartRes{
|
|
||||||
Size: 0,
|
|
||||||
Error: err,
|
|
||||||
}
|
|
||||||
// Exit the goroutine.
|
|
||||||
return
|
|
||||||
}
|
|
||||||
// Save successfully uploaded part metadata.
|
|
||||||
uploadReq.Part = &objPart
|
|
||||||
}
|
|
||||||
// Send successful part info through the channel.
|
// Send successful part info through the channel.
|
||||||
uploadedPartsCh <- uploadedPartRes{
|
uploadedPartsCh <- uploadedPartRes{
|
||||||
Size: verifyObjPart.Size,
|
Size: missingPartSize,
|
||||||
PartNum: uploadReq.PartNum,
|
PartNum: uploadReq.PartNum,
|
||||||
Part: uploadReq.Part,
|
Part: uploadReq.Part,
|
||||||
Error: nil,
|
Error: nil,
|
||||||
|
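For context, the hunks above lean on a fan-out/fan-in pattern: part numbers go into one buffered channel, a small pool of workers uploads them, and per-part results come back on uploadedPartsCh. A minimal, self-contained sketch of that pattern follows; it is an illustration only, not minio-go code, and the names in it are hypothetical.

    // Sketch of the channel-based part-upload pattern; not minio-go code.
    package main

    import "fmt"

    type partResult struct {
    	PartNum int
    	Size    int64
    	Err     error
    }

    func main() {
    	const totalParts = 10
    	partsCh := make(chan int, 10000)          // part numbers to upload
    	resultsCh := make(chan partResult, 10000) // results from the workers

    	// Send each part number to the channel to be processed.
    	for p := 1; p <= totalParts; p++ {
    		partsCh <- p
    	}
    	close(partsCh)

    	// Start a small pool of upload workers.
    	for w := 0; w < 3; w++ {
    		go func() {
    			for p := range partsCh {
    				// A real worker would call uploadPart here; we fake a size.
    				resultsCh <- partResult{PartNum: p, Size: 5 << 20}
    			}
    		}()
    	}

    	// Gather one result per part; any error would abort the upload.
    	var uploaded int64
    	for i := 0; i < totalParts; i++ {
    		res := <-resultsCh
    		if res.Err != nil {
    			fmt.Println("part", res.PartNum, "failed:", res.Err)
    			return
    		}
    		uploaded += res.Size
    	}
    	fmt.Println("uploaded", uploaded, "bytes in", totalParts, "parts")
    }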
@@ -17,9 +17,6 @@
 package minio
 
 import (
-	"crypto/md5"
-	"crypto/sha256"
-	"hash"
 	"io"
 	"io/ioutil"
 	"net/http"

@@ -27,6 +24,8 @@ import (
 	"reflect"
 	"runtime"
 	"strings"
+
+	"github.com/minio/minio-go/pkg/s3utils"
 )
 
 // toInt - converts go value to its integer representation based

@@ -144,14 +143,13 @@ func (a completedParts) Less(i, j int) bool { return a[i].PartNumber < a[j].Part
 //
 // You must have WRITE permissions on a bucket to create an object.
 //
-// - For size smaller than 5MiB PutObject automatically does a single atomic Put operation.
-// - For size larger than 5MiB PutObject automatically does a resumable multipart Put operation.
+// - For size smaller than 64MiB PutObject automatically does a single atomic Put operation.
+// - For size larger than 64MiB PutObject automatically does a multipart Put operation.
 // - For size input as -1 PutObject does a multipart Put operation until input stream reaches EOF.
 //   Maximum object size that can be uploaded through this operation will be 5TiB.
 //
 // NOTE: Google Cloud Storage does not implement Amazon S3 Compatible multipart PUT.
 // So we fall back to single PUT operation with the maximum limit of 5GiB.
-//
 func (c Client) PutObject(bucketName, objectName string, reader io.Reader, contentType string) (n int64, err error) {
 	return c.PutObjectWithProgress(bucketName, objectName, reader, contentType, nil)
 }

@@ -160,10 +158,10 @@ func (c Client) PutObject(bucketName, objectName string, reader io.Reader, conte
 // is used for Google Cloud Storage since Google's multipart API is not S3 compatible.
 func (c Client) putObjectNoChecksum(bucketName, objectName string, reader io.Reader, size int64, metaData map[string][]string, progress io.Reader) (n int64, err error) {
 	// Input validation.
-	if err := isValidBucketName(bucketName); err != nil {
+	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
 		return 0, err
 	}
-	if err := isValidObjectName(objectName); err != nil {
+	if err := s3utils.CheckValidObjectName(objectName); err != nil {
 		return 0, err
 	}
 	if size > 0 {

@@ -193,10 +191,10 @@ func (c Client) putObjectNoChecksum(bucketName, objectName string, reader io.Rea
 // This special function is used as a fallback when multipart upload fails.
 func (c Client) putObjectSingle(bucketName, objectName string, reader io.Reader, size int64, metaData map[string][]string, progress io.Reader) (n int64, err error) {
 	// Input validation.
-	if err := isValidBucketName(bucketName); err != nil {
+	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
 		return 0, err
 	}
-	if err := isValidObjectName(objectName); err != nil {
+	if err := s3utils.CheckValidObjectName(objectName); err != nil {
 		return 0, err
 	}
 	if size > maxSinglePutObjectSize {

@@ -209,12 +207,7 @@ func (c Client) putObjectSingle(bucketName, objectName string, reader io.Reader,
 
 	// Add the appropriate hash algorithms that need to be calculated by hashCopyN
 	// In case of non-v4 signature request or HTTPS connection, sha256 is not needed.
-	hashAlgos := make(map[string]hash.Hash)
-	hashSums := make(map[string][]byte)
-	hashAlgos["md5"] = md5.New()
-	if c.overrideSignerType.IsV4() && !c.secure {
-		hashAlgos["sha256"] = sha256.New()
-	}
+	hashAlgos, hashSums := c.hashMaterials()
 
 	// Initialize a new temporary file.
 	tmpFile, err := newTempFile("single$-putobject-single")

@@ -256,10 +249,10 @@ func (c Client) putObjectSingle(bucketName, objectName string, reader io.Reader,
 // NOTE: You must have WRITE permissions on a bucket to add an object to it.
 func (c Client) putObjectDo(bucketName, objectName string, reader io.Reader, md5Sum []byte, sha256Sum []byte, size int64, metaData map[string][]string) (ObjectInfo, error) {
 	// Input validation.
-	if err := isValidBucketName(bucketName); err != nil {
+	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
 		return ObjectInfo{}, err
 	}
-	if err := isValidObjectName(objectName); err != nil {
+	if err := s3utils.CheckValidObjectName(objectName); err != nil {
 		return ObjectInfo{}, err
 	}
 
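The doc comment above now describes a 64MiB cutover between a single PUT and a multipart PUT. A hedged usage sketch of the PutObject signature shown in this hunk; the endpoint, credentials and bucket are placeholders.

    // Usage sketch under assumed placeholder credentials; not part of this diff.
    package main

    import (
    	"log"
    	"strings"

    	minio "github.com/minio/minio-go"
    )

    func main() {
    	// New(endpoint, accessKey, secretKey, secure), as the functional tests use it.
    	c, err := minio.New("play.minio.io:9000", "ACCESS", "SECRET", true)
    	if err != nil {
    		log.Fatal(err)
    	}

    	// Small payloads go through a single atomic PUT; larger ones (or size -1
    	// readers) go through the multipart path described in the comment above.
    	body := strings.NewReader("hello restic vendoring")
    	n, err := c.PutObject("my-bucket", "hello.txt", body, "text/plain")
    	if err != nil {
    		log.Fatal(err)
    	}
    	log.Printf("wrote %d bytes", n)
    }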
@@ -22,6 +22,8 @@ import (
 	"io"
 	"net/http"
 	"net/url"
+
+	"github.com/minio/minio-go/pkg/s3utils"
 )
 
 // RemoveBucket deletes the bucket name.

@@ -30,7 +32,7 @@ import (
 // in the bucket must be deleted before successfully attempting this request.
 func (c Client) RemoveBucket(bucketName string) error {
 	// Input validation.
-	if err := isValidBucketName(bucketName); err != nil {
+	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
 		return err
 	}
 	// Execute DELETE on bucket.

@@ -57,10 +59,10 @@ func (c Client) RemoveBucket(bucketName string) error {
 // RemoveObject remove an object from a bucket.
 func (c Client) RemoveObject(bucketName, objectName string) error {
 	// Input validation.
-	if err := isValidBucketName(bucketName); err != nil {
+	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
 		return err
 	}
-	if err := isValidObjectName(objectName); err != nil {
+	if err := s3utils.CheckValidObjectName(objectName); err != nil {
 		return err
 	}
 	// Execute DELETE on objectName.

@@ -132,7 +134,7 @@ func (c Client) RemoveObjects(bucketName string, objectsCh <-chan string) <-chan
 	errorCh := make(chan RemoveObjectError, 1)
 
 	// Validate if bucket name is valid.
-	if err := isValidBucketName(bucketName); err != nil {
+	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
 		defer close(errorCh)
 		errorCh <- RemoveObjectError{
 			Err: err,

@@ -174,7 +176,7 @@ func (c Client) RemoveObjects(bucketName string, objectsCh <-chan string) <-chan
 			}
 		}
 		if count == 0 {
-			// Multi Objects Delete API doesn't accept empty object list, quit immediatly
+			// Multi Objects Delete API doesn't accept empty object list, quit immediately
 			break
 		}
 		if count < maxEntries {

@@ -212,10 +214,10 @@ func (c Client) RemoveObjects(bucketName string, objectsCh <-chan string) <-chan
 // RemoveIncompleteUpload aborts an partially uploaded object.
 func (c Client) RemoveIncompleteUpload(bucketName, objectName string) error {
 	// Input validation.
-	if err := isValidBucketName(bucketName); err != nil {
+	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
 		return err
 	}
-	if err := isValidObjectName(objectName); err != nil {
+	if err := s3utils.CheckValidObjectName(objectName); err != nil {
 		return err
 	}
 	// Find multipart upload id of the object to be aborted.

@@ -237,10 +239,10 @@ func (c Client) RemoveIncompleteUpload(bucketName, objectName string) error {
 // uploadID, all previously uploaded parts are deleted.
 func (c Client) abortMultipartUpload(bucketName, objectName, uploadID string) error {
 	// Input validation.
-	if err := isValidBucketName(bucketName); err != nil {
+	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
 		return err
 	}
-	if err := isValidObjectName(objectName); err != nil {
+	if err := s3utils.CheckValidObjectName(objectName); err != nil {
 		return err
 	}
 
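RemoveObjects, touched above, is a streaming API: callers feed object names into a channel and read RemoveObjectError values back. A hedged usage sketch follows; the ObjectName field on RemoveObjectError is assumed from upstream minio-go and is not shown in this diff.

    // Usage sketch of the channel-based batch delete; placeholder credentials.
    package main

    import (
    	"log"

    	minio "github.com/minio/minio-go"
    )

    func main() {
    	c, err := minio.New("play.minio.io:9000", "ACCESS", "SECRET", true)
    	if err != nil {
    		log.Fatal(err)
    	}

    	// Feed the object names to delete into a channel and close it when done.
    	objectsCh := make(chan string)
    	go func() {
    		defer close(objectsCh)
    		for _, name := range []string{"a.txt", "b.txt", "c.txt"} {
    			objectsCh <- name
    		}
    	}()

    	// Drain the error channel; it is closed once the batched delete finishes.
    	for rmErr := range c.RemoveObjects("my-bucket", objectsCh) {
    		// ObjectName is an assumed field name; Err appears in the hunk above.
    		log.Printf("failed to remove %q: %v", rmErr.ObjectName, rmErr.Err)
    	}
    }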
@@ -36,8 +36,8 @@ type owner struct {
 	ID          string
 }
 
-// commonPrefix container for prefix response.
-type commonPrefix struct {
+// CommonPrefix container for prefix response.
+type CommonPrefix struct {
 	Prefix string
 }
 

@@ -45,7 +45,7 @@ type commonPrefix struct {
 type ListBucketV2Result struct {
 	// A response can contain CommonPrefixes only if you have
 	// specified a delimiter.
-	CommonPrefixes []commonPrefix
+	CommonPrefixes []CommonPrefix
 	// Metadata about each object returned.
 	Contents  []ObjectInfo
 	Delimiter string

@@ -74,7 +74,7 @@ type ListBucketV2Result struct {
 type ListBucketResult struct {
 	// A response can contain CommonPrefixes only if you have
 	// specified a delimiter.
-	CommonPrefixes []commonPrefix
+	CommonPrefixes []CommonPrefix
 	// Metadata about each object returned.
 	Contents  []ObjectInfo
 	Delimiter string

@@ -116,7 +116,7 @@ type ListMultipartUploadsResult struct {
 	Prefix    string
 	Delimiter string
 	// A response can contain CommonPrefixes only if you specify a delimiter.
-	CommonPrefixes []commonPrefix
+	CommonPrefixes []CommonPrefix
 }
 
 // initiator container for who initiated multipart upload.
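Renaming commonPrefix to CommonPrefix exports the delimiter-grouping type used by the listing results above. A small hedged sketch of what that enables for code outside the minio package; the hand-built value stands in for a real listing response.

    // Sketch only: demonstrates access to the now-exported type from another package.
    package main

    import (
    	"fmt"

    	minio "github.com/minio/minio-go"
    )

    // printPrefixes reads the delimiter-grouped prefixes out of a ListBucketResult,
    // which was not possible from outside the package while commonPrefix was unexported.
    func printPrefixes(result minio.ListBucketResult) {
    	for _, cp := range result.CommonPrefixes {
    		fmt.Println("prefix:", cp.Prefix)
    	}
    }

    func main() {
    	// A hand-built result, standing in for one returned by the list API.
    	res := minio.ListBucketResult{
    		CommonPrefixes: []minio.CommonPrefix{{Prefix: "photos/2017/"}},
    	}
    	printPrefixes(res)
    }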
vendor/src/github.com/minio/minio-go/api-stat.go (vendored, 14 changed lines)

@@ -21,12 +21,14 @@ import (
 	"strconv"
 	"strings"
 	"time"
+
+	"github.com/minio/minio-go/pkg/s3utils"
 )
 
 // BucketExists verify if bucket exists and you have permission to access it.
 func (c Client) BucketExists(bucketName string) (bool, error) {
 	// Input validation.
-	if err := isValidBucketName(bucketName); err != nil {
+	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
 		return false, err
 	}
 

@@ -53,11 +55,13 @@ func (c Client) BucketExists(bucketName string) (bool, error) {
 // List of header keys to be filtered, usually
 // from all S3 API http responses.
 var defaultFilterKeys = []string{
+	"Connection",
 	"Transfer-Encoding",
 	"Accept-Ranges",
 	"Date",
 	"Server",
 	"Vary",
+	"x-amz-bucket-region",
 	"x-amz-request-id",
 	"x-amz-id-2",
 	// Add new headers to be ignored.

@@ -78,10 +82,10 @@ func extractObjMetadata(header http.Header) http.Header {
 // StatObject verifies if object exists and you have permission to access.
 func (c Client) StatObject(bucketName, objectName string) (ObjectInfo, error) {
 	// Input validation.
-	if err := isValidBucketName(bucketName); err != nil {
+	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
 		return ObjectInfo{}, err
 	}
-	if err := isValidObjectName(objectName); err != nil {
+	if err := s3utils.CheckValidObjectName(objectName); err != nil {
 		return ObjectInfo{}, err
 	}
 	reqHeaders := NewHeadReqHeaders()

@@ -91,10 +95,10 @@ func (c Client) StatObject(bucketName, objectName string) (ObjectInfo, error) {
 // Lower level API for statObject supporting pre-conditions and range headers.
 func (c Client) statObject(bucketName, objectName string, reqHeaders RequestHeaders) (ObjectInfo, error) {
 	// Input validation.
-	if err := isValidBucketName(bucketName); err != nil {
+	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
 		return ObjectInfo{}, err
 	}
-	if err := isValidObjectName(objectName); err != nil {
+	if err := s3utils.CheckValidObjectName(objectName); err != nil {
 		return ObjectInfo{}, err
 	}
 
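A hedged usage sketch of the StatObject call validated above; the Size and ContentType fields on ObjectInfo appear elsewhere in this diff, everything else here is a placeholder.

    // StatObject usage sketch; placeholder endpoint, credentials and object.
    package main

    import (
    	"log"

    	minio "github.com/minio/minio-go"
    )

    func main() {
    	c, err := minio.New("play.minio.io:9000", "ACCESS", "SECRET", true)
    	if err != nil {
    		log.Fatal(err)
    	}

    	// HEAD the object; headers listed in defaultFilterKeys above are dropped
    	// before the remaining metadata is returned.
    	info, err := c.StatObject("my-bucket", "hello.txt")
    	if err != nil {
    		log.Fatal(err)
    	}
    	log.Printf("%d bytes, content type %s", info.Size, info.ContentType)
    }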
vendor/src/github.com/minio/minio-go/api.go (vendored, 69 changed lines)

@@ -19,10 +19,13 @@ package minio
 
 import (
 	"bytes"
+	"crypto/md5"
+	"crypto/sha256"
 	"encoding/base64"
 	"encoding/hex"
 	"errors"
 	"fmt"
+	"hash"
 	"io"
 	"io/ioutil"
 	"math/rand"

@@ -289,6 +292,29 @@ func (c *Client) SetS3TransferAccelerate(accelerateEndpoint string) {
 	}
 }
 
+// Hash materials provides relevant initialized hash algo writers
+// based on the expected signature type.
+//
+// - For signature v4 request if the connection is insecure compute only sha256.
+// - For signature v4 request if the connection is secure compute only md5.
+// - For anonymous request compute md5.
+func (c *Client) hashMaterials() (hashAlgos map[string]hash.Hash, hashSums map[string][]byte) {
+	hashSums = make(map[string][]byte)
+	hashAlgos = make(map[string]hash.Hash)
+	if c.overrideSignerType.IsV4() {
+		if c.secure {
+			hashAlgos["md5"] = md5.New()
+		} else {
+			hashAlgos["sha256"] = sha256.New()
+		}
+	} else {
+		if c.overrideSignerType.IsAnonymous() {
+			hashAlgos["md5"] = md5.New()
+		}
+	}
+	return hashAlgos, hashSums
+}
+
 // requestMetadata - is container for all the values to make a request.
 type requestMetadata struct {
 	// If set newRequest presigns the URL.

@@ -450,6 +476,13 @@ func (c Client) executeMethod(method string, metadata requestMetadata) (res *htt
 		case os.Stdin, os.Stdout, os.Stderr:
 			isRetryable = false
 		}
+		// Figure out if the body can be closed - if yes
+		// we will definitely close it upon the function
+		// return.
+		bodyCloser, ok := metadata.contentBody.(io.Closer)
+		if ok {
+			defer bodyCloser.Close()
+		}
 	}
 
 	// Create a done channel to control 'newRetryTimer' go routine.

@@ -558,15 +591,23 @@ func (c Client) newRequest(method string, metadata requestMetadata) (req *http.R
 		method = "POST"
 	}
 
-	var location string
-	// Gather location only if bucketName is present.
-	if metadata.bucketName != "" && metadata.bucketLocation == "" {
-		location, err = c.getBucketLocation(metadata.bucketName)
-		if err != nil {
-			return nil, err
+	location := metadata.bucketLocation
+	if location == "" {
+		if metadata.bucketName != "" {
+			// Gather location only if bucketName is present.
+			location, err = c.getBucketLocation(metadata.bucketName)
+			if err != nil {
+				if ToErrorResponse(err).Code != "AccessDenied" {
+					return nil, err
+				}
+			}
+			// Upon AccessDenied error on fetching bucket location, default
+			// to possible locations based on endpoint URL. This can usually
+			// happen when GetBucketLocation() is disabled using IAM policies.
+		}
+		if location == "" {
+			location = getDefaultLocation(c.endpointURL, c.region)
 		}
-	} else {
-		location = metadata.bucketLocation
 	}
 
 	// Construct a new target URL.

@@ -576,7 +617,7 @@ func (c Client) newRequest(method string, metadata requestMetadata) (req *http.R
 	}
 
 	// Initialize a new HTTP request for the method.
-	req, err = http.NewRequest(method, targetURL.String(), metadata.contentBody)
+	req, err = http.NewRequest(method, targetURL.String(), nil)
 	if err != nil {
 		return nil, err
 	}

@@ -628,6 +669,16 @@ func (c Client) newRequest(method string, metadata requestMetadata) (req *http.R
 		req.Header.Set(k, v[0])
 	}
 
+	// Go net/http notoriously closes the request body.
+	// - The request Body, if non-nil, will be closed by the underlying Transport, even on errors.
+	// This can cause underlying *os.File seekers to fail, avoid that
+	// by making sure to wrap the closer as a nop.
+	if metadata.contentLength == 0 {
+		req.Body = nil
+	} else {
+		req.Body = ioutil.NopCloser(metadata.contentBody)
+	}
+
 	// Set incoming content-length.
 	req.ContentLength = metadata.contentLength
 	if req.ContentLength <= -1 {
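The new hashMaterials helper returns a map of initialized hashes plus an empty sums map; the library's hashCopyBuffer/hashCopyN helpers then feed the payload through every selected hash and fill in the sums. A standalone illustration of that consumption pattern, not minio-go code:

    // Illustration of the hashAlgos/hashSums consumption pattern; not minio-go code.
    package main

    import (
    	"crypto/sha256"
    	"fmt"
    	"hash"
    	"io"
    	"strings"
    )

    func main() {
    	// Pretend hashMaterials picked sha256 for an insecure V4 connection;
    	// md5 would be chosen instead for HTTPS or anonymous requests.
    	hashAlgos := map[string]hash.Hash{"sha256": sha256.New()}
    	hashSums := make(map[string][]byte)

    	// Copy the payload once, feeding every selected hash as a side effect.
    	var writers []io.Writer
    	for _, h := range hashAlgos {
    		writers = append(writers, h)
    	}
    	payload := strings.NewReader("object payload")
    	if _, err := io.Copy(io.MultiWriter(writers...), payload); err != nil {
    		panic(err)
    	}

    	// Collect the finished sums keyed by algorithm name.
    	for name, h := range hashAlgos {
    		hashSums[name] = h.Sum(nil)
    		fmt.Printf("%s: %x\n", name, hashSums[name])
    	}
    }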
@@ -36,7 +36,7 @@ func TestMakeBucketErrorV2(t *testing.T) {
 	if testing.Short() {
 		t.Skip("skipping functional tests for short runs")
 	}
-	if os.Getenv("S3_ADDRESS") != "s3.amazonaws.com" {
+	if os.Getenv(serverEndpoint) != "s3.amazonaws.com" {
 		t.Skip("skipping region functional tests for non s3 runs")
 	}
 

@@ -45,10 +45,10 @@ func TestMakeBucketErrorV2(t *testing.T) {
 
 	// Instantiate new minio client object.
 	c, err := NewV2(
-		os.Getenv("S3_ADDRESS"),
-		os.Getenv("ACCESS_KEY"),
-		os.Getenv("SECRET_KEY"),
-		mustParseBool(os.Getenv("S3_SECURE")),
+		os.Getenv(serverEndpoint),
+		os.Getenv(accessKey),
+		os.Getenv(secretKey),
+		mustParseBool(os.Getenv(enableSecurity)),
 	)
 	if err != nil {
 		t.Fatal("Error:", err)

The same credential change (os.Getenv("S3_ADDRESS"), os.Getenv("ACCESS_KEY"), os.Getenv("SECRET_KEY"), mustParseBool(os.Getenv("S3_SECURE")) replaced by os.Getenv(serverEndpoint), os.Getenv(accessKey), os.Getenv(secretKey), mustParseBool(os.Getenv(enableSecurity))) is applied to the NewV2 call in each remaining test of this file:

@@ -91,10 +91,10 @@ func TestGetObjectClosedTwiceV2(t *testing.T) {
@@ -171,10 +171,10 @@ func TestRemovePartiallyUploadedV2(t *testing.T) {
@@ -353,10 +240,10 @@ func TestFPutObjectV2(t *testing.T) {
@@ -583,10 +392,10 @@ func TestMakeBucketRegionsV2(t *testing.T) {
@@ -634,10 +443,10 @@ func TestGetObjectReadSeekFunctionalV2(t *testing.T) {
@@ -767,10 +576,10 @@ func TestGetObjectReadAtFunctionalV2(t *testing.T) {
@@ -903,10 +712,10 @@ func TestCopyObjectV2(t *testing.T) {
@@ -1020,10 +829,10 @@ func TestFunctionalV2(t *testing.T) {

Two resumable tests are deleted outright:

@@ -229,119 +229,6 @@ func TestRemovePartiallyUploadedV2(t *testing.T) {
 	}
 }
-
-// Tests resumable put object cloud to cloud.
-func TestResumablePutObjectV2(t *testing.T) {
(113 lines removed: the test created an 11MiB temporary file, uploaded it with FPutObject, re-uploaded the resulting GetObject reader cloud to cloud with PutObject, compared the byte count against reader.Stat(), and then removed both objects, the bucket and the temp file.)

@@ -491,90 +378,12 @@ func TestFPutObjectV2(t *testing.T) {
-
-// Tests resumable file based put object multipart upload.
-func TestResumableFPutObjectV2(t *testing.T) {
(78 lines removed: the test uploaded an 11MiB temporary file with FPutObject, checked the returned size, and cleaned up the object, bucket and temp file. The surviving context of this hunk also carries the same os.Getenv("S3_ADDRESS") to os.Getenv(serverEndpoint) change in the "skip for non s3 runs" guard of TestMakeBucketRegionsV2.)
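The functional tests above now read their configuration through named constants (serverEndpoint, accessKey, secretKey, enableSecurity) instead of string literals. A hedged sketch of a helper in that spirit; the helper itself is hypothetical and not part of this diff.

    // Hypothetical helper mirroring what each test does inline.
    package main

    import (
    	"log"
    	"os"
    	"strconv"

    	minio "github.com/minio/minio-go"
    )

    const (
    	serverEndpoint = "SERVER_ENDPOINT"
    	accessKey      = "ACCESS_KEY"
    	secretKey      = "SECRET_KEY"
    	enableSecurity = "ENABLE_HTTPS"
    )

    // clientFromEnv builds a client from the four well-known environment variables.
    func clientFromEnv() (*minio.Client, error) {
    	secure, _ := strconv.ParseBool(os.Getenv(enableSecurity))
    	return minio.New(os.Getenv(serverEndpoint), os.Getenv(accessKey), os.Getenv(secretKey), secure)
    }

    func main() {
    	c, err := clientFromEnv()
    	if err != nil {
    		log.Fatal(err)
    	}
    	log.Println("client ready:", c != nil)
    }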
@@ -66,7 +66,7 @@ func TestMakeBucketError(t *testing.T) {
 	if testing.Short() {
 		t.Skip("skipping functional tests for short runs")
 	}
-	if os.Getenv("S3_ADDRESS") != "s3.amazonaws.com" {
+	if os.Getenv(serverEndpoint) != "s3.amazonaws.com" {
 		t.Skip("skipping region functional tests for non s3 runs")
 	}
 

@@ -75,10 +75,10 @@ func TestMakeBucketError(t *testing.T) {
 
 	// Instantiate new minio client object.
 	c, err := New(
-		os.Getenv("S3_ADDRESS"),
-		os.Getenv("ACCESS_KEY"),
-		os.Getenv("SECRET_KEY"),
-		mustParseBool(os.Getenv("S3_SECURE")),
+		os.Getenv(serverEndpoint),
+		os.Getenv(accessKey),
+		os.Getenv(secretKey),
+		mustParseBool(os.Getenv(enableSecurity)),
 	)
 	if err != nil {
 		t.Fatal("Error:", err)

TestMakeBucketRegions gets the same os.Getenv("S3_ADDRESS") guard change at @@ -129,7 +129,7 @@, and the same credential change repeats for the New/NewV4 constructor in each remaining test of this file:

@@ -138,10 +138,10 @@ func TestMakeBucketRegions(t *testing.T) {
@@ -189,10 +189,10 @@ func TestPutObjectReadAt(t *testing.T) {
@@ -279,10 +279,10 @@ func TestPutObjectWithMetadata(t *testing.T) {
@@ -371,10 +371,10 @@ func TestPutObjectStreaming(t *testing.T) {
@@ -435,10 +435,10 @@ func TestListPartiallyUploaded(t *testing.T) {
@@ -511,10 +511,10 @@ func TestGetOjectSeekEnd(t *testing.T) {
@@ -606,10 +606,10 @@ func TestGetObjectClosedTwice(t *testing.T) {
@@ -686,10 +686,10 @@ func TestRemoveMultipleObjects(t *testing.T) {
@@ -761,10 +761,10 @@ func TestRemovePartiallyUploaded(t *testing.T) {
@@ -1033,10 +830,10 @@ func TestFPutObjectMultipart(t *testing.T) {
@@ -1134,10 +931,10 @@ func TestFPutObject(t *testing.T) {
@@ -1290,10 +1087,10 @@ func TestGetObjectReadSeekFunctional(t *testing.T) {
@@ -1444,10 +1241,10 @@ func TestGetObjectReadAtFunctional(t *testing.T) {
@@ -1593,10 +1390,10 @@ func TestPresignedPostPolicy(t *testing.T) {
@@ -1688,10 +1485,10 @@ func TestCopyObject(t *testing.T) {
@@ -1856,10 +1653,10 @@ func TestEncryptionPutGet(t *testing.T) {
@@ -2023,10 +1820,10 @@ func TestBucketNotification(t *testing.T) {
@@ -2099,10 +1896,10 @@ func TestFunctional(t *testing.T) {
@@ -2427,10 +2224,10 @@ func TestGetObjectObjectModified(t *testing.T) {
@@ -2501,10 +2298,10 @@ func TestPutObjectUploadSeekedObject(t *testing.T) {

The two resumable tests in this file are deleted in a single hunk:

@@ -819,209 +819,6 @@ func TestRemovePartiallyUploaded(t *testing.T) {
 	}
 }
-
-// Tests resumable put object cloud to cloud.
-func TestResumablePutObject(t *testing.T) {
-
-// Tests resumable file based put object multipart upload.
-func TestResumableFPutObject(t *testing.T) {
(203 lines removed in total. TestResumablePutObject wrote minPartSize*2 bytes to a temporary file, uploaded it with FPutObject using content type "application/custom-octet-stream", verified the content type via Stat, re-uploaded the GetObject reader cloud to cloud with PutObject, compared sizes, and cleaned up. TestResumableFPutObject wrote four minPartSize-sized blocks of distinct data so part ordering could be checked, uploaded the file with FPutObject, verified the byte count, and cleaned up the object, bucket and temp file.)
@@ -73,7 +73,7 @@ func (r *bucketLocationCache) Delete(bucketName string) {
// GetBucketLocation - get location for the bucket name from location cache, if not
// fetch freshly by making a new request.
func (c Client) GetBucketLocation(bucketName string) (string, error) {
-	if err := isValidBucketName(bucketName); err != nil {
+	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
		return "", err
	}
	return c.getBucketLocation(bucketName)
@@ -82,10 +82,15 @@ func (c Client) GetBucketLocation(bucketName string) (string, error) {
// getBucketLocation - Get location for the bucketName from location map cache, if not
// fetch freshly by making a new request.
func (c Client) getBucketLocation(bucketName string) (string, error) {
-	if err := isValidBucketName(bucketName); err != nil {
+	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
		return "", err
	}

+	// Region set then no need to fetch bucket location.
+	if c.region != "" {
+		return c.region, nil
+	}

	if s3utils.IsAmazonChinaEndpoint(c.endpointURL) {
		// For china specifically we need to set everything to
		// cn-north-1 for now, there is no easier way until AWS S3
@@ -100,11 +105,6 @@ func (c Client) getBucketLocation(bucketName string) (string, error) {
		return "us-gov-west-1", nil
	}

-	// Region set then no need to fetch bucket location.
-	if c.region != "" {
-		return c.region, nil
-	}

	if location, ok := c.bucketLocCache.Get(bucketName); ok {
		return location, nil
	}
@@ -28,6 +28,13 @@ import (
	"time"
)

+const (
+	serverEndpoint = "SERVER_ENDPOINT"
+	accessKey      = "ACCESS_KEY"
+	secretKey      = "SECRET_KEY"
+	enableSecurity = "ENABLE_HTTPS"
+)

// Tests for Core GetObject() function.
func TestGetObjectCore(t *testing.T) {
	if testing.Short() {
@@ -39,10 +46,10 @@ func TestGetObjectCore(t *testing.T) {

	// Instantiate new minio core client object.
	c, err := NewCore(
-		os.Getenv("S3_ADDRESS"),
-		os.Getenv("ACCESS_KEY"),
-		os.Getenv("SECRET_KEY"),
-		mustParseBool(os.Getenv("S3_SECURE")),
+		os.Getenv(serverEndpoint),
+		os.Getenv(accessKey),
+		os.Getenv(secretKey),
+		mustParseBool(os.Getenv(enableSecurity)),
	)
	if err != nil {
		t.Fatal("Error:", err)
@@ -213,10 +220,10 @@ func TestGetBucketPolicy(t *testing.T) {

	// Instantiate new minio client object.
	c, err := NewCore(
-		os.Getenv("S3_ADDRESS"),
-		os.Getenv("ACCESS_KEY"),
-		os.Getenv("SECRET_KEY"),
-		mustParseBool(os.Getenv("S3_SECURE")),
+		os.Getenv(serverEndpoint),
+		os.Getenv(accessKey),
+		os.Getenv(secretKey),
+		mustParseBool(os.Getenv(enableSecurity)),
	)
	if err != nil {
		t.Fatal("Error:", err)
@@ -276,10 +283,10 @@ func TestCorePutObject(t *testing.T) {

	// Instantiate new minio client object.
	c, err := NewCore(
-		os.Getenv("S3_ADDRESS"),
-		os.Getenv("ACCESS_KEY"),
-		os.Getenv("SECRET_KEY"),
-		mustParseBool(os.Getenv("S3_SECURE")),
+		os.Getenv(serverEndpoint),
+		os.Getenv(accessKey),
+		os.Getenv(secretKey),
+		mustParseBool(os.Getenv(enableSecurity)),
	)
	if err != nil {
		t.Fatal("Error:", err)
@@ -438,9 +438,6 @@ if err != nil {

Uploads objects that are less than 64MiB in a single PUT operation. For objects that are greater than 64MiB in size, PutObject seamlessly uploads the object as parts of 64MiB or more depending on the actual file size. The max upload size for an object is 5TB.

-In the event that PutObject fails to upload an object, the user may attempt to re-upload the same object. If the same object is being uploaded, PutObject API examines the previous partial attempt to upload this object and resumes automatically from where it left off.

__Parameters__

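For orientation, here is a caller-side sketch of the PutObject behaviour described above. The endpoint, credentials, bucket, object and file names are placeholders, and the four-argument signature is the one documented by this vendored minio-go revision; treat it as an assumption if you are reading a different version of the library.

```go
package main

import (
	"log"
	"os"

	minio "github.com/minio/minio-go"
)

func main() {
	// Placeholder endpoint and credentials.
	client, err := minio.New("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatalln(err)
	}

	file, err := os.Open("my-testfile") // placeholder local file
	if err != nil {
		log.Fatalln(err)
	}
	defer file.Close()

	// PutObject streams the reader; objects above the 64MiB threshold are
	// transparently uploaded as multipart parts, up to the documented 5TB limit.
	n, err := client.PutObject("mybucket", "myobject", file, "application/octet-stream")
	if err != nil {
		log.Fatalln(err)
	}
	log.Println("uploaded", n, "bytes")
}
```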
@@ -566,8 +563,6 @@ Uploads contents from a file to objectName.

FPutObject uploads objects that are less than 64MiB in a single PUT operation. For objects that are greater than the 64MiB in size, FPutObject seamlessly uploads the object in chunks of 64MiB or more depending on the actual file size. The max upload size for an object is 5TB.

-In the event that FPutObject fails to upload an object, the user may attempt to re-upload the same object. If the same object is being uploaded, FPutObject API examines the previous partial attempt to upload this object and resumes automatically from where it left off.

__Parameters__

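The file-based counterpart looks much the same; this fragment reuses the `client` and imports from the previous sketch, the path and content type are placeholders, and the same 64MiB multipart threshold applies.

```go
// FPutObject reads directly from a file path instead of an io.Reader.
n, err := client.FPutObject("mybucket", "myobject.csv", "/tmp/myobject.csv", "application/csv")
if err != nil {
	log.Fatalln(err)
}
log.Println("uploaded", n, "bytes")
```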
@@ -1243,7 +1238,7 @@ __Return Values__

|Param |Type |Description |
|:---|:---| :---|
-|`chan NotificationInfo` | _chan_ | Read channel for all notificatons on bucket |
+|`chan NotificationInfo` | _chan_ | Read channel for all notifications on bucket |
|`NotificationInfo` | _object_ | Notification object represents events info |
|`notificationInfo.Records` | _[]NotificationEvent_ | Collection of notification events |
|`notificationInfo.Err` | _error_ | Carries any error occurred during the operation |
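To show how the channel in this table is typically consumed, here is a hedged fragment that relies only on the `Records` and `Err` fields listed above; the exact parameter list of `ListenBucketNotification` (bucket, prefix, suffix, event names, done channel) is assumed from this minio-go vintage, and `client` comes from the earlier sketch.

```go
// Closing doneCh stops the listener and closes the returned channel.
doneCh := make(chan struct{})
defer close(doneCh)

notifCh := client.ListenBucketNotification("mybucket", "", "", []string{
	"s3:ObjectCreated:*",
	"s3:ObjectRemoved:*",
}, doneCh)

for notificationInfo := range notifCh {
	if notificationInfo.Err != nil {
		log.Fatalln(notificationInfo.Err)
	}
	for _, record := range notificationInfo.Records {
		log.Printf("event: %+v", record)
	}
}
```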
@@ -50,9 +50,8 @@ func main() {
		log.Fatalln(err)
	}

-	// progress reader is notified as PutObject makes progress with
-	// the read. For partial resume put object, progress reader is
-	// appropriately advanced.
+	// Progress reader is notified as PutObject makes progress with
+	// the Reads inside.
	progress := pb.New64(objectInfo.Size)
	progress.Start()

@@ -42,7 +42,7 @@ type IAM struct {
	// Required http Client to use when connecting to IAM metadata service.
	Client *http.Client

-	// Custom endpoint in place of
+	// Custom endpoint to fetch IAM role credentials.
	endpoint string
}

@@ -58,14 +58,19 @@ func redirectHeaders(req *http.Request, via []*http.Request) error {
	return nil
}

+// IAM Roles for Amazon EC2
+// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
+const (
+	defaultIAMRoleEndpoint      = "http://169.254.169.254"
+	defaultIAMSecurityCredsPath = "/latest/meta-data/iam/security-credentials"
+)

// NewIAM returns a pointer to a new Credentials object wrapping
// the IAM. Takes a ConfigProvider to create a EC2Metadata client.
// The ConfigProvider is satisfied by the session.Session type.
func NewIAM(endpoint string) *Credentials {
	if endpoint == "" {
-		// IAM Roles for Amazon EC2
-		// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
-		endpoint = "http://169.254.169.254"
+		endpoint = defaultIAMRoleEndpoint
	}
	p := &IAM{
		Client: &http.Client{
@@ -81,17 +86,7 @@ func NewIAM(endpoint string) *Credentials {
// Error will be returned if the request fails, or unable to extract
// the desired
func (m *IAM) Retrieve() (Value, error) {
-	credsList, err := requestCredList(m.Client, m.endpoint)
-	if err != nil {
-		return Value{}, err
-	}
-
-	if len(credsList) == 0 {
-		return Value{}, errors.New("empty EC2 Role list")
-	}
-	credsName := credsList[0]
-
-	roleCreds, err := requestCred(m.Client, m.endpoint, credsName)
+	roleCreds, err := getCredentials(m.Client, m.endpoint)
	if err != nil {
		return Value{}, err
	}
@@ -119,18 +114,32 @@ type ec2RoleCredRespBody struct {
	// Error state
	Code    string
	Message string

+	// Unused params.
+	LastUpdated time.Time
+	Type        string
}

-const iamSecurityCredsPath = "/latest/meta-data/iam/security-credentials"
-
-// requestCredList requests a list of credentials from the EC2 service.
-// If there are no credentials, or there is an error making or receiving the request
-func requestCredList(client *http.Client, endpoint string) ([]string, error) {
+// Get the final IAM role URL where the request will
+// be sent to fetch the rolling access credentials.
+// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
+func getIAMRoleURL(endpoint string) (*url.URL, error) {
+	if endpoint == "" {
+		endpoint = defaultIAMRoleEndpoint
+	}
	u, err := url.Parse(endpoint)
	if err != nil {
		return nil, err
	}
-	u.Path = iamSecurityCredsPath
+	u.Path = defaultIAMSecurityCredsPath
+	return u, nil
+}
+
+// listRoleNames lists of credential role names associated
+// with the current EC2 service. If there are no credentials,
+// or there is an error making or receiving the request.
+// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
+func listRoleNames(client *http.Client, u *url.URL) ([]string, error) {
	req, err := http.NewRequest("GET", u.String(), nil)
	if err != nil {
		return nil, err
@@ -157,17 +166,39 @@ func requestCredList(client *http.Client, endpoint string) ([]string, error) {
	return credsList, nil
}

-// requestCred requests the credentials for a specific credentials from the EC2 service.
+// getCredentials - obtains the credentials from the IAM role name associated with
+// the current EC2 service.
//
-// If the credentials cannot be found, or there is an error reading the response
-// and error will be returned.
-func requestCred(client *http.Client, endpoint string, credsName string) (ec2RoleCredRespBody, error) {
-	u, err := url.Parse(endpoint)
+// If the credentials cannot be found, or there is an error
+// reading the response an error will be returned.
+func getCredentials(client *http.Client, endpoint string) (ec2RoleCredRespBody, error) {
+	// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
+	u, err := getIAMRoleURL(endpoint)
	if err != nil {
		return ec2RoleCredRespBody{}, err
	}

-	u.Path = path.Join(iamSecurityCredsPath, credsName)
+	// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
+	roleNames, err := listRoleNames(client, u)
+	if err != nil {
+		return ec2RoleCredRespBody{}, err
+	}
+
+	if len(roleNames) == 0 {
+		return ec2RoleCredRespBody{}, errors.New("No IAM roles attached to this EC2 service")
+	}
+
+	// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
+	// - An instance profile can contain only one IAM role. This limit cannot be increased.
+	roleName := roleNames[0]
+
+	// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
+	// The following command retrieves the security credentials for an
+	// IAM role named `s3access`.
+	//
+	// $ curl http://169.254.169.254/latest/meta-data/iam/security-credentials/s3access
+	//
+	u.Path = path.Join(u.Path, roleName)
	req, err := http.NewRequest("GET", u.String(), nil)
	if err != nil {
		return ec2RoleCredRespBody{}, err
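The refactor above boils down to a two-step walk of the EC2 instance metadata service: list the role names under `security-credentials`, then fetch the rolling credentials for the first (and only) role. A standard-library-only sketch of that flow, with error handling trimmed and the well-known metadata endpoint hard-coded, looks like this:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"strings"
)

func main() {
	const credsURL = "http://169.254.169.254/latest/meta-data/iam/security-credentials"

	// Step 1: list the role names attached to the instance profile (at most one).
	resp, err := http.Get(credsURL)
	if err != nil {
		panic(err)
	}
	names, _ := ioutil.ReadAll(resp.Body)
	resp.Body.Close()
	roleName := strings.TrimSpace(strings.Split(string(names), "\n")[0])

	// Step 2: fetch the rolling credentials for that role. The response is JSON
	// carrying AccessKeyId, SecretAccessKey, Token and Expiration.
	resp, err = http.Get(credsURL + "/" + roleName)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	creds, _ := ioutil.ReadAll(resp.Body)
	fmt.Println(string(creds))
}
```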
@@ -91,8 +91,8 @@ func TestIAMNoRoles(t *testing.T) {
	if err == nil {
		t.Fatal("Unexpected should fail here")
	}
-	if err.Error() != "empty EC2 Role list" {
-		t.Fatalf("Expected 'empty EC2 Role list', got %s", err)
+	if err.Error() != "No IAM roles attached to this EC2 service" {
+		t.Fatalf("Expected 'No IAM roles attached to this EC2 service', got %s", err)
	}
}

@@ -21,6 +21,7 @@ import (
	"encoding/hex"
	"fmt"
	"io"
+	"io/ioutil"
	"net/http"
	"strconv"
	"strings"
@@ -205,6 +206,10 @@ func StreamingSignV4(req *http.Request, accessKeyID, secretAccessKey, sessionTok
	// Set headers needed for streaming signature.
	prepareStreamingRequest(req, sessionToken, dataLen, reqTime)

+	if req.Body == nil {
+		req.Body = ioutil.NopCloser(bytes.NewReader([]byte("")))
+	}
+
	stReader := &StreamingReader{
		baseReadCloser: req.Body,
		accessKeyID:    accessKeyID,
@@ -19,6 +19,7 @@ package s3utils
import (
	"bytes"
	"encoding/hex"
+	"errors"
	"net"
	"net/url"
	"regexp"
@@ -200,3 +201,74 @@ func EncodePath(pathName string) string {
	}
	return encodedPathname
}

+// We support '.' with bucket names but we fallback to using path
+// style requests instead for such buckets.
+var (
+	validBucketName       = regexp.MustCompile(`^[A-Za-z0-9][A-Za-z0-9\.\-]{1,61}[A-Za-z0-9]$`)
+	validBucketNameStrict = regexp.MustCompile(`^[a-z0-9][a-z0-9\.\-]{1,61}[a-z0-9]$`)
+	ipAddress             = regexp.MustCompile(`^(\d+\.){3}\d+$`)
+)
+
+// Common checker for both stricter and basic validation.
+func checkBucketNameCommon(bucketName string, strict bool) (err error) {
+	if strings.TrimSpace(bucketName) == "" {
+		return errors.New("Bucket name cannot be empty")
+	}
+	if len(bucketName) < 3 {
+		return errors.New("Bucket name cannot be smaller than 3 characters")
+	}
+	if len(bucketName) > 63 {
+		return errors.New("Bucket name cannot be greater than 63 characters")
+	}
+	if ipAddress.MatchString(bucketName) {
+		return errors.New("Bucket name cannot be an ip address")
+	}
+	if strings.Contains(bucketName, "..") {
+		return errors.New("Bucket name contains invalid characters")
+	}
+	if strict {
+		if !validBucketNameStrict.MatchString(bucketName) {
+			err = errors.New("Bucket name contains invalid characters")
+		}
+		return err
+	}
+	if !validBucketName.MatchString(bucketName) {
+		err = errors.New("Bucket name contains invalid characters")
+	}
+	return err
+}
+
+// CheckValidBucketName - checks if we have a valid input bucket name.
+// This is a non stricter version.
+// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html
+func CheckValidBucketName(bucketName string) (err error) {
+	return checkBucketNameCommon(bucketName, false)
+}
+
+// CheckValidBucketNameStrict - checks if we have a valid input bucket name.
+// This is a stricter version.
+func CheckValidBucketNameStrict(bucketName string) (err error) {
+	return checkBucketNameCommon(bucketName, true)
+}
+
+// CheckValidObjectNamePrefix - checks if we have a valid input object name prefix.
+// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html
+func CheckValidObjectNamePrefix(objectName string) error {
+	if len(objectName) > 1024 {
+		return errors.New("Object name cannot be greater than 1024 characters")
+	}
+	if !utf8.ValidString(objectName) {
+		return errors.New("Object name with non UTF-8 strings are not supported")
+	}
+	return nil
+}
+
+// CheckValidObjectName - checks if we have a valid input object name.
+// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html
+func CheckValidObjectName(objectName string) error {
+	if strings.TrimSpace(objectName) == "" {
+		return errors.New("Object name cannot be empty")
+	}
+	return CheckValidObjectNamePrefix(objectName)
+}
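Since both validators are exported here, a quick illustration of how they differ (inside any function that imports `fmt` and `github.com/minio/minio-go/pkg/s3utils`, the same package path used elsewhere in this diff): the non-strict regexp accepts uppercase letters, the strict one requires lowercase, and both reject double dots.

```go
fmt.Println(s3utils.CheckValidBucketName("Mybucket"))       // <nil>
fmt.Println(s3utils.CheckValidBucketNameStrict("Mybucket")) // Bucket name contains invalid characters
fmt.Println(s3utils.CheckValidBucketName("my..bucket"))     // Bucket name contains invalid characters
```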
@@ -17,6 +17,7 @@
package s3utils

import (
+	"errors"
	"net/url"
	"testing"
)
@@ -282,3 +283,87 @@ func TestEncodePath(t *testing.T) {
		}
	}
}

+// Tests validate the bucket name validator.
+func TestIsValidBucketName(t *testing.T) {
+	testCases := []struct {
+		// Input.
+		bucketName string
+		// Expected result.
+		err error
+		// Flag to indicate whether test should Pass.
+		shouldPass bool
+	}{
+		{".mybucket", errors.New("Bucket name contains invalid characters"), false},
+		{"$mybucket", errors.New("Bucket name contains invalid characters"), false},
+		{"mybucket-", errors.New("Bucket name contains invalid characters"), false},
+		{"my", errors.New("Bucket name cannot be smaller than 3 characters"), false},
+		{"", errors.New("Bucket name cannot be empty"), false},
+		{"my..bucket", errors.New("Bucket name contains invalid characters"), false},
+		{"192.168.1.168", errors.New("Bucket name cannot be an ip address"), false},
+		{"my.bucket.com", nil, true},
+		{"my-bucket", nil, true},
+		{"123my-bucket", nil, true},
+		{"Mybucket", nil, true},
+	}
+
+	for i, testCase := range testCases {
+		err := CheckValidBucketName(testCase.bucketName)
+		if err != nil && testCase.shouldPass {
+			t.Errorf("Test %d: Expected to pass, but failed with: <ERROR> %s", i+1, err.Error())
+		}
+		if err == nil && !testCase.shouldPass {
+			t.Errorf("Test %d: Expected to fail with <ERROR> \"%s\", but passed instead", i+1, testCase.err.Error())
+		}
+		// Failed as expected, but does it fail for the expected reason.
+		if err != nil && !testCase.shouldPass {
+			if err.Error() != testCase.err.Error() {
+				t.Errorf("Test %d: Expected to fail with error \"%s\", but instead failed with error \"%s\" instead", i+1, testCase.err.Error(), err.Error())
+			}
+		}
+	}
+}
+
+// Tests validate the bucket name validator stricter.
+func TestIsValidBucketNameStrict(t *testing.T) {
+	testCases := []struct {
+		// Input.
+		bucketName string
+		// Expected result.
+		err error
+		// Flag to indicate whether test should Pass.
+		shouldPass bool
+	}{
+		{".mybucket", errors.New("Bucket name contains invalid characters"), false},
+		{"$mybucket", errors.New("Bucket name contains invalid characters"), false},
+		{"mybucket-", errors.New("Bucket name contains invalid characters"), false},
+		{"my", errors.New("Bucket name cannot be smaller than 3 characters"), false},
+		{"", errors.New("Bucket name cannot be empty"), false},
+		{"my..bucket", errors.New("Bucket name contains invalid characters"), false},
+		{"192.168.1.168", errors.New("Bucket name cannot be an ip address"), false},
+		{"Mybucket", errors.New("Bucket name contains invalid characters"), false},
+		{"my.bucket.com", nil, true},
+		{"my-bucket", nil, true},
+		{"123my-bucket", nil, true},
+	}
+
+	for i, testCase := range testCases {
+		err := CheckValidBucketNameStrict(testCase.bucketName)
+		if err != nil && testCase.shouldPass {
+			t.Errorf("Test %d: Expected to pass, but failed with: <ERROR> %s", i+1, err.Error())
+		}
+		if err == nil && !testCase.shouldPass {
+			t.Errorf("Test %d: Expected to fail with <ERROR> \"%s\", but passed instead", i+1, testCase.err.Error())
+		}
+		// Failed as expected, but does it fail for the expected reason.
+		if err != nil && !testCase.shouldPass {
+			if err.Error() != testCase.err.Error() {
+				t.Errorf("Test %d: Expected to fail with error \"%s\", but instead failed with error \"%s\" instead", i+1, testCase.err.Error(), err.Error())
+			}
+		}
+	}
+}
78  vendor/src/github.com/minio/minio-go/utils.go  vendored
@@ -28,7 +28,6 @@ import (
	"regexp"
	"strings"
	"time"
-	"unicode/utf8"

	"github.com/minio/minio-go/pkg/s3utils"
)
@@ -148,63 +147,6 @@ func isValidExpiry(expires time.Duration) error {
	return nil
}

-// We support '.' with bucket names but we fallback to using path
-// style requests instead for such buckets.
-var validBucketName = regexp.MustCompile(`^[a-z0-9][a-z0-9\.\-]{1,61}[a-z0-9]$`)

-// Invalid bucket name with double dot.
-var invalidDotBucketName = regexp.MustCompile(`\.\.`)

-// isValidBucketName - verify bucket name in accordance with
-// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html
-func isValidBucketName(bucketName string) error {
-	if strings.TrimSpace(bucketName) == "" {
-		return ErrInvalidBucketName("Bucket name cannot be empty.")
-	}
-	if len(bucketName) < 3 {
-		return ErrInvalidBucketName("Bucket name cannot be smaller than 3 characters.")
-	}
-	if len(bucketName) > 63 {
-		return ErrInvalidBucketName("Bucket name cannot be greater than 63 characters.")
-	}
-	if bucketName[0] == '.' || bucketName[len(bucketName)-1] == '.' {
-		return ErrInvalidBucketName("Bucket name cannot start or end with a '.' dot.")
-	}
-	if invalidDotBucketName.MatchString(bucketName) {
-		return ErrInvalidBucketName("Bucket name cannot have successive periods.")
-	}
-	if !validBucketName.MatchString(bucketName) {
-		return ErrInvalidBucketName("Bucket name contains invalid characters.")
-	}
-	return nil
-}

-// isValidObjectName - verify object name in accordance with
-// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html
-func isValidObjectName(objectName string) error {
-	if strings.TrimSpace(objectName) == "" {
-		return ErrInvalidObjectName("Object name cannot be empty.")
-	}
-	if len(objectName) > 1024 {
-		return ErrInvalidObjectName("Object name cannot be greater than 1024 characters.")
-	}
-	if !utf8.ValidString(objectName) {
-		return ErrInvalidBucketName("Object name with non UTF-8 strings are not supported.")
-	}
-	return nil
-}

-// isValidObjectPrefix - verify if object prefix is valid.
-func isValidObjectPrefix(objectPrefix string) error {
-	if len(objectPrefix) > 1024 {
-		return ErrInvalidObjectPrefix("Object prefix cannot be greater than 1024 characters.")
-	}
-	if !utf8.ValidString(objectPrefix) {
-		return ErrInvalidObjectPrefix("Object prefix with non UTF-8 strings are not supported.")
-	}
-	return nil
-}

// make a copy of http.Header
func cloneHeader(h http.Header) http.Header {
	h2 := make(http.Header, len(h))
@@ -250,3 +192,23 @@ func redactSignature(origAuth string) string {
	// Strip out 256-bit signature from: Signature=<256-bit signature>
	return regSign.ReplaceAllString(newAuth, "Signature=**REDACTED**")
}

+// Get default location returns the location based on the input
+// URL `u`, if region override is provided then all location
+// defaults to regionOverride.
+//
+// If no other cases match then the location is set to `us-east-1`
+// as a last resort.
+func getDefaultLocation(u url.URL, regionOverride string) (location string) {
+	if regionOverride != "" {
+		return regionOverride
+	}
+	if s3utils.IsAmazonChinaEndpoint(u) {
+		return "cn-north-1"
+	}
+	if s3utils.IsAmazonGovCloudEndpoint(u) {
+		return "us-gov-west-1"
+	}
+	// Default to location to 'us-east-1'.
+	return "us-east-1"
+}
@@ -21,6 +21,8 @@ import (
	"net/url"
	"testing"
	"time"

+	"github.com/minio/minio-go/pkg/s3utils"
)

// Tests signature redacting function used
@@ -124,8 +126,10 @@ func TestIsValidEndpointURL(t *testing.T) {
	}{
		{"", ErrInvalidArgument("Endpoint url cannot be empty."), false},
		{"/", nil, true},
		{"https://s3.amazonaws.com", nil, true},
		{"https://s3.cn-north-1.amazonaws.com.cn", nil, true},
+		{"https://s3-us-gov-west-1.amazonaws.com", nil, true},
+		{"https://s3-fips-us-gov-west-1.amazonaws.com", nil, true},
		{"https://s3.amazonaws.com/", nil, true},
		{"https://storage.googleapis.com/", nil, true},
		{"192.168.1.1", ErrInvalidArgument("Endpoint url cannot have fully qualified paths."), false},
@@ -163,6 +167,52 @@ func TestIsValidEndpointURL(t *testing.T) {
	}
}

+func TestDefaultBucketLocation(t *testing.T) {
+	testCases := []struct {
+		endpointURL      url.URL
+		regionOverride   string
+		expectedLocation string
+	}{
+		// Region override is set URL is ignored. - Test 1.
+		{
+			endpointURL:      url.URL{Host: "s3-fips-us-gov-west-1.amazonaws.com"},
+			regionOverride:   "us-west-1",
+			expectedLocation: "us-west-1",
+		},
+		// No region override, url based preferenced is honored - Test 2.
+		{
+			endpointURL:      url.URL{Host: "s3-fips-us-gov-west-1.amazonaws.com"},
+			regionOverride:   "",
+			expectedLocation: "us-gov-west-1",
+		},
+		// Region override is honored - Test 3.
+		{
+			endpointURL:      url.URL{Host: "s3.amazonaws.com"},
+			regionOverride:   "us-west-1",
+			expectedLocation: "us-west-1",
+		},
+		// China region should be honored, region override not provided. - Test 4.
+		{
+			endpointURL:      url.URL{Host: "s3.cn-north-1.amazonaws.com.cn"},
+			regionOverride:   "",
+			expectedLocation: "cn-north-1",
+		},
+		// No region provided, no standard region strings provided as well. - Test 5.
+		{
+			endpointURL:      url.URL{Host: "s3.amazonaws.com"},
+			regionOverride:   "",
+			expectedLocation: "us-east-1",
+		},
+	}
+
+	for i, testCase := range testCases {
+		retLocation := getDefaultLocation(testCase.endpointURL, testCase.regionOverride)
+		if testCase.expectedLocation != retLocation {
+			t.Errorf("Test %d: Expected location %s, got %s", i+1, testCase.expectedLocation, retLocation)
+		}
+	}
+}

// Tests validate the expiry time validator.
func TestIsValidExpiry(t *testing.T) {
	testCases := []struct {
@@ -209,19 +259,19 @@ func TestIsValidBucketName(t *testing.T) {
		// Flag to indicate whether test should Pass.
		shouldPass bool
	}{
-		{".mybucket", ErrInvalidBucketName("Bucket name cannot start or end with a '.' dot."), false},
-		{"mybucket.", ErrInvalidBucketName("Bucket name cannot start or end with a '.' dot."), false},
-		{"mybucket-", ErrInvalidBucketName("Bucket name contains invalid characters."), false},
-		{"my", ErrInvalidBucketName("Bucket name cannot be smaller than 3 characters."), false},
-		{"", ErrInvalidBucketName("Bucket name cannot be empty."), false},
-		{"my..bucket", ErrInvalidBucketName("Bucket name cannot have successive periods."), false},
+		{".mybucket", ErrInvalidBucketName("Bucket name contains invalid characters"), false},
+		{"mybucket.", ErrInvalidBucketName("Bucket name contains invalid characters"), false},
+		{"mybucket-", ErrInvalidBucketName("Bucket name contains invalid characters"), false},
+		{"my", ErrInvalidBucketName("Bucket name cannot be smaller than 3 characters"), false},
+		{"", ErrInvalidBucketName("Bucket name cannot be empty"), false},
+		{"my..bucket", ErrInvalidBucketName("Bucket name contains invalid characters"), false},
		{"my.bucket.com", nil, true},
		{"my-bucket", nil, true},
		{"123my-bucket", nil, true},
	}

	for i, testCase := range testCases {
-		err := isValidBucketName(testCase.bucketName)
+		err := s3utils.CheckValidBucketName(testCase.bucketName)
		if err != nil && testCase.shouldPass {
			t.Errorf("Test %d: Expected to pass, but failed with: <ERROR> %s", i+1, err.Error())
		}