From 31e156c666d1d67b1af35072684c4d8e395a378c Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Fri, 30 Mar 2018 12:33:40 +0200 Subject: [PATCH] Update vendored library github.com/minio/minio-go --- Gopkg.lock | 6 +- vendor/github.com/minio/minio-go/Makefile | 4 +- vendor/github.com/minio/minio-go/README.md | 4 - .../minio/minio-go/api-compose-object.go | 88 +- .../minio/minio-go/api-get-object-file.go | 13 +- .../minio/minio-go/api-get-object.go | 37 +- .../minio/minio-go/api-get-options.go | 8 +- .../minio/minio-go/api-get-policy.go | 2 +- .../minio/minio-go/api-notification.go | 12 +- .../minio/minio-go/api-presigned.go | 4 +- .../minio/minio-go/api-put-object-context.go | 6 - .../minio-go/api-put-object-encrypted.go | 44 - .../minio-go/api-put-object-multipart.go | 15 +- .../minio-go/api-put-object-streaming.go | 4 +- .../minio/minio-go/api-put-object.go | 37 +- .../minio/minio-go/api-put-object_test.go | 1 + .../github.com/minio/minio-go/api-remove.go | 15 +- vendor/github.com/minio/minio-go/api-stat.go | 2 +- vendor/github.com/minio/minio-go/api.go | 86 +- .../minio/minio-go/api_unit_test.go | 3 +- vendor/github.com/minio/minio-go/appveyor.yml | 1 + .../github.com/minio/minio-go/bucket-cache.go | 4 +- .../minio/minio-go/bucket-cache_test.go | 2 +- .../minio/minio-go/bucket-notification.go | 69 +- vendor/github.com/minio/minio-go/constants.go | 7 - vendor/github.com/minio/minio-go/core.go | 10 +- vendor/github.com/minio/minio-go/docs/API.md | 437 ++----- .../examples/s3/fputencrypted-object.go | 39 +- .../examples/s3/get-encrypted-object.go | 34 +- .../examples/s3/put-encrypted-object.go | 38 +- .../examples/s3/putobject-getobject-sse.go | 43 +- .../minio/minio-go/functional_tests.go | 1040 +++++++++++++---- .../minio/minio-go/pkg/encrypt/cbc.go | 294 ----- .../minio/minio-go/pkg/encrypt/interface.go | 54 - .../minio/minio-go/pkg/encrypt/keys.go | 166 --- .../minio/minio-go/pkg/encrypt/server-side.go | 146 +++ .../minio-go/pkg/policy/bucket-policy.go | 35 +- .../minio-go/pkg/policy/bucket-policy_test.go | 201 +++- .../pkg/s3signer/request-signature-v2.go | 47 +- .../pkg/s3signer/request-signature_test.go | 52 +- .../minio/minio-go/pkg/s3signer/utils_test.go | 27 +- vendor/github.com/minio/minio-go/transport.go | 10 +- vendor/github.com/minio/minio-go/utils.go | 21 +- .../github.com/minio/minio-go/utils_test.go | 27 +- 44 files changed, 1612 insertions(+), 1583 deletions(-) delete mode 100644 vendor/github.com/minio/minio-go/api-put-object-encrypted.go delete mode 100644 vendor/github.com/minio/minio-go/pkg/encrypt/cbc.go delete mode 100644 vendor/github.com/minio/minio-go/pkg/encrypt/interface.go delete mode 100644 vendor/github.com/minio/minio-go/pkg/encrypt/keys.go create mode 100644 vendor/github.com/minio/minio-go/pkg/encrypt/server-side.go diff --git a/Gopkg.lock b/Gopkg.lock index e91babbd8..1b7d635ad 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -100,8 +100,8 @@ [[projects]] name = "github.com/minio/minio-go" packages = [".","pkg/credentials","pkg/encrypt","pkg/policy","pkg/s3signer","pkg/s3utils","pkg/set"] - revision = "14f1d472d115bac5ca4804094aa87484a72ced61" - version = "4.0.6" + revision = "66252c2a3c15f7b90cc8493d497a04ac3b6e3606" + version = "5.0.0" [[projects]] branch = "master" @@ -178,7 +178,7 @@ [[projects]] branch = "master" name = "golang.org/x/crypto" - packages = ["curve25519","ed25519","ed25519/internal/edwards25519","internal/chacha20","pbkdf2","poly1305","scrypt","ssh","ssh/terminal"] + packages = 
["argon2","blake2b","curve25519","ed25519","ed25519/internal/edwards25519","internal/chacha20","pbkdf2","poly1305","scrypt","ssh","ssh/terminal"] revision = "88942b9c40a4c9d203b82b3731787b672d6e809b" [[projects]] diff --git a/vendor/github.com/minio/minio-go/Makefile b/vendor/github.com/minio/minio-go/Makefile index 05081c723..bad81ffaf 100644 --- a/vendor/github.com/minio/minio-go/Makefile +++ b/vendor/github.com/minio/minio-go/Makefile @@ -3,10 +3,10 @@ all: checks checks: @go get -t ./... @go vet ./... - @SERVER_ENDPOINT=play.minio.io:9000 ACCESS_KEY=Q3AM3UQ867SPQQA43P2F SECRET_KEY=zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG ENABLE_HTTPS=1 go test -race -v ./... + @SERVER_ENDPOINT=play.minio.io:9000 ACCESS_KEY=Q3AM3UQ867SPQQA43P2F SECRET_KEY=zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG ENABLE_HTTPS=1 MINT_MODE=full go test -race -v ./... @go get github.com/dustin/go-humanize/... @go get github.com/sirupsen/logrus/... - @SERVER_ENDPOINT=play.minio.io:9000 ACCESS_KEY=Q3AM3UQ867SPQQA43P2F SECRET_KEY=zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG ENABLE_HTTPS=1 go run functional_tests.go + @SERVER_ENDPOINT=play.minio.io:9000 ACCESS_KEY=Q3AM3UQ867SPQQA43P2F SECRET_KEY=zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG ENABLE_HTTPS=1 MINT_MODE=full go run functional_tests.go @mkdir -p /tmp/examples && for i in $(echo examples/s3/*); do go build -o /tmp/examples/$(basename ${i:0:-3}) ${i}; done @go get -u github.com/a8m/mark/... @go get -u github.com/minio/cli/... diff --git a/vendor/github.com/minio/minio-go/README.md b/vendor/github.com/minio/minio-go/README.md index 2dedc1a28..553b621aa 100644 --- a/vendor/github.com/minio/minio-go/README.md +++ b/vendor/github.com/minio/minio-go/README.md @@ -156,10 +156,6 @@ The full API Reference is available here. * [`RemoveObjects`](https://docs.minio.io/docs/golang-client-api-reference#RemoveObjects) * [`RemoveIncompleteUpload`](https://docs.minio.io/docs/golang-client-api-reference#RemoveIncompleteUpload) -### API Reference: Encrypted Object Operations -* [`GetEncryptedObject`](https://docs.minio.io/docs/golang-client-api-reference#GetEncryptedObject) -* [`PutEncryptedObject`](https://docs.minio.io/docs/golang-client-api-reference#PutEncryptedObject) - ### API Reference : Presigned Operations * [`PresignedGetObject`](https://docs.minio.io/docs/golang-client-api-reference#PresignedGetObject) * [`PresignedPutObject`](https://docs.minio.io/docs/golang-client-api-reference#PresignedPutObject) diff --git a/vendor/github.com/minio/minio-go/api-compose-object.go b/vendor/github.com/minio/minio-go/api-compose-object.go index 81314e3b4..c16b1cff1 100644 --- a/vendor/github.com/minio/minio-go/api-compose-object.go +++ b/vendor/github.com/minio/minio-go/api-compose-object.go @@ -19,7 +19,6 @@ package minio import ( "context" - "encoding/base64" "fmt" "net/http" "net/url" @@ -27,58 +26,15 @@ import ( "strings" "time" + "github.com/minio/minio-go/pkg/encrypt" "github.com/minio/minio-go/pkg/s3utils" ) -// SSEInfo - represents Server-Side-Encryption parameters specified by -// a user. -type SSEInfo struct { - key []byte - algo string -} - -// NewSSEInfo - specifies (binary or un-encoded) encryption key and -// algorithm name. If algo is empty, it defaults to "AES256". 
Ref: -// https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html -func NewSSEInfo(key []byte, algo string) SSEInfo { - if algo == "" { - algo = "AES256" - } - return SSEInfo{key, algo} -} - -// internal method that computes SSE-C headers -func (s *SSEInfo) getSSEHeaders(isCopySource bool) map[string]string { - if s == nil { - return nil - } - - cs := "" - if isCopySource { - cs = "copy-source-" - } - return map[string]string{ - "x-amz-" + cs + "server-side-encryption-customer-algorithm": s.algo, - "x-amz-" + cs + "server-side-encryption-customer-key": base64.StdEncoding.EncodeToString(s.key), - "x-amz-" + cs + "server-side-encryption-customer-key-MD5": sumMD5Base64(s.key), - } -} - -// GetSSEHeaders - computes and returns headers for SSE-C as key-value -// pairs. They can be set as metadata in PutObject* requests (for -// encryption) or be set as request headers in `Core.GetObject` (for -// decryption). -func (s *SSEInfo) GetSSEHeaders() map[string]string { - return s.getSSEHeaders(false) -} - // DestinationInfo - type with information about the object to be // created via server-side copy requests, using the Compose API. type DestinationInfo struct { bucket, object string - - // key for encrypting destination - encryption *SSEInfo + encryption encrypt.ServerSide // if no user-metadata is provided, it is copied from source // (when there is only once source object in the compose @@ -97,9 +53,7 @@ type DestinationInfo struct { // if needed. If nil is passed, and if only a single source (of any // size) is provided in the ComposeObject call, then metadata from the // source is copied to the destination. -func NewDestinationInfo(bucket, object string, encryptSSEC *SSEInfo, - userMeta map[string]string) (d DestinationInfo, err error) { - +func NewDestinationInfo(bucket, object string, sse encrypt.ServerSide, userMeta map[string]string) (d DestinationInfo, err error) { // Input validation. if err = s3utils.CheckValidBucketName(bucket); err != nil { return d, err @@ -125,7 +79,7 @@ func NewDestinationInfo(bucket, object string, encryptSSEC *SSEInfo, return DestinationInfo{ bucket: bucket, object: object, - encryption: encryptSSEC, + encryption: sse, userMetadata: m, }, nil } @@ -154,10 +108,8 @@ func (d *DestinationInfo) getUserMetaHeadersMap(withCopyDirectiveHeader bool) ma // server-side copying APIs. type SourceInfo struct { bucket, object string - - start, end int64 - - decryptKey *SSEInfo + start, end int64 + encryption encrypt.ServerSide // Headers to send with the upload-part-copy request involving // this source object. Headers http.Header @@ -169,12 +121,12 @@ type SourceInfo struct { // `decryptSSEC` is the decryption key using server-side-encryption // with customer provided key. It may be nil if the source is not // encrypted. 
-func NewSourceInfo(bucket, object string, decryptSSEC *SSEInfo) SourceInfo { +func NewSourceInfo(bucket, object string, sse encrypt.ServerSide) SourceInfo { r := SourceInfo{ bucket: bucket, object: object, start: -1, // range is unspecified by default - decryptKey: decryptSSEC, + encryption: sse, Headers: make(http.Header), } @@ -182,8 +134,8 @@ func NewSourceInfo(bucket, object string, decryptSSEC *SSEInfo) SourceInfo { r.Headers.Set("x-amz-copy-source", s3utils.EncodePath(bucket+"/"+object)) // Assemble decryption headers for upload-part-copy request - for k, v := range decryptSSEC.getSSEHeaders(true) { - r.Headers.Set(k, v) + if r.encryption != nil { + encrypt.SSECopy(r.encryption).Marshal(r.Headers) } return r @@ -245,10 +197,7 @@ func (s *SourceInfo) getProps(c Client) (size int64, etag string, userMeta map[s // Get object info - need size and etag here. Also, decryption // headers are added to the stat request if given. var objInfo ObjectInfo - opts := StatObjectOptions{} - for k, v := range s.decryptKey.getSSEHeaders(false) { - opts.Set(k, v) - } + opts := StatObjectOptions{GetObjectOptions{ServerSideEncryption: s.encryption}} objInfo, err = c.statObject(context.Background(), s.bucket, s.object, opts) if err != nil { err = ErrInvalidArgument(fmt.Sprintf("Could not stat object - %s/%s: %v", s.bucket, s.object, err)) @@ -476,12 +425,12 @@ func (c Client) ComposeObject(dst DestinationInfo, srcs []SourceInfo) error { // Single source object case (i.e. when only one source is // involved, it is being copied wholly and at most 5GiB in - // size). - if totalParts == 1 && srcs[0].start == -1 && totalSize <= maxPartSize { + // size, emptyfiles are also supported). + if (totalParts == 1 && srcs[0].start == -1 && totalSize <= maxPartSize) || (totalSize == 0) { h := srcs[0].Headers // Add destination encryption headers - for k, v := range dst.encryption.getSSEHeaders(false) { - h.Set(k, v) + if dst.encryption != nil { + dst.encryption.Marshal(h) } // If no user metadata is specified (and so, the @@ -527,7 +476,8 @@ func (c Client) ComposeObject(dst DestinationInfo, srcs []SourceInfo) error { for k, v := range metaMap { metaHeaders[k] = v } - uploadID, err := c.newUploadID(ctx, dst.bucket, dst.object, PutObjectOptions{UserMetadata: metaHeaders}) + + uploadID, err := c.newUploadID(ctx, dst.bucket, dst.object, PutObjectOptions{ServerSideEncryption: dst.encryption, UserMetadata: metaHeaders}) if err != nil { return err } @@ -538,8 +488,8 @@ func (c Client) ComposeObject(dst DestinationInfo, srcs []SourceInfo) error { for i, src := range srcs { h := src.Headers // Add destination encryption headers - for k, v := range dst.encryption.getSSEHeaders(false) { - h.Set(k, v) + if dst.encryption != nil { + dst.encryption.Marshal(h) } // calculate start/end indices of parts after diff --git a/vendor/github.com/minio/minio-go/api-get-object-file.go b/vendor/github.com/minio/minio-go/api-get-object-file.go index 2b58220a6..a852220a2 100644 --- a/vendor/github.com/minio/minio-go/api-get-object-file.go +++ b/vendor/github.com/minio/minio-go/api-get-object-file.go @@ -18,14 +18,11 @@ package minio import ( + "context" "io" "os" "path/filepath" - "github.com/minio/minio-go/pkg/encrypt" - - "context" - "github.com/minio/minio-go/pkg/s3utils" ) @@ -40,14 +37,6 @@ func (c Client) FGetObject(bucketName, objectName, filePath string, opts GetObje return c.fGetObjectWithContext(context.Background(), bucketName, objectName, filePath, opts) } -// FGetEncryptedObject - Decrypt and store an object at filePath. 
-func (c Client) FGetEncryptedObject(bucketName, objectName, filePath string, materials encrypt.Materials) error { - if materials == nil { - return ErrInvalidArgument("Unable to recognize empty encryption properties") - } - return c.FGetObject(bucketName, objectName, filePath, GetObjectOptions{Materials: materials}) -} - // fGetObjectWithContext - fgetObject wrapper function with context func (c Client) fGetObjectWithContext(ctx context.Context, bucketName, objectName, filePath string, opts GetObjectOptions) error { // Input validation. diff --git a/vendor/github.com/minio/minio-go/api-get-object.go b/vendor/github.com/minio/minio-go/api-get-object.go index 50bbc2201..0bf556ec6 100644 --- a/vendor/github.com/minio/minio-go/api-get-object.go +++ b/vendor/github.com/minio/minio-go/api-get-object.go @@ -27,20 +27,9 @@ import ( "sync" "time" - "github.com/minio/minio-go/pkg/encrypt" "github.com/minio/minio-go/pkg/s3utils" ) -// GetEncryptedObject deciphers and streams data stored in the server after applying a specified encryption materials, -// returned stream should be closed by the caller. -func (c Client) GetEncryptedObject(bucketName, objectName string, encryptMaterials encrypt.Materials) (io.ReadCloser, error) { - if encryptMaterials == nil { - return nil, ErrInvalidArgument("Unable to recognize empty encryption properties") - } - - return c.GetObject(bucketName, objectName, GetObjectOptions{Materials: encryptMaterials}) -} - // GetObject - returns an seekable, readable object. func (c Client) GetObject(bucketName, objectName string, opts GetObjectOptions) (*Object, error) { return c.getObjectWithContext(context.Background(), bucketName, objectName, opts) @@ -127,6 +116,9 @@ func (c Client) getObjectWithContext(ctx context.Context, bucketName, objectName } else { // First request is a Stat or Seek call. // Only need to run a StatObject until an actual Read or ReadAt request comes through. + + // Remove range header if already set, for stat Operations to get original file size. + delete(opts.headers, "Range") objectInfo, err = c.statObject(ctx, bucketName, objectName, StatObjectOptions{opts}) if err != nil { resCh <- getResponse{ @@ -142,6 +134,8 @@ func (c Client) getObjectWithContext(ctx context.Context, bucketName, objectName } } } else if req.settingObjectInfo { // Request is just to get objectInfo. + // Remove range header if already set, for stat Operations to get original file size. + delete(opts.headers, "Range") if etag != "" { opts.SetMatchETag(etag) } @@ -381,13 +375,11 @@ func (o *Object) Stat() (ObjectInfo, error) { // This is the first request. if !o.isStarted || !o.objectInfoSet { - statReq := getRequest{ + // Send the request and get the response. + _, err := o.doGetRequest(getRequest{ isFirstReq: !o.isStarted, settingObjectInfo: !o.objectInfoSet, - } - - // Send the request and get the response. - _, err := o.doGetRequest(statReq) + }) if err != nil { o.prevErr = err return ObjectInfo{}, err @@ -493,7 +485,7 @@ func (o *Object) Seek(offset int64, whence int) (n int64, err error) { // Negative offset is valid for whence of '2'. if offset < 0 && whence != 2 { - return 0, ErrInvalidArgument(fmt.Sprintf("Negative position not allowed for %d.", whence)) + return 0, ErrInvalidArgument(fmt.Sprintf("Negative position not allowed for %d", whence)) } // This is the first request. 
So before anything else @@ -662,15 +654,6 @@ func (c Client) getObject(ctx context.Context, bucketName, objectName string, op Metadata: extractObjMetadata(resp.Header), } - reader := resp.Body - if opts.Materials != nil { - err = opts.Materials.SetupDecryptMode(reader, objectStat.Metadata.Get(amzHeaderIV), objectStat.Metadata.Get(amzHeaderKey)) - if err != nil { - return nil, ObjectInfo{}, err - } - reader = opts.Materials - } - // do not close body here, caller will close - return reader, objectStat, nil + return resp.Body, objectStat, nil } diff --git a/vendor/github.com/minio/minio-go/api-get-options.go b/vendor/github.com/minio/minio-go/api-get-options.go index dd70415cd..990aa37f6 100644 --- a/vendor/github.com/minio/minio-go/api-get-options.go +++ b/vendor/github.com/minio/minio-go/api-get-options.go @@ -28,9 +28,8 @@ import ( // GetObjectOptions are used to specify additional headers or options // during GET requests. type GetObjectOptions struct { - headers map[string]string - - Materials encrypt.Materials + headers map[string]string + ServerSideEncryption encrypt.ServerSide } // StatObjectOptions are used to specify additional headers or options @@ -45,6 +44,9 @@ func (o GetObjectOptions) Header() http.Header { for k, v := range o.headers { headers.Set(k, v) } + if o.ServerSideEncryption != nil { + o.ServerSideEncryption.Marshal(headers) + } return headers } diff --git a/vendor/github.com/minio/minio-go/api-get-policy.go b/vendor/github.com/minio/minio-go/api-get-policy.go index a4259c9d7..c9d3e22d7 100644 --- a/vendor/github.com/minio/minio-go/api-get-policy.go +++ b/vendor/github.com/minio/minio-go/api-get-policy.go @@ -65,7 +65,7 @@ func (c Client) ListBucketPolicies(bucketName, objectPrefix string) (bucketPolic } return map[string]policy.BucketPolicy{}, err } - return policy.GetPolicies(policyInfo.Statements, bucketName), nil + return policy.GetPolicies(policyInfo.Statements, bucketName, objectPrefix), nil } // Default empty bucket access policy. diff --git a/vendor/github.com/minio/minio-go/api-notification.go b/vendor/github.com/minio/minio-go/api-notification.go index 578fdea8e..1c01e362b 100644 --- a/vendor/github.com/minio/minio-go/api-notification.go +++ b/vendor/github.com/minio/minio-go/api-notification.go @@ -205,13 +205,11 @@ func (c Client) ListenBucketNotification(bucketName, prefix, suffix string, even if err = json.Unmarshal(bio.Bytes(), ¬ificationInfo); err != nil { continue } - // Send notifications on channel only if there are events received. - if len(notificationInfo.Records) > 0 { - select { - case notificationInfoCh <- notificationInfo: - case <-doneCh: - return - } + // Send notificationInfo + select { + case notificationInfoCh <- notificationInfo: + case <-doneCh: + return } } // Look for any underlying errors. 
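
For context on the api-notification.go hunk just above: after this change, `ListenBucketNotification` forwards every received `NotificationInfo`, including entries whose `Records` slice is empty (previously those were dropped). A minimal consumer sketch follows; it is an illustration only, not part of the patch. The endpoint and credentials reuse the play.minio.io values from the Makefile above, and the bucket name and event filter are placeholders.

```go
package main

import (
	"log"

	minio "github.com/minio/minio-go"
)

func main() {
	// Placeholder endpoint/credentials (public play.minio.io values from the Makefile).
	client, err := minio.New("play.minio.io:9000", "Q3AM3UQ867SPQQA43P2F",
		"zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", true)
	if err != nil {
		log.Fatalln(err)
	}

	doneCh := make(chan struct{})
	defer close(doneCh)

	// Listen for object-created events on a placeholder bucket.
	for info := range client.ListenBucketNotification("my-bucket", "", "",
		[]string{"s3:ObjectCreated:*"}, doneCh) {
		// Errors from the stream still arrive on the same channel.
		if info.Err != nil {
			log.Println("notification error:", info.Err)
			continue
		}
		// With this update, entries with an empty Records slice are forwarded too,
		// so consumers should tolerate them rather than assume at least one record.
		for _, record := range info.Records {
			log.Println(record.EventName, record.S3.Object.Key)
		}
	}
}
```
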
diff --git a/vendor/github.com/minio/minio-go/api-presigned.go b/vendor/github.com/minio/minio-go/api-presigned.go index 8b0258948..a2c060786 100644 --- a/vendor/github.com/minio/minio-go/api-presigned.go +++ b/vendor/github.com/minio/minio-go/api-presigned.go @@ -119,7 +119,9 @@ func (c Client) PresignedPostPolicy(p *PostPolicy) (u *url.URL, formData map[str return nil, nil, err } - u, err = c.makeTargetURL(bucketName, "", location, nil) + isVirtualHost := c.isVirtualHostStyleRequest(*c.endpointURL, bucketName) + + u, err = c.makeTargetURL(bucketName, "", location, isVirtualHost, nil) if err != nil { return nil, nil, err } diff --git a/vendor/github.com/minio/minio-go/api-put-object-context.go b/vendor/github.com/minio/minio-go/api-put-object-context.go index a6f23dcaa..ff4663e2f 100644 --- a/vendor/github.com/minio/minio-go/api-put-object-context.go +++ b/vendor/github.com/minio/minio-go/api-put-object-context.go @@ -29,11 +29,5 @@ func (c Client) PutObjectWithContext(ctx context.Context, bucketName, objectName if err != nil { return 0, err } - if opts.EncryptMaterials != nil { - if err = opts.EncryptMaterials.SetupEncryptMode(reader); err != nil { - return 0, err - } - return c.putObjectMultipartStreamNoLength(ctx, bucketName, objectName, opts.EncryptMaterials, opts) - } return c.putObjectCommon(ctx, bucketName, objectName, reader, objectSize, opts) } diff --git a/vendor/github.com/minio/minio-go/api-put-object-encrypted.go b/vendor/github.com/minio/minio-go/api-put-object-encrypted.go deleted file mode 100644 index 87dd1ab1a..000000000 --- a/vendor/github.com/minio/minio-go/api-put-object-encrypted.go +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2017 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "context" - "io" - - "github.com/minio/minio-go/pkg/encrypt" -) - -// PutEncryptedObject - Encrypt and store object. -func (c Client) PutEncryptedObject(bucketName, objectName string, reader io.Reader, encryptMaterials encrypt.Materials) (n int64, err error) { - - if encryptMaterials == nil { - return 0, ErrInvalidArgument("Unable to recognize empty encryption properties") - } - - if err := encryptMaterials.SetupEncryptMode(reader); err != nil { - return 0, err - } - - return c.PutObjectWithContext(context.Background(), bucketName, objectName, reader, -1, PutObjectOptions{EncryptMaterials: encryptMaterials}) -} - -// FPutEncryptedObject - Encrypt and store an object with contents from file at filePath. 
-func (c Client) FPutEncryptedObject(bucketName, objectName, filePath string, encryptMaterials encrypt.Materials) (n int64, err error) { - return c.FPutObjectWithContext(context.Background(), bucketName, objectName, filePath, PutObjectOptions{EncryptMaterials: encryptMaterials}) -} diff --git a/vendor/github.com/minio/minio-go/api-put-object-multipart.go b/vendor/github.com/minio/minio-go/api-put-object-multipart.go index f5b8893e6..8805ecf96 100644 --- a/vendor/github.com/minio/minio-go/api-put-object-multipart.go +++ b/vendor/github.com/minio/minio-go/api-put-object-multipart.go @@ -33,6 +33,7 @@ import ( "strconv" "strings" + "github.com/minio/minio-go/pkg/encrypt" "github.com/minio/minio-go/pkg/s3utils" ) @@ -138,7 +139,7 @@ func (c Client) putObjectMultipartNoStream(ctx context.Context, bucketName, obje // Proceed to upload the part. var objPart ObjectPart objPart, err = c.uploadPart(ctx, bucketName, objectName, uploadID, rd, partNumber, - md5Base64, sha256Hex, int64(length), opts.UserMetadata) + md5Base64, sha256Hex, int64(length), opts.ServerSideEncryption) if err != nil { return totalUploadedSize, err } @@ -226,11 +227,9 @@ func (c Client) initiateMultipartUpload(ctx context.Context, bucketName, objectN return initiateMultipartUploadResult, nil } -const serverEncryptionKeyPrefix = "x-amz-server-side-encryption" - // uploadPart - Uploads a part in a multipart upload. func (c Client) uploadPart(ctx context.Context, bucketName, objectName, uploadID string, reader io.Reader, - partNumber int, md5Base64, sha256Hex string, size int64, metadata map[string]string) (ObjectPart, error) { + partNumber int, md5Base64, sha256Hex string, size int64, sse encrypt.ServerSide) (ObjectPart, error) { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { return ObjectPart{}, err @@ -260,12 +259,8 @@ func (c Client) uploadPart(ctx context.Context, bucketName, objectName, uploadID // Set encryption headers, if any. 
customHeader := make(http.Header) - for k, v := range metadata { - if len(v) > 0 { - if strings.HasPrefix(strings.ToLower(k), serverEncryptionKeyPrefix) { - customHeader.Set(k, v) - } - } + if sse != nil { + sse.Marshal(customHeader) } reqMetadata := requestMetadata{ diff --git a/vendor/github.com/minio/minio-go/api-put-object-streaming.go b/vendor/github.com/minio/minio-go/api-put-object-streaming.go index be1dc57ef..211d1c23c 100644 --- a/vendor/github.com/minio/minio-go/api-put-object-streaming.go +++ b/vendor/github.com/minio/minio-go/api-put-object-streaming.go @@ -167,7 +167,7 @@ func (c Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketNa var objPart ObjectPart objPart, err = c.uploadPart(ctx, bucketName, objectName, uploadID, sectionReader, uploadReq.PartNum, - "", "", partSize, opts.UserMetadata) + "", "", partSize, opts.ServerSideEncryption) if err != nil { uploadedPartsCh <- uploadedPartRes{ Size: 0, @@ -280,7 +280,7 @@ func (c Client) putObjectMultipartStreamNoChecksum(ctx context.Context, bucketNa var objPart ObjectPart objPart, err = c.uploadPart(ctx, bucketName, objectName, uploadID, io.LimitReader(hookReader, partSize), - partNumber, "", "", partSize, opts.UserMetadata) + partNumber, "", "", partSize, opts.ServerSideEncryption) if err != nil { return totalUploadedSize, err } diff --git a/vendor/github.com/minio/minio-go/api-put-object.go b/vendor/github.com/minio/minio-go/api-put-object.go index 6d90eab74..530578277 100644 --- a/vendor/github.com/minio/minio-go/api-put-object.go +++ b/vendor/github.com/minio/minio-go/api-put-object.go @@ -33,15 +33,16 @@ import ( // PutObjectOptions represents options specified by user for PutObject call type PutObjectOptions struct { - UserMetadata map[string]string - Progress io.Reader - ContentType string - ContentEncoding string - ContentDisposition string - CacheControl string - EncryptMaterials encrypt.Materials - NumThreads uint - StorageClass string + UserMetadata map[string]string + Progress io.Reader + ContentType string + ContentEncoding string + ContentDisposition string + ContentLanguage string + CacheControl string + ServerSideEncryption encrypt.ServerSide + NumThreads uint + StorageClass string } // getNumThreads - gets the number of threads to be used in the multipart @@ -71,19 +72,20 @@ func (opts PutObjectOptions) Header() (header http.Header) { if opts.ContentDisposition != "" { header["Content-Disposition"] = []string{opts.ContentDisposition} } + if opts.ContentLanguage != "" { + header["Content-Language"] = []string{opts.ContentLanguage} + } if opts.CacheControl != "" { header["Cache-Control"] = []string{opts.CacheControl} } - if opts.EncryptMaterials != nil { - header[amzHeaderIV] = []string{opts.EncryptMaterials.GetIV()} - header[amzHeaderKey] = []string{opts.EncryptMaterials.GetKey()} - header[amzHeaderMatDesc] = []string{opts.EncryptMaterials.GetDesc()} + if opts.ServerSideEncryption != nil { + opts.ServerSideEncryption.Marshal(header) } if opts.StorageClass != "" { header[amzStorageClass] = []string{opts.StorageClass} } for k, v := range opts.UserMetadata { - if !isAmzHeader(k) && !isStandardHeader(k) && !isSSEHeader(k) && !isStorageClassHeader(k) { + if !isAmzHeader(k) && !isStandardHeader(k) && !isStorageClassHeader(k) { header["X-Amz-Meta-"+k] = []string{v} } else { header[k] = []string{v} @@ -92,11 +94,10 @@ func (opts PutObjectOptions) Header() (header http.Header) { return } -// validate() checks if the UserMetadata map has standard headers or client side -// encryption headers and raises 
an error if so. +// validate() checks if the UserMetadata map has standard headers or and raises an error if so. func (opts PutObjectOptions) validate() (err error) { for k, v := range opts.UserMetadata { - if !httplex.ValidHeaderFieldName(k) || isStandardHeader(k) || isCSEHeader(k) || isStorageClassHeader(k) { + if !httplex.ValidHeaderFieldName(k) || isStandardHeader(k) || isSSEHeader(k) || isStorageClassHeader(k) { return ErrInvalidArgument(k + " unsupported user defined metadata name") } if !httplex.ValidHeaderFieldValue(v) { @@ -217,7 +218,7 @@ func (c Client) putObjectMultipartStreamNoLength(ctx context.Context, bucketName // Proceed to upload the part. var objPart ObjectPart objPart, err = c.uploadPart(ctx, bucketName, objectName, uploadID, rd, partNumber, - "", "", int64(length), opts.UserMetadata) + "", "", int64(length), opts.ServerSideEncryption) if err != nil { return totalUploadedSize, err } diff --git a/vendor/github.com/minio/minio-go/api-put-object_test.go b/vendor/github.com/minio/minio-go/api-put-object_test.go index 2b8c1e2bb..d96abab9a 100644 --- a/vendor/github.com/minio/minio-go/api-put-object_test.go +++ b/vendor/github.com/minio/minio-go/api-put-object_test.go @@ -40,6 +40,7 @@ func TestPutObjectOptionsValidate(t *testing.T) { {"Content-Encoding", "gzip", false}, {"Cache-Control", "blah", false}, {"Content-Disposition", "something", false}, + {"Content-Language", "somelanguage", false}, // Valid metadata names. {"my-custom-header", "blah", true}, diff --git a/vendor/github.com/minio/minio-go/api-remove.go b/vendor/github.com/minio/minio-go/api-remove.go index f14b2eb7f..824d6ee87 100644 --- a/vendor/github.com/minio/minio-go/api-remove.go +++ b/vendor/github.com/minio/minio-go/api-remove.go @@ -129,10 +129,8 @@ func processRemoveMultiObjectsResponse(body io.Reader, objects []string, errorCh } } -// RemoveObjects remove multiples objects from a bucket. -// The list of objects to remove are received from objectsCh. -// Remove failures are sent back via error channel. -func (c Client) RemoveObjects(bucketName string, objectsCh <-chan string) <-chan RemoveObjectError { +// RemoveObjectsWithContext - Identical to RemoveObjects call, but accepts context to facilitate request cancellation. +func (c Client) RemoveObjectsWithContext(ctx context.Context, bucketName string, objectsCh <-chan string) <-chan RemoveObjectError { errorCh := make(chan RemoveObjectError, 1) // Validate if bucket name is valid. @@ -189,7 +187,7 @@ func (c Client) RemoveObjects(bucketName string, objectsCh <-chan string) <-chan // Generate remove multi objects XML request removeBytes := generateRemoveMultiObjectsRequest(batch) // Execute GET on bucket to list objects. - resp, err := c.executeMethod(context.Background(), "POST", requestMetadata{ + resp, err := c.executeMethod(ctx, "POST", requestMetadata{ bucketName: bucketName, queryValues: urlValues, contentBody: bytes.NewReader(removeBytes), @@ -213,6 +211,13 @@ func (c Client) RemoveObjects(bucketName string, objectsCh <-chan string) <-chan return errorCh } +// RemoveObjects removes multiple objects from a bucket. +// The list of objects to remove are received from objectsCh. +// Remove failures are sent back via error channel. +func (c Client) RemoveObjects(bucketName string, objectsCh <-chan string) <-chan RemoveObjectError { + return c.RemoveObjectsWithContext(context.Background(), bucketName, objectsCh) +} + // RemoveIncompleteUpload aborts an partially uploaded object. 
func (c Client) RemoveIncompleteUpload(bucketName, objectName string) error { // Input validation. diff --git a/vendor/github.com/minio/minio-go/api-stat.go b/vendor/github.com/minio/minio-go/api-stat.go index 8904dd678..5356f8a4f 100644 --- a/vendor/github.com/minio/minio-go/api-stat.go +++ b/vendor/github.com/minio/minio-go/api-stat.go @@ -115,7 +115,7 @@ func (c Client) statObject(ctx context.Context, bucketName, objectName string, o return ObjectInfo{}, err } if resp != nil { - if resp.StatusCode != http.StatusOK { + if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusPartialContent { return ObjectInfo{}, httpRespToErrorResponse(resp, bucketName, objectName) } } diff --git a/vendor/github.com/minio/minio-go/api.go b/vendor/github.com/minio/minio-go/api.go index 681853849..2288b5eac 100644 --- a/vendor/github.com/minio/minio-go/api.go +++ b/vendor/github.com/minio/minio-go/api.go @@ -1,6 +1,6 @@ /* * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 Minio, Inc. + * Copyright 2015-2018 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -81,12 +81,25 @@ type Client struct { // Random seed. random *rand.Rand + + // lookup indicates type of url lookup supported by server. If not specified, + // default to Auto. + lookup BucketLookupType +} + +// Options for New method +type Options struct { + Creds *credentials.Credentials + Secure bool + Region string + BucketLookup BucketLookupType + // Add future fields here } // Global constants. const ( libraryName = "minio-go" - libraryVersion = "4.0.6" + libraryVersion = "5.0.0" ) // User Agent should always following the below style. @@ -98,11 +111,21 @@ const ( libraryUserAgent = libraryUserAgentPrefix + libraryName + "/" + libraryVersion ) +// BucketLookupType is type of url lookup supported by server. +type BucketLookupType int + +// Different types of url lookup supported by the server.Initialized to BucketLookupAuto +const ( + BucketLookupAuto BucketLookupType = iota + BucketLookupDNS + BucketLookupPath +) + // NewV2 - instantiate minio client with Amazon S3 signature version // '2' compatibility. func NewV2(endpoint string, accessKeyID, secretAccessKey string, secure bool) (*Client, error) { creds := credentials.NewStaticV2(accessKeyID, secretAccessKey, "") - clnt, err := privateNew(endpoint, creds, secure, "") + clnt, err := privateNew(endpoint, creds, secure, "", BucketLookupAuto) if err != nil { return nil, err } @@ -114,7 +137,7 @@ func NewV2(endpoint string, accessKeyID, secretAccessKey string, secure bool) (* // '4' compatibility. func NewV4(endpoint string, accessKeyID, secretAccessKey string, secure bool) (*Client, error) { creds := credentials.NewStaticV4(accessKeyID, secretAccessKey, "") - clnt, err := privateNew(endpoint, creds, secure, "") + clnt, err := privateNew(endpoint, creds, secure, "", BucketLookupAuto) if err != nil { return nil, err } @@ -125,7 +148,7 @@ func NewV4(endpoint string, accessKeyID, secretAccessKey string, secure bool) (* // New - instantiate minio client, adds automatic verification of signature. 
func New(endpoint, accessKeyID, secretAccessKey string, secure bool) (*Client, error) { creds := credentials.NewStaticV4(accessKeyID, secretAccessKey, "") - clnt, err := privateNew(endpoint, creds, secure, "") + clnt, err := privateNew(endpoint, creds, secure, "", BucketLookupAuto) if err != nil { return nil, err } @@ -144,7 +167,7 @@ func New(endpoint, accessKeyID, secretAccessKey string, secure bool) (*Client, e // for retrieving credentials from various credentials provider such as // IAM, File, Env etc. func NewWithCredentials(endpoint string, creds *credentials.Credentials, secure bool, region string) (*Client, error) { - return privateNew(endpoint, creds, secure, region) + return privateNew(endpoint, creds, secure, region, BucketLookupAuto) } // NewWithRegion - instantiate minio client, with region configured. Unlike New(), @@ -152,7 +175,12 @@ func NewWithCredentials(endpoint string, creds *credentials.Credentials, secure // Use this function when if your application deals with single region. func NewWithRegion(endpoint, accessKeyID, secretAccessKey string, secure bool, region string) (*Client, error) { creds := credentials.NewStaticV4(accessKeyID, secretAccessKey, "") - return privateNew(endpoint, creds, secure, region) + return privateNew(endpoint, creds, secure, region, BucketLookupAuto) +} + +// NewWithOptions - instantiate minio client with options +func NewWithOptions(endpoint string, opts *Options) (*Client, error) { + return privateNew(endpoint, opts.Creds, opts.Secure, opts.Region, opts.BucketLookup) } // lockedRandSource provides protected rand source, implements rand.Source interface. @@ -230,8 +258,7 @@ func (c *Client) redirectHeaders(req *http.Request, via []*http.Request) error { } switch { case signerType.IsV2(): - // Add signature version '2' authorization header. - req = s3signer.SignV2(*req, accessKeyID, secretAccessKey) + return errors.New("signature V2 cannot support redirection") case signerType.IsV4(): req = s3signer.SignV4(*req, accessKeyID, secretAccessKey, sessionToken, getDefaultLocation(*c.endpointURL, region)) } @@ -239,7 +266,7 @@ func (c *Client) redirectHeaders(req *http.Request, via []*http.Request) error { return nil } -func privateNew(endpoint string, creds *credentials.Credentials, secure bool, region string) (*Client, error) { +func privateNew(endpoint string, creds *credentials.Credentials, secure bool, region string, lookup BucketLookupType) (*Client, error) { // construct endpoint. endpointURL, err := getEndpointURL(endpoint, secure) if err != nil { @@ -260,7 +287,7 @@ func privateNew(endpoint string, creds *credentials.Credentials, secure bool, re // Instantiate http client and bucket location cache. clnt.httpClient = &http.Client{ - Transport: defaultMinioTransport, + Transport: DefaultTransport, CheckRedirect: clnt.redirectHeaders, } @@ -276,6 +303,9 @@ func privateNew(endpoint string, creds *credentials.Credentials, secure bool, re // Introduce a new locked random seed. clnt.random = rand.New(&lockedRandSource{src: rand.NewSource(time.Now().UTC().UnixNano())}) + // Sets bucket lookup style, whether server accepts DNS or Path lookup. Default is Auto - determined + // by the SDK. When Auto is specified, DNS lookup is used for Amazon/Google cloud endpoints and Path for all other endpoints. + clnt.lookup = lookup // Return. 
return clnt, nil } @@ -307,7 +337,7 @@ func (c *Client) SetCustomTransport(customHTTPTransport http.RoundTripper) { // TLSClientConfig: &tls.Config{RootCAs: pool}, // DisableCompression: true, // } - // api.SetTransport(tr) + // api.SetCustomTransport(tr) // if c.httpClient != nil { c.httpClient.Transport = customHTTPTransport @@ -663,8 +693,11 @@ func (c Client) newRequest(method string, metadata requestMetadata) (req *http.R } } + // Look if target url supports virtual host. + isVirtualHost := c.isVirtualHostStyleRequest(*c.endpointURL, metadata.bucketName) + // Construct a new target URL. - targetURL, err := c.makeTargetURL(metadata.bucketName, metadata.objectName, location, metadata.queryValues) + targetURL, err := c.makeTargetURL(metadata.bucketName, metadata.objectName, location, isVirtualHost, metadata.queryValues) if err != nil { return nil, err } @@ -706,7 +739,7 @@ func (c Client) newRequest(method string, metadata requestMetadata) (req *http.R } if signerType.IsV2() { // Presign URL with signature v2. - req = s3signer.PreSignV2(*req, accessKeyID, secretAccessKey, metadata.expires) + req = s3signer.PreSignV2(*req, accessKeyID, secretAccessKey, metadata.expires, isVirtualHost) } else if signerType.IsV4() { // Presign URL with signature v4. req = s3signer.PreSignV4(*req, accessKeyID, secretAccessKey, sessionToken, location, metadata.expires) @@ -752,7 +785,7 @@ func (c Client) newRequest(method string, metadata requestMetadata) (req *http.R switch { case signerType.IsV2(): // Add signature version '2' authorization header. - req = s3signer.SignV2(*req, accessKeyID, secretAccessKey) + req = s3signer.SignV2(*req, accessKeyID, secretAccessKey, isVirtualHost) case metadata.objectName != "" && method == "PUT" && metadata.customHeader.Get("X-Amz-Copy-Source") == "" && !c.secure: // Streaming signature is used by default for a PUT object request. Additionally we also // look if the initialized client is secure, if yes then we don't need to perform @@ -784,7 +817,7 @@ func (c Client) setUserAgent(req *http.Request) { } // makeTargetURL make a new target url. -func (c Client) makeTargetURL(bucketName, objectName, bucketLocation string, queryValues url.Values) (*url.URL, error) { +func (c Client) makeTargetURL(bucketName, objectName, bucketLocation string, isVirtualHostStyle bool, queryValues url.Values) (*url.URL, error) { host := c.endpointURL.Host // For Amazon S3 endpoint, try to fetch location based endpoint. if s3utils.IsAmazonEndpoint(*c.endpointURL) { @@ -823,9 +856,6 @@ func (c Client) makeTargetURL(bucketName, objectName, bucketLocation string, que // Make URL only if bucketName is available, otherwise use the // endpoint URL. if bucketName != "" { - // Save if target url will have buckets which suppport virtual host. - isVirtualHostStyle := s3utils.IsVirtualHostSupported(*c.endpointURL, bucketName) - // If endpoint supports virtual host style use that always. // Currently only S3 and Google Cloud Storage would support // virtual host style. @@ -850,3 +880,21 @@ func (c Client) makeTargetURL(bucketName, objectName, bucketLocation string, que return url.Parse(urlStr) } + +// returns true if virtual hosted style requests are to be used. +func (c *Client) isVirtualHostStyleRequest(url url.URL, bucketName string) bool { + if bucketName == "" { + return false + } + + if c.lookup == BucketLookupDNS { + return true + } + if c.lookup == BucketLookupPath { + return false + } + + // default to virtual only for Amazon/Google storage. 
In all other cases use + // path style requests + return s3utils.IsVirtualHostSupported(url, bucketName) +} diff --git a/vendor/github.com/minio/minio-go/api_unit_test.go b/vendor/github.com/minio/minio-go/api_unit_test.go index ee0b54f5c..d7822ab5e 100644 --- a/vendor/github.com/minio/minio-go/api_unit_test.go +++ b/vendor/github.com/minio/minio-go/api_unit_test.go @@ -190,7 +190,8 @@ func TestMakeTargetURL(t *testing.T) { for i, testCase := range testCases { // Initialize a Minio client c, _ := New(testCase.addr, "foo", "bar", testCase.secure) - u, err := c.makeTargetURL(testCase.bucketName, testCase.objectName, testCase.bucketLocation, testCase.queryValues) + isVirtualHost := c.isVirtualHostStyleRequest(*c.endpointURL, testCase.bucketName) + u, err := c.makeTargetURL(testCase.bucketName, testCase.objectName, testCase.bucketLocation, isVirtualHost, testCase.queryValues) // Check the returned error if testCase.expectedErr == nil && err != nil { t.Fatalf("Test %d: Should succeed but failed with err = %v", i+1, err) diff --git a/vendor/github.com/minio/minio-go/appveyor.yml b/vendor/github.com/minio/minio-go/appveyor.yml index b93b4d45d..aa9f840e5 100644 --- a/vendor/github.com/minio/minio-go/appveyor.yml +++ b/vendor/github.com/minio/minio-go/appveyor.yml @@ -19,6 +19,7 @@ install: - go get -u github.com/golang/lint/golint - go get -u github.com/remyoudompheng/go-misc/deadcode - go get -u github.com/gordonklaus/ineffassign + - go get -u golang.org/x/crypto/argon2 - go get -t ./... # to run your custom scripts instead of automatic MSBuild diff --git a/vendor/github.com/minio/minio-go/bucket-cache.go b/vendor/github.com/minio/minio-go/bucket-cache.go index 5d56cdf42..cac7ad792 100644 --- a/vendor/github.com/minio/minio-go/bucket-cache.go +++ b/vendor/github.com/minio/minio-go/bucket-cache.go @@ -203,7 +203,9 @@ func (c Client) getBucketLocationRequest(bucketName string) (*http.Request, erro } if signerType.IsV2() { - req = s3signer.SignV2(*req, accessKeyID, secretAccessKey) + // Get Bucket Location calls should be always path style + isVirtualHost := false + req = s3signer.SignV2(*req, accessKeyID, secretAccessKey, isVirtualHost) return req, nil } diff --git a/vendor/github.com/minio/minio-go/bucket-cache_test.go b/vendor/github.com/minio/minio-go/bucket-cache_test.go index fd7e7f344..507b40862 100644 --- a/vendor/github.com/minio/minio-go/bucket-cache_test.go +++ b/vendor/github.com/minio/minio-go/bucket-cache_test.go @@ -122,7 +122,7 @@ func TestGetBucketLocationRequest(t *testing.T) { req.Header.Set("X-Amz-Content-Sha256", contentSha256) req = s3signer.SignV4(*req, accessKeyID, secretAccessKey, sessionToken, "us-east-1") case signerType.IsV2(): - req = s3signer.SignV2(*req, accessKeyID, secretAccessKey) + req = s3signer.SignV2(*req, accessKeyID, secretAccessKey, false) } return req, nil diff --git a/vendor/github.com/minio/minio-go/bucket-notification.go b/vendor/github.com/minio/minio-go/bucket-notification.go index 1b9d6a0c7..ea303dd9d 100644 --- a/vendor/github.com/minio/minio-go/bucket-notification.go +++ b/vendor/github.com/minio/minio-go/bucket-notification.go @@ -19,7 +19,8 @@ package minio import ( "encoding/xml" - "reflect" + + "github.com/minio/minio-go/pkg/set" ) // NotificationEventType is a S3 notification event associated to the bucket notification configuration @@ -96,7 +97,7 @@ type NotificationConfig struct { // NewNotificationConfig creates one notification config and sets the given ARN func NewNotificationConfig(arn Arn) NotificationConfig { - return 
NotificationConfig{Arn: arn} + return NotificationConfig{Arn: arn, Filter: &Filter{}} } // AddEvents adds one event to the current notification config @@ -163,39 +164,79 @@ type BucketNotification struct { } // AddTopic adds a given topic config to the general bucket notification config -func (b *BucketNotification) AddTopic(topicConfig NotificationConfig) { +func (b *BucketNotification) AddTopic(topicConfig NotificationConfig) bool { newTopicConfig := TopicConfig{NotificationConfig: topicConfig, Topic: topicConfig.Arn.String()} for _, n := range b.TopicConfigs { - if reflect.DeepEqual(n, newTopicConfig) { - // Avoid adding duplicated entry - return + // If new config matches existing one + if n.Topic == newTopicConfig.Arn.String() && newTopicConfig.Filter == n.Filter { + + existingConfig := set.NewStringSet() + for _, v := range n.Events { + existingConfig.Add(string(v)) + } + + newConfig := set.NewStringSet() + for _, v := range topicConfig.Events { + newConfig.Add(string(v)) + } + + if !newConfig.Intersection(existingConfig).IsEmpty() { + return false + } } } b.TopicConfigs = append(b.TopicConfigs, newTopicConfig) + return true } // AddQueue adds a given queue config to the general bucket notification config -func (b *BucketNotification) AddQueue(queueConfig NotificationConfig) { +func (b *BucketNotification) AddQueue(queueConfig NotificationConfig) bool { newQueueConfig := QueueConfig{NotificationConfig: queueConfig, Queue: queueConfig.Arn.String()} for _, n := range b.QueueConfigs { - if reflect.DeepEqual(n, newQueueConfig) { - // Avoid adding duplicated entry - return + if n.Queue == newQueueConfig.Arn.String() && newQueueConfig.Filter == n.Filter { + + existingConfig := set.NewStringSet() + for _, v := range n.Events { + existingConfig.Add(string(v)) + } + + newConfig := set.NewStringSet() + for _, v := range queueConfig.Events { + newConfig.Add(string(v)) + } + + if !newConfig.Intersection(existingConfig).IsEmpty() { + return false + } } } b.QueueConfigs = append(b.QueueConfigs, newQueueConfig) + return true } // AddLambda adds a given lambda config to the general bucket notification config -func (b *BucketNotification) AddLambda(lambdaConfig NotificationConfig) { +func (b *BucketNotification) AddLambda(lambdaConfig NotificationConfig) bool { newLambdaConfig := LambdaConfig{NotificationConfig: lambdaConfig, Lambda: lambdaConfig.Arn.String()} for _, n := range b.LambdaConfigs { - if reflect.DeepEqual(n, newLambdaConfig) { - // Avoid adding duplicated entry - return + if n.Lambda == newLambdaConfig.Arn.String() && newLambdaConfig.Filter == n.Filter { + + existingConfig := set.NewStringSet() + for _, v := range n.Events { + existingConfig.Add(string(v)) + } + + newConfig := set.NewStringSet() + for _, v := range lambdaConfig.Events { + newConfig.Add(string(v)) + } + + if !newConfig.Intersection(existingConfig).IsEmpty() { + return false + } } } b.LambdaConfigs = append(b.LambdaConfigs, newLambdaConfig) + return true } // RemoveTopicByArn removes all topic configurations that match the exact specified ARN diff --git a/vendor/github.com/minio/minio-go/constants.go b/vendor/github.com/minio/minio-go/constants.go index 84b6cfdf3..7db5a99af 100644 --- a/vendor/github.com/minio/minio-go/constants.go +++ b/vendor/github.com/minio/minio-go/constants.go @@ -59,12 +59,5 @@ const ( iso8601DateFormat = "20060102T150405Z" ) -// Encryption headers stored along with the object. 
-const ( - amzHeaderIV = "X-Amz-Meta-X-Amz-Iv" - amzHeaderKey = "X-Amz-Meta-X-Amz-Key" - amzHeaderMatDesc = "X-Amz-Meta-X-Amz-Matdesc" -) - // Storage class header constant. const amzStorageClass = "X-Amz-Storage-Class" diff --git a/vendor/github.com/minio/minio-go/core.go b/vendor/github.com/minio/minio-go/core.go index 4245fc065..44475ecde 100644 --- a/vendor/github.com/minio/minio-go/core.go +++ b/vendor/github.com/minio/minio-go/core.go @@ -78,6 +78,8 @@ func (c Core) PutObject(bucket, object string, data io.Reader, size int64, md5Ba opts.ContentEncoding = v } else if strings.ToLower(k) == "content-disposition" { opts.ContentDisposition = v + } else if strings.ToLower(k) == "content-language" { + opts.ContentLanguage = v } else if strings.ToLower(k) == "content-type" { opts.ContentType = v } else if strings.ToLower(k) == "cache-control" { @@ -103,13 +105,7 @@ func (c Core) ListMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, de // PutObjectPart - Upload an object part. func (c Core) PutObjectPart(bucket, object, uploadID string, partID int, data io.Reader, size int64, md5Base64, sha256Hex string) (ObjectPart, error) { - return c.PutObjectPartWithMetadata(bucket, object, uploadID, partID, data, size, md5Base64, sha256Hex, nil) -} - -// PutObjectPartWithMetadata - upload an object part with additional request metadata. -func (c Core) PutObjectPartWithMetadata(bucket, object, uploadID string, partID int, data io.Reader, - size int64, md5Base64, sha256Hex string, metadata map[string]string) (ObjectPart, error) { - return c.uploadPart(context.Background(), bucket, object, uploadID, data, partID, md5Base64, sha256Hex, size, metadata) + return c.uploadPart(context.Background(), bucket, object, uploadID, data, partID, md5Base64, sha256Hex, size, nil) } // ListObjectParts - List uploaded parts of an incomplete upload.x diff --git a/vendor/github.com/minio/minio-go/docs/API.md b/vendor/github.com/minio/minio-go/docs/API.md index 33b8c5891..af86087cb 100644 --- a/vendor/github.com/minio/minio-go/docs/API.md +++ b/vendor/github.com/minio/minio-go/docs/API.md @@ -52,22 +52,23 @@ func main() { | Bucket operations | Object operations | Encrypted Object operations | Presigned operations | Bucket Policy/Notification Operations | Client custom settings | | :--- | :--- | :--- | :--- | :--- | :--- | -| [`MakeBucket`](#MakeBucket) | [`GetObject`](#GetObject) | [`NewSymmetricKey`](#NewSymmetricKey) | [`PresignedGetObject`](#PresignedGetObject) | [`SetBucketPolicy`](#SetBucketPolicy) | [`SetAppInfo`](#SetAppInfo) | -| [`ListBuckets`](#ListBuckets) | [`PutObject`](#PutObject) | [`NewAsymmetricKey`](#NewAsymmetricKey) | [`PresignedPutObject`](#PresignedPutObject) | [`GetBucketPolicy`](#GetBucketPolicy) | [`SetCustomTransport`](#SetCustomTransport) | -| [`BucketExists`](#BucketExists) | [`CopyObject`](#CopyObject) | [`GetEncryptedObject`](#GetEncryptedObject) | [`PresignedPostPolicy`](#PresignedPostPolicy) | [`ListBucketPolicies`](#ListBucketPolicies) | [`TraceOn`](#TraceOn) | -| [`RemoveBucket`](#RemoveBucket) | [`StatObject`](#StatObject) | [`PutEncryptedObject`](#PutEncryptedObject) | | [`SetBucketNotification`](#SetBucketNotification) | [`TraceOff`](#TraceOff) | -| [`ListObjects`](#ListObjects) | [`RemoveObject`](#RemoveObject) | [`NewSSEInfo`](#NewSSEInfo) | | [`GetBucketNotification`](#GetBucketNotification) | [`SetS3TransferAccelerate`](#SetS3TransferAccelerate) | -| [`ListObjectsV2`](#ListObjectsV2) | [`RemoveObjects`](#RemoveObjects) | [`FPutEncryptedObject`](#FPutEncryptedObject) | | 
[`RemoveAllBucketNotification`](#RemoveAllBucketNotification) | | +| [`MakeBucket`](#MakeBucket) | [`GetObject`](#GetObject) | [`GetObject`](#GetObject) | [`PresignedGetObject`](#PresignedGetObject) | [`SetBucketPolicy`](#SetBucketPolicy) | [`SetAppInfo`](#SetAppInfo) | +| [`ListBuckets`](#ListBuckets) | [`PutObject`](#PutObject) | [`PutObject`](#PutObject) | [`PresignedPutObject`](#PresignedPutObject) | [`GetBucketPolicy`](#GetBucketPolicy) | [`SetCustomTransport`](#SetCustomTransport) | +| [`BucketExists`](#BucketExists) | [`CopyObject`](#CopyObject) | [`CopyObject`](#CopyObject) | [`PresignedPostPolicy`](#PresignedPostPolicy) | [`ListBucketPolicies`](#ListBucketPolicies) | [`TraceOn`](#TraceOn) | +| [`RemoveBucket`](#RemoveBucket) | [`StatObject`](#StatObject) | [`StatObject`](#StatObject) | | [`SetBucketNotification`](#SetBucketNotification) | [`TraceOff`](#TraceOff) | +| [`ListObjects`](#ListObjects) | [`RemoveObject`](#RemoveObject) | | | [`GetBucketNotification`](#GetBucketNotification) | [`SetS3TransferAccelerate`](#SetS3TransferAccelerate) | +| [`ListObjectsV2`](#ListObjectsV2) | [`RemoveObjects`](#RemoveObjects) | | | [`RemoveAllBucketNotification`](#RemoveAllBucketNotification) | | | [`ListIncompleteUploads`](#ListIncompleteUploads) | [`RemoveIncompleteUpload`](#RemoveIncompleteUpload) | | | [`ListenBucketNotification`](#ListenBucketNotification) | | -| | [`FPutObject`](#FPutObject) | | | | | -| | [`FGetObject`](#FGetObject) | | | | | -| | [`ComposeObject`](#ComposeObject) | | | | | -| | [`NewSourceInfo`](#NewSourceInfo) | | | | | -| | [`NewDestinationInfo`](#NewDestinationInfo) | | | | | -| | [`PutObjectWithContext`](#PutObjectWithContext) | | | | -| | [`GetObjectWithContext`](#GetObjectWithContext) | | | | -| | [`FPutObjectWithContext`](#FPutObjectWithContext) | | | | -| | [`FGetObjectWithContext`](#FGetObjectWithContext) | | | | +| | [`FPutObject`](#FPutObject) | [`FPutObject`](#FPutObject) | | | | +| | [`FGetObject`](#FGetObject) | [`FGetObject`](#FGetObject) | | | | +| | [`ComposeObject`](#ComposeObject) | [`ComposeObject`](#ComposeObject) | | | | +| | [`NewSourceInfo`](#NewSourceInfo) | [`NewSourceInfo`](#NewSourceInfo) | | | | +| | [`NewDestinationInfo`](#NewDestinationInfo) | [`NewDestinationInfo`](#NewDestinationInfo) | | | | +| | [`PutObjectWithContext`](#PutObjectWithContext) | [`PutObjectWithContext`](#PutObjectWithContext) | | | +| | [`GetObjectWithContext`](#GetObjectWithContext) | [`GetObjectWithContext`](#GetObjectWithContext) | | | +| | [`FPutObjectWithContext`](#FPutObjectWithContext) | [`FPutObjectWithContext`](#FPutObjectWithContext) | | | +| | [`FGetObjectWithContext`](#FGetObjectWithContext) | [`FGetObjectWithContext`](#FGetObjectWithContext) | | | +| | [`RemoveObjectsWithContext`](#RemoveObjectsWithContext) | | | | ## 1. Constructor @@ -86,16 +87,27 @@ __Parameters__ ### NewWithRegion(endpoint, accessKeyID, secretAccessKey string, ssl bool, region string) (*Client, error) Initializes minio client, with region configured. Unlike New(), NewWithRegion avoids bucket-location lookup operations and it is slightly faster. Use this function when your application deals with a single region. +### NewWithOptions(endpoint string, options *Options) (*Client, error) +Initializes minio client with options configured. 
+ __Parameters__ |Param |Type |Description | |:---|:---| :---| |`endpoint` | _string_ |S3 compatible object storage endpoint | -|`accessKeyID` |_string_ |Access key for the object storage | -|`secretAccessKey` | _string_ |Secret key for the object storage | -|`ssl` | _bool_ | If 'true' API requests will be secure (HTTPS), and insecure (HTTP) otherwise | -|`region`| _string_ | Region for the object storage | +|`opts` |_minio.Options_ | Options for constructing a new client| +__minio.Options__ + +|Field | Type | Description | +|:--- |:--- | :--- | +| `opts.Creds` | _*credentials.Credentials_ | Access Credentials| +| `opts.Secure` | _bool_ | If 'true' API requests will be secure (HTTPS), and insecure (HTTP) otherwise | +| `opts.Region` | _string_ | region | +| `opts.BucketLookup` | _BucketLookupType_ | Bucket lookup type can be one of the following values | +| | | _minio.BucketLookupDNS_ | +| | | _minio.BucketLookupPath_ | +| | | _minio.BucketLookupAuto_ | ## 2. Bucket operations @@ -380,7 +392,7 @@ __minio.GetObjectOptions__ |Field | Type | Description | |:---|:---|:---| -| `opts.Materials` | _encrypt.Materials_ | Interface provided by `encrypt` package to encrypt a stream of data (For more information see https://godoc.org/github.com/minio/minio-go) | +| `opts.ServerSideEncryption` | _encrypt.ServerSide_ | Interface provided by `encrypt` package to specify server-side-encryption. (For more information see https://godoc.org/github.com/minio/minio-go) | __Return Value__ @@ -513,42 +525,6 @@ if err != nil { } ``` - -### FGetEncryptedObject(bucketName, objectName, filePath string, materials encrypt.Materials) error -Identical to FGetObject operation, but decrypts an encrypted request - -__Parameters__ - - -|Param |Type |Description | -|:---|:---| :---| -|`bucketName` | _string_ |Name of the bucket | -|`objectName` | _string_ |Name of the object | -|`filePath` | _string_ |Path to download object to | -|`materials` | _encrypt.Materials_ | Interface provided by `encrypt` package to encrypt a stream of data (For more information see https://godoc.org/github.com/minio/minio-go) | - - -__Example__ - - -```go -// Generate a master symmetric key -key := encrypt.NewSymmetricKey([]byte("my-secret-key-00")) - -// Build the CBC encryption material -cbcMaterials, err := encrypt.NewCBCSecureMaterials(key) -if err != nil { - fmt.Println(err) - return -} - -err = minioClient.FGetEncryptedObject("mybucket", "myobject", "/tmp/myobject", cbcMaterials) -if err != nil { - fmt.Println(err) - return -} -``` - ### PutObject(bucketName, objectName string, reader io.Reader, objectSize int64,opts PutObjectOptions) (n int, err error) Uploads objects that are less than 64MiB in a single PUT operation. For objects that are greater than 64MiB in size, PutObject seamlessly uploads the object as parts of 64MiB or more depending on the actual file size. The max upload size for an object is 5TB. 
@@ -573,8 +549,9 @@ __minio.PutObjectOptions__ | `opts.ContentType` | _string_ | Content type of object, e.g "application/text" | | `opts.ContentEncoding` | _string_ | Content encoding of object, e.g "gzip" | | `opts.ContentDisposition` | _string_ | Content disposition of object, "inline" | +| `opts.ContentLanguage` | _string_ | Content language of object, e.g "French" | | `opts.CacheControl` | _string_ | Used to specify directives for caching mechanisms in both requests and responses e.g "max-age=600"| -| `opts.EncryptMaterials` | _encrypt.Materials_ | Interface provided by `encrypt` package to encrypt a stream of data (For more information see https://godoc.org/github.com/minio/minio-go) | +| `opts.ServerSideEncryption` | _encrypt.ServerSide_ | Interface provided by `encrypt` package to specify server-side-encryption. (For more information see https://godoc.org/github.com/minio/minio-go) | | `opts.StorageClass` | _string_ | Specify storage class for the object. Supported values for Minio server are `REDUCED_REDUNDANCY` and `STANDARD` | __Example__ @@ -618,7 +595,7 @@ __Parameters__ |`objectName` | _string_ |Name of the object | |`reader` | _io.Reader_ |Any Go type that implements io.Reader | |`objectSize`| _int64_ | size of the object being uploaded. Pass -1 if stream size is unknown | -|`opts` | _minio.PutObjectOptions_ |Pointer to struct that allows user to set optional custom metadata, content-type, content-encoding,content-disposition and cache-control headers, pass encryption module for encrypting objects, and optionally configure number of threads for multipart put operation. | +|`opts` | _minio.PutObjectOptions_ |Pointer to struct that allows user to set optional custom metadata, content-type, content-encoding, content-disposition, content-language and cache-control headers, pass encryption module for encrypting objects, and optionally configure number of threads for multipart put operation. | __Example__ @@ -746,27 +723,27 @@ __Example__ ```go // Prepare source decryption key (here we assume same key to // decrypt all source objects.) -decKey := minio.NewSSEInfo([]byte{1, 2, 3}, "") +sseSrc := encrypt.DefaultPBKDF([]byte("password"), []byte("salt")) // Source objects to concatenate. We also specify decryption // key for each -src1 := minio.NewSourceInfo("bucket1", "object1", &decKey) +src1 := minio.NewSourceInfo("bucket1", "object1", sseSrc) src1.SetMatchETagCond("31624deb84149d2f8ef9c385918b653a") -src2 := minio.NewSourceInfo("bucket2", "object2", &decKey) +src2 := minio.NewSourceInfo("bucket2", "object2", sseSrc) src2.SetMatchETagCond("f8ef9c385918b653a31624deb84149d2") -src3 := minio.NewSourceInfo("bucket3", "object3", &decKey) +src3 := minio.NewSourceInfo("bucket3", "object3", sseSrc) src3.SetMatchETagCond("5918b653a31624deb84149d2f8ef9c38") // Create slice of sources. 
srcs := []minio.SourceInfo{src1, src2, src3} // Prepare destination encryption key -encKey := minio.NewSSEInfo([]byte{8, 9, 0}, "") +sseDst := encrypt.DefaultPBKDF([]byte("new-password"), []byte("new-salt")) // Create destination info -dst, err := minio.NewDestinationInfo("bucket", "object", &encKey, nil) +dst, err := minio.NewDestinationInfo("bucket", "object", sseDst, nil) if err != nil { fmt.Println(err) return @@ -792,7 +769,7 @@ __Parameters__ | :--- | :--- | :--- | | `bucket` | _string_ | Name of the source bucket | | `object` | _string_ | Name of the source object | -| `decryptSSEC` | _*minio.SSEInfo_ | Decryption info for the source object (`nil` without encryption) | +| `sse` | _*encrypt.ServerSide_ | Interface provided by `encrypt` package to specify server-side-encryption. (For more information see https://godoc.org/github.com/minio/minio-go) | __Example__ @@ -817,8 +794,8 @@ if err != nil { ```go // With decryption parameter. -decKey := minio.NewSSEInfo([]byte{1,2,3}, "") -src := minio.NewSourceInfo("bucket", "object", &decKey) +sseSrc := encrypt.DefaultPBKDF([]byte("password"), []byte("salt")) +src := minio.NewSourceInfo("bucket", "object", sseSrc) // Destination object dst, err := minio.NewDestinationInfo("my-bucketname", "my-objectname", nil, nil) @@ -845,7 +822,7 @@ __Parameters__ | :--- | :--- | :--- | | `bucket` | _string_ | Name of the destination bucket | | `object` | _string_ | Name of the destination object | -| `encryptSSEC` | _*minio.SSEInfo_ | Encryption info for the source object (`nil` without encryption) | +| `sse` | _*encrypt.ServerSide_ | Interface provided by `encrypt` package to specify server-side-encryption. (For more information see https://godoc.org/github.com/minio/minio-go) | | | `userMeta` | _map[string]string_ | User metadata to be set on the destination. If nil, with only one source, user-metadata is copied from source. | __Example__ @@ -871,8 +848,8 @@ if err != nil { src := minio.NewSourceInfo("bucket", "object", nil) // With encryption parameter. -encKey := minio.NewSSEInfo([]byte{1,2,3}, "") -dst, err := minio.NewDestinationInfo("bucket", "object", &encKey, nil) +sseDst := encrypt.DefaultPBKDF([]byte("password"), []byte("salt")) +dst, err := minio.NewDestinationInfo("bucket", "object", sseDst, nil) if err != nil { fmt.Println(err) return @@ -900,7 +877,7 @@ __Parameters__ |`bucketName` | _string_ |Name of the bucket | |`objectName` | _string_ |Name of the object | |`filePath` | _string_ |Path to file to be uploaded | -|`opts` | _minio.PutObjectOptions_ |Pointer to struct that allows user to set optional custom metadata, content-type, content-encoding,content-disposition and cache-control headers, pass encryption module for encrypting objects, and optionally configure number of threads for multipart put operation. | +|`opts` | _minio.PutObjectOptions_ |Pointer to struct that allows user to set optional custom metadata, content-type, content-encoding, content-disposition, content-language and cache-control headers, pass encryption module for encrypting objects, and optionally configure number of threads for multipart put operation. 
| __Example__ @@ -1050,6 +1027,47 @@ for rErr := range minioClient.RemoveObjects("mybucket", objectsCh) { } ``` + +### RemoveObjectsWithContext(ctx context.Context, bucketName string, objectsCh chan string) (errorCh <-chan RemoveObjectError) +*Identical to RemoveObjects operation, but accepts a context for request cancellation.* + +Parameters + +|Param |Type |Description | +|:---|:---| :---| +|`ctx` | _context.Context_ |Request context | +|`bucketName` | _string_ |Name of the bucket | +|`objectsCh` | _chan string_ | Channel of objects to be removed | + + +__Return Values__ + +|Param |Type |Description | +|:---|:---| :---| +|`errorCh` | _<-chan minio.RemoveObjectError_ | Receive-only channel of errors observed during deletion. | + +```go +objectsCh := make(chan string) +ctx, cancel := context.WithTimeout(context.Background(), 100 * time.Second) +defer cancel() + +// Send object names that are needed to be removed to objectsCh +go func() { + defer close(objectsCh) + // List all objects from a bucket-name with a matching prefix. + for object := range minioClient.ListObjects("my-bucketname", "my-prefixname", true, nil) { + if object.Err != nil { + log.Fatalln(object.Err) + } + objectsCh <- object.Key + } +}() + +for rErr := range minioClient.RemoveObjects(ctx, "my-bucketname", objectsCh) { + fmt.Println("Error detected during deletion: ", rErr) +} +``` + ### RemoveIncompleteUpload(bucketName, objectName string) error Removes a partially uploaded object. @@ -1073,283 +1091,6 @@ if err != nil { } ``` -## 4. Encrypted object operations - - -### NewSymmetricKey(key []byte) *encrypt.SymmetricKey - -__Parameters__ - -|Param |Type |Description | -|:---|:---| :---| -|`key` | _string_ |Name of the bucket | - - -__Return Value__ - -|Param |Type |Description | -|:---|:---| :---| -|`symmetricKey` | _*encrypt.SymmetricKey_ | represents a symmetric key structure which can be used to encrypt and decrypt data | - -```go -symKey := encrypt.NewSymmetricKey([]byte("my-secret-key-00")) - -// Build the CBC encryption material with symmetric key. -cbcMaterials, err := encrypt.NewCBCSecureMaterials(symKey) -if err != nil { - fmt.Println(err) - return -} -fmt.Println("Successfully initialized Symmetric key CBC materials", cbcMaterials) - -object, err := minioClient.GetEncryptedObject("mybucket", "myobject", cbcMaterials) -if err != nil { - fmt.Println(err) - return -} -defer object.Close() -``` - - -### NewAsymmetricKey(privateKey []byte, publicKey[]byte) (*encrypt.AsymmetricKey, error) - -__Parameters__ - -|Param |Type |Description | -|:---|:---| :---| -|`privateKey` | _[]byte_ | Private key data | -|`publicKey` | _[]byte_ | Public key data | - - -__Return Value__ - -|Param |Type |Description | -|:---|:---| :---| -|`asymmetricKey` | _*encrypt.AsymmetricKey_ | represents an asymmetric key structure which can be used to encrypt and decrypt data | -|`err` | _error_ | Standard Error | - - -```go -privateKey, err := ioutil.ReadFile("private.key") -if err != nil { - fmt.Println(err) - return -} - -publicKey, err := ioutil.ReadFile("public.key") -if err != nil { - fmt.Println(err) - return -} - -// Initialize the asymmetric key -asymmetricKey, err := encrypt.NewAsymmetricKey(privateKey, publicKey) -if err != nil { - fmt.Println(err) - return -} - -// Build the CBC encryption material for asymmetric key. 
-cbcMaterials, err := encrypt.NewCBCSecureMaterials(asymmetricKey) -if err != nil { - fmt.Println(err) - return -} -fmt.Println("Successfully initialized Asymmetric key CBC materials", cbcMaterials) - -object, err := minioClient.GetEncryptedObject("mybucket", "myobject", cbcMaterials) -if err != nil { - fmt.Println(err) - return -} -defer object.Close() -``` - - -### GetEncryptedObject(bucketName, objectName string, encryptMaterials encrypt.Materials) (io.ReadCloser, error) - -Returns the decrypted stream of the object data based of the given encryption materials. Most of the common errors occur when reading the stream. - -__Parameters__ - -|Param |Type |Description | -|:---|:---| :---| -|`bucketName` | _string_ | Name of the bucket | -|`objectName` | _string_ | Name of the object | -|`encryptMaterials` | _encrypt.Materials_ | Interface provided by `encrypt` package to encrypt a stream of data (For more information see https://godoc.org/github.com/minio/minio-go) | - - -__Return Value__ - -|Param |Type |Description | -|:---|:---| :---| -|`stream` | _io.ReadCloser_ | Returns the deciphered object reader, caller should close after reading. | -|`err` | _error | Returns errors. | - - -__Example__ - - -```go -// Generate a master symmetric key -key := encrypt.NewSymmetricKey([]byte("my-secret-key-00")) - -// Build the CBC encryption material -cbcMaterials, err := encrypt.NewCBCSecureMaterials(key) -if err != nil { - fmt.Println(err) - return -} - -object, err := minioClient.GetEncryptedObject("mybucket", "myobject", cbcMaterials) -if err != nil { - fmt.Println(err) - return -} -defer object.Close() - -localFile, err := os.Create("/tmp/local-file.jpg") -if err != nil { - fmt.Println(err) - return -} -defer localFile.Close() - -if _, err = io.Copy(localFile, object); err != nil { - fmt.Println(err) - return -} -``` - - - -### PutEncryptedObject(bucketName, objectName string, reader io.Reader, encryptMaterials encrypt.Materials) (n int, err error) -Encrypt and upload an object. - -__Parameters__ - -|Param |Type |Description | -|:---|:---| :---| -|`bucketName` | _string_ |Name of the bucket | -|`objectName` | _string_ |Name of the object | -|`reader` | _io.Reader_ |Any Go type that implements io.Reader | -|`encryptMaterials` | _encrypt.Materials_ | Interface provided by `encrypt` package to encrypt a stream of data (For more information see https://godoc.org/github.com/minio/minio-go) | - -__Example__ - -```go -// Load a private key -privateKey, err := ioutil.ReadFile("private.key") -if err != nil { - fmt.Println(err) - return -} - -// Load a public key -publicKey, err := ioutil.ReadFile("public.key") -if err != nil { - fmt.Println(err) - return -} - -// Build an asymmetric key -key, err := encrypt.NewAsymmetricKey(privateKey, publicKey) -if err != nil { - fmt.Println(err) - return -} - -// Build the CBC encryption module -cbcMaterials, err := encrypt.NewCBCSecureMaterials(key) -if err != nil { - fmt.Println(err) - return -} - -// Open a file to upload -file, err := os.Open("my-testfile") -if err != nil { - fmt.Println(err) - return -} -defer file.Close() - -// Upload the encrypted form of the file -n, err := minioClient.PutEncryptedObject("mybucket", "myobject", file, cbcMaterials) -if err != nil { - fmt.Println(err) - return -} -fmt.Println("Successfully uploaded encrypted bytes: ", n) -``` - - -### FPutEncryptedObject(bucketName, objectName, filePath, encryptMaterials encrypt.Materials) (n int, err error) -Encrypt and upload an object from a file. 
- -__Parameters__ - - -|Param |Type |Description | -|:---|:---| :---| -|`bucketName` | _string_ |Name of the bucket | -|`objectName` | _string_ |Name of the object | -|`filePath` | _string_ |Path to file to be uploaded | -|`encryptMaterials` | _encrypt.Materials_ | Interface provided by `encrypt` package to encrypt a stream of data (For more information see https://godoc.org/github.com/minio/minio-go)The module that encrypts data | - -__Example__ - - -```go -// Load a private key -privateKey, err := ioutil.ReadFile("private.key") -if err != nil { - fmt.Println(err) - return -} - -// Load a public key -publicKey, err := ioutil.ReadFile("public.key") -if err != nil { - fmt.Println(err) - return -} - -// Build an asymmetric key -key, err := encrypt.NewAsymmetricKey(privateKey, publicKey) -if err != nil { - fmt.Println(err) - return -} - -// Build the CBC encryption module -cbcMaterials, err := encrypt.NewCBCSecureMaterials(key) -if err != nil { - fmt.Println(err) - return -} - -n, err := minioClient.FPutEncryptedObject("mybucket", "myobject.csv", "/tmp/otherobject.csv", cbcMaterials) -if err != nil { - fmt.Println(err) - return -} -fmt.Println("Successfully uploaded encrypted bytes: ", n) -``` - - - -### NewSSEInfo(key []byte, algo string) SSEInfo -Create a key object for use as encryption or decryption parameter in operations involving server-side-encryption with customer provided key (SSE-C). - -__Parameters__ - -| Param | Type | Description | -| :--- | :--- | :--- | -| `key` | _[]byte_ | Byte-slice of the raw, un-encoded binary key | -| `algo` | _string_ | Algorithm to use in encryption or decryption with the given key. Can be empty (defaults to `AES256`) | - - ## 5. Presigned operations diff --git a/vendor/github.com/minio/minio-go/examples/s3/fputencrypted-object.go b/vendor/github.com/minio/minio-go/examples/s3/fputencrypted-object.go index 96eec7e8f..5da9f9d71 100644 --- a/vendor/github.com/minio/minio-go/examples/s3/fputencrypted-object.go +++ b/vendor/github.com/minio/minio-go/examples/s3/fputencrypted-object.go @@ -22,8 +22,9 @@ package main import ( "log" - "github.com/minio/minio-go" "github.com/minio/minio-go/pkg/encrypt" + + "github.com/minio/minio-go" ) func main() { @@ -40,38 +41,16 @@ func main() { log.Fatalln(err) } - // Specify a local file that we will upload - filePath := "my-testfile" + filePath := "my-testfile" // Specify a local file that we will upload + bucketname := "my-bucketname" // Specify a bucket name - the bucket must already exist + objectName := "my-objectname" // Specify a object name + password := "correct horse battery staple" // Specify your password. DO NOT USE THIS ONE - USE YOUR OWN. 
- //// Build an asymmetric key from private and public files - // - // privateKey, err := ioutil.ReadFile("private.key") - // if err != nil { - // t.Fatal(err) - // } - // - // publicKey, err := ioutil.ReadFile("public.key") - // if err != nil { - // t.Fatal(err) - // } - // - // asymmetricKey, err := NewAsymmetricKey(privateKey, publicKey) - // if err != nil { - // t.Fatal(err) - // } - //// - - // Build a symmetric key - symmetricKey := encrypt.NewSymmetricKey([]byte("my-secret-key-00")) - - // Build encryption materials which will encrypt uploaded data - cbcMaterials, err := encrypt.NewCBCSecureMaterials(symmetricKey) - if err != nil { - log.Fatalln(err) - } + // New SSE-C where the cryptographic key is derived from a password and the objectname + bucketname as salt + encryption := encrypt.DefaultPBKDF([]byte(password), []byte(bucketname+objectName)) // Encrypt file content and upload to the server - n, err := s3Client.FPutEncryptedObject("my-bucketname", "my-objectname", filePath, cbcMaterials) + n, err := s3Client.FPutObject(bucketname, objectName, filePath, minio.PutObjectOptions{ServerSideEncryption: encryption}) if err != nil { log.Fatalln(err) } diff --git a/vendor/github.com/minio/minio-go/examples/s3/get-encrypted-object.go b/vendor/github.com/minio/minio-go/examples/s3/get-encrypted-object.go index 9783bebe8..62a06d59f 100644 --- a/vendor/github.com/minio/minio-go/examples/s3/get-encrypted-object.go +++ b/vendor/github.com/minio/minio-go/examples/s3/get-encrypted-object.go @@ -42,35 +42,15 @@ func main() { log.Fatalln(err) } - //// Build an asymmetric key from private and public files - // - // privateKey, err := ioutil.ReadFile("private.key") - // if err != nil { - // t.Fatal(err) - // } - // - // publicKey, err := ioutil.ReadFile("public.key") - // if err != nil { - // t.Fatal(err) - // } - // - // asymmetricKey, err := NewAsymmetricKey(privateKey, publicKey) - // if err != nil { - // t.Fatal(err) - // } - //// + bucketname := "my-bucketname" // Specify a bucket name - the bucket must already exist + objectName := "my-objectname" // Specify a object name - the object must already exist + password := "correct horse battery staple" // Specify your password. DO NOT USE THIS ONE - USE YOUR OWN. 
- // Build a symmetric key - symmetricKey := encrypt.NewSymmetricKey([]byte("my-secret-key-00")) + // New SSE-C where the cryptographic key is derived from a password and the objectname + bucketname as salt + encryption := encrypt.DefaultPBKDF([]byte(password), []byte(bucketname+objectName)) - // Build encryption materials which will encrypt uploaded data - cbcMaterials, err := encrypt.NewCBCSecureMaterials(symmetricKey) - if err != nil { - log.Fatalln(err) - } - - // Get a deciphered data from the server, deciphering is assured by cbcMaterials - reader, err := s3Client.GetEncryptedObject("my-bucketname", "my-objectname", cbcMaterials) + // Get the encrypted object + reader, err := s3Client.GetObject(bucketname, objectName, minio.GetObjectOptions{ServerSideEncryption: encryption}) if err != nil { log.Fatalln(err) } diff --git a/vendor/github.com/minio/minio-go/examples/s3/put-encrypted-object.go b/vendor/github.com/minio/minio-go/examples/s3/put-encrypted-object.go index cdf09ac53..48b93671e 100644 --- a/vendor/github.com/minio/minio-go/examples/s3/put-encrypted-object.go +++ b/vendor/github.com/minio/minio-go/examples/s3/put-encrypted-object.go @@ -41,42 +41,30 @@ func main() { log.Fatalln(err) } + filePath := "my-testfile" // Specify a local file that we will upload + // Open a local file that we will upload - file, err := os.Open("my-testfile") + file, err := os.Open(filePath) if err != nil { log.Fatalln(err) } defer file.Close() - //// Build an asymmetric key from private and public files - // - // privateKey, err := ioutil.ReadFile("private.key") - // if err != nil { - // t.Fatal(err) - // } - // - // publicKey, err := ioutil.ReadFile("public.key") - // if err != nil { - // t.Fatal(err) - // } - // - // asymmetricKey, err := NewAsymmetricKey(privateKey, publicKey) - // if err != nil { - // t.Fatal(err) - // } - //// - - // Build a symmetric key - symmetricKey := encrypt.NewSymmetricKey([]byte("my-secret-key-00")) - - // Build encryption materials which will encrypt uploaded data - cbcMaterials, err := encrypt.NewCBCSecureMaterials(symmetricKey) + // Get file stats. + fstat, err := file.Stat() if err != nil { log.Fatalln(err) } + bucketname := "my-bucketname" // Specify a bucket name - the bucket must already exist + objectName := "my-objectname" // Specify a object name + password := "correct horse battery staple" // Specify your password. DO NOT USE THIS ONE - USE YOUR OWN. 
+ + // New SSE-C where the cryptographic key is derived from a password and the objectname + bucketname as salt + encryption := encrypt.DefaultPBKDF([]byte(password), []byte(bucketname+objectName)) + // Encrypt file content and upload to the server - n, err := s3Client.PutEncryptedObject("my-bucketname", "my-objectname", file, cbcMaterials) + n, err := s3Client.PutObject(bucketname, objectName, file, fstat.Size(), minio.PutObjectOptions{ServerSideEncryption: encryption}) if err != nil { log.Fatalln(err) } diff --git a/vendor/github.com/minio/minio-go/examples/s3/putobject-getobject-sse.go b/vendor/github.com/minio/minio-go/examples/s3/putobject-getobject-sse.go index 3d3b2fd2d..4e459b5d7 100644 --- a/vendor/github.com/minio/minio-go/examples/s3/putobject-getobject-sse.go +++ b/vendor/github.com/minio/minio-go/examples/s3/putobject-getobject-sse.go @@ -21,11 +21,11 @@ package main import ( "bytes" - "crypto/md5" - "encoding/base64" "io/ioutil" "log" + "github.com/minio/minio-go/pkg/encrypt" + minio "github.com/minio/minio-go" ) @@ -40,38 +40,19 @@ func main() { log.Fatalln(err) } - content := bytes.NewReader([]byte("Hello again")) - key := []byte("32byteslongsecretkeymustprovided") - h := md5.New() - h.Write(key) - encryptionKey := base64.StdEncoding.EncodeToString(key) - encryptionKeyMD5 := base64.StdEncoding.EncodeToString(h.Sum(nil)) + bucketName := "my-bucket" + objectName := "my-encrypted-object" + object := []byte("Hello again") - // Amazon S3 does not store the encryption key you provide. - // Instead S3 stores a randomly salted HMAC value of the - // encryption key in order to validate future requests. - // The salted HMAC value cannot be used to derive the value - // of the encryption key or to decrypt the contents of the - // encrypted object. That means, if you lose the encryption - // key, you lose the object. - var metadata = map[string]string{ - "x-amz-server-side-encryption-customer-algorithm": "AES256", - "x-amz-server-side-encryption-customer-key": encryptionKey, - "x-amz-server-side-encryption-customer-key-MD5": encryptionKeyMD5, - } - - // minioClient.TraceOn(os.Stderr) // Enable to debug. 
- _, err = minioClient.PutObject("mybucket", "my-encrypted-object.txt", content, 11, minio.PutObjectOptions{UserMetadata: metadata}) + encryption := encrypt.DefaultPBKDF([]byte("my secret password"), []byte(bucketName+objectName)) + _, err = minioClient.PutObject(bucketName, objectName, bytes.NewReader(object), int64(len(object)), minio.PutObjectOptions{ + ServerSideEncryption: encryption, + }) if err != nil { log.Fatalln(err) } - opts := minio.GetObjectOptions{} - for k, v := range metadata { - opts.Set(k, v) - } - coreClient := minio.Core{minioClient} - reader, _, err := coreClient.GetObject("mybucket", "my-encrypted-object.txt", opts) + reader, err := minioClient.GetObject(bucketName, objectName, minio.GetObjectOptions{ServerSideEncryption: encryption}) if err != nil { log.Fatalln(err) } @@ -81,7 +62,7 @@ func main() { if err != nil { log.Fatalln(err) } - if !bytes.Equal(decBytes, []byte("Hello again")) { - log.Fatalln("Expected \"Hello, world\", got %s", string(decBytes)) + if !bytes.Equal(decBytes, object) { + log.Fatalln("Expected %s, got %s", string(object), string(decBytes)) } } diff --git a/vendor/github.com/minio/minio-go/functional_tests.go b/vendor/github.com/minio/minio-go/functional_tests.go index c4156c293..a11fdadba 100644 --- a/vendor/github.com/minio/minio-go/functional_tests.go +++ b/vendor/github.com/minio/minio-go/functional_tests.go @@ -22,7 +22,6 @@ package main import ( "bytes" "context" - "encoding/hex" "encoding/json" "errors" "fmt" @@ -1101,6 +1100,101 @@ func testGetObjectClosedTwice() { successLogger(testName, function, args, startTime).Info() } +// Test RemoveObjectsWithContext request context cancels after timeout +func testRemoveObjectsWithContext() { + // Initialize logging params. + startTime := time.Now() + testName := getFuncName() + function := "RemoveObjectsWithContext(ctx, bucketName, objectsCh)" + args := map[string]interface{}{ + "bucketName": "", + } + + // Seed random based on current tie. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client. + c, err := minio.New( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + logError(testName, function, args, startTime, "", "Minio client object creation failed", err) + return + } + + // Set user agent. + c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + // Enable tracing, write to stdout. + // c.TraceOn(os.Stderr) + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(bucketName, "us-east-1") + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + } + + // Generate put data. + r := bytes.NewReader(bytes.Repeat([]byte("a"), 8)) + + // Multi remove of 20 objects. + nrObjects := 20 + objectsCh := make(chan string) + go func() { + defer close(objectsCh) + for i := 0; i < nrObjects; i++ { + objectName := "sample" + strconv.Itoa(i) + ".txt" + _, err = c.PutObject(bucketName, objectName, r, 8, minio.PutObjectOptions{ContentType: "application/octet-stream"}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + continue + } + objectsCh <- objectName + } + }() + // Set context to cancel in 1 nanosecond. + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond) + args["ctx"] = ctx + defer cancel() + + // Call RemoveObjectsWithContext API with short timeout. 
+ errorCh := c.RemoveObjectsWithContext(ctx, bucketName, objectsCh) + // Check for error. + select { + case r := <-errorCh: + if r.Err == nil { + logError(testName, function, args, startTime, "", "RemoveObjectsWithContext should fail on short timeout", err) + return + } + } + // Set context with longer timeout. + ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour) + args["ctx"] = ctx + defer cancel() + // Perform RemoveObjectsWithContext with the longer timeout. Expect the removals to succeed. + errorCh = c.RemoveObjectsWithContext(ctx, bucketName, objectsCh) + select { + case r, more := <-errorCh: + if more || r.Err != nil { + logError(testName, function, args, startTime, "", "Unexpected error", r.Err) + return + } + } + + // Delete all objects and buckets. + if err = cleanupBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "Cleanup failed", err) + return + } + successLogger(testName, function, args, startTime).Info() +} + // Test removing multiple objects with Remove API func testRemoveMultipleObjects() { // initialize logging params @@ -1912,6 +2006,14 @@ func testGetObjectReadSeekFunctional() { return } + defer func() { + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "Cleanup failed", err) + return + } + }() + // Generate 33K of data. bufSize := dataFileMap["datafile-33-kB"] var reader = getDataReader("datafile-33-kB") @@ -1938,14 +2040,6 @@ func testGetObjectReadSeekFunctional() { return } - defer func() { - // Delete all objects and buckets - if err = cleanupBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "Cleanup failed", err) - return - } - }() - // Read the data back r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{}) if err != nil { @@ -2127,7 +2221,7 @@ func testGetObjectReadAtFunctional() { buf3 := make([]byte, 512) buf4 := make([]byte, 512) - // Test readAt before stat is called. + // Test readAt before stat is called such that objectInfo doesn't change. m, err := r.ReadAt(buf1, offset) if err != nil { logError(testName, function, args, startTime, "", "ReadAt failed", err) @@ -2167,6 +2261,7 @@ func testGetObjectReadAtFunctional() { logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err) return } + offset += 512 m, err = r.ReadAt(buf3, offset) if err != nil { @@ -2411,9 +2506,10 @@ func testPresignedPostPolicy() { } expectedLocation := scheme + os.Getenv(serverEndpoint) + "/" + bucketName + "/" + objectName + expectedLocationBucketDNS := scheme + bucketName + "." + os.Getenv(serverEndpoint) + "/" + objectName if val, ok := res.Header["Location"]; ok { - if val[0] != expectedLocation { + if val[0] != expectedLocation && val[0] != expectedLocationBucketDNS { logError(testName, function, args, startTime, "", "Location in header response is incorrect", err) return } @@ -2588,6 +2684,10 @@ func testCopyObject() { return } + // Close all the get readers before proceeding with CopyObject operations. + r.Close() + readerCopy.Close() + // CopyObject again but with wrong conditions src = minio.NewSourceInfo(bucketName, objectName, nil) err = src.SetUnmodifiedSinceCond(time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC)) @@ -2608,6 +2708,37 @@ func testCopyObject() { return } + // Perform the Copy which should update only metadata. 
+ src = minio.NewSourceInfo(bucketName, objectName, nil) + dst, err = minio.NewDestinationInfo(bucketName, objectName, nil, map[string]string{ + "Copy": "should be same", + }) + args["dst"] = dst + args["src"] = src + if err != nil { + logError(testName, function, args, startTime, "", "NewDestinationInfo failed", err) + return + } + + err = c.CopyObject(dst, src) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObject shouldn't fail", err) + return + } + + stOpts := minio.StatObjectOptions{} + stOpts.SetMatchETag(objInfo.ETag) + objInfo, err = c.StatObject(bucketName, objectName, stOpts) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObject ETag should match and not fail", err) + return + } + + if objInfo.Metadata.Get("x-amz-meta-copy") != "should be same" { + logError(testName, function, args, startTime, "", "CopyObject modified metadata should match", err) + return + } + // Delete all objects and buckets if err = cleanupBucket(bucketName, c); err != nil { logError(testName, function, args, startTime, "", "Cleanup failed", err) @@ -2620,17 +2751,395 @@ func testCopyObject() { successLogger(testName, function, args, startTime).Info() } +// Tests SSE-C get object ReaderSeeker interface methods. +func testEncryptedGetObjectReadSeekFunctional() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "GetObject(bucketName, objectName)" + args := map[string]interface{}{} + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + logError(testName, function, args, startTime, "", "Minio client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(bucketName, "us-east-1") + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer func() { + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "Cleanup failed", err) + return + } + }() + + // Generate 65MiB of data. 
+ bufSize := dataFileMap["datafile-65-MB"] + var reader = getDataReader("datafile-65-MB") + defer reader.Close() + + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + buf, err := ioutil.ReadAll(reader) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAll failed", err) + return + } + + // Save the data + n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ + ContentType: "binary/octet-stream", + ServerSideEncryption: encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+objectName)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + if n != int64(bufSize) { + logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(bufSize))+", got "+string(n), err) + return + } + + // Read the data back + r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{ + ServerSideEncryption: encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+objectName)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + defer r.Close() + + st, err := r.Stat() + if err != nil { + logError(testName, function, args, startTime, "", "Stat object failed", err) + return + } + + if st.Size != int64(bufSize) { + logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(bufSize))+", got "+string(st.Size), err) + return + } + + // This following function helps us to compare data from the reader after seek + // with the data from the original buffer + cmpData := func(r io.Reader, start, end int) { + if end-start == 0 { + return + } + buffer := bytes.NewBuffer([]byte{}) + if _, err := io.CopyN(buffer, r, int64(bufSize)); err != nil { + if err != io.EOF { + logError(testName, function, args, startTime, "", "CopyN failed", err) + return + } + } + if !bytes.Equal(buf[start:end], buffer.Bytes()) { + logError(testName, function, args, startTime, "", "Incorrect read bytes v/s original buffer", err) + return + } + } + + testCases := []struct { + offset int64 + whence int + pos int64 + err error + shouldCmp bool + start int + end int + }{ + // Start from offset 0, fetch data and compare + {0, 0, 0, nil, true, 0, 0}, + // Start from offset 2048, fetch data and compare + {2048, 0, 2048, nil, true, 2048, bufSize}, + // Start from offset larger than possible + {int64(bufSize) + 1024, 0, 0, io.EOF, false, 0, 0}, + // Move to offset 0 without comparing + {0, 0, 0, nil, false, 0, 0}, + // Move one step forward and compare + {1, 1, 1, nil, true, 1, bufSize}, + // Move larger than possible + {int64(bufSize), 1, 0, io.EOF, false, 0, 0}, + // Provide negative offset with CUR_SEEK + {int64(-1), 1, 0, fmt.Errorf("Negative position not allowed for 1"), false, 0, 0}, + // Test with whence SEEK_END and with positive offset + {1024, 2, 0, io.EOF, false, 0, 0}, + // Test with whence SEEK_END and with negative offset + {-1024, 2, int64(bufSize) - 1024, nil, true, bufSize - 1024, bufSize}, + // Test with whence SEEK_END and with large negative offset + {-int64(bufSize) * 2, 2, 0, fmt.Errorf("Seeking at negative offset not allowed for 2"), false, 0, 0}, + // Test with invalid whence + {0, 3, 0, fmt.Errorf("Invalid whence 3"), false, 0, 0}, + } + + for i, testCase := range testCases { + // Perform seek operation + n, err := 
r.Seek(testCase.offset, testCase.whence) + if err != nil && testCase.err == nil { + // We expected success. + logError(testName, function, args, startTime, "", + fmt.Sprintf("Test %d, unexpected err value: expected: %s, found: %s", i+1, testCase.err, err), err) + return + } + if err == nil && testCase.err != nil { + // We expected failure, but got success. + logError(testName, function, args, startTime, "", + fmt.Sprintf("Test %d, unexpected err value: expected: %s, found: %s", i+1, testCase.err, err), err) + return + } + if err != nil && testCase.err != nil { + if err.Error() != testCase.err.Error() { + // We expect a specific error + logError(testName, function, args, startTime, "", + fmt.Sprintf("Test %d, unexpected err value: expected: %s, found: %s", i+1, testCase.err, err), err) + return + } + } + // Check the returned seek pos + if n != testCase.pos { + logError(testName, function, args, startTime, "", + fmt.Sprintf("Test %d, number of bytes seeked does not match, expected %d, got %d", i+1, testCase.pos, n), err) + return + } + // Compare only if shouldCmp is activated + if testCase.shouldCmp { + cmpData(r, testCase.start, testCase.end) + } + } + + successLogger(testName, function, args, startTime).Info() +} + +// Tests SSE-C get object ReaderAt interface methods. +func testEncryptedGetObjectReadAtFunctional() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "GetObject(bucketName, objectName)" + args := map[string]interface{}{} + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + logError(testName, function, args, startTime, "", "Minio client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(bucketName, "us-east-1") + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + // Generate 65MiB of data. 
+ bufSize := dataFileMap["datafile-65-MB"] + var reader = getDataReader("datafile-65-MB") + defer reader.Close() + + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + buf, err := ioutil.ReadAll(reader) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAll failed", err) + return + } + + // Save the data + n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ + ContentType: "binary/octet-stream", + ServerSideEncryption: encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+objectName)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + if n != int64(bufSize) { + logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(bufSize))+", got "+string(n), err) + return + } + + // read the data back + r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{ + ServerSideEncryption: encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+objectName)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + defer r.Close() + + offset := int64(2048) + + // read directly + buf1 := make([]byte, 512) + buf2 := make([]byte, 512) + buf3 := make([]byte, 512) + buf4 := make([]byte, 512) + + // Test readAt before stat is called such that objectInfo doesn't change. + m, err := r.ReadAt(buf1, offset) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAt failed", err) + return + } + if m != len(buf1) { + logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf1))+", got "+string(m), err) + return + } + if !bytes.Equal(buf1, buf[offset:offset+512]) { + logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err) + return + } + offset += 512 + + st, err := r.Stat() + if err != nil { + logError(testName, function, args, startTime, "", "Stat failed", err) + return + } + + if st.Size != int64(bufSize) { + logError(testName, function, args, startTime, "", "Number of bytes in stat does not match, expected "+string(int64(bufSize))+", got "+string(st.Size), err) + return + } + + m, err = r.ReadAt(buf2, offset) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAt failed", err) + return + } + if m != len(buf2) { + logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf2))+", got "+string(m), err) + return + } + if !bytes.Equal(buf2, buf[offset:offset+512]) { + logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err) + return + } + offset += 512 + m, err = r.ReadAt(buf3, offset) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAt failed", err) + return + } + if m != len(buf3) { + logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf3))+", got "+string(m), err) + return + } + if !bytes.Equal(buf3, buf[offset:offset+512]) { + logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err) + return + } + offset += 512 + m, err = r.ReadAt(buf4, offset) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAt failed", err) + 
return + } + if m != len(buf4) { + logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf4))+", got "+string(m), err) + return + } + if !bytes.Equal(buf4, buf[offset:offset+512]) { + logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err) + return + } + + buf5 := make([]byte, n) + // Read the whole object. + m, err = r.ReadAt(buf5, 0) + if err != nil { + if err != io.EOF { + logError(testName, function, args, startTime, "", "ReadAt failed", err) + return + } + } + if m != len(buf5) { + logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf5))+", got "+string(m), err) + return + } + if !bytes.Equal(buf, buf5) { + logError(testName, function, args, startTime, "", "Incorrect data read in GetObject, than what was previously uploaded", err) + return + } + + buf6 := make([]byte, n+1) + // Read the whole object and beyond. + _, err = r.ReadAt(buf6, 0) + if err != nil { + if err != io.EOF { + logError(testName, function, args, startTime, "", "ReadAt failed", err) + return + } + } + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "Cleanup failed", err) + return + } + successLogger(testName, function, args, startTime).Info() +} + // TestEncryptionPutGet tests client side encryption func testEncryptionPutGet() { // initialize logging params startTime := time.Now() testName := getFuncName() - function := "PutEncryptedObject(bucketName, objectName, reader, cbcMaterials, metadata, progress)" + function := "PutEncryptedObject(bucketName, objectName, reader, sse)" args := map[string]interface{}{ - "bucketName": "", - "objectName": "", - "cbcMaterials": "", - "metadata": "", + "bucketName": "", + "objectName": "", + "sse": "", } // Seed random based on current time. 
rand.Seed(time.Now().Unix()) @@ -2664,104 +3173,41 @@ func testEncryptionPutGet() { return } - // Generate a symmetric key - symKey := encrypt.NewSymmetricKey([]byte("my-secret-key-00")) - - // Generate an assymmetric key from predefine public and private certificates - privateKey, err := hex.DecodeString( - "30820277020100300d06092a864886f70d0101010500048202613082025d" + - "0201000281810087b42ea73243a3576dc4c0b6fa245d339582dfdbddc20c" + - "bb8ab666385034d997210c54ba79275c51162a1221c3fb1a4c7c61131ca6" + - "5563b319d83474ef5e803fbfa7e52b889e1893b02586b724250de7ac6351" + - "cc0b7c638c980acec0a07020a78eed7eaa471eca4b92071394e061346c06" + - "15ccce2f465dee2080a89e43f29b5702030100010281801dd5770c3af8b3" + - "c85cd18cacad81a11bde1acfac3eac92b00866e142301fee565365aa9af4" + - "57baebf8bb7711054d071319a51dd6869aef3848ce477a0dc5f0dbc0c336" + - "5814b24c820491ae2bb3c707229a654427e03307fec683e6b27856688f08" + - "bdaa88054c5eeeb773793ff7543ee0fb0e2ad716856f2777f809ef7e6fa4" + - "41024100ca6b1edf89e8a8f93cce4b98c76c6990a09eb0d32ad9d3d04fbf" + - "0b026fa935c44f0a1c05dd96df192143b7bda8b110ec8ace28927181fd8c" + - "d2f17330b9b63535024100aba0260afb41489451baaeba423bee39bcbd1e" + - "f63dd44ee2d466d2453e683bf46d019a8baead3a2c7fca987988eb4d565e" + - "27d6be34605953f5034e4faeec9bdb0241009db2cb00b8be8c36710aff96" + - "6d77a6dec86419baca9d9e09a2b761ea69f7d82db2ae5b9aae4246599bb2" + - "d849684d5ab40e8802cfe4a2b358ad56f2b939561d2902404e0ead9ecafd" + - "bb33f22414fa13cbcc22a86bdf9c212ce1a01af894e3f76952f36d6c904c" + - "bd6a7e0de52550c9ddf31f1e8bfe5495f79e66a25fca5c20b3af5b870241" + - "0083456232aa58a8c45e5b110494599bda8dbe6a094683a0539ddd24e19d" + - "47684263bbe285ad953d725942d670b8f290d50c0bca3d1dc9688569f1d5" + - "9945cb5c7d") - - if err != nil { - logError(testName, function, args, startTime, "", "DecodeString for symmetric Key generation failed", err) - return - } - - publicKey, err := hex.DecodeString("30819f300d06092a864886f70d010101050003818d003081890281810087" + - "b42ea73243a3576dc4c0b6fa245d339582dfdbddc20cbb8ab666385034d9" + - "97210c54ba79275c51162a1221c3fb1a4c7c61131ca65563b319d83474ef" + - "5e803fbfa7e52b889e1893b02586b724250de7ac6351cc0b7c638c980ace" + - "c0a07020a78eed7eaa471eca4b92071394e061346c0615ccce2f465dee20" + - "80a89e43f29b570203010001") - if err != nil { - logError(testName, function, args, startTime, "", "DecodeString for symmetric Key generation failed", err) - return - } - - // Generate an asymmetric key - asymKey, err := encrypt.NewAsymmetricKey(privateKey, publicKey) - if err != nil { - logError(testName, function, args, startTime, "", "NewAsymmetricKey for symmetric Key generation failed", err) - return - } - testCases := []struct { - buf []byte - encKey encrypt.Key + buf []byte }{ - {encKey: symKey, buf: bytes.Repeat([]byte("F"), 0)}, - {encKey: symKey, buf: bytes.Repeat([]byte("F"), 1)}, - {encKey: symKey, buf: bytes.Repeat([]byte("F"), 15)}, - {encKey: symKey, buf: bytes.Repeat([]byte("F"), 16)}, - {encKey: symKey, buf: bytes.Repeat([]byte("F"), 17)}, - {encKey: symKey, buf: bytes.Repeat([]byte("F"), 31)}, - {encKey: symKey, buf: bytes.Repeat([]byte("F"), 32)}, - {encKey: symKey, buf: bytes.Repeat([]byte("F"), 33)}, - {encKey: symKey, buf: bytes.Repeat([]byte("F"), 1024)}, - {encKey: symKey, buf: bytes.Repeat([]byte("F"), 1024*2)}, - {encKey: symKey, buf: bytes.Repeat([]byte("F"), 1024*1024)}, - - {encKey: asymKey, buf: bytes.Repeat([]byte("F"), 0)}, - {encKey: asymKey, buf: bytes.Repeat([]byte("F"), 1)}, - {encKey: asymKey, buf: bytes.Repeat([]byte("F"), 16)}, - {encKey: asymKey, 
buf: bytes.Repeat([]byte("F"), 32)}, - {encKey: asymKey, buf: bytes.Repeat([]byte("F"), 1024)}, - {encKey: asymKey, buf: bytes.Repeat([]byte("F"), 1024*1024)}, + {buf: bytes.Repeat([]byte("F"), 1)}, + {buf: bytes.Repeat([]byte("F"), 15)}, + {buf: bytes.Repeat([]byte("F"), 16)}, + {buf: bytes.Repeat([]byte("F"), 17)}, + {buf: bytes.Repeat([]byte("F"), 31)}, + {buf: bytes.Repeat([]byte("F"), 32)}, + {buf: bytes.Repeat([]byte("F"), 33)}, + {buf: bytes.Repeat([]byte("F"), 1024)}, + {buf: bytes.Repeat([]byte("F"), 1024*2)}, + {buf: bytes.Repeat([]byte("F"), 1024*1024)}, } + const password = "correct horse battery staple" // https://xkcd.com/936/ + for i, testCase := range testCases { // Generate a random object name objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") args["objectName"] = objectName // Secured object - cbcMaterials, err := encrypt.NewCBCSecureMaterials(testCase.encKey) - args["cbcMaterials"] = cbcMaterials - - if err != nil { - logError(testName, function, args, startTime, "", "NewCBCSecureMaterials failed", err) - return - } + sse := encrypt.DefaultPBKDF([]byte(password), []byte(bucketName+objectName)) + args["sse"] = sse // Put encrypted data - _, err = c.PutEncryptedObject(bucketName, objectName, bytes.NewReader(testCase.buf), cbcMaterials) + _, err = c.PutObject(bucketName, objectName, bytes.NewReader(testCase.buf), int64(len(testCase.buf)), minio.PutObjectOptions{ServerSideEncryption: sse}) if err != nil { logError(testName, function, args, startTime, "", "PutEncryptedObject failed", err) return } // Read the data back - r, err := c.GetEncryptedObject(bucketName, objectName, cbcMaterials) + r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{ServerSideEncryption: sse}) if err != nil { logError(testName, function, args, startTime, "", "GetEncryptedObject failed", err) return @@ -2801,13 +3247,13 @@ func testEncryptionFPut() { // initialize logging params startTime := time.Now() testName := getFuncName() - function := "FPutEncryptedObject(bucketName, objectName, filePath, contentType, cbcMaterials)" + function := "FPutEncryptedObject(bucketName, objectName, filePath, contentType, sse)" args := map[string]interface{}{ - "bucketName": "", - "objectName": "", - "filePath": "", - "contentType": "", - "cbcMaterials": "", + "bucketName": "", + "objectName": "", + "filePath": "", + "contentType": "", + "sse": "", } // Seed random based on current time. 
rand.Seed(time.Now().Unix()) @@ -2841,98 +3287,36 @@ func testEncryptionFPut() { return } - // Generate a symmetric key - symKey := encrypt.NewSymmetricKey([]byte("my-secret-key-00")) - - // Generate an assymmetric key from predefine public and private certificates - privateKey, err := hex.DecodeString( - "30820277020100300d06092a864886f70d0101010500048202613082025d" + - "0201000281810087b42ea73243a3576dc4c0b6fa245d339582dfdbddc20c" + - "bb8ab666385034d997210c54ba79275c51162a1221c3fb1a4c7c61131ca6" + - "5563b319d83474ef5e803fbfa7e52b889e1893b02586b724250de7ac6351" + - "cc0b7c638c980acec0a07020a78eed7eaa471eca4b92071394e061346c06" + - "15ccce2f465dee2080a89e43f29b5702030100010281801dd5770c3af8b3" + - "c85cd18cacad81a11bde1acfac3eac92b00866e142301fee565365aa9af4" + - "57baebf8bb7711054d071319a51dd6869aef3848ce477a0dc5f0dbc0c336" + - "5814b24c820491ae2bb3c707229a654427e03307fec683e6b27856688f08" + - "bdaa88054c5eeeb773793ff7543ee0fb0e2ad716856f2777f809ef7e6fa4" + - "41024100ca6b1edf89e8a8f93cce4b98c76c6990a09eb0d32ad9d3d04fbf" + - "0b026fa935c44f0a1c05dd96df192143b7bda8b110ec8ace28927181fd8c" + - "d2f17330b9b63535024100aba0260afb41489451baaeba423bee39bcbd1e" + - "f63dd44ee2d466d2453e683bf46d019a8baead3a2c7fca987988eb4d565e" + - "27d6be34605953f5034e4faeec9bdb0241009db2cb00b8be8c36710aff96" + - "6d77a6dec86419baca9d9e09a2b761ea69f7d82db2ae5b9aae4246599bb2" + - "d849684d5ab40e8802cfe4a2b358ad56f2b939561d2902404e0ead9ecafd" + - "bb33f22414fa13cbcc22a86bdf9c212ce1a01af894e3f76952f36d6c904c" + - "bd6a7e0de52550c9ddf31f1e8bfe5495f79e66a25fca5c20b3af5b870241" + - "0083456232aa58a8c45e5b110494599bda8dbe6a094683a0539ddd24e19d" + - "47684263bbe285ad953d725942d670b8f290d50c0bca3d1dc9688569f1d5" + - "9945cb5c7d") - - if err != nil { - logError(testName, function, args, startTime, "", "DecodeString for symmetric Key generation failed", err) - return - } - - publicKey, err := hex.DecodeString("30819f300d06092a864886f70d010101050003818d003081890281810087" + - "b42ea73243a3576dc4c0b6fa245d339582dfdbddc20cbb8ab666385034d9" + - "97210c54ba79275c51162a1221c3fb1a4c7c61131ca65563b319d83474ef" + - "5e803fbfa7e52b889e1893b02586b724250de7ac6351cc0b7c638c980ace" + - "c0a07020a78eed7eaa471eca4b92071394e061346c0615ccce2f465dee20" + - "80a89e43f29b570203010001") - if err != nil { - logError(testName, function, args, startTime, "", "DecodeString for symmetric Key generation failed", err) - return - } - - // Generate an asymmetric key - asymKey, err := encrypt.NewAsymmetricKey(privateKey, publicKey) - if err != nil { - logError(testName, function, args, startTime, "", "NewAsymmetricKey for symmetric Key generation failed", err) - return - } - // Object custom metadata customContentType := "custom/contenttype" args["metadata"] = customContentType testCases := []struct { - buf []byte - encKey encrypt.Key + buf []byte }{ - {encKey: symKey, buf: bytes.Repeat([]byte("F"), 0)}, - {encKey: symKey, buf: bytes.Repeat([]byte("F"), 1)}, - {encKey: symKey, buf: bytes.Repeat([]byte("F"), 15)}, - {encKey: symKey, buf: bytes.Repeat([]byte("F"), 16)}, - {encKey: symKey, buf: bytes.Repeat([]byte("F"), 17)}, - {encKey: symKey, buf: bytes.Repeat([]byte("F"), 31)}, - {encKey: symKey, buf: bytes.Repeat([]byte("F"), 32)}, - {encKey: symKey, buf: bytes.Repeat([]byte("F"), 33)}, - {encKey: symKey, buf: bytes.Repeat([]byte("F"), 1024)}, - {encKey: symKey, buf: bytes.Repeat([]byte("F"), 1024*2)}, - {encKey: symKey, buf: bytes.Repeat([]byte("F"), 1024*1024)}, - - {encKey: asymKey, buf: bytes.Repeat([]byte("F"), 0)}, - {encKey: asymKey, buf: 
bytes.Repeat([]byte("F"), 1)}, - {encKey: asymKey, buf: bytes.Repeat([]byte("F"), 16)}, - {encKey: asymKey, buf: bytes.Repeat([]byte("F"), 32)}, - {encKey: asymKey, buf: bytes.Repeat([]byte("F"), 1024)}, - {encKey: asymKey, buf: bytes.Repeat([]byte("F"), 1024*1024)}, + {buf: bytes.Repeat([]byte("F"), 0)}, + {buf: bytes.Repeat([]byte("F"), 1)}, + {buf: bytes.Repeat([]byte("F"), 15)}, + {buf: bytes.Repeat([]byte("F"), 16)}, + {buf: bytes.Repeat([]byte("F"), 17)}, + {buf: bytes.Repeat([]byte("F"), 31)}, + {buf: bytes.Repeat([]byte("F"), 32)}, + {buf: bytes.Repeat([]byte("F"), 33)}, + {buf: bytes.Repeat([]byte("F"), 1024)}, + {buf: bytes.Repeat([]byte("F"), 1024*2)}, + {buf: bytes.Repeat([]byte("F"), 1024*1024)}, } + const password = "correct horse battery staple" // https://xkcd.com/936/ for i, testCase := range testCases { // Generate a random object name objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") args["objectName"] = objectName // Secured object - cbcMaterials, err := encrypt.NewCBCSecureMaterials(testCase.encKey) - args["cbcMaterials"] = cbcMaterials + sse := encrypt.DefaultPBKDF([]byte(password), []byte(bucketName+objectName)) + args["sse"] = sse - if err != nil { - logError(testName, function, args, startTime, "", "NewCBCSecureMaterials failed", err) - return - } // Generate a random file name. fileName := randString(60, rand.NewSource(time.Now().UnixNano()), "") file, err := os.Create(fileName) @@ -2947,13 +3331,13 @@ func testEncryptionFPut() { } file.Close() // Put encrypted data - if _, err = c.FPutEncryptedObject(bucketName, objectName, fileName, cbcMaterials); err != nil { + if _, err = c.FPutObject(bucketName, objectName, fileName, minio.PutObjectOptions{ServerSideEncryption: sse}); err != nil { logError(testName, function, args, startTime, "", "FPutEncryptedObject failed", err) return } // Read the data back - r, err := c.GetEncryptedObject(bucketName, objectName, cbcMaterials) + r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{ServerSideEncryption: sse}) if err != nil { logError(testName, function, args, startTime, "", "GetEncryptedObject failed", err) return @@ -3101,7 +3485,7 @@ func testFunctional() { startTime := time.Now() testName := getFuncName() function := "testFunctional()" - function_all := "" + functionAll := "" args := map[string]interface{}{} // Seed random based on current time. @@ -3129,7 +3513,7 @@ func testFunctional() { // Make a new bucket. function = "MakeBucket(bucketName, region)" - function_all = "MakeBucket(bucketName, region)" + functionAll = "MakeBucket(bucketName, region)" args["bucketName"] = bucketName err = c.MakeBucket(bucketName, "us-east-1") @@ -3158,7 +3542,7 @@ func testFunctional() { // Verify if bucket exits and you have access. var exists bool function = "BucketExists(bucketName)" - function_all += ", " + function + functionAll += ", " + function args = map[string]interface{}{ "bucketName": bucketName, } @@ -3175,7 +3559,7 @@ func testFunctional() { // Asserting the default bucket policy. function = "GetBucketPolicy(bucketName, objectPrefix)" - function_all += ", " + function + functionAll += ", " + function args = map[string]interface{}{ "bucketName": bucketName, "objectPrefix": "", @@ -3193,7 +3577,7 @@ func testFunctional() { // Set the bucket policy to 'public readonly'. 
function = "SetBucketPolicy(bucketName, objectPrefix, bucketPolicy)" - function_all += ", " + function + functionAll += ", " + function args = map[string]interface{}{ "bucketName": bucketName, "objectPrefix": "", @@ -3207,7 +3591,7 @@ func testFunctional() { } // should return policy `readonly`. function = "GetBucketPolicy(bucketName, objectPrefix)" - function_all += ", " + function + functionAll += ", " + function args = map[string]interface{}{ "bucketName": bucketName, "objectPrefix": "", @@ -3225,7 +3609,7 @@ func testFunctional() { // Make the bucket 'public writeonly'. function = "SetBucketPolicy(bucketName, objectPrefix, bucketPolicy)" - function_all += ", " + function + functionAll += ", " + function args = map[string]interface{}{ "bucketName": bucketName, "objectPrefix": "", @@ -3239,7 +3623,7 @@ func testFunctional() { } // should return policy `writeonly`. function = "GetBucketPolicy(bucketName, objectPrefix)" - function_all += ", " + function + functionAll += ", " + function args = map[string]interface{}{ "bucketName": bucketName, "objectPrefix": "", @@ -3256,7 +3640,7 @@ func testFunctional() { } // Make the bucket 'public read/write'. function = "SetBucketPolicy(bucketName, objectPrefix, bucketPolicy)" - function_all += ", " + function + functionAll += ", " + function args = map[string]interface{}{ "bucketName": bucketName, "objectPrefix": "", @@ -3270,7 +3654,7 @@ func testFunctional() { } // should return policy `readwrite`. function = "GetBucketPolicy(bucketName, objectPrefix)" - function_all += ", " + function + functionAll += ", " + function args = map[string]interface{}{ "bucketName": bucketName, "objectPrefix": "", @@ -3287,7 +3671,7 @@ func testFunctional() { } // List all buckets. function = "ListBuckets()" - function_all += ", " + function + functionAll += ", " + function args = nil buckets, err := c.ListBuckets() @@ -3320,7 +3704,7 @@ func testFunctional() { buf := bytes.Repeat([]byte("f"), 1<<19) function = "PutObject(bucketName, objectName, reader, contentType)" - function_all += ", " + function + functionAll += ", " + function args = map[string]interface{}{ "bucketName": bucketName, "objectName": objectName, @@ -3363,7 +3747,7 @@ func testFunctional() { isRecursive := true // Recursive is true. function = "ListObjects(bucketName, objectName, isRecursive, doneCh)" - function_all += ", " + function + functionAll += ", " + function args = map[string]interface{}{ "bucketName": bucketName, "objectName": objectName, @@ -3384,7 +3768,7 @@ func testFunctional() { objFound = false isRecursive = true // Recursive is true. 
function = "ListObjectsV2(bucketName, objectName, isRecursive, doneCh)" - function_all += ", " + function + functionAll += ", " + function args = map[string]interface{}{ "bucketName": bucketName, "objectName": objectName, @@ -3405,7 +3789,7 @@ func testFunctional() { incompObjNotFound := true function = "ListIncompleteUploads(bucketName, objectName, isRecursive, doneCh)" - function_all += ", " + function + functionAll += ", " + function args = map[string]interface{}{ "bucketName": bucketName, "objectName": objectName, @@ -3424,7 +3808,7 @@ func testFunctional() { } function = "GetObject(bucketName, objectName)" - function_all += ", " + function + functionAll += ", " + function args = map[string]interface{}{ "bucketName": bucketName, "objectName": objectName, @@ -3446,9 +3830,10 @@ func testFunctional() { logError(testName, function, args, startTime, "", "GetObject bytes mismatch", err) return } + newReader.Close() function = "FGetObject(bucketName, objectName, fileName)" - function_all += ", " + function + functionAll += ", " + function args = map[string]interface{}{ "bucketName": bucketName, "objectName": objectName, @@ -3462,7 +3847,7 @@ func testFunctional() { } function = "PresignedHeadObject(bucketName, objectName, expires, reqParams)" - function_all += ", " + function + functionAll += ", " + function args = map[string]interface{}{ "bucketName": bucketName, "objectName": "", @@ -3475,7 +3860,7 @@ func testFunctional() { // Generate presigned HEAD object url. function = "PresignedHeadObject(bucketName, objectName, expires, reqParams)" - function_all += ", " + function + functionAll += ", " + function args = map[string]interface{}{ "bucketName": bucketName, "objectName": objectName, @@ -3504,7 +3889,7 @@ func testFunctional() { resp.Body.Close() function = "PresignedGetObject(bucketName, objectName, expires, reqParams)" - function_all += ", " + function + functionAll += ", " + function args = map[string]interface{}{ "bucketName": bucketName, "objectName": "", @@ -3518,7 +3903,7 @@ func testFunctional() { // Generate presigned GET object url. 
function = "PresignedGetObject(bucketName, objectName, expires, reqParams)" - function_all += ", " + function + functionAll += ", " + function args = map[string]interface{}{ "bucketName": bucketName, "objectName": objectName, @@ -3592,7 +3977,7 @@ func testFunctional() { } function = "PresignedPutObject(bucketName, objectName, expires)" - function_all += ", " + function + functionAll += ", " + function args = map[string]interface{}{ "bucketName": bucketName, "objectName": "", @@ -3605,7 +3990,7 @@ func testFunctional() { } function = "PresignedPutObject(bucketName, objectName, expires)" - function_all += ", " + function + functionAll += ", " + function args = map[string]interface{}{ "bucketName": bucketName, "objectName": objectName + "-presigned", @@ -3656,7 +4041,7 @@ func testFunctional() { } function = "RemoveObject(bucketName, objectName)" - function_all += ", " + function + functionAll += ", " + function args = map[string]interface{}{ "bucketName": bucketName, "objectName": objectName, @@ -3692,7 +4077,7 @@ func testFunctional() { } function = "RemoveBucket(bucketName)" - function_all += ", " + function + functionAll += ", " + function args = map[string]interface{}{ "bucketName": bucketName, } @@ -3720,7 +4105,7 @@ func testFunctional() { logError(testName, function, args, startTime, "", "File Remove failed", err) return } - successLogger(testName, function_all, args, startTime).Info() + successLogger(testName, functionAll, args, startTime).Info() } // Test for validating GetObject Reader* methods functioning when the @@ -3916,6 +4301,7 @@ func testPutObjectUploadSeekedObject() { logError(testName, function, args, startTime, "", "GetObject failed", err) return } + defer obj.Close() n, err = obj.Seek(int64(offset), 0) if err != nil { @@ -4504,6 +4890,7 @@ func testGetObjectReadSeekFunctionalV2() { logError(testName, function, args, startTime, "", "GetObject failed", err) return } + defer r.Close() st, err := r.Stat() if err != nil { @@ -4667,6 +5054,7 @@ func testGetObjectReadAtFunctionalV2() { logError(testName, function, args, startTime, "", "GetObject failed", err) return } + defer r.Close() st, err := r.Stat() if err != nil { @@ -4839,6 +5227,7 @@ func testCopyObjectV2() { logError(testName, function, args, startTime, "", "Stat failed", err) return } + r.Close() // Copy Source src := minio.NewSourceInfo(bucketName, objectName, nil) @@ -4921,6 +5310,10 @@ func testCopyObjectV2() { return } + // Close all the readers. + r.Close() + readerCopy.Close() + // CopyObject again but with wrong conditions src = minio.NewSourceInfo(bucketName, objectName, nil) err = src.SetUnmodifiedSinceCond(time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC)) @@ -5147,6 +5540,106 @@ func testCompose10KSourcesV2() { testComposeMultipleSources(c) } +func testEncryptedEmptyObject() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "PutObject(bucketName, objectName, reader, objectSize, opts)" + args := map[string]interface{}{} + + // Instantiate new minio client object + c, err := minio.NewV4( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + logError(testName, function, args, startTime, "", "Minio v4 client object creation failed", err) + return + } + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + // Make a new bucket in 'us-east-1' (source bucket). 
+ err = c.MakeBucket(bucketName, "us-east-1") + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + sse := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"object")) + + // 1. create an sse-c encrypted object to copy by uploading + const srcSize = 0 + var buf []byte // Empty buffer + args["objectName"] = "object" + _, err = c.PutObject(bucketName, "object", bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ServerSideEncryption: sse}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject call failed", err) + return + } + + // 2. Test CopyObject for an empty object + dstInfo, err := minio.NewDestinationInfo(bucketName, "new-object", sse, nil) + if err != nil { + args["objectName"] = "new-object" + function = "NewDestinationInfo(bucketName, objectName, sse, userMetadata)" + logError(testName, function, args, startTime, "", "NewDestinationInfo failed", err) + return + } + srcInfo := minio.NewSourceInfo(bucketName, "object", sse) + if err = c.CopyObject(dstInfo, srcInfo); err != nil { + function = "CopyObject(dstInfo, srcInfo)" + logError(testName, function, map[string]interface{}{}, startTime, "", "CopyObject failed", err) + return + } + + // 3. Test Key rotation + newSSE := encrypt.DefaultPBKDF([]byte("Don't Panic"), []byte(bucketName+"new-object")) + dstInfo, err = minio.NewDestinationInfo(bucketName, "new-object", newSSE, nil) + if err != nil { + args["objectName"] = "new-object" + function = "NewDestinationInfo(bucketName, objectName, encryptSSEC, userMetadata)" + logError(testName, function, args, startTime, "", "NewDestinationInfo failed", err) + return + } + + srcInfo = minio.NewSourceInfo(bucketName, "new-object", sse) + if err = c.CopyObject(dstInfo, srcInfo); err != nil { + function = "CopyObject(dstInfo, srcInfo)" + logError(testName, function, map[string]interface{}{}, startTime, "", "CopyObject with key rotation failed", err) + return + } + + // 4. Download the object. + reader, err := c.GetObject(bucketName, "new-object", minio.GetObjectOptions{ServerSideEncryption: newSSE}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + defer reader.Close() + + decBytes, err := ioutil.ReadAll(reader) + if err != nil { + logError(testName, function, map[string]interface{}{}, startTime, "", "ReadAll failed", err) + return + } + if !bytes.Equal(decBytes, buf) { + logError(testName, function, map[string]interface{}{}, startTime, "", "Downloaded object doesn't match the empty encrypted object", err) + return + } + // Delete all objects and buckets + delete(args, "objectName") + if err = cleanupBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "Cleanup failed", err) + return + } + + successLogger(testName, function, args, startTime).Info() +} + func testEncryptedCopyObjectWrapper(c *minio.Client) { // initialize logging params startTime := time.Now() @@ -5163,26 +5656,24 @@ func testEncryptedCopyObjectWrapper(c *minio.Client) { return } - key1 := minio.NewSSEInfo([]byte("32byteslongsecretkeymustbegiven1"), "AES256") - key2 := minio.NewSSEInfo([]byte("32byteslongsecretkeymustbegiven2"), "AES256") + sseSrc := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"srcObject")) + sseDst := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"dstObject")) // 1. 
create an sse-c encrypted object to copy by uploading const srcSize = 1024 * 1024 buf := bytes.Repeat([]byte("abcde"), srcSize) // gives a buffer of 5MiB - metadata := make(map[string]string) - for k, v := range key1.GetSSEHeaders() { - metadata[k] = v - } - _, err = c.PutObject(bucketName, "srcObject", bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{UserMetadata: metadata, Progress: nil}) + _, err = c.PutObject(bucketName, "srcObject", bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ + ServerSideEncryption: sseSrc, + }) if err != nil { logError(testName, function, args, startTime, "", "PutObject call failed", err) return } // 2. copy object and change encryption key - src := minio.NewSourceInfo(bucketName, "srcObject", &key1) + src := minio.NewSourceInfo(bucketName, "srcObject", sseSrc) args["source"] = src - dst, err := minio.NewDestinationInfo(bucketName, "dstObject", &key2, nil) + dst, err := minio.NewDestinationInfo(bucketName, "dstObject", sseDst, nil) if err != nil { logError(testName, function, args, startTime, "", "NewDestinationInfo failed", err) return @@ -5196,17 +5687,12 @@ func testEncryptedCopyObjectWrapper(c *minio.Client) { } // 3. get copied object and check if content is equal - opts := minio.GetObjectOptions{} - for k, v := range key2.GetSSEHeaders() { - opts.Set(k, v) - } coreClient := minio.Core{c} - reader, _, err := coreClient.GetObject(bucketName, "dstObject", opts) + reader, _, err := coreClient.GetObject(bucketName, "dstObject", minio.GetObjectOptions{ServerSideEncryption: sseDst}) if err != nil { logError(testName, function, args, startTime, "", "GetObject failed", err) return } - defer reader.Close() decBytes, err := ioutil.ReadAll(reader) if err != nil { @@ -5217,6 +5703,75 @@ func testEncryptedCopyObjectWrapper(c *minio.Client) { logError(testName, function, args, startTime, "", "Downloaded object mismatched for encrypted object", err) return } + reader.Close() + + // Test key rotation for source object in-place. + newSSE := encrypt.DefaultPBKDF([]byte("Don't Panic"), []byte(bucketName+"srcObject")) // replace key + dst, err = minio.NewDestinationInfo(bucketName, "srcObject", newSSE, nil) + if err != nil { + logError(testName, function, args, startTime, "", "NewDestinationInfo failed", err) + return + } + args["destination"] = dst + + err = c.CopyObject(dst, src) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObject failed", err) + return + } + + // Get copied object and check if content is equal + reader, _, err = coreClient.GetObject(bucketName, "srcObject", minio.GetObjectOptions{ServerSideEncryption: newSSE}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + + decBytes, err = ioutil.ReadAll(reader) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAll failed", err) + return + } + if !bytes.Equal(decBytes, buf) { + logError(testName, function, args, startTime, "", "Downloaded object mismatched for encrypted object", err) + return + } + reader.Close() + + // Test in-place decryption. 
+ dst, err = minio.NewDestinationInfo(bucketName, "srcObject", nil, nil) + if err != nil { + logError(testName, function, args, startTime, "", "NewDestinationInfo failed", err) + return + } + args["destination"] = dst + + src = minio.NewSourceInfo(bucketName, "srcObject", newSSE) + args["source"] = src + err = c.CopyObject(dst, src) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObject failed", err) + return + } + + // Get copied decrypted object and check if content is equal + reader, _, err = coreClient.GetObject(bucketName, "srcObject", minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + defer reader.Close() + + decBytes, err = ioutil.ReadAll(reader) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAll failed", err) + return + } + if !bytes.Equal(decBytes, buf) { + logError(testName, function, args, startTime, "", "Downloaded object mismatched for encrypted object", err) + return + } + // Delete all objects and buckets if err = cleanupBucket(bucketName, c); err != nil { logError(testName, function, args, startTime, "", "Cleanup failed", err) @@ -5270,6 +5825,7 @@ func testEncryptedCopyObjectV2() { return } + // c.TraceOn(os.Stderr) testEncryptedCopyObjectWrapper(c) } @@ -5990,7 +6546,7 @@ func testFunctionalV2() { startTime := time.Now() testName := getFuncName() function := "testFunctionalV2()" - function_all := "" + functionAll := "" args := map[string]interface{}{} // Seed random based on current time. @@ -6018,7 +6574,7 @@ func testFunctionalV2() { location := "us-east-1" // Make a new bucket. function = "MakeBucket(bucketName, location)" - function_all = "MakeBucket(bucketName, location)" + functionAll = "MakeBucket(bucketName, location)" args = map[string]interface{}{ "bucketName": bucketName, "location": location, @@ -6049,7 +6605,7 @@ func testFunctionalV2() { // Verify if bucket exits and you have access. var exists bool function = "BucketExists(bucketName)" - function_all += ", " + function + functionAll += ", " + function args = map[string]interface{}{ "bucketName": bucketName, } @@ -6065,7 +6621,7 @@ func testFunctionalV2() { // Make the bucket 'public read/write'. function = "SetBucketPolicy(bucketName, objectPrefix, bucketPolicy)" - function_all += ", " + function + functionAll += ", " + function args = map[string]interface{}{ "bucketName": bucketName, "objectPrefix": "", @@ -6079,7 +6635,7 @@ func testFunctionalV2() { // List all buckets. function = "ListBuckets()" - function_all += ", " + function + functionAll += ", " + function args = nil buckets, err := c.ListBuckets() if len(buckets) == 0 { @@ -6145,7 +6701,7 @@ func testFunctionalV2() { objFound := false isRecursive := true // Recursive is true. 
function = "ListObjects(bucketName, objectName, isRecursive, doneCh)" - function_all += ", " + function + functionAll += ", " + function args = map[string]interface{}{ "bucketName": bucketName, "objectName": objectName, @@ -6164,7 +6720,7 @@ func testFunctionalV2() { incompObjNotFound := true function = "ListIncompleteUploads(bucketName, objectName, isRecursive, doneCh)" - function_all += ", " + function + functionAll += ", " + function args = map[string]interface{}{ "bucketName": bucketName, "objectName": objectName, @@ -6182,7 +6738,7 @@ func testFunctionalV2() { } function = "GetObject(bucketName, objectName)" - function_all += ", " + function + functionAll += ", " + function args = map[string]interface{}{ "bucketName": bucketName, "objectName": objectName, @@ -6198,6 +6754,7 @@ func testFunctionalV2() { logError(testName, function, args, startTime, "", "ReadAll failed", err) return } + newReader.Close() if !bytes.Equal(newReadBytes, buf) { logError(testName, function, args, startTime, "", "Bytes mismatch", err) @@ -6205,7 +6762,7 @@ func testFunctionalV2() { } function = "FGetObject(bucketName, objectName, fileName)" - function_all += ", " + function + functionAll += ", " + function args = map[string]interface{}{ "bucketName": bucketName, "objectName": objectName, @@ -6219,7 +6776,7 @@ func testFunctionalV2() { // Generate presigned HEAD object url. function = "PresignedHeadObject(bucketName, objectName, expires, reqParams)" - function_all += ", " + function + functionAll += ", " + function args = map[string]interface{}{ "bucketName": bucketName, "objectName": objectName, @@ -6248,7 +6805,7 @@ func testFunctionalV2() { // Generate presigned GET object url. function = "PresignedGetObject(bucketName, objectName, expires, reqParams)" - function_all += ", " + function + functionAll += ", " + function args = map[string]interface{}{ "bucketName": bucketName, "objectName": objectName, @@ -6316,7 +6873,7 @@ func testFunctionalV2() { } function = "PresignedPutObject(bucketName, objectName, expires)" - function_all += ", " + function + functionAll += ", " + function args = map[string]interface{}{ "bucketName": bucketName, "objectName": objectName + "-presigned", @@ -6350,7 +6907,7 @@ func testFunctionalV2() { } function = "GetObject(bucketName, objectName)" - function_all += ", " + function + functionAll += ", " + function args = map[string]interface{}{ "bucketName": bucketName, "objectName": objectName + "-presigned", @@ -6366,6 +6923,7 @@ func testFunctionalV2() { logError(testName, function, args, startTime, "", "ReadAll failed", err) return } + newReader.Close() if !bytes.Equal(newReadBytes, buf) { logError(testName, function, args, startTime, "", "Bytes mismatch", err) @@ -6386,7 +6944,7 @@ func testFunctionalV2() { logError(testName, function, args, startTime, "", "File removes failed", err) return } - successLogger(testName, function_all, args, startTime).Info() + successLogger(testName, functionAll, args, startTime).Info() } // Test get object with GetObjectWithContext @@ -6454,10 +7012,12 @@ func testGetObjectWithContext() { logError(testName, function, args, startTime, "", "GetObjectWithContext failed unexpectedly", err) return } + if _, err = r.Stat(); err == nil { logError(testName, function, args, startTime, "", "GetObjectWithContext should fail on short timeout", err) return } + r.Close() ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour) args["ctx"] = ctx @@ -6736,6 +7296,7 @@ func testGetObjectWithContextV2() { logError(testName, function, args, startTime, "", 
"GetObjectWithContext should fail on short timeout", err) return } + r.Close() ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour) defer cancel() @@ -6922,8 +7483,6 @@ func main() { testGetObjectReadAtFunctional() testPresignedPostPolicy() testCopyObject() - testEncryptionPutGet() - testEncryptionFPut() testComposeObjectErrorCases() testCompose10KSources() testUserMetadataCopying() @@ -6941,8 +7500,13 @@ func main() { // SSE-C tests will only work over TLS connection. if tls { + testEncryptionPutGet() + testEncryptionFPut() + testEncryptedGetObjectReadAtFunctional() + testEncryptedGetObjectReadSeekFunctional() testEncryptedCopyObjectV2() testEncryptedCopyObject() + testEncryptedEmptyObject() } } else { testFunctional() diff --git a/vendor/github.com/minio/minio-go/pkg/encrypt/cbc.go b/vendor/github.com/minio/minio-go/pkg/encrypt/cbc.go deleted file mode 100644 index b0f2d6e08..000000000 --- a/vendor/github.com/minio/minio-go/pkg/encrypt/cbc.go +++ /dev/null @@ -1,294 +0,0 @@ -/* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2017 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package encrypt - -import ( - "bytes" - "crypto/aes" - "crypto/cipher" - "crypto/rand" - "encoding/base64" - "errors" - "io" -) - -// Crypt mode - encryption or decryption -type cryptMode int - -const ( - encryptMode cryptMode = iota - decryptMode -) - -// CBCSecureMaterials encrypts/decrypts data using AES CBC algorithm -type CBCSecureMaterials struct { - - // Data stream to encrypt/decrypt - stream io.Reader - - // Last internal error - err error - - // End of file reached - eof bool - - // Holds initial data - srcBuf *bytes.Buffer - - // Holds transformed data (encrypted or decrypted) - dstBuf *bytes.Buffer - - // Encryption algorithm - encryptionKey Key - - // Key to encrypts/decrypts data - contentKey []byte - - // Encrypted form of contentKey - cryptedKey []byte - - // Initialization vector - iv []byte - - // matDesc - currently unused - matDesc []byte - - // Indicate if we are going to encrypt or decrypt - cryptMode cryptMode - - // Helper that encrypts/decrypts data - blockMode cipher.BlockMode -} - -// NewCBCSecureMaterials builds new CBC crypter module with -// the specified encryption key (symmetric or asymmetric) -func NewCBCSecureMaterials(key Key) (*CBCSecureMaterials, error) { - if key == nil { - return nil, errors.New("Unable to recognize empty encryption properties") - } - return &CBCSecureMaterials{ - srcBuf: bytes.NewBuffer([]byte{}), - dstBuf: bytes.NewBuffer([]byte{}), - encryptionKey: key, - matDesc: []byte("{}"), - }, nil - -} - -// Close implements closes the internal stream. 
-func (s *CBCSecureMaterials) Close() error { - closer, ok := s.stream.(io.Closer) - if ok { - return closer.Close() - } - return nil -} - -// SetupEncryptMode - tells CBC that we are going to encrypt data -func (s *CBCSecureMaterials) SetupEncryptMode(stream io.Reader) error { - // Set mode to encrypt - s.cryptMode = encryptMode - - // Set underlying reader - s.stream = stream - - s.eof = false - s.srcBuf.Reset() - s.dstBuf.Reset() - - var err error - - // Generate random content key - s.contentKey = make([]byte, aes.BlockSize*2) - if _, err := rand.Read(s.contentKey); err != nil { - return err - } - // Encrypt content key - s.cryptedKey, err = s.encryptionKey.Encrypt(s.contentKey) - if err != nil { - return err - } - // Generate random IV - s.iv = make([]byte, aes.BlockSize) - if _, err = rand.Read(s.iv); err != nil { - return err - } - // New cipher - encryptContentBlock, err := aes.NewCipher(s.contentKey) - if err != nil { - return err - } - - s.blockMode = cipher.NewCBCEncrypter(encryptContentBlock, s.iv) - - return nil -} - -// SetupDecryptMode - tells CBC that we are going to decrypt data -func (s *CBCSecureMaterials) SetupDecryptMode(stream io.Reader, iv string, key string) error { - // Set mode to decrypt - s.cryptMode = decryptMode - - // Set underlying reader - s.stream = stream - - // Reset - s.eof = false - s.srcBuf.Reset() - s.dstBuf.Reset() - - var err error - - // Get IV - s.iv, err = base64.StdEncoding.DecodeString(iv) - if err != nil { - return err - } - - // Get encrypted content key - s.cryptedKey, err = base64.StdEncoding.DecodeString(key) - if err != nil { - return err - } - - // Decrypt content key - s.contentKey, err = s.encryptionKey.Decrypt(s.cryptedKey) - if err != nil { - return err - } - - // New cipher - decryptContentBlock, err := aes.NewCipher(s.contentKey) - if err != nil { - return err - } - - s.blockMode = cipher.NewCBCDecrypter(decryptContentBlock, s.iv) - return nil -} - -// GetIV - return randomly generated IV (per S3 object), base64 encoded. -func (s *CBCSecureMaterials) GetIV() string { - return base64.StdEncoding.EncodeToString(s.iv) -} - -// GetKey - return content encrypting key (cek) in encrypted form, base64 encoded. -func (s *CBCSecureMaterials) GetKey() string { - return base64.StdEncoding.EncodeToString(s.cryptedKey) -} - -// GetDesc - user provided encryption material description in JSON (UTF8) format. 
-func (s *CBCSecureMaterials) GetDesc() string { - return string(s.matDesc) -} - -// Fill buf with encrypted/decrypted data -func (s *CBCSecureMaterials) Read(buf []byte) (n int, err error) { - // Always fill buf from bufChunk at the end of this function - defer func() { - if s.err != nil { - n, err = 0, s.err - } else { - n, err = s.dstBuf.Read(buf) - } - }() - - // Return - if s.eof { - return - } - - // Fill dest buffer if its length is less than buf - for !s.eof && s.dstBuf.Len() < len(buf) { - - srcPart := make([]byte, aes.BlockSize) - dstPart := make([]byte, aes.BlockSize) - - // Fill src buffer - for s.srcBuf.Len() < aes.BlockSize*2 { - _, err = io.CopyN(s.srcBuf, s.stream, aes.BlockSize) - if err != nil { - break - } - } - - // Quit immediately for errors other than io.EOF - if err != nil && err != io.EOF { - s.err = err - return - } - - // Mark current encrypting/decrypting as finished - s.eof = (err == io.EOF) - - if s.eof && s.cryptMode == encryptMode { - if srcPart, err = pkcs5Pad(s.srcBuf.Bytes(), aes.BlockSize); err != nil { - s.err = err - return - } - } else { - _, _ = s.srcBuf.Read(srcPart) - } - - // Crypt srcPart content - for len(srcPart) > 0 { - - // Crypt current part - s.blockMode.CryptBlocks(dstPart, srcPart[:aes.BlockSize]) - - // Unpad when this is the last part and we are decrypting - if s.eof && s.cryptMode == decryptMode { - dstPart, err = pkcs5Unpad(dstPart, aes.BlockSize) - if err != nil { - s.err = err - return - } - } - - // Send crypted data to dstBuf - if _, wErr := s.dstBuf.Write(dstPart); wErr != nil { - s.err = wErr - return - } - // Move to the next part - srcPart = srcPart[aes.BlockSize:] - } - } - return -} - -// Unpad a set of bytes following PKCS5 algorithm -func pkcs5Unpad(buf []byte, blockSize int) ([]byte, error) { - len := len(buf) - if len == 0 { - return nil, errors.New("buffer is empty") - } - pad := int(buf[len-1]) - if pad > len || pad > blockSize { - return nil, errors.New("invalid padding size") - } - return buf[:len-pad], nil -} - -// Pad a set of bytes following PKCS5 algorithm -func pkcs5Pad(buf []byte, blockSize int) ([]byte, error) { - len := len(buf) - pad := blockSize - (len % blockSize) - padText := bytes.Repeat([]byte{byte(pad)}, pad) - return append(buf, padText...), nil -} diff --git a/vendor/github.com/minio/minio-go/pkg/encrypt/interface.go b/vendor/github.com/minio/minio-go/pkg/encrypt/interface.go deleted file mode 100644 index 482922ab7..000000000 --- a/vendor/github.com/minio/minio-go/pkg/encrypt/interface.go +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2017 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -// Package encrypt implements a generic interface to encrypt any stream of data. -// currently this package implements two types of encryption -// - Symmetric encryption using AES. -// - Asymmetric encrytion using RSA. -package encrypt - -import "io" - -// Materials - provides generic interface to encrypt any stream of data. 
-type Materials interface { - - // Closes the wrapped stream properly, initiated by the caller. - Close() error - - // Returns encrypted/decrypted data, io.Reader compatible. - Read(b []byte) (int, error) - - // Get randomly generated IV, base64 encoded. - GetIV() (iv string) - - // Get content encrypting key (cek) in encrypted form, base64 encoded. - GetKey() (key string) - - // Get user provided encryption material description in - // JSON (UTF8) format. This is not used, kept for future. - GetDesc() (desc string) - - // Setup encrypt mode, further calls of Read() function - // will return the encrypted form of data streamed - // by the passed reader - SetupEncryptMode(stream io.Reader) error - - // Setup decrypted mode, further calls of Read() function - // will return the decrypted form of data streamed - // by the passed reader - SetupDecryptMode(stream io.Reader, iv string, key string) error -} diff --git a/vendor/github.com/minio/minio-go/pkg/encrypt/keys.go b/vendor/github.com/minio/minio-go/pkg/encrypt/keys.go deleted file mode 100644 index 0ed95f5ff..000000000 --- a/vendor/github.com/minio/minio-go/pkg/encrypt/keys.go +++ /dev/null @@ -1,166 +0,0 @@ -/* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2017 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package encrypt - -import ( - "crypto/aes" - "crypto/rand" - "crypto/rsa" - "crypto/x509" - "errors" -) - -// Key - generic interface to encrypt/decrypt a key. -// We use it to encrypt/decrypt content key which is the key -// that encrypt/decrypt object data. -type Key interface { - // Encrypt data using to the set encryption key - Encrypt([]byte) ([]byte, error) - // Decrypt data using to the set encryption key - Decrypt([]byte) ([]byte, error) -} - -// SymmetricKey - encrypts data with a symmetric master key -type SymmetricKey struct { - masterKey []byte -} - -// Encrypt passed bytes -func (s *SymmetricKey) Encrypt(plain []byte) ([]byte, error) { - // Initialize an AES encryptor using a master key - keyBlock, err := aes.NewCipher(s.masterKey) - if err != nil { - return []byte{}, err - } - - // Pad the key before encryption - plain, _ = pkcs5Pad(plain, aes.BlockSize) - - encKey := []byte{} - encPart := make([]byte, aes.BlockSize) - - // Encrypt the passed key by block - for { - if len(plain) < aes.BlockSize { - break - } - // Encrypt the passed key - keyBlock.Encrypt(encPart, plain[:aes.BlockSize]) - // Add the encrypted block to the total encrypted key - encKey = append(encKey, encPart...) 
- // Pass to the next plain block - plain = plain[aes.BlockSize:] - } - return encKey, nil -} - -// Decrypt passed bytes -func (s *SymmetricKey) Decrypt(cipher []byte) ([]byte, error) { - // Initialize AES decrypter - keyBlock, err := aes.NewCipher(s.masterKey) - if err != nil { - return nil, err - } - - var plain []byte - plainPart := make([]byte, aes.BlockSize) - - // Decrypt the encrypted data block by block - for { - if len(cipher) < aes.BlockSize { - break - } - keyBlock.Decrypt(plainPart, cipher[:aes.BlockSize]) - // Add the decrypted block to the total result - plain = append(plain, plainPart...) - // Pass to the next cipher block - cipher = cipher[aes.BlockSize:] - } - - // Unpad the resulted plain data - plain, err = pkcs5Unpad(plain, aes.BlockSize) - if err != nil { - return nil, err - } - - return plain, nil -} - -// NewSymmetricKey generates a new encrypt/decrypt crypto using -// an AES master key password -func NewSymmetricKey(b []byte) *SymmetricKey { - return &SymmetricKey{masterKey: b} -} - -// AsymmetricKey - struct which encrypts/decrypts data -// using RSA public/private certificates -type AsymmetricKey struct { - publicKey *rsa.PublicKey - privateKey *rsa.PrivateKey -} - -// Encrypt data using public key -func (a *AsymmetricKey) Encrypt(plain []byte) ([]byte, error) { - cipher, err := rsa.EncryptPKCS1v15(rand.Reader, a.publicKey, plain) - if err != nil { - return nil, err - } - return cipher, nil -} - -// Decrypt data using public key -func (a *AsymmetricKey) Decrypt(cipher []byte) ([]byte, error) { - cipher, err := rsa.DecryptPKCS1v15(rand.Reader, a.privateKey, cipher) - if err != nil { - return nil, err - } - return cipher, nil -} - -// NewAsymmetricKey - generates a crypto module able to encrypt/decrypt -// data using a pair for private and public key -func NewAsymmetricKey(privData []byte, pubData []byte) (*AsymmetricKey, error) { - // Parse private key from passed data - priv, err := x509.ParsePKCS8PrivateKey(privData) - if err != nil { - return nil, err - } - privKey, ok := priv.(*rsa.PrivateKey) - if !ok { - return nil, errors.New("not a valid private key") - } - - // Parse public key from passed data - pub, err := x509.ParsePKIXPublicKey(pubData) - if err != nil { - return nil, err - } - - pubKey, ok := pub.(*rsa.PublicKey) - if !ok { - return nil, errors.New("not a valid public key") - } - - // Associate the private key with the passed public key - privKey.PublicKey = *pubKey - - return &AsymmetricKey{ - publicKey: pubKey, - privateKey: privKey, - }, nil -} diff --git a/vendor/github.com/minio/minio-go/pkg/encrypt/server-side.go b/vendor/github.com/minio/minio-go/pkg/encrypt/server-side.go new file mode 100644 index 000000000..92f5bf5ea --- /dev/null +++ b/vendor/github.com/minio/minio-go/pkg/encrypt/server-side.go @@ -0,0 +1,146 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2018 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package encrypt + +import ( + "crypto/md5" + "encoding/base64" + "errors" + "net/http" + + "golang.org/x/crypto/argon2" +) + +const ( + // sseGenericHeader is the AWS SSE header used for SSE-S3 and SSE-KMS. + sseGenericHeader = "X-Amz-Server-Side-Encryption" + + // sseCustomerAlgorithm is the AWS SSE-C algorithm HTTP header key. + sseCustomerAlgorithm = sseGenericHeader + "-Customer-Algorithm" + // sseCustomerKey is the AWS SSE-C encryption key HTTP header key. + sseCustomerKey = sseGenericHeader + "-Customer-Key" + // sseCustomerKeyMD5 is the AWS SSE-C encryption key MD5 HTTP header key. + sseCustomerKeyMD5 = sseGenericHeader + "-Customer-Key-MD5" + + // sseCopyCustomerAlgorithm is the AWS SSE-C algorithm HTTP header key for CopyObject API. + sseCopyCustomerAlgorithm = "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm" + // sseCopyCustomerKey is the AWS SSE-C encryption key HTTP header key for CopyObject API. + sseCopyCustomerKey = "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key" + // sseCopyCustomerKeyMD5 is the AWS SSE-C encryption key MD5 HTTP header key for CopyObject API. + sseCopyCustomerKeyMD5 = "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-MD5" +) + +// PBKDF creates a SSE-C key from the provided password and salt. +// PBKDF is a password-based key derivation function +// which can be used to derive a high-entropy cryptographic +// key from a low-entropy password and a salt. +type PBKDF func(password, salt []byte) ServerSide + +// DefaultPBKDF is the default PBKDF. It uses Argon2id with the +// recommended parameters from the RFC draft (1 pass, 64 MB memory, 4 threads). +var DefaultPBKDF PBKDF = func(password, salt []byte) ServerSide { + sse := ssec{} + copy(sse[:], argon2.IDKey(password, salt, 1, 64*1024, 4, 32)) + return sse +} + +// Type is the server-side-encryption method. It represents one of +// the following encryption methods: +// - SSE-C: server-side-encryption with customer provided keys +// - KMS: server-side-encryption with managed keys +// - S3: server-side-encryption using S3 storage encryption +type Type string + +const ( + // SSEC represents server-side-encryption with customer provided keys + SSEC Type = "SSE-C" + // KMS represents server-side-encryption with managed keys + KMS Type = "KMS" + // S3 represents server-side-encryption using S3 storage encryption + S3 Type = "S3" +) + +// ServerSide is a form of S3 server-side-encryption. +type ServerSide interface { + // Type returns the server-side-encryption method. + Type() Type + + // Marshal adds encryption headers to the provided HTTP headers. + // It marks an HTTP request as server-side-encryption request + // and inserts the required data into the headers. + Marshal(h http.Header) +} + +// NewSSE returns a server-side-encryption using S3 storage encryption. +// Using SSE-S3 the server will encrypt the object with server-managed keys. +func NewSSE() ServerSide { return s3{} } + +// NewSSEC returns a new server-side-encryption using SSE-C and the provided key. +// The key must be 32 bytes long. +func NewSSEC(key []byte) (ServerSide, error) { + if len(key) != 32 { + return nil, errors.New("encrypt: SSE-C key must be 256 bit long") + } + sse := ssec{} + copy(sse[:], key) + return sse, nil +} + +// SSECopy transforms a SSE-C encryption into a SSE-C copy +// encryption. This is required for SSE-C key rotation or a SSE-C +// copy where the source and the destination should be encrypted. +// +// If the provided sse is no SSE-C encryption SSECopy returns +// sse unmodified. 
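SSECopy is what makes server-side key rotation possible: the source object's key is marshalled into the X-Amz-Copy-Source-* headers while the destination carries the new key, so the server re-encrypts without the plaintext ever leaving it. A hedged sketch of the rotation and in-place decryption paths exercised by the functional tests; it assumes a configured *minio.Client and the imports from the previous sketch, with placeholder bucket and object names and the same illustrative passwords:

    // rotateSSECKey re-encrypts my-bucket/my-object under a new SSE-C key,
    // then decrypts it in place.
    func rotateSSECKey(c *minio.Client) error {
        oldKey := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte("my-bucket"+"my-object"))
        newKey := encrypt.DefaultPBKDF([]byte("Don't Panic"), []byte("my-bucket"+"my-object"))

        // The old key travels as copy-source SSE-C headers, the new key as
        // regular SSE-C headers on the destination.
        src := minio.NewSourceInfo("my-bucket", "my-object", oldKey)
        dst, err := minio.NewDestinationInfo("my-bucket", "my-object", newKey, nil)
        if err != nil {
            return err
        }
        if err := c.CopyObject(dst, src); err != nil {
            return err
        }

        // A nil ServerSide on the destination stores the copy unencrypted.
        plain, err := minio.NewDestinationInfo("my-bucket", "my-object", nil, nil)
        if err != nil {
            return err
        }
        return c.CopyObject(plain, minio.NewSourceInfo("my-bucket", "my-object", newKey))
    }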
+func SSECopy(sse ServerSide) ServerSide { + if sse == nil || sse.Type() != SSEC { + return sse + } + if sse, ok := sse.(ssec); ok { + return ssecCopy(sse) + } + return sse +} + +type ssec [32]byte + +func (s ssec) Type() Type { return SSEC } + +func (s ssec) Marshal(h http.Header) { + keyMD5 := md5.Sum(s[:]) + h.Set(sseCustomerAlgorithm, "AES256") + h.Set(sseCustomerKey, base64.StdEncoding.EncodeToString(s[:])) + h.Set(sseCustomerKeyMD5, base64.StdEncoding.EncodeToString(keyMD5[:])) +} + +type ssecCopy [32]byte + +func (s ssecCopy) Type() Type { return SSEC } + +func (s ssecCopy) Marshal(h http.Header) { + keyMD5 := md5.Sum(s[:]) + h.Set(sseCopyCustomerAlgorithm, "AES256") + h.Set(sseCopyCustomerKey, base64.StdEncoding.EncodeToString(s[:])) + h.Set(sseCopyCustomerKeyMD5, base64.StdEncoding.EncodeToString(keyMD5[:])) +} + +type s3 struct{} + +func (s s3) Type() Type { return S3 } + +func (s s3) Marshal(h http.Header) { h.Set(sseGenericHeader, "AES256") } diff --git a/vendor/github.com/minio/minio-go/pkg/policy/bucket-policy.go b/vendor/github.com/minio/minio-go/pkg/policy/bucket-policy.go index 9dda99efc..9d5f5b3fa 100644 --- a/vendor/github.com/minio/minio-go/pkg/policy/bucket-policy.go +++ b/vendor/github.com/minio/minio-go/pkg/policy/bucket-policy.go @@ -18,6 +18,8 @@ package policy import ( + "encoding/json" + "errors" "reflect" "strings" @@ -82,6 +84,33 @@ type User struct { CanonicalUser set.StringSet `json:"CanonicalUser,omitempty"` } +// UnmarshalJSON is a custom json unmarshaler for Principal field, +// the reason is that Principal can take a json struct represented by +// User string but it can also take a string. +func (u *User) UnmarshalJSON(data []byte) error { + // Try to unmarshal data in a struct equal to User, we need it + // to avoid infinite recursive call of this function + type AliasUser User + var au AliasUser + err := json.Unmarshal(data, &au) + if err == nil { + *u = User(au) + return nil + } + // Data type is not known, check if it is a json string + // which contains a star, which is permitted in the spec + var str string + err = json.Unmarshal(data, &str) + if err == nil { + if str != "*" { + return errors.New("unrecognized Principal field") + } + *u = User{AWS: set.CreateStringSet("*")} + return nil + } + return err +} + // Statement - minio policy statement type Statement struct { Actions set.StringSet `json:"Action"` @@ -564,14 +593,14 @@ func GetPolicy(statements []Statement, bucketName string, prefix string) BucketP return policy } -// GetPolicies - returns a map of policies rules of given bucket name, prefix in given statements. -func GetPolicies(statements []Statement, bucketName string) map[string]BucketPolicy { +// GetPolicies - returns a map of policies of given bucket name, prefix in given statements. 
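The custom unmarshaler lets both spellings of Principal found in real AWS policies, the bare "*" string and the structured {"AWS": [...]} object, decode into the same User value, which the table-driven test below walks through. A small illustrative sketch; the policy text is made up, and the Statements/Principal/AWS field names are assumed from this package's exported types:

    package main

    import (
        "encoding/json"
        "fmt"
        "log"

        "github.com/minio/minio-go/pkg/policy"
    )

    func main() {
        doc := `{
          "Version": "2012-10-17",
          "Statement": [
            {"Effect": "Allow", "Principal": "*",
             "Action": ["s3:GetObject"], "Resource": ["arn:aws:s3:::examplebucket/*"]},
            {"Effect": "Allow", "Principal": {"AWS": ["arn:aws:iam::111122223333:root"]},
             "Action": ["s3:PutObject"], "Resource": ["arn:aws:s3:::examplebucket/*"]}
          ]
        }`

        var p policy.BucketAccessPolicy
        if err := json.Unmarshal([]byte(doc), &p); err != nil {
            log.Fatalln(err)
        }
        // The bare "*" principal is normalized to User{AWS: {"*"}}.
        fmt.Println(p.Statements[0].Principal.AWS)
        fmt.Println(p.Statements[1].Principal.AWS)
    }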
+func GetPolicies(statements []Statement, bucketName, prefix string) map[string]BucketPolicy { policyRules := map[string]BucketPolicy{} objResources := set.NewStringSet() // Search all resources related to objects policy for _, s := range statements { for r := range s.Resources { - if strings.HasPrefix(r, awsResourcePrefix+bucketName+"/") { + if strings.HasPrefix(r, awsResourcePrefix+bucketName+"/"+prefix) { objResources.Add(r) } } diff --git a/vendor/github.com/minio/minio-go/pkg/policy/bucket-policy_test.go b/vendor/github.com/minio/minio-go/pkg/policy/bucket-policy_test.go index 1e5196f7c..b6b455106 100644 --- a/vendor/github.com/minio/minio-go/pkg/policy/bucket-policy_test.go +++ b/vendor/github.com/minio/minio-go/pkg/policy/bucket-policy_test.go @@ -26,6 +26,205 @@ import ( "github.com/minio/minio-go/pkg/set" ) +// TestUnmarshalBucketPolicy tests unmarsheling various examples +// of bucket policies, to verify the correctness of BucketAccessPolicy +// struct defined in this package. +func TestUnmarshalBucketPolicy(t *testing.T) { + var testCases = []struct { + policyData string + shouldSucceed bool + }{ + // Test 1 + {policyData: `{ + "Version":"2012-10-17", + "Statement":[ + { + "Sid":"AddCannedAcl", + "Effect":"Allow", + "Principal": {"AWS": ["arn:aws:iam::111122223333:root","arn:aws:iam::444455556666:root"]}, + "Action":["s3:PutObject","s3:PutObjectAcl"], + "Resource":["arn:aws:s3:::examplebucket/*"], + "Condition":{"StringEquals":{"s3:x-amz-acl":["public-read"]}} + } + ] +}`, shouldSucceed: true}, + // Test 2 + {policyData: `{ + "Version":"2012-10-17", + "Statement":[ + { + "Sid":"AddPerm", + "Effect":"Allow", + "Principal": "*", + "Action":["s3:GetObject"], + "Resource":["arn:aws:s3:::examplebucket/*"] + } + ] +}`, shouldSucceed: true}, + // Test 3 + {policyData: `{ + "Version": "2012-10-17", + "Id": "S3PolicyId1", + "Statement": [ + { + "Sid": "IPAllow", + "Effect": "Allow", + "Principal": "*", + "Action": "s3:*", + "Resource": "arn:aws:s3:::examplebucket/*", + "Condition": { + "IpAddress": {"aws:SourceIp": "54.240.143.0/24"}, + "NotIpAddress": {"aws:SourceIp": "54.240.143.188/32"} + } + } + ] +}`, shouldSucceed: true}, + // Test 4 + {policyData: `{ + "Id":"PolicyId2", + "Version":"2012-10-17", + "Statement":[ + { + "Sid":"AllowIPmix", + "Effect":"Allow", + "Principal":"*", + "Action":"s3:*", + "Resource":"arn:aws:s3:::examplebucket/*", + "Condition": { + "IpAddress": { + "aws:SourceIp": [ + "54.240.143.0/24", + "2001:DB8:1234:5678::/64" + ] + }, + "NotIpAddress": { + "aws:SourceIp": [ + "54.240.143.128/30", + "2001:DB8:1234:5678:ABCD::/80" + ] + } + } + } + ] +}`, shouldSucceed: true}, + // Test 5 + {policyData: `{ + "Version":"2012-10-17", + "Id":"http referer policy example", + "Statement":[ + { + "Sid":"Allow get requests originating from www.example.com and example.com.", + "Effect":"Allow", + "Principal":"*", + "Action":"s3:GetObject", + "Resource":"arn:aws:s3:::examplebucket/*", + "Condition":{ + "StringLike":{"aws:Referer":["http://www.example.com/*","http://example.com/*"]} + } + } + ] +}`, shouldSucceed: true}, + // Test 6 + {policyData: `{ + "Version": "2012-10-17", + "Id": "http referer policy example", + "Statement": [ + { + "Sid": "Allow get requests referred by www.example.com and example.com.", + "Effect": "Allow", + "Principal": "*", + "Action": "s3:GetObject", + "Resource": "arn:aws:s3:::examplebucket/*", + "Condition": { + "StringLike": {"aws:Referer": ["http://www.example.com/*","http://example.com/*"]} + } + }, + { + "Sid": "Explicit deny to ensure requests 
are allowed only from specific referer.", + "Effect": "Deny", + "Principal": "*", + "Action": "s3:*", + "Resource": "arn:aws:s3:::examplebucket/*", + "Condition": { + "StringNotLike": {"aws:Referer": ["http://www.example.com/*","http://example.com/*"]} + } + } + ] +}`, shouldSucceed: true}, + + // Test 7 + {policyData: `{ + "Version":"2012-10-17", + "Id":"PolicyForCloudFrontPrivateContent", + "Statement":[ + { + "Sid":" Grant a CloudFront Origin Identity access to support private content", + "Effect":"Allow", + "Principal":{"CanonicalUser":"79a59df900b949e55d96a1e698fbacedfd6e09d98eacf8f8d5218e7cd47ef2be"}, + "Action":"s3:GetObject", + "Resource":"arn:aws:s3:::example-bucket/*" + } + ] +}`, shouldSucceed: true}, + // Test 8 + {policyData: `{ + "Version":"2012-10-17", + "Statement":[ + { + "Sid":"111", + "Effect":"Allow", + "Principal":{"AWS":"1111111111"}, + "Action":"s3:PutObject", + "Resource":"arn:aws:s3:::examplebucket/*" + }, + { + "Sid":"112", + "Effect":"Deny", + "Principal":{"AWS":"1111111111" }, + "Action":"s3:PutObject", + "Resource":"arn:aws:s3:::examplebucket/*", + "Condition": { + "StringNotEquals": {"s3:x-amz-grant-full-control":["emailAddress=xyz@amazon.com"]} + } + } + ] +}`, shouldSucceed: true}, + // Test 9 + {policyData: `{ + "Version":"2012-10-17", + "Statement":[ + { + "Sid":"InventoryAndAnalyticsExamplePolicy", + "Effect":"Allow", + "Principal": {"Service": "s3.amazonaws.com"}, + "Action":["s3:PutObject"], + "Resource":["arn:aws:s3:::destination-bucket/*"], + "Condition": { + "ArnLike": { + "aws:SourceArn": "arn:aws:s3:::source-bucket" + }, + "StringEquals": { + "aws:SourceAccount": "1234567890", + "s3:x-amz-acl": "bucket-owner-full-control" + } + } + } + ] +}`, shouldSucceed: true}, + } + + for i, testCase := range testCases { + var policy BucketAccessPolicy + err := json.Unmarshal([]byte(testCase.policyData), &policy) + if testCase.shouldSucceed && err != nil { + t.Fatalf("Test %d: expected to succeed but it has an error: %v", i+1, err) + } + if !testCase.shouldSucceed && err == nil { + t.Fatalf("Test %d: expected to fail but succeeded", i+1) + } + } +} + // isValidStatement() is called and the result is validated. func TestIsValidStatement(t *testing.T) { testCases := []struct { @@ -1469,7 +1668,7 @@ func TestListBucketPolicies(t *testing.T) { } for _, testCase := range testCases { - policyRules := GetPolicies(testCase.statements, testCase.bucketName) + policyRules := GetPolicies(testCase.statements, testCase.bucketName, "") if !reflect.DeepEqual(testCase.expectedResult, policyRules) { t.Fatalf("%+v:\n expected: %+v, got: %+v", testCase, testCase.expectedResult, policyRules) } diff --git a/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v2.go b/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v2.go index 0b90c41f6..b4070938e 100644 --- a/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v2.go +++ b/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v2.go @@ -25,7 +25,6 @@ import ( "fmt" "net/http" "net/url" - "path/filepath" "sort" "strconv" "strings" @@ -40,21 +39,17 @@ const ( ) // Encode input URL path to URL encoded path. -func encodeURL2Path(req *http.Request) (path string) { - reqHost := getHostAddr(req) - // Encode URL path. 
- if isS3, _ := filepath.Match("*.s3*.amazonaws.com", reqHost); isS3 { - bucketName := reqHost[:strings.LastIndex(reqHost, ".s3")] - path = "/" + bucketName - path += req.URL.Path - path = s3utils.EncodePath(path) - return - } - if strings.HasSuffix(reqHost, ".storage.googleapis.com") { - path = "/" + strings.TrimSuffix(reqHost, ".storage.googleapis.com") - path += req.URL.Path - path = s3utils.EncodePath(path) - return +func encodeURL2Path(req *http.Request, virtualHost bool) (path string) { + if virtualHost { + reqHost := getHostAddr(req) + dotPos := strings.Index(reqHost, ".") + if dotPos > -1 { + bucketName := reqHost[:dotPos] + path = "/" + bucketName + path += req.URL.Path + path = s3utils.EncodePath(path) + return + } } path = s3utils.EncodePath(req.URL.Path) return @@ -62,7 +57,7 @@ func encodeURL2Path(req *http.Request) (path string) { // PreSignV2 - presign the request in following style. // https://${S3_BUCKET}.s3.amazonaws.com/${S3_OBJECT}?AWSAccessKeyId=${S3_ACCESS_KEY}&Expires=${TIMESTAMP}&Signature=${SIGNATURE}. -func PreSignV2(req http.Request, accessKeyID, secretAccessKey string, expires int64) *http.Request { +func PreSignV2(req http.Request, accessKeyID, secretAccessKey string, expires int64, virtualHost bool) *http.Request { // Presign is not needed for anonymous credentials. if accessKeyID == "" || secretAccessKey == "" { return &req @@ -78,7 +73,7 @@ func PreSignV2(req http.Request, accessKeyID, secretAccessKey string, expires in } // Get presigned string to sign. - stringToSign := preStringToSignV2(req) + stringToSign := preStringToSignV2(req, virtualHost) hm := hmac.New(sha1.New, []byte(secretAccessKey)) hm.Write([]byte(stringToSign)) @@ -132,7 +127,7 @@ func PostPresignSignatureV2(policyBase64, secretAccessKey string) string { // CanonicalizedProtocolHeaders = // SignV2 sign the request before Do() (AWS Signature Version 2). -func SignV2(req http.Request, accessKeyID, secretAccessKey string) *http.Request { +func SignV2(req http.Request, accessKeyID, secretAccessKey string, virtualHost bool) *http.Request { // Signature calculation is not needed for anonymous credentials. if accessKeyID == "" || secretAccessKey == "" { return &req @@ -147,7 +142,7 @@ func SignV2(req http.Request, accessKeyID, secretAccessKey string) *http.Request } // Calculate HMAC for secretAccessKey. - stringToSign := stringToSignV2(req) + stringToSign := stringToSignV2(req, virtualHost) hm := hmac.New(sha1.New, []byte(secretAccessKey)) hm.Write([]byte(stringToSign)) @@ -172,14 +167,14 @@ func SignV2(req http.Request, accessKeyID, secretAccessKey string) *http.Request // Expires + "\n" + // CanonicalizedProtocolHeaders + // CanonicalizedResource; -func preStringToSignV2(req http.Request) string { +func preStringToSignV2(req http.Request, virtualHost bool) string { buf := new(bytes.Buffer) // Write standard headers. writePreSignV2Headers(buf, req) // Write canonicalized protocol headers if any. writeCanonicalizedHeaders(buf, req) // Write canonicalized Query resources if any. - writeCanonicalizedResource(buf, req) + writeCanonicalizedResource(buf, req, virtualHost) return buf.String() } @@ -199,14 +194,14 @@ func writePreSignV2Headers(buf *bytes.Buffer, req http.Request) { // Date + "\n" + // CanonicalizedProtocolHeaders + // CanonicalizedResource; -func stringToSignV2(req http.Request) string { +func stringToSignV2(req http.Request, virtualHost bool) string { buf := new(bytes.Buffer) // Write standard headers. writeSignV2Headers(buf, req) // Write canonicalized protocol headers if any. 
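Because the V2 helpers now take an explicit virtualHost flag instead of pattern-matching the host against *.s3*.amazonaws.com, the caller decides whether the bucket must be folded back into the canonicalized resource. A hedged sketch of both call styles; the keys and URLs are placeholders:

    package main

    import (
        "fmt"
        "log"
        "net/http"

        "github.com/minio/minio-go/pkg/s3signer"
    )

    func main() {
        // Virtual-host style: the bucket is the first host label, so the signer
        // canonicalizes the resource as "/testbucket/object.txt".
        req, err := http.NewRequest("GET", "https://testbucket.s3.amazonaws.com/object.txt", nil)
        if err != nil {
            log.Fatalln(err)
        }
        signed := s3signer.SignV2(*req, "ACCESS-KEY", "SECRET-KEY", true)
        fmt.Println(signed.Header.Get("Authorization"))

        // Path style: the bucket is already in the URL path, so virtualHost=false.
        req2, err := http.NewRequest("GET", "https://s3.amazonaws.com/testbucket/object.txt", nil)
        if err != nil {
            log.Fatalln(err)
        }
        presigned := s3signer.PreSignV2(*req2, "ACCESS-KEY", "SECRET-KEY", 3600, false)
        fmt.Println(presigned.URL.RawQuery)
    }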
writeCanonicalizedHeaders(buf, req) // Write canonicalized Query resources if any. - writeCanonicalizedResource(buf, req) + writeCanonicalizedResource(buf, req, virtualHost) return buf.String() } @@ -288,11 +283,11 @@ var resourceList = []string{ // CanonicalizedResource = [ "/" + Bucket ] + // + // [ sub-resource, if present. For example "?acl", "?location", "?logging", or "?torrent"]; -func writeCanonicalizedResource(buf *bytes.Buffer, req http.Request) { +func writeCanonicalizedResource(buf *bytes.Buffer, req http.Request, virtualHost bool) { // Save request URL. requestURL := req.URL // Get encoded URL path. - buf.WriteString(encodeURL2Path(&req)) + buf.WriteString(encodeURL2Path(&req, virtualHost)) if requestURL.RawQuery != "" { var n int vals, _ := url.ParseQuery(requestURL.RawQuery) diff --git a/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature_test.go b/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature_test.go index d53483e4e..75115d19c 100644 --- a/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature_test.go +++ b/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature_test.go @@ -24,7 +24,7 @@ import ( ) // Tests signature calculation. -func TestSignatureCalculation(t *testing.T) { +func TestSignatureCalculationV4(t *testing.T) { req, err := http.NewRequest("GET", "https://s3.amazonaws.com", nil) if err != nil { t.Fatal("Error:", err) @@ -39,16 +39,6 @@ func TestSignatureCalculation(t *testing.T) { t.Fatal("Error: anonymous credentials should not have Signature query resource.") } - req = SignV2(*req, "", "") - if req.Header.Get("Authorization") != "" { - t.Fatal("Error: anonymous credentials should not have Authorization header.") - } - - req = PreSignV2(*req, "", "", 0) - if strings.Contains(req.URL.RawQuery, "Signature") { - t.Fatal("Error: anonymous credentials should not have Signature query resource.") - } - req = SignV4(*req, "ACCESS-KEY", "SECRET-KEY", "", "us-east-1") if req.Header.Get("Authorization") == "" { t.Fatal("Error: normal credentials should have Authorization header.") @@ -58,14 +48,42 @@ func TestSignatureCalculation(t *testing.T) { if !strings.Contains(req.URL.RawQuery, "X-Amz-Signature") { t.Fatal("Error: normal credentials should have Signature query resource.") } +} - req = SignV2(*req, "ACCESS-KEY", "SECRET-KEY") - if req.Header.Get("Authorization") == "" { - t.Fatal("Error: normal credentials should have Authorization header.") +func TestSignatureCalculationV2(t *testing.T) { + + var testCases = []struct { + endpointURL string + virtualHost bool + }{ + {endpointURL: "https://s3.amazonaws.com/", virtualHost: false}, + {endpointURL: "https://testbucket.s3.amazonaws.com/", virtualHost: true}, } - req = PreSignV2(*req, "ACCESS-KEY", "SECRET-KEY", 0) - if !strings.Contains(req.URL.RawQuery, "Signature") { - t.Fatal("Error: normal credentials should not have Signature query resource.") + for i, testCase := range testCases { + req, err := http.NewRequest("GET", testCase.endpointURL, nil) + if err != nil { + t.Fatalf("Test %d, Error: %v", i+1, err) + } + + req = SignV2(*req, "", "", testCase.virtualHost) + if req.Header.Get("Authorization") != "" { + t.Fatalf("Test %d, Error: anonymous credentials should not have Authorization header.", i+1) + } + + req = PreSignV2(*req, "", "", 0, testCase.virtualHost) + if strings.Contains(req.URL.RawQuery, "Signature") { + t.Fatalf("Test %d, Error: anonymous credentials should not have Signature query resource.", i+1) + } + + req = SignV2(*req, "ACCESS-KEY", "SECRET-KEY", 
testCase.virtualHost) + if req.Header.Get("Authorization") == "" { + t.Fatalf("Test %d, Error: normal credentials should have Authorization header.", i+1) + } + + req = PreSignV2(*req, "ACCESS-KEY", "SECRET-KEY", 0, testCase.virtualHost) + if !strings.Contains(req.URL.RawQuery, "Signature") { + t.Fatalf("Test %d, Error: normal credentials should not have Signature query resource.", i+1) + } } } diff --git a/vendor/github.com/minio/minio-go/pkg/s3signer/utils_test.go b/vendor/github.com/minio/minio-go/pkg/s3signer/utils_test.go index 407eddab3..e7fc7b38c 100644 --- a/vendor/github.com/minio/minio-go/pkg/s3signer/utils_test.go +++ b/vendor/github.com/minio/minio-go/pkg/s3signer/utils_test.go @@ -27,48 +27,59 @@ import ( // Tests url encoding. func TestEncodeURL2Path(t *testing.T) { type urlStrings struct { + virtualHost bool bucketName string objName string encodedObjName string } - bucketName := "bucketName" want := []urlStrings{ { + virtualHost: true, bucketName: "bucketName", objName: "本語", encodedObjName: "%E6%9C%AC%E8%AA%9E", }, { + virtualHost: true, bucketName: "bucketName", objName: "本語.1", encodedObjName: "%E6%9C%AC%E8%AA%9E.1", }, { + virtualHost: true, objName: ">123>3123123", bucketName: "bucketName", encodedObjName: "%3E123%3E3123123", }, { + virtualHost: true, bucketName: "bucketName", objName: "test 1 2.txt", encodedObjName: "test%201%202.txt", }, { + virtualHost: false, bucketName: "test.bucketName", objName: "test++ 1.txt", encodedObjName: "test%2B%2B%201.txt", }, } - for _, o := range want { - u, err := url.Parse(fmt.Sprintf("https://%s.s3.amazonaws.com/%s", bucketName, o.objName)) - if err != nil { - t.Fatal("Error:", err) + for i, o := range want { + var hostURL string + if o.virtualHost { + hostURL = fmt.Sprintf("https://%s.s3.amazonaws.com/%s", o.bucketName, o.objName) + } else { + hostURL = fmt.Sprintf("https://s3.amazonaws.com/%s/%s", o.bucketName, o.objName) } - urlPath := "/" + bucketName + "/" + o.encodedObjName - if urlPath != encodeURL2Path(&http.Request{URL: u}) { - t.Fatal("Error") + u, err := url.Parse(hostURL) + if err != nil { + t.Fatalf("Test %d, Error: %v", i+1, err) + } + expectedPath := "/" + o.bucketName + "/" + o.encodedObjName + if foundPath := encodeURL2Path(&http.Request{URL: u}, o.virtualHost); foundPath != expectedPath { + t.Fatalf("Test %d, Error: expected = `%v`, found = `%v`", i+1, expectedPath, foundPath) } } diff --git a/vendor/github.com/minio/minio-go/transport.go b/vendor/github.com/minio/minio-go/transport.go index e2dafe172..88700cfe7 100644 --- a/vendor/github.com/minio/minio-go/transport.go +++ b/vendor/github.com/minio/minio-go/transport.go @@ -2,7 +2,7 @@ /* * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2017 Minio, Inc. + * Copyright 2017-2018 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -25,9 +25,10 @@ import ( "time" ) -// This default transport is similar to http.DefaultTransport -// but with additional DisableCompression: -var defaultMinioTransport http.RoundTripper = &http.Transport{ +// DefaultTransport - this default transport is similar to +// http.DefaultTransport but with additional param DisableCompression +// is set to true to avoid decompressing content with 'gzip' encoding. 
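Exporting the transport makes it easy to mirror the library defaults while overriding individual knobs, for example TLS settings. A sketch under the assumption that the Client still exposes SetCustomTransport for installing a replacement RoundTripper; the endpoint and credentials are placeholders:

    package main

    import (
        "crypto/tls"
        "log"
        "net"
        "net/http"
        "time"

        minio "github.com/minio/minio-go"
    )

    func main() {
        // Same values as minio.DefaultTransport, plus a stricter TLS floor.
        tr := &http.Transport{
            Proxy: http.ProxyFromEnvironment,
            DialContext: (&net.Dialer{
                Timeout:   30 * time.Second,
                KeepAlive: 30 * time.Second,
                DualStack: true,
            }).DialContext,
            MaxIdleConns:          100,
            MaxIdleConnsPerHost:   100,
            IdleConnTimeout:       90 * time.Second,
            TLSHandshakeTimeout:   10 * time.Second,
            ExpectContinueTimeout: 1 * time.Second,
            TLSClientConfig:       &tls.Config{MinVersion: tls.VersionTLS12},
            DisableCompression:    true,
        }

        c, err := minio.NewV4("minio.example.com:9000", "ACCESS-KEY", "SECRET-KEY", true)
        if err != nil {
            log.Fatalln(err)
        }
        // SetCustomTransport is assumed to be available here; it swaps the
        // client's underlying http transport for tr.
        c.SetCustomTransport(tr)
    }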
+var DefaultTransport http.RoundTripper = &http.Transport{ Proxy: http.ProxyFromEnvironment, DialContext: (&net.Dialer{ Timeout: 30 * time.Second, @@ -35,6 +36,7 @@ var defaultMinioTransport http.RoundTripper = &http.Transport{ DualStack: true, }).DialContext, MaxIdleConns: 100, + MaxIdleConnsPerHost: 100, IdleConnTimeout: 90 * time.Second, TLSHandshakeTimeout: 10 * time.Second, ExpectContinueTimeout: 1 * time.Second, diff --git a/vendor/github.com/minio/minio-go/utils.go b/vendor/github.com/minio/minio-go/utils.go index 0f92546d3..2f02ac89f 100644 --- a/vendor/github.com/minio/minio-go/utils.go +++ b/vendor/github.com/minio/minio-go/utils.go @@ -221,16 +221,10 @@ var supportedHeaders = []string{ "cache-control", "content-encoding", "content-disposition", + "content-language", // Add more supported headers here. } -// cseHeaders is list of client side encryption headers -var cseHeaders = []string{ - "X-Amz-Iv", - "X-Amz-Key", - "X-Amz-Matdesc", -} - // isStorageClassHeader returns true if the header is a supported storage class header func isStorageClassHeader(headerKey string) bool { return strings.ToLower(amzStorageClass) == strings.ToLower(headerKey) @@ -247,19 +241,6 @@ func isStandardHeader(headerKey string) bool { return false } -// isCSEHeader returns true if header is a client side encryption header. -func isCSEHeader(headerKey string) bool { - key := strings.ToLower(headerKey) - for _, h := range cseHeaders { - header := strings.ToLower(h) - if (header == key) || - (("x-amz-meta-" + header) == key) { - return true - } - } - return false -} - // sseHeaders is list of server side encryption headers var sseHeaders = []string{ "x-amz-server-side-encryption", diff --git a/vendor/github.com/minio/minio-go/utils_test.go b/vendor/github.com/minio/minio-go/utils_test.go index 5411cc91a..2e60f77a2 100644 --- a/vendor/github.com/minio/minio-go/utils_test.go +++ b/vendor/github.com/minio/minio-go/utils_test.go @@ -312,6 +312,7 @@ func TestIsStandardHeader(t *testing.T) { {"content-type", true}, {"cache-control", true}, {"content-disposition", true}, + {"content-language", true}, {"random-header", false}, } @@ -349,32 +350,6 @@ func TestIsSSEHeader(t *testing.T) { } } -// Tests if header is client encryption header -func TestIsCSEHeader(t *testing.T) { - testCases := []struct { - // Input. - header string - // Expected result. - expectedValue bool - }{ - {"x-amz-iv", true}, - {"x-amz-key", true}, - {"x-amz-matdesc", true}, - {"x-amz-meta-x-amz-iv", true}, - {"x-amz-meta-x-amz-key", true}, - {"x-amz-meta-x-amz-matdesc", true}, - {"random-header", false}, - } - - for i, testCase := range testCases { - actual := isCSEHeader(testCase.header) - if actual != testCase.expectedValue { - t.Errorf("Test %d: Expected to pass, but failed", i+1) - } - } - -} - // Tests if header is x-amz-meta or x-amz-acl func TestIsAmzHeader(t *testing.T) { testCases := []struct {