Merge pull request #946 from restic/update-minio-go

Update github.com/minio/minio-go

commit ba91a76f5f
vendor/manifest | 4 (vendored)
@@ -28,8 +28,8 @@
 {
 "importpath": "github.com/minio/minio-go",
 "repository": "https://github.com/minio/minio-go",
-"revision": "dcaae9ec4d0b0a81d17f22f6d7a186491f6a55ec",
-"branch": "HEAD"
+"revision": "2f03abaa07d8bc57faef16cda7655ea62a7e0bed",
+"branch": "master"
 },
 {
 "importpath": "github.com/pkg/errors",
vendor/src/github.com/minio/minio-go/README.md | 41 (vendored)
@@ -1,4 +1,4 @@
-# Minio Go Client SDK for Amazon S3 Compatible Cloud Storage [![Slack](https://slack.minio.io/slack?type=svg)](https://slack.minio.io)
+# Minio Go Client SDK for Amazon S3 Compatible Cloud Storage [![Slack](https://slack.minio.io/slack?type=svg)](https://slack.minio.io) [![Sourcegraph](https://sourcegraph.com/github.com/minio/minio-go/-/badge.svg)](https://sourcegraph.com/github.com/minio/minio-go?badge)

 The Minio Go Client SDK provides simple APIs to access any Amazon S3 compatible object storage.

@@ -8,7 +8,6 @@ The Minio Go Client SDK provides simple APIs to access any Amazon S3 compatible
 - Amazon S3
 - Minio


 - AWS Signature Version 2
 - Google Cloud Storage (Compatibility Mode)
 - Openstack Swift + Swift3 middleware
@@ -19,19 +18,14 @@ This quickstart guide will show you how to install the Minio client SDK, connect

 This document assumes that you have a working [Go development environment](https://docs.minio.io/docs/how-to-install-golang).


 ## Download from Github

 ```sh

 go get -u github.com/minio/minio-go

 ```

 ## Initialize Minio Client

 Minio client requires the following four parameters specified to connect to an Amazon S3 compatible object storage.


 | Parameter | Description|
 | :--- | :--- |
 | endpoint | URL to object storage service. |
@@ -41,7 +35,6 @@ Minio client requires the following four parameters specified to connect to an A


 ```go

 package main

 import (
@@ -62,21 +55,14 @@ func main() {
 }

 log.Println("%v", minioClient) // minioClient is now setup


 ```

 ## Quick Start Example - File Uploader

 This example program connects to an object storage server, creates a bucket and uploads a file to the bucket.




 We will use the Minio server running at [https://play.minio.io:9000](https://play.minio.io:9000) in this example. Feel free to use this service for testing and development. Access credentials shown in this example are open to the public.

-#### FileUploader.go
+### FileUploader.go

 ```go
 package main

@@ -97,7 +83,7 @@ func main() {
 log.Fatalln(err)
 }

-// Make a new bucked called mymusic.
+// Make a new bucket called mymusic.
 bucketName := "mymusic"
 location := "us-east-1"

@@ -128,21 +114,17 @@ func main() {
 }
 ```

-#### Run FileUploader
+### Run FileUploader

 ```sh

 go run file-uploader.go
 2016/08/13 17:03:28 Successfully created mymusic
 2016/08/13 17:03:40 Successfully uploaded golden-oldies.zip of size 16253413

 mc ls play/mymusic/
 [2016-05-27 16:02:16 PDT] 17MiB golden-oldies.zip

 ```

 ## API Reference

 The full API Reference is available here.

 * [Complete API Reference](https://docs.minio.io/docs/golang-client-api-reference)
@@ -179,12 +161,18 @@ The full API Reference is available here.

 * [`GetObject`](https://docs.minio.io/docs/golang-client-api-reference#GetObject)
 * [`PutObject`](https://docs.minio.io/docs/golang-client-api-reference#PutObject)
+* [`PutObjectStreaming`](https://docs.minio.io/docs/golang-client-api-reference#PutObjectStreaming)
 * [`StatObject`](https://docs.minio.io/docs/golang-client-api-reference#StatObject)
 * [`CopyObject`](https://docs.minio.io/docs/golang-client-api-reference#CopyObject)
 * [`RemoveObject`](https://docs.minio.io/docs/golang-client-api-reference#RemoveObject)
 * [`RemoveObjects`](https://docs.minio.io/docs/golang-client-api-reference#RemoveObjects)
 * [`RemoveIncompleteUpload`](https://docs.minio.io/docs/golang-client-api-reference#RemoveIncompleteUpload)

+### API Reference: Encrypted Object Operations
+
+* [`GetEncryptedObject`](https://docs.minio.io/docs/golang-client-api-reference#GetEncryptedObject)
+* [`PutEncryptedObject`](https://docs.minio.io/docs/golang-client-api-reference#PutEncryptedObject)
+
 ### API Reference : Presigned Operations

 * [`PresignedGetObject`](https://docs.minio.io/docs/golang-client-api-reference#PresignedGetObject)
@@ -238,6 +226,11 @@ The full API Reference is available here.
 * [removeincompleteupload.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeincompleteupload.go)
 * [removeobjects.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeobjects.go)

+#### Full Examples : Encrypted Object Operations
+
+* [put-encrypted-object.go](https://github.com/minio/minio-go/blob/master/examples/s3/put-encrypted-object.go)
+* [get-encrypted-object.go](https://github.com/minio/minio-go/blob/master/examples/s3/get-encrypted-object.go)
+
 #### Full Examples : Presigned Operations
 * [presignedgetobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedgetobject.go)
 * [presignedputobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedputobject.go)
@@ -246,7 +239,7 @@ The full API Reference is available here.
 ## Explore Further
 * [Complete Documentation](https://docs.minio.io)
 * [Minio Go Client SDK API Reference](https://docs.minio.io/docs/golang-client-api-reference)
-* [Go Music Player App- Full Application Example ](https://docs.minio.io/docs/go-music-player-app)
+* [Go Music Player App Full Application Example](https://docs.minio.io/docs/go-music-player-app)

 ## Contribute

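To illustrate the API-reference entry added above, here is a minimal caller-side sketch of `PutObjectStreaming`. It is not part of the diff; the endpoint and credentials are placeholders, and the signature is assumed from the vendored minio-go v2 API.

```go
package main

import (
	"log"
	"os"

	"github.com/minio/minio-go"
)

func main() {
	// Placeholder endpoint and credentials; play.minio.io:9000 is the
	// public test server the README points at.
	client, err := minio.New("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatalln(err)
	}

	file, err := os.Open("golden-oldies.zip")
	if err != nil {
		log.Fatalln(err)
	}
	defer file.Close()

	// PutObjectStreaming uploads straight from the reader, without
	// needing the object size up front.
	n, err := client.PutObjectStreaming("mymusic", "golden-oldies.zip", file)
	if err != nil {
		log.Fatalln(err)
	}
	log.Println("Successfully uploaded", n, "bytes")
}
```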
@@ -1,5 +1,5 @@
 /*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016, 2017 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -48,6 +48,9 @@ type ErrorResponse struct {
 // Region where the bucket is located. This header is returned
 // only in HEAD bucket and ListObjects response.
 Region string
+
+// Headers of the returned S3 XML error
+Headers http.Header `xml:"-" json:"-"`
 }

 // ToErrorResponse - Returns parsed ErrorResponse struct from body and
@@ -72,8 +75,15 @@ func ToErrorResponse(err error) ErrorResponse {
 }
 }

-// Error - Returns HTTP error string
+// Error - Returns S3 error string.
 func (e ErrorResponse) Error() string {
+if e.Message == "" {
+msg, ok := s3ErrorResponseMap[e.Code]
+if !ok {
+msg = fmt.Sprintf("Error response code %s.", e.Code)
+}
+return msg
+}
 return e.Message
 }

@@ -91,6 +101,7 @@ func httpRespToErrorResponse(resp *http.Response, bucketName, objectName string)
 return ErrInvalidArgument(msg)
 }
 var errResp ErrorResponse
+
 err := xmlDecoder(resp.Body, &errResp)
 // Xml decoding failed with no body, fall back to HTTP headers.
 if err != nil {
@@ -101,9 +112,6 @@ func httpRespToErrorResponse(resp *http.Response, bucketName, objectName string)
 Code: "NoSuchBucket",
 Message: "The specified bucket does not exist.",
 BucketName: bucketName,
-RequestID: resp.Header.Get("x-amz-request-id"),
-HostID: resp.Header.Get("x-amz-id-2"),
-Region: resp.Header.Get("x-amz-bucket-region"),
 }
 } else {
 errResp = ErrorResponse{
@@ -111,9 +119,6 @@ func httpRespToErrorResponse(resp *http.Response, bucketName, objectName string)
 Message: "The specified key does not exist.",
 BucketName: bucketName,
 Key: objectName,
-RequestID: resp.Header.Get("x-amz-request-id"),
-HostID: resp.Header.Get("x-amz-id-2"),
-Region: resp.Header.Get("x-amz-bucket-region"),
 }
 }
 case http.StatusForbidden:
@@ -122,30 +127,44 @@ func httpRespToErrorResponse(resp *http.Response, bucketName, objectName string)
 Message: "Access Denied.",
 BucketName: bucketName,
 Key: objectName,
-RequestID: resp.Header.Get("x-amz-request-id"),
-HostID: resp.Header.Get("x-amz-id-2"),
-Region: resp.Header.Get("x-amz-bucket-region"),
 }
 case http.StatusConflict:
 errResp = ErrorResponse{
 Code: "Conflict",
 Message: "Bucket not empty.",
 BucketName: bucketName,
-RequestID: resp.Header.Get("x-amz-request-id"),
-HostID: resp.Header.Get("x-amz-id-2"),
-Region: resp.Header.Get("x-amz-bucket-region"),
+}
+case http.StatusPreconditionFailed:
+errResp = ErrorResponse{
+Code: "PreconditionFailed",
+Message: s3ErrorResponseMap["PreconditionFailed"],
+BucketName: bucketName,
+Key: objectName,
 }
 default:
 errResp = ErrorResponse{
 Code: resp.Status,
 Message: resp.Status,
 BucketName: bucketName,
-RequestID: resp.Header.Get("x-amz-request-id"),
-HostID: resp.Header.Get("x-amz-id-2"),
-Region: resp.Header.Get("x-amz-bucket-region"),
 }
 }
 }

+// Save hodID, requestID and region information
+// from headers if not available through error XML.
+if errResp.RequestID == "" {
+errResp.RequestID = resp.Header.Get("x-amz-request-id")
+}
+if errResp.HostID == "" {
+errResp.HostID = resp.Header.Get("x-amz-id-2")
+}
+if errResp.Region == "" {
+errResp.Region = resp.Header.Get("x-amz-bucket-region")
+}
+
+// Save headers returned in the API XML error
+errResp.Headers = resp.Header
+
 return errResp
 }

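Sketched from the caller's side, the net effect of the error-response changes above: `Error()` now falls back to `s3ErrorResponseMap` when the XML body carried no message, and the request ID, host ID, region, and raw headers are backfilled from the HTTP response. The snippet below is not part of the diff; endpoint, credentials, and names are placeholders.

```go
package main

import (
	"log"

	"github.com/minio/minio-go"
)

func main() {
	client, err := minio.New("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatalln(err)
	}

	if _, err = client.StatObject("mybucket", "missing-object"); err != nil {
		errResp := minio.ToErrorResponse(err)
		// RequestID, HostID and Region are filled from the response
		// headers whenever the XML error body omitted them; Headers
		// carries the raw S3 response headers.
		log.Printf("code=%s message=%q request-id=%s",
			errResp.Code, errResp.Message, errResp.Headers.Get("x-amz-request-id"))
	}
}
```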
@@ -72,6 +72,7 @@ func TestHttpRespToErrorResponse(t *testing.T) {
 RequestID: resp.Header.Get("x-amz-request-id"),
 HostID: resp.Header.Get("x-amz-id-2"),
 Region: resp.Header.Get("x-amz-bucket-region"),
+Headers: resp.Header,
 }
 return errResp
 }
@@ -172,7 +173,7 @@ func TestHttpRespToErrorResponse(t *testing.T) {
 for i, testCase := range testCases {
 actualResult := httpRespToErrorResponse(testCase.inputHTTPResp, testCase.bucketName, testCase.objectName)
 if !reflect.DeepEqual(testCase.expectedResult, actualResult) {
-t.Errorf("Test %d: Expected result to be '%+v', but instead got '%+v'", i+1, testCase.expectedResult, actualResult)
+t.Errorf("Test %d: Expected result to be '%#v', but instead got '%#v'", i+1, testCase.expectedResult, actualResult)
 }
 }
 }
@@ -261,3 +262,21 @@ func TestErrInvalidArgument(t *testing.T) {
 t.Errorf("Expected result to be '%+v', but instead got '%+v'", expectedResult, actualResult)
 }
 }
+
+// Tests if the Message field is missing.
+func TestErrWithoutMessage(t *testing.T) {
+errResp := ErrorResponse{
+Code: "AccessDenied",
+RequestID: "minio",
+}
+if errResp.Error() != "Access Denied." {
+t.Errorf("Expected \"Access Denied.\", got %s", errResp)
+}
+errResp = ErrorResponse{
+Code: "InvalidArgument",
+RequestID: "minio",
+}
+if errResp.Error() != "Error response code InvalidArgument." {
+t.Errorf("Expected \"Error response code InvalidArgument.\", got %s", errResp)
+}
+}
@@ -78,8 +78,15 @@ func (c Client) FGetObject(bucketName, objectName, filePath string) error {
 return err
 }

+// Initialize get object request headers to set the
+// appropriate range offsets to read from.
+reqHeaders := NewGetReqHeaders()
+if st.Size() > 0 {
+reqHeaders.SetRange(st.Size(), 0)
+}
+
 // Seek to current position for incoming reader.
-objectReader, objectStat, err := c.getObject(bucketName, objectName, st.Size(), 0)
+objectReader, objectStat, err := c.getObject(bucketName, objectName, reqHeaders)
 if err != nil {
 return err
 }
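In caller terms, the hunk above makes `FGetObject` resume rather than refetch: when partial data from an earlier run is already on disk, the request now carries a `Range` header starting at its size. A hedged sketch (placeholder names; client constructed as in the earlier examples):

```go
package main

import (
	"log"

	"github.com/minio/minio-go"
)

func main() {
	client, err := minio.New("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatalln(err)
	}

	// If an interrupted earlier run left partial data on disk, the client
	// now requests only the missing byte range instead of the whole object.
	if err := client.FGetObject("mybucket", "backup.tar", "/tmp/backup.tar"); err != nil {
		log.Fatalln(err)
	}
	log.Println("download complete")
}
```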
@@ -1,5 +1,5 @@
 /*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016, 2017 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -24,8 +24,36 @@ import (
 "strings"
 "sync"
 "time"

+"github.com/minio/minio-go/pkg/encrypt"
 )

+// GetEncryptedObject deciphers and streams data stored in the server after applying a specifed encryption materiels
+func (c Client) GetEncryptedObject(bucketName, objectName string, encryptMaterials encrypt.Materials) (io.Reader, error) {
+
+if encryptMaterials == nil {
+return nil, ErrInvalidArgument("Unable to recognize empty encryption properties")
+}
+
+// Fetch encrypted object
+encReader, err := c.GetObject(bucketName, objectName)
+if err != nil {
+return nil, err
+}
+// Stat object to get its encryption metadata
+st, err := encReader.Stat()
+if err != nil {
+return nil, err
+}
+
+// Setup object for decrytion, object is transparently
+// decrypted as the consumer starts reading.
+encryptMaterials.SetupDecryptMode(encReader, st.Metadata.Get(amzHeaderIV), st.Metadata.Get(amzHeaderKey))
+
+// Success.
+return encryptMaterials, nil
+}
+
 // GetObject - returns an seekable, readable object.
 func (c Client) GetObject(bucketName, objectName string) (*Object, error) {
 // Input validation.
@@ -39,6 +67,7 @@ func (c Client) GetObject(bucketName, objectName string) (*Object, error) {
 var httpReader io.ReadCloser
 var objectInfo ObjectInfo
 var err error
+
 // Create request channel.
 reqCh := make(chan getRequest)
 // Create response channel.
@@ -51,6 +80,9 @@ func (c Client) GetObject(bucketName, objectName string) (*Object, error) {
 defer close(reqCh)
 defer close(resCh)

+// Used to verify if etag of object has changed since last read.
+var etag string
+
 // Loop through the incoming control messages and read data.
 for {
 select {
@@ -69,16 +101,22 @@ func (c Client) GetObject(bucketName, objectName string) (*Object, error) {
 if req.isFirstReq {
 // First request is a Read/ReadAt.
 if req.isReadOp {
+reqHeaders := NewGetReqHeaders()
 // Differentiate between wanting the whole object and just a range.
 if req.isReadAt {
 // If this is a ReadAt request only get the specified range.
 // Range is set with respect to the offset and length of the buffer requested.
 // Do not set objectInfo from the first readAt request because it will not get
 // the whole object.
-httpReader, _, err = c.getObject(bucketName, objectName, req.Offset, int64(len(req.Buffer)))
+reqHeaders.SetRange(req.Offset, req.Offset+int64(len(req.Buffer))-1)
+httpReader, objectInfo, err = c.getObject(bucketName, objectName, reqHeaders)
 } else {
+if req.Offset > 0 {
+reqHeaders.SetRange(req.Offset, 0)
+}
+
 // First request is a Read request.
-httpReader, objectInfo, err = c.getObject(bucketName, objectName, req.Offset, 0)
+httpReader, objectInfo, err = c.getObject(bucketName, objectName, reqHeaders)
 }
 if err != nil {
 resCh <- getResponse{
@@ -86,6 +124,7 @@ func (c Client) GetObject(bucketName, objectName string) (*Object, error) {
 }
 return
 }
+etag = objectInfo.ETag
 // Read at least firstReq.Buffer bytes, if not we have
 // reached our EOF.
 size, err := io.ReadFull(httpReader, req.Buffer)
@@ -112,13 +151,18 @@ func (c Client) GetObject(bucketName, objectName string) (*Object, error) {
 // Exit the go-routine.
 return
 }
+etag = objectInfo.ETag
 // Send back the first response.
 resCh <- getResponse{
 objectInfo: objectInfo,
 }
 }
 } else if req.settingObjectInfo { // Request is just to get objectInfo.
-objectInfo, err := c.StatObject(bucketName, objectName)
+reqHeaders := NewGetReqHeaders()
+if etag != "" {
+reqHeaders.SetMatchETag(etag)
+}
+objectInfo, err := c.statObject(bucketName, objectName, reqHeaders)
 if err != nil {
 resCh <- getResponse{
 Error: err,
@@ -138,6 +182,10 @@ func (c Client) GetObject(bucketName, objectName string) (*Object, error) {
 // new ones when they haven't been already.
 // All readAt requests are new requests.
 if req.DidOffsetChange || !req.beenRead {
+reqHeaders := NewGetReqHeaders()
+if etag != "" {
+reqHeaders.SetMatchETag(etag)
+}
 if httpReader != nil {
 // Close previously opened http reader.
 httpReader.Close()
@@ -145,9 +193,15 @@ func (c Client) GetObject(bucketName, objectName string) (*Object, error) {
 // If this request is a readAt only get the specified range.
 if req.isReadAt {
 // Range is set with respect to the offset and length of the buffer requested.
-httpReader, _, err = c.getObject(bucketName, objectName, req.Offset, int64(len(req.Buffer)))
+reqHeaders.SetRange(req.Offset, req.Offset+int64(len(req.Buffer))-1)
+httpReader, _, err = c.getObject(bucketName, objectName, reqHeaders)
 } else {
-httpReader, objectInfo, err = c.getObject(bucketName, objectName, req.Offset, 0)
+// Range is set with respect to the offset.
+if req.Offset > 0 {
+reqHeaders.SetRange(req.Offset, 0)
+}
+
+httpReader, objectInfo, err = c.getObject(bucketName, objectName, reqHeaders)
 }
 if err != nil {
 resCh <- getResponse{
@@ -202,8 +256,8 @@ type getResponse struct {
 objectInfo ObjectInfo // Used for the first request.
 }

-// Object represents an open object. It implements Read, ReadAt,
-// Seeker, Close for a HTTP stream.
+// Object represents an open object. It implements
+// Reader, ReaderAt, Seeker, Closer for a HTTP stream.
 type Object struct {
 // Mutex.
 mutex *sync.Mutex
@@ -241,6 +295,12 @@ type Object struct {
 func (o *Object) doGetRequest(request getRequest) (getResponse, error) {
 o.reqCh <- request
 response := <-o.resCh
+
+// Return any error to the top level.
+if response.Error != nil {
+return response, response.Error
+}
+
 // This was the first request.
 if !o.isStarted {
 // The object has been operated on.
@@ -256,11 +316,6 @@ func (o *Object) doGetRequest(request getRequest) (getResponse, error) {
 if !o.beenRead {
 o.beenRead = response.didRead
 }
-// Return any error to the top level.
-if response.Error != nil {
-return response, response.Error
-}

 // Data are ready on the wire, no need to reinitiate connection in lower level
 o.seekData = false

@@ -566,7 +621,7 @@ func newObject(reqCh chan<- getRequest, resCh <-chan getResponse, doneCh chan<-
 //
 // For more information about the HTTP Range header.
 // go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35.
-func (c Client) getObject(bucketName, objectName string, offset, length int64) (io.ReadCloser, ObjectInfo, error) {
+func (c Client) getObject(bucketName, objectName string, reqHeaders RequestHeaders) (io.ReadCloser, ObjectInfo, error) {
 // Validate input arguments.
 if err := isValidBucketName(bucketName); err != nil {
 return nil, ObjectInfo{}, err
@@ -575,15 +630,10 @@ func (c Client) getObject(bucketName, objectName string, offset, length int64) (
 return nil, ObjectInfo{}, err
 }

+// Set all the necessary reqHeaders.
 customHeader := make(http.Header)
-// Set ranges if length and offset are valid.
-// See https://tools.ietf.org/html/rfc7233#section-3.1 for reference.
-if length > 0 && offset >= 0 {
-customHeader.Set("Range", fmt.Sprintf("bytes=%d-%d", offset, offset+length-1))
-} else if offset > 0 && length == 0 {
-customHeader.Set("Range", fmt.Sprintf("bytes=%d-", offset))
-} else if length < 0 && offset == 0 {
-customHeader.Set("Range", fmt.Sprintf("bytes=%d", length))
+for key, value := range reqHeaders.Header {
+customHeader[key] = value
 }

 // Execute GET on objectName.
@@ -591,6 +641,7 @@ func (c Client) getObject(bucketName, objectName string, offset, length int64) (
 bucketName: bucketName,
 objectName: objectName,
 customHeader: customHeader,
+contentSHA256Bytes: emptySHA256,
 })
 if err != nil {
 return nil, ObjectInfo{}, err
@@ -617,6 +668,7 @@ func (c Client) getObject(bucketName, objectName string, offset, length int64) (
 Region: resp.Header.Get("x-amz-bucket-region"),
 }
 }
+
 // Get content-type.
 contentType := strings.TrimSpace(resp.Header.Get("Content-Type"))
 if contentType == "" {
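A usage sketch for the `GetEncryptedObject` API added above, modeled on the get-encrypted-object.go example the README now links to. It is not part of the diff; the key bytes, bucket, and object names are placeholders, and the `encrypt` helpers (`NewSymmetricKey`, `NewCBCSecureMaterials`) are assumptions about the vendored pkg/encrypt package.

```go
package main

import (
	"io"
	"log"
	"os"

	"github.com/minio/minio-go"
	"github.com/minio/minio-go/pkg/encrypt"
)

func main() {
	client, err := minio.New("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatalln(err)
	}

	// Placeholder 32-byte symmetric key; it must match the key used
	// when the object was uploaded encrypted.
	key := encrypt.NewSymmetricKey([]byte("my32byteslongsymmetrickey1234567"))
	materials, err := encrypt.NewCBCSecureMaterials(key)
	if err != nil {
		log.Fatalln(err)
	}

	// The returned reader decrypts transparently as it is consumed.
	reader, err := client.GetEncryptedObject("mybucket", "myobject", materials)
	if err != nil {
		log.Fatalln(err)
	}

	out, err := os.Create("myobject.decrypted")
	if err != nil {
		log.Fatalln(err)
	}
	defer out.Close()

	if _, err := io.Copy(out, reader); err != nil {
		log.Fatalln(err)
	}
}
```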
@@ -34,8 +34,12 @@ func (c Client) GetBucketPolicy(bucketName, objectPrefix string) (bucketPolicy p
 if err := isValidObjectPrefix(objectPrefix); err != nil {
 return policy.BucketPolicyNone, err
 }
-policyInfo, err := c.getBucketPolicy(bucketName, objectPrefix)
+policyInfo, err := c.getBucketPolicy(bucketName)
 if err != nil {
+errResponse := ToErrorResponse(err)
+if errResponse.Code == "NoSuchBucketPolicy" {
+return policy.BucketPolicyNone, nil
+}
 return policy.BucketPolicyNone, err
 }
 return policy.GetPolicy(policyInfo.Statements, bucketName, objectPrefix), nil
@@ -50,15 +54,24 @@ func (c Client) ListBucketPolicies(bucketName, objectPrefix string) (bucketPolic
 if err := isValidObjectPrefix(objectPrefix); err != nil {
 return map[string]policy.BucketPolicy{}, err
 }
-policyInfo, err := c.getBucketPolicy(bucketName, objectPrefix)
+policyInfo, err := c.getBucketPolicy(bucketName)
 if err != nil {
+errResponse := ToErrorResponse(err)
+if errResponse.Code == "NoSuchBucketPolicy" {
+return map[string]policy.BucketPolicy{}, nil
+}
 return map[string]policy.BucketPolicy{}, err
 }
 return policy.GetPolicies(policyInfo.Statements, bucketName), nil
 }

+// Default empty bucket access policy.
+var emptyBucketAccessPolicy = policy.BucketAccessPolicy{
+Version: "2012-10-17",
+}
+
 // Request server for current bucket policy.
-func (c Client) getBucketPolicy(bucketName string, objectPrefix string) (policy.BucketAccessPolicy, error) {
+func (c Client) getBucketPolicy(bucketName string) (policy.BucketAccessPolicy, error) {
 // Get resources properly escaped and lined up before
 // using them in http request.
 urlValues := make(url.Values)
@@ -68,25 +81,23 @@ func (c Client) getBucketPolicy(bucketName string, objectPrefix string) (policy.
 resp, err := c.executeMethod("GET", requestMetadata{
 bucketName: bucketName,
 queryValues: urlValues,
+contentSHA256Bytes: emptySHA256,
 })

 defer closeResponse(resp)
 if err != nil {
-return policy.BucketAccessPolicy{}, err
+return emptyBucketAccessPolicy, err
 }

 if resp != nil {
 if resp.StatusCode != http.StatusOK {
-errResponse := httpRespToErrorResponse(resp, bucketName, "")
-if ToErrorResponse(errResponse).Code == "NoSuchBucketPolicy" {
-return policy.BucketAccessPolicy{Version: "2012-10-17"}, nil
-}
-return policy.BucketAccessPolicy{}, errResponse
+return emptyBucketAccessPolicy, httpRespToErrorResponse(resp, bucketName, "")
 }
 }

 bucketPolicyBuf, err := ioutil.ReadAll(resp.Body)
 if err != nil {
-return policy.BucketAccessPolicy{}, err
+return emptyBucketAccessPolicy, err
 }

 policy := policy.BucketAccessPolicy{}
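The caller-visible effect of the policy changes above, as a sketch (placeholders throughout; not part of the diff): a bucket with no policy attached now yields `policy.BucketPolicyNone` with a nil error, since the `NoSuchBucketPolicy` case is handled in `GetBucketPolicy`/`ListBucketPolicies` rather than special-cased inside `getBucketPolicy`.

```go
package main

import (
	"log"

	"github.com/minio/minio-go"
)

func main() {
	client, err := minio.New("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatalln(err)
	}

	// For a bucket without any policy this now prints "none" rather
	// than failing with a NoSuchBucketPolicy error.
	pol, err := client.GetBucketPolicy("mybucket", "")
	if err != nil {
		log.Fatalln(err)
	}
	log.Println("bucket policy:", pol)
}
```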
vendor/src/github.com/minio/minio-go/api-list.go | 50 (vendored)
@@ -35,7 +35,7 @@ import (
 //
 func (c Client) ListBuckets() ([]BucketInfo, error) {
 // Execute GET on service.
-resp, err := c.executeMethod("GET", requestMetadata{})
+resp, err := c.executeMethod("GET", requestMetadata{contentSHA256Bytes: emptySHA256})
 defer closeResponse(resp)
 if err != nil {
 return nil, err
@@ -168,14 +168,14 @@ func (c Client) ListObjectsV2(bucketName, objectPrefix string, recursive bool, d
 // ?delimiter - A delimiter is a character you use to group keys.
 // ?prefix - Limits the response to keys that begin with the specified prefix.
 // ?max-keys - Sets the maximum number of keys returned in the response body.
-func (c Client) listObjectsV2Query(bucketName, objectPrefix, continuationToken string, fetchOwner bool, delimiter string, maxkeys int) (listBucketV2Result, error) {
+func (c Client) listObjectsV2Query(bucketName, objectPrefix, continuationToken string, fetchOwner bool, delimiter string, maxkeys int) (ListBucketV2Result, error) {
 // Validate bucket name.
 if err := isValidBucketName(bucketName); err != nil {
-return listBucketV2Result{}, err
+return ListBucketV2Result{}, err
 }
 // Validate object prefix.
 if err := isValidObjectPrefix(objectPrefix); err != nil {
-return listBucketV2Result{}, err
+return ListBucketV2Result{}, err
 }
 // Get resources properly escaped and lined up before
 // using them in http request.
@@ -213,19 +213,20 @@ func (c Client) listObjectsV2Query(bucketName, objectPrefix, continuationToken s
 resp, err := c.executeMethod("GET", requestMetadata{
 bucketName: bucketName,
 queryValues: urlValues,
+contentSHA256Bytes: emptySHA256,
 })
 defer closeResponse(resp)
 if err != nil {
-return listBucketV2Result{}, err
+return ListBucketV2Result{}, err
 }
 if resp != nil {
 if resp.StatusCode != http.StatusOK {
-return listBucketV2Result{}, httpRespToErrorResponse(resp, bucketName, "")
+return ListBucketV2Result{}, httpRespToErrorResponse(resp, bucketName, "")
 }
 }

 // Decode listBuckets XML.
-listBucketResult := listBucketV2Result{}
+listBucketResult := ListBucketV2Result{}
 err = xmlDecoder(resp.Body, &listBucketResult)
 if err != nil {
 return listBucketResult, err
@@ -347,14 +348,14 @@ func (c Client) ListObjects(bucketName, objectPrefix string, recursive bool, don
 // ?delimiter - A delimiter is a character you use to group keys.
 // ?prefix - Limits the response to keys that begin with the specified prefix.
 // ?max-keys - Sets the maximum number of keys returned in the response body.
-func (c Client) listObjectsQuery(bucketName, objectPrefix, objectMarker, delimiter string, maxkeys int) (listBucketResult, error) {
+func (c Client) listObjectsQuery(bucketName, objectPrefix, objectMarker, delimiter string, maxkeys int) (ListBucketResult, error) {
 // Validate bucket name.
 if err := isValidBucketName(bucketName); err != nil {
-return listBucketResult{}, err
+return ListBucketResult{}, err
 }
 // Validate object prefix.
 if err := isValidObjectPrefix(objectPrefix); err != nil {
-return listBucketResult{}, err
+return ListBucketResult{}, err
 }
 // Get resources properly escaped and lined up before
 // using them in http request.
@@ -383,18 +384,19 @@ func (c Client) listObjectsQuery(bucketName, objectPrefix, objectMarker, delimit
 resp, err := c.executeMethod("GET", requestMetadata{
 bucketName: bucketName,
 queryValues: urlValues,
+contentSHA256Bytes: emptySHA256,
 })
 defer closeResponse(resp)
 if err != nil {
-return listBucketResult{}, err
+return ListBucketResult{}, err
 }
 if resp != nil {
 if resp.StatusCode != http.StatusOK {
-return listBucketResult{}, httpRespToErrorResponse(resp, bucketName, "")
+return ListBucketResult{}, httpRespToErrorResponse(resp, bucketName, "")
 }
 }
 // Decode listBuckets XML.
-listBucketResult := listBucketResult{}
+listBucketResult := ListBucketResult{}
 err = xmlDecoder(resp.Body, &listBucketResult)
 if err != nil {
 return listBucketResult, err
@@ -528,7 +530,7 @@ func (c Client) listIncompleteUploads(bucketName, objectPrefix string, recursive
 // ?delimiter - A delimiter is a character you use to group keys.
 // ?prefix - Limits the response to keys that begin with the specified prefix.
 // ?max-uploads - Sets the maximum number of multipart uploads returned in the response body.
-func (c Client) listMultipartUploadsQuery(bucketName, keyMarker, uploadIDMarker, prefix, delimiter string, maxUploads int) (listMultipartUploadsResult, error) {
+func (c Client) listMultipartUploadsQuery(bucketName, keyMarker, uploadIDMarker, prefix, delimiter string, maxUploads int) (ListMultipartUploadsResult, error) {
 // Get resources properly escaped and lined up before using them in http request.
 urlValues := make(url.Values)
 // Set uploads.
@@ -561,18 +563,19 @@ func (c Client) listMultipartUploadsQuery(bucketName, keyMarker, uploadIDMarker,
 resp, err := c.executeMethod("GET", requestMetadata{
 bucketName: bucketName,
 queryValues: urlValues,
+contentSHA256Bytes: emptySHA256,
 })
 defer closeResponse(resp)
 if err != nil {
-return listMultipartUploadsResult{}, err
+return ListMultipartUploadsResult{}, err
 }
 if resp != nil {
 if resp.StatusCode != http.StatusOK {
-return listMultipartUploadsResult{}, httpRespToErrorResponse(resp, bucketName, "")
+return ListMultipartUploadsResult{}, httpRespToErrorResponse(resp, bucketName, "")
 }
 }
 // Decode response body.
-listMultipartUploadsResult := listMultipartUploadsResult{}
+listMultipartUploadsResult := ListMultipartUploadsResult{}
 err = xmlDecoder(resp.Body, &listMultipartUploadsResult)
 if err != nil {
 return listMultipartUploadsResult, err
@@ -581,10 +584,10 @@ func (c Client) listMultipartUploadsQuery(bucketName, keyMarker, uploadIDMarker,
 }

 // listObjectParts list all object parts recursively.
-func (c Client) listObjectParts(bucketName, objectName, uploadID string) (partsInfo map[int]objectPart, err error) {
+func (c Client) listObjectParts(bucketName, objectName, uploadID string) (partsInfo map[int]ObjectPart, err error) {
 // Part number marker for the next batch of request.
 var nextPartNumberMarker int
-partsInfo = make(map[int]objectPart)
+partsInfo = make(map[int]ObjectPart)
 for {
 // Get list of uploaded parts a maximum of 1000 per request.
 listObjPartsResult, err := c.listObjectPartsQuery(bucketName, objectName, uploadID, nextPartNumberMarker, 1000)
@@ -659,7 +662,7 @@ func (c Client) getTotalMultipartSize(bucketName, objectName, uploadID string) (
 // ?part-number-marker - Specifies the part after which listing should
 // begin.
 // ?max-parts - Maximum parts to be listed per request.
-func (c Client) listObjectPartsQuery(bucketName, objectName, uploadID string, partNumberMarker, maxParts int) (listObjectPartsResult, error) {
+func (c Client) listObjectPartsQuery(bucketName, objectName, uploadID string, partNumberMarker, maxParts int) (ListObjectPartsResult, error) {
 // Get resources properly escaped and lined up before using them in http request.
 urlValues := make(url.Values)
 // Set part number marker.
@@ -679,18 +682,19 @@ func (c Client) listObjectPartsQuery(bucketName, objectName, uploadID string, pa
 bucketName: bucketName,
 objectName: objectName,
 queryValues: urlValues,
+contentSHA256Bytes: emptySHA256,
 })
 defer closeResponse(resp)
 if err != nil {
-return listObjectPartsResult{}, err
+return ListObjectPartsResult{}, err
 }
 if resp != nil {
 if resp.StatusCode != http.StatusOK {
-return listObjectPartsResult{}, httpRespToErrorResponse(resp, bucketName, objectName)
+return ListObjectPartsResult{}, httpRespToErrorResponse(resp, bucketName, objectName)
 }
 }
 // Decode list object parts XML.
-listObjectPartsResult := listObjectPartsResult{}
+listObjectPartsResult := ListObjectPartsResult{}
 err = xmlDecoder(resp.Body, &listObjectPartsResult)
 if err != nil {
 return listObjectPartsResult, err
@@ -49,6 +49,7 @@ func (c Client) getBucketNotification(bucketName string) (BucketNotification, er
 resp, err := c.executeMethod("GET", requestMetadata{
 bucketName: bucketName,
 queryValues: urlValues,
+contentSHA256Bytes: emptySHA256,
 })

 defer closeResponse(resp)
@@ -102,6 +103,14 @@ type eventMeta struct {
 Object objectMeta `json:"object"`
 }

+// sourceInfo represents information on the client that
+// triggered the event notification.
+type sourceInfo struct {
+Host string `json:"host"`
+Port string `json:"port"`
+UserAgent string `json:"userAgent"`
+}
+
 // NotificationEvent represents an Amazon an S3 bucket notification event.
 type NotificationEvent struct {
 EventVersion string `json:"eventVersion"`
@@ -113,6 +122,7 @@ type NotificationEvent struct {
 RequestParameters map[string]string `json:"requestParameters"`
 ResponseElements map[string]string `json:"responseElements"`
 S3 eventMeta `json:"s3"`
+Source sourceInfo `json:"source"`
 }

 // NotificationInfo - represents the collection of notification events, additionally
@@ -163,6 +173,7 @@ func (c Client) ListenBucketNotification(bucketName, prefix, suffix string, even
 resp, err := c.executeMethod("GET", requestMetadata{
 bucketName: bucketName,
 queryValues: urlValues,
+contentSHA256Bytes: emptySHA256,
 })
 if err != nil {
 continue
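A sketch of how the new `Source` field surfaces to consumers of bucket notifications (not part of the diff; endpoint, credentials, and bucket are placeholders, and the `ListenBucketNotification` signature is assumed from the vendored minio-go v2 API):

```go
package main

import (
	"log"

	"github.com/minio/minio-go"
)

func main() {
	client, err := minio.New("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatalln(err)
	}

	doneCh := make(chan struct{})
	defer close(doneCh)

	// Each event record now carries Source: the host, port and user
	// agent of the client that triggered the notification.
	for info := range client.ListenBucketNotification("mybucket", "", "", []string{
		"s3:ObjectCreated:*",
	}, doneCh) {
		if info.Err != nil {
			log.Fatalln(info.Err)
		}
		for _, event := range info.Records {
			log.Printf("%s from %s (%s)", event.EventName, event.Source.Host, event.Source.UserAgent)
		}
	}
}
```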
@ -41,7 +41,14 @@ import (
|
|||||||
//
|
//
|
||||||
// For Amazon S3 for more supported regions - http://docs.aws.amazon.com/general/latest/gr/rande.html
|
// For Amazon S3 for more supported regions - http://docs.aws.amazon.com/general/latest/gr/rande.html
|
||||||
// For Google Cloud Storage for more supported regions - https://cloud.google.com/storage/docs/bucket-locations
|
// For Google Cloud Storage for more supported regions - https://cloud.google.com/storage/docs/bucket-locations
|
||||||
func (c Client) MakeBucket(bucketName string, location string) error {
|
func (c Client) MakeBucket(bucketName string, location string) (err error) {
|
||||||
|
defer func() {
|
||||||
|
// Save the location into cache on a successful makeBucket response.
|
||||||
|
if err == nil {
|
||||||
|
c.bucketLocCache.Set(bucketName, location)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
// Validate the input arguments.
|
// Validate the input arguments.
|
||||||
if err := isValidBucketName(bucketName); err != nil {
|
if err := isValidBucketName(bucketName); err != nil {
|
||||||
return err
|
return err
|
||||||
@ -52,45 +59,70 @@ func (c Client) MakeBucket(bucketName string, location string) error {
|
|||||||
location = "us-east-1"
|
location = "us-east-1"
|
||||||
}
|
}
|
||||||
|
|
||||||
// Instantiate the request.
|
// Try creating bucket with the provided region, in case of
|
||||||
|
// invalid region error let's guess the appropriate region
|
||||||
|
// from S3 API headers
|
||||||
|
|
||||||
|
// Create a done channel to control 'newRetryTimer' go routine.
|
||||||
|
doneCh := make(chan struct{}, 1)
|
||||||
|
|
||||||
|
// Indicate to our routine to exit cleanly upon return.
|
||||||
|
defer close(doneCh)
|
||||||
|
|
||||||
|
// Blank indentifier is kept here on purpose since 'range' without
|
||||||
|
// blank identifiers is only supported since go1.4
|
||||||
|
// https://golang.org/doc/go1.4#forrange.
|
||||||
|
for _ = range c.newRetryTimer(MaxRetry, DefaultRetryUnit, DefaultRetryCap, MaxJitter, doneCh) {
|
||||||
|
// Initialize the makeBucket request.
|
||||||
req, err := c.makeBucketRequest(bucketName, location)
|
req, err := c.makeBucketRequest(bucketName, location)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
// Execute the request.
|
// Execute make bucket request.
|
||||||
resp, err := c.do(req)
|
resp, err := c.do(req)
|
||||||
defer closeResponse(resp)
|
defer closeResponse(resp)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
if resp != nil {
|
|
||||||
if resp.StatusCode != http.StatusOK {
|
if resp.StatusCode != http.StatusOK {
|
||||||
return httpRespToErrorResponse(resp, bucketName, "")
|
err := httpRespToErrorResponse(resp, bucketName, "")
|
||||||
|
errResp := ToErrorResponse(err)
|
||||||
|
if errResp.Code == "InvalidRegion" && errResp.Region != "" {
|
||||||
|
// Fetch bucket region found in headers
|
||||||
|
// of S3 error response, attempt bucket
|
||||||
|
// create again.
|
||||||
|
location = errResp.Region
|
||||||
|
continue
|
||||||
}
|
}
|
||||||
|
// Nothing to retry, fail.
|
||||||
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
// Save the location into cache on a successful makeBucket response.
|
// Control reaches here when bucket create was successful,
|
||||||
c.bucketLocCache.Set(bucketName, location)
|
// break out.
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
// Return.
|
// Success.
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// makeBucketRequest constructs request for makeBucket.
|
// Low level wrapper API For makeBucketRequest.
|
||||||
func (c Client) makeBucketRequest(bucketName string, location string) (*http.Request, error) {
|
func (c Client) makeBucketRequest(bucketName string, location string) (*http.Request, error) {
|
||||||
// Validate input arguments.
|
// Validate input arguments.
|
||||||
if err := isValidBucketName(bucketName); err != nil {
|
if err := isValidBucketName(bucketName); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
// In case of Amazon S3. The make bucket issued on already
|
// In case of Amazon S3. The make bucket issued on
|
||||||
// existing bucket would fail with 'AuthorizationMalformed' error
|
// already existing bucket would fail with
|
||||||
// if virtual style is used. So we default to 'path style' as that
|
// 'AuthorizationMalformed' error if virtual style is
|
||||||
// is the preferred method here. The final location of the
|
// used. So we default to 'path style' as that is the
|
||||||
// 'bucket' is provided through XML LocationConstraint data with
|
// preferred method here. The final location of the
|
||||||
// the request.
|
// 'bucket' is provided through XML LocationConstraint
|
||||||
|
// data with the request.
|
||||||
targetURL := c.endpointURL
|
targetURL := c.endpointURL
|
||||||
targetURL.Path = path.Join(bucketName, "") + "/"
|
targetURL.Path = path.Join(bucketName, "") + "/"
|
||||||
|
|
||||||
@ -103,7 +135,8 @@ func (c Client) makeBucketRequest(bucketName string, location string) (*http.Req
 	// set UserAgent for the request.
 	c.setUserAgent(req)

-	// set sha256 sum for signature calculation only with signature version '4'.
+	// set sha256 sum for signature calculation only with
+	// signature version '4'.
 	if c.signature.isV4() {
 		req.Header.Set("X-Amz-Content-Sha256", hex.EncodeToString(sum256([]byte{})))
 	}
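The header set above carries the SHA-256 of an empty body. That well-known digest can be reproduced with only the standard library:

```go
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

func main() {
	// sum256([]byte{}) in minio-go is just a SHA-256 over zero bytes.
	sum := sha256.Sum256([]byte{})
	fmt.Println(hex.EncodeToString(sum[:]))
	// Output: e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855
}
```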
@ -157,11 +190,14 @@ func (c Client) SetBucketPolicy(bucketName string, objectPrefix string, bucketPo
 	if err := isValidObjectPrefix(objectPrefix); err != nil {
 		return err
 	}

 	if !bucketPolicy.IsValidBucketPolicy() {
 		return ErrInvalidArgument(fmt.Sprintf("Invalid bucket policy provided. %s", bucketPolicy))
 	}
-	policyInfo, err := c.getBucketPolicy(bucketName, objectPrefix)
-	if err != nil {
+
+	policyInfo, err := c.getBucketPolicy(bucketName)
+	errResponse := ToErrorResponse(err)
+	if err != nil && errResponse.Code != "NoSuchBucketPolicy" {
 		return err
 	}
@ -238,6 +274,7 @@ func (c Client) removeBucketPolicy(bucketName string) error {
 	resp, err := c.executeMethod("DELETE", requestMetadata{
 		bucketName:  bucketName,
 		queryValues: urlValues,
+		contentSHA256Bytes: emptySHA256,
 	})
 	defer closeResponse(resp)
 	if err != nil {
@ -44,7 +44,7 @@ func isReadAt(reader io.Reader) (ok bool) {
 }

 // shouldUploadPart - verify if part should be uploaded.
-func shouldUploadPart(objPart objectPart, uploadReq uploadPartReq) bool {
+func shouldUploadPart(objPart ObjectPart, uploadReq uploadPartReq) bool {
 	// If part not found should upload the part.
 	if uploadReq.Part == nil {
 		return true
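Only the opening of `shouldUploadPart` appears in this hunk. For orientation, a self-contained sketch of the full decision it makes — assumed from the minio-go source of this vintage: upload when the part is unknown, or when any recorded attribute differs:

```go
package main

import "fmt"

// part is a local stand-in for minio-go's ObjectPart.
type part struct {
	ETag       string
	PartNumber int
	Size       int64
}

// shouldUpload mirrors shouldUploadPart: upload when the part was never
// recorded, or when ETag, number, or size disagrees with what was uploaded.
func shouldUpload(local part, uploaded *part) bool {
	if uploaded == nil {
		return true // part not found, upload it
	}
	return local.ETag != uploaded.ETag ||
		local.PartNumber != uploaded.PartNumber ||
		local.Size != uploaded.Size
}

func main() {
	known := part{ETag: "abc", PartNumber: 1, Size: 5 << 20}
	fmt.Println(shouldUpload(known, nil))    // true: nothing uploaded yet
	fmt.Println(shouldUpload(known, &known)) // false: identical part exists
}
```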
@ -185,9 +185,9 @@ func (c Client) newUploadID(bucketName, objectName string, metaData map[string][

 // getMpartUploadSession returns the upload id and the uploaded parts to continue a previous upload session
 // or initiate a new multipart session if no current one found
-func (c Client) getMpartUploadSession(bucketName, objectName string, metaData map[string][]string) (string, map[int]objectPart, error) {
+func (c Client) getMpartUploadSession(bucketName, objectName string, metaData map[string][]string) (string, map[int]ObjectPart, error) {
 	// A map of all uploaded parts.
-	var partsInfo map[int]objectPart
+	var partsInfo map[int]ObjectPart
 	var err error

 	uploadID, err := c.findUploadID(bucketName, objectName)

@ -220,7 +220,7 @@ func (c Client) getMpartUploadSession(bucketName, objectName string, metaData ma

 	// Allocate partsInfo if not done yet
 	if partsInfo == nil {
-		partsInfo = make(map[int]objectPart)
+		partsInfo = make(map[int]ObjectPart)
 	}

 	return uploadID, partsInfo, nil
@ -91,25 +91,11 @@ func (c Client) FPutObject(bucketName, objectName, filePath, contentType string)
 		return c.putObjectNoChecksum(bucketName, objectName, fileReader, fileSize, objMetadata, nil)
 	}

-	// NOTE: S3 doesn't allow anonymous multipart requests.
-	if s3utils.IsAmazonEndpoint(c.endpointURL) && c.anonymous {
-		if fileSize > int64(maxSinglePutObjectSize) {
-			return 0, ErrorResponse{
-				Code:       "NotImplemented",
-				Message:    fmt.Sprintf("For anonymous requests Content-Length cannot be %d.", fileSize),
-				Key:        objectName,
-				BucketName: bucketName,
-			}
-		}
-		// Do not compute MD5 for anonymous requests to Amazon
-		// S3. Uploads up to 5GiB in size.
-		return c.putObjectNoChecksum(bucketName, objectName, fileReader, fileSize, objMetadata, nil)
-	}

 	// Small object upload is initiated for uploads for input data size smaller than 5MiB.
 	if fileSize < minPartSize && fileSize >= 0 {
 		return c.putObjectSingle(bucketName, objectName, fileReader, fileSize, objMetadata, nil)
 	}

 	// Upload all large objects as multipart.
 	n, err = c.putObjectMultipartFromFile(bucketName, objectName, fileReader, fileSize, objMetadata, nil)
 	if err != nil {
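With the anonymous special case removed, `FPutObject` routes purely on size. A toy sketch of that routing; the 5MiB constant mirrors minio-go's `minPartSize`:

```go
package main

import "fmt"

const minPartSize = 5 * 1024 * 1024 // 5MiB, mirrors minio-go's constant

// uploadStrategy shows the size-based routing left in FPutObject above.
func uploadStrategy(size int64) string {
	if size >= 0 && size < minPartSize {
		return "single PUT"
	}
	return "multipart upload"
}

func main() {
	fmt.Println(uploadStrategy(1 << 20))  // single PUT
	fmt.Println(uploadStrategy(64 << 20)) // multipart upload
}
```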
@ -187,7 +173,7 @@ func (c Client) putObjectMultipartFromFile(bucketName, objectName string, fileRe
 	close(uploadPartsCh)

 	// Use three 'workers' to upload parts in parallel.
-	for w := 1; w <= 3; w++ {
+	for w := 1; w <= totalWorkers; w++ {
 		go func() {
 			// Deal with each part as it comes through the channel.
 			for uploadReq := range uploadPartsCh {
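The hardcoded `3` becomes a `totalWorkers` constant, but the shape stays the same: a fixed pool of goroutines draining a channel of part-upload requests. A minimal runnable sketch of the pattern:

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	const totalWorkers = 3 // mirrors the constant the diff introduces
	parts := make(chan int)
	var wg sync.WaitGroup

	// Feed ten part numbers, then close the channel so workers drain and exit.
	go func() {
		for p := 1; p <= 10; p++ {
			parts <- p
		}
		close(parts)
	}()

	for w := 1; w <= totalWorkers; w++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			for p := range parts {
				fmt.Printf("worker %d uploads part %d\n", id, p)
			}
		}(w)
	}
	wg.Wait()
}
```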
@ -228,7 +214,7 @@ func (c Client) putObjectMultipartFromFile(bucketName, objectName string, fileRe
 			}

 			// Create the part to be uploaded.
-			verifyObjPart := objectPart{
+			verifyObjPart := ObjectPart{
 				ETag:       hex.EncodeToString(hashSums["md5"]),
 				PartNumber: uploadReq.PartNum,
 				Size:       partSize,
@ -242,7 +228,7 @@ func (c Client) putObjectMultipartFromFile(bucketName, objectName string, fileRe
 			// Verify if part should be uploaded.
 			if shouldUploadPart(verifyObjPart, uploadReq) {
 				// Proceed to upload the part.
-				var objPart objectPart
+				var objPart ObjectPart
 				objPart, err = c.uploadPart(bucketName, objectName, uploadID, sectionReader, uploadReq.PartNum, hashSums["md5"], hashSums["sha256"], prtSize)
 				if err != nil {
 					uploadedPartsCh <- uploadedPartRes{
@ -285,7 +271,7 @@ func (c Client) putObjectMultipartFromFile(bucketName, objectName string, fileRe
 		}
 	}
 	// Store the part to be completed.
-	complMultipartUpload.Parts = append(complMultipartUpload.Parts, completePart{
+	complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{
 		ETag:       part.ETag,
 		PartNumber: part.PartNumber,
 	})
@ -66,6 +66,112 @@ func (c Client) putObjectMultipart(bucketName, objectName string, reader io.Read
 	return c.putObjectMultipartStream(bucketName, objectName, reader, size, metaData, progress)
 }

+// putObjectMultipartStreamNoChecksum - upload a large object using
+// multipart upload and streaming signature for signing payload.
+// N B We don't resume an incomplete multipart upload, we overwrite
+// existing parts of an incomplete upload.
+func (c Client) putObjectMultipartStreamNoChecksum(bucketName, objectName string,
+	reader io.Reader, size int64, metadata map[string][]string, progress io.Reader) (int64, error) {
+
+	// Input validation.
+	if err := isValidBucketName(bucketName); err != nil {
+		return 0, err
+	}
+	if err := isValidObjectName(objectName); err != nil {
+		return 0, err
+	}
+
+	// Get the upload id of a previously partially uploaded object or initiate a new multipart upload
+	uploadID, err := c.findUploadID(bucketName, objectName)
+	if err != nil {
+		return 0, err
+	}
+	if uploadID == "" {
+		// Initiates a new multipart request
+		uploadID, err = c.newUploadID(bucketName, objectName, metadata)
+		if err != nil {
+			return 0, err
+		}
+	}
+
+	// Calculate the optimal parts info for a given size.
+	totalPartsCount, partSize, lastPartSize, err := optimalPartInfo(size)
+	if err != nil {
+		return 0, err
+	}
+
+	// Total data read and written to server. should be equal to 'size' at the end of the call.
+	var totalUploadedSize int64
+
+	// Initialize parts uploaded map.
+	partsInfo := make(map[int]ObjectPart)
+
+	// Part number always starts with '1'.
+	var partNumber int
+	for partNumber = 1; partNumber <= totalPartsCount; partNumber++ {
+		// Update progress reader appropriately to the latest offset
+		// as we read from the source.
+		hookReader := newHook(reader, progress)
+
+		// Proceed to upload the part.
+		if partNumber == totalPartsCount {
+			partSize = lastPartSize
+		}
+
+		var objPart ObjectPart
+		objPart, err = c.uploadPart(bucketName, objectName, uploadID,
+			io.LimitReader(hookReader, partSize), partNumber, nil, nil, partSize)
+		// For unknown size, Read EOF we break away.
+		// We do not have to upload till totalPartsCount.
+		if err == io.EOF && size < 0 {
+			break
+		}
+
+		if err != nil {
+			return totalUploadedSize, err
+		}
+
+		// Save successfully uploaded part metadata.
+		partsInfo[partNumber] = objPart
+
+		// Save successfully uploaded size.
+		totalUploadedSize += partSize
+	}

+	// Verify if we uploaded all the data.
+	if size > 0 {
+		if totalUploadedSize != size {
+			return totalUploadedSize, ErrUnexpectedEOF(totalUploadedSize, size, bucketName, objectName)
+		}
+	}
+
+	// Complete multipart upload.
+	var complMultipartUpload completeMultipartUpload
+
+	// Loop over total uploaded parts to save them in
+	// Parts array before completing the multipart request.
+	for i := 1; i < partNumber; i++ {
+		part, ok := partsInfo[i]
+		if !ok {
+			return 0, ErrInvalidArgument(fmt.Sprintf("Missing part number %d", i))
+		}
+		complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{
+			ETag:       part.ETag,
+			PartNumber: part.PartNumber,
+		})
+	}
+
+	// Sort all completed parts.
+	sort.Sort(completedParts(complMultipartUpload.Parts))
+	_, err = c.completeMultipartUpload(bucketName, objectName, uploadID, complMultipartUpload)
+	if err != nil {
+		return totalUploadedSize, err
+	}
+
+	// Return final size.
+	return totalUploadedSize, nil
+}
+
 // putObjectStream uploads files bigger than 64MiB, and also supports
 // special case where size is unknown i.e '-1'.
 func (c Client) putObjectMultipartStream(bucketName, objectName string, reader io.Reader, size int64, metaData map[string][]string, progress io.Reader) (n int64, err error) {
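The new streaming path relies on `optimalPartInfo` to split `size` into equal parts plus a last part. A simplified, hypothetical version of that arithmetic (fixed 64MiB parts; the real helper also grows the part size for very large objects):

```go
package main

import "fmt"

const partSize = 64 * 1024 * 1024 // 64MiB; the real helper may choose larger

// partInfo is a simplified stand-in for minio-go's optimalPartInfo.
func partInfo(size int64) (totalParts int, lastPartSize int64) {
	totalParts = int(size / partSize)
	lastPartSize = size % partSize
	if lastPartSize > 0 {
		totalParts++ // a short trailing part
	} else {
		lastPartSize = partSize // size divides evenly
	}
	return totalParts, lastPartSize
}

func main() {
	n, last := partInfo(200 * 1024 * 1024) // 200MiB object
	fmt.Println(n, last)                   // 4 parts, last part 8MiB (8388608)
}
```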
@ -113,11 +219,9 @@ func (c Client) putObjectMultipartStream(bucketName, objectName string, reader i
 	// Calculates hash sums while copying partSize bytes into tmpBuffer.
 	prtSize, rErr := hashCopyN(hashAlgos, hashSums, tmpBuffer, reader, partSize)
-	if rErr != nil {
-		if rErr != io.EOF {
+	if rErr != nil && rErr != io.EOF {
 		return 0, rErr
 	}
-	}

 	var reader io.Reader
 	// Update progress reader appropriately to the latest offset
@ -127,13 +231,13 @@ func (c Client) putObjectMultipartStream(bucketName, objectName string, reader i
 		part, ok := partsInfo[partNumber]

 		// Verify if part should be uploaded.
-		if !ok || shouldUploadPart(objectPart{
+		if !ok || shouldUploadPart(ObjectPart{
 			ETag:       hex.EncodeToString(hashSums["md5"]),
 			PartNumber: partNumber,
 			Size:       prtSize,
 		}, uploadPartReq{PartNum: partNumber, Part: &part}) {
 			// Proceed to upload the part.
-			var objPart objectPart
+			var objPart ObjectPart
 			objPart, err = c.uploadPart(bucketName, objectName, uploadID, reader, partNumber, hashSums["md5"], hashSums["sha256"], prtSize)
 			if err != nil {
 				// Reset the temporary buffer upon any error.
@ -181,7 +285,7 @@ func (c Client) putObjectMultipartStream(bucketName, objectName string, reader i
 		if !ok {
 			return 0, ErrInvalidArgument(fmt.Sprintf("Missing part number %d", i))
 		}
-		complMultipartUpload.Parts = append(complMultipartUpload.Parts, completePart{
+		complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{
 			ETag:       part.ETag,
 			PartNumber: part.PartNumber,
 		})
@ -253,25 +357,25 @@ func (c Client) initiateMultipartUpload(bucketName, objectName string, metaData
 }

 // uploadPart - Uploads a part in a multipart upload.
-func (c Client) uploadPart(bucketName, objectName, uploadID string, reader io.Reader, partNumber int, md5Sum, sha256Sum []byte, size int64) (objectPart, error) {
+func (c Client) uploadPart(bucketName, objectName, uploadID string, reader io.Reader, partNumber int, md5Sum, sha256Sum []byte, size int64) (ObjectPart, error) {
 	// Input validation.
 	if err := isValidBucketName(bucketName); err != nil {
-		return objectPart{}, err
+		return ObjectPart{}, err
 	}
 	if err := isValidObjectName(objectName); err != nil {
-		return objectPart{}, err
+		return ObjectPart{}, err
 	}
 	if size > maxPartSize {
-		return objectPart{}, ErrEntityTooLarge(size, maxPartSize, bucketName, objectName)
+		return ObjectPart{}, ErrEntityTooLarge(size, maxPartSize, bucketName, objectName)
 	}
 	if size <= -1 {
-		return objectPart{}, ErrEntityTooSmall(size, bucketName, objectName)
+		return ObjectPart{}, ErrEntityTooSmall(size, bucketName, objectName)
 	}
 	if partNumber <= 0 {
-		return objectPart{}, ErrInvalidArgument("Part number cannot be negative or equal to zero.")
+		return ObjectPart{}, ErrInvalidArgument("Part number cannot be negative or equal to zero.")
 	}
 	if uploadID == "" {
-		return objectPart{}, ErrInvalidArgument("UploadID cannot be empty.")
+		return ObjectPart{}, ErrInvalidArgument("UploadID cannot be empty.")
 	}

 	// Get resources properly escaped and lined up before using them in http request.

@ -295,15 +399,15 @@ func (c Client) uploadPart(bucketName, objectName, uploadID string, reader io.Re
 	resp, err := c.executeMethod("PUT", reqMetadata)
 	defer closeResponse(resp)
 	if err != nil {
-		return objectPart{}, err
+		return ObjectPart{}, err
 	}
 	if resp != nil {
 		if resp.StatusCode != http.StatusOK {
-			return objectPart{}, httpRespToErrorResponse(resp, bucketName, objectName)
+			return ObjectPart{}, httpRespToErrorResponse(resp, bucketName, objectName)
 		}
 	}
 	// Once successfully uploaded, return completed part.
-	objPart := objectPart{}
+	objPart := ObjectPart{}
 	objPart.Size = size
 	objPart.PartNumber = partNumber
 	// Trim off the odd double quotes from ETag in the beginning and end.
@ -20,6 +20,7 @@ import (
 	"io"
 	"strings"

+	"github.com/minio/minio-go/pkg/encrypt"
 	"github.com/minio/minio-go/pkg/s3utils"
 )

@ -30,6 +31,29 @@ func (c Client) PutObjectWithProgress(bucketName, objectName string, reader io.R
 	return c.PutObjectWithMetadata(bucketName, objectName, reader, metaData, progress)
 }

+// PutEncryptedObject - Encrypt and store object.
+func (c Client) PutEncryptedObject(bucketName, objectName string, reader io.Reader, encryptMaterials encrypt.Materials, metaData map[string][]string, progress io.Reader) (n int64, err error) {
+
+	if encryptMaterials == nil {
+		return 0, ErrInvalidArgument("Unable to recognize empty encryption properties")
+	}
+
+	if err := encryptMaterials.SetupEncryptMode(reader); err != nil {
+		return 0, err
+	}
+
+	if metaData == nil {
+		metaData = make(map[string][]string)
+	}
+
+	// Set the necessary encryption headers, for future decryption.
+	metaData[amzHeaderIV] = []string{encryptMaterials.GetIV()}
+	metaData[amzHeaderKey] = []string{encryptMaterials.GetKey()}
+	metaData[amzHeaderMatDesc] = []string{encryptMaterials.GetDesc()}
+
+	return c.PutObjectWithMetadata(bucketName, objectName, encryptMaterials, metaData, progress)
+}
+
 // PutObjectWithMetadata - with metadata.
 func (c Client) PutObjectWithMetadata(bucketName, objectName string, reader io.Reader, metaData map[string][]string, progress io.Reader) (n int64, err error) {
 	// Input validation.
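A usage sketch for the new `PutEncryptedObject` API. The pkg/encrypt constructor names (`NewSymmetricKey`, `NewCBCSecureMaterials`) are assumptions based on this minio-go release and should be verified against the vendored source; endpoint and credentials are placeholders:

```go
package main

import (
	"log"
	"os"

	minio "github.com/minio/minio-go"
	"github.com/minio/minio-go/pkg/encrypt"
)

func main() {
	client, err := minio.New("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatalln(err)
	}

	// Assumed constructors from pkg/encrypt of this release; verify locally.
	key := encrypt.NewSymmetricKey([]byte("my-16-byte-key..")) // 16 bytes => AES-128
	materials, err := encrypt.NewCBCSecureMaterials(key)
	if err != nil {
		log.Fatalln(err)
	}

	file, err := os.Open("report.csv")
	if err != nil {
		log.Fatalln(err)
	}
	defer file.Close()

	// Content is encrypted client-side; IV and key material land in metadata.
	n, err := client.PutEncryptedObject("mybucket", "report.csv.enc", file, materials, nil, nil)
	if err != nil {
		log.Fatalln(err)
	}
	log.Println("uploaded", n, "encrypted bytes")
}
```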
@ -75,24 +99,6 @@ func (c Client) PutObjectWithMetadata(bucketName, objectName string, reader io.R
 		return c.putObjectNoChecksum(bucketName, objectName, reader, size, metaData, progress)
 	}

-	// NOTE: S3 doesn't allow anonymous multipart requests.
-	if s3utils.IsAmazonEndpoint(c.endpointURL) && c.anonymous {
-		if size <= -1 {
-			return 0, ErrorResponse{
-				Code:       "NotImplemented",
-				Message:    "Content-Length cannot be negative for anonymous requests.",
-				Key:        objectName,
-				BucketName: bucketName,
-			}
-		}
-		if size > maxSinglePutObjectSize {
-			return 0, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName)
-		}
-		// Do not compute MD5 for anonymous requests to Amazon
-		// S3. Uploads up to 5GiB in size.
-		return c.putObjectNoChecksum(bucketName, objectName, reader, size, metaData, progress)
-	}

 	// putSmall object.
 	if size < minPartSize && size >= 0 {
 		return c.putObjectSingle(bucketName, objectName, reader, size, metaData, progress)

@ -115,3 +121,81 @@ func (c Client) PutObjectWithMetadata(bucketName, objectName string, reader io.R
 	}
 	return n, nil
 }

+// PutObjectStreaming using AWS streaming signature V4
+func (c Client) PutObjectStreaming(bucketName, objectName string, reader io.Reader) (n int64, err error) {
+	return c.PutObjectStreamingWithProgress(bucketName, objectName, reader, nil, nil)
+}
+
+// PutObjectStreamingWithMetadata using AWS streaming signature V4
+func (c Client) PutObjectStreamingWithMetadata(bucketName, objectName string, reader io.Reader, metadata map[string][]string) (n int64, err error) {
+	return c.PutObjectStreamingWithProgress(bucketName, objectName, reader, metadata, nil)
+}
+
+// PutObjectStreamingWithProgress using AWS streaming signature V4
+func (c Client) PutObjectStreamingWithProgress(bucketName, objectName string, reader io.Reader, metadata map[string][]string, progress io.Reader) (n int64, err error) {
+	// NOTE: Streaming signature is not supported by GCS.
+	if s3utils.IsGoogleEndpoint(c.endpointURL) {
+		return 0, ErrorResponse{
+			Code:       "NotImplemented",
+			Message:    "AWS streaming signature v4 is not supported with Google Cloud Storage",
+			Key:        objectName,
+			BucketName: bucketName,
+		}
+	}
+	// This method should return error with signature v2 minioClient.
+	if c.signature.isV2() {
+		return 0, ErrorResponse{
+			Code:       "NotImplemented",
+			Message:    "AWS streaming signature v4 is not supported with minio client initialized for AWS signature v2",
+			Key:        objectName,
+			BucketName: bucketName,
+		}
+	}
+
+	// Size of the object.
+	var size int64
+
+	// Get reader size.
+	size, err = getReaderSize(reader)
+	if err != nil {
+		return 0, err
+	}
+
+	// Check for largest object size allowed.
+	if size > int64(maxMultipartPutObjectSize) {
+		return 0, ErrEntityTooLarge(size, maxMultipartPutObjectSize, bucketName, objectName)
+	}
+
+	// If size cannot be found on a stream, it is not possible
+	// to upload using streaming signature, fall back to multipart.
+	if size < 0 {
+		return c.putObjectMultipartStream(bucketName, objectName, reader, size, metadata, progress)
+	}
+
+	// Set signature type to streaming signature v4.
+	c.signature = SignatureV4Streaming
+
+	if size < minPartSize && size >= 0 {
+		return c.putObjectNoChecksum(bucketName, objectName, reader, size, metadata, progress)
+	}
+
+	// For all sizes greater than 64MiB do multipart.
+	n, err = c.putObjectMultipartStreamNoChecksum(bucketName, objectName, reader, size, metadata, progress)
+	if err != nil {
+		errResp := ToErrorResponse(err)
+		// Verify if multipart functionality is not available, if not
+		// fall back to single PutObject operation.
+		if errResp.Code == "AccessDenied" && strings.Contains(errResp.Message, "Access Denied") {
+			// Verify if size of reader is greater than '5GiB'.
+			if size > maxSinglePutObjectSize {
+				return 0, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName)
+			}
+			// Fall back to uploading as single PutObject operation.
+			return c.putObjectNoChecksum(bucketName, objectName, reader, size, metadata, progress)
+		}
+		return n, err
+	}
+
+	return n, nil
+}
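A usage sketch for the new streaming upload entry point; the `PutObjectStreaming` signature is taken from the hunk above, while the endpoint and credentials are placeholders:

```go
package main

import (
	"log"
	"os"

	minio "github.com/minio/minio-go"
)

func main() {
	// Placeholders for endpoint and credentials; requires a V4-capable server.
	client, err := minio.NewV4("s3.amazonaws.com", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatalln(err)
	}

	file, err := os.Open("backup.tar")
	if err != nil {
		log.Fatalln(err)
	}
	defer file.Close()

	// Chunks are signed as they stream instead of hashing the whole payload first.
	n, err := client.PutObjectStreaming("mybucket", "backup.tar", file)
	if err != nil {
		log.Fatalln(err)
	}
	log.Println("uploaded", n, "bytes")
}
```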
@ -32,16 +32,16 @@ type uploadedPartRes struct {
 	Error   error // Any error encountered while uploading the part.
 	PartNum int   // Number of the part uploaded.
 	Size    int64 // Size of the part uploaded.
-	Part    *objectPart
+	Part    *ObjectPart
 }

 type uploadPartReq struct {
 	PartNum int         // Number of the part uploaded.
-	Part    *objectPart // Size of the part uploaded.
+	Part    *ObjectPart // Size of the part uploaded.
 }

 // shouldUploadPartReadAt - verify if part should be uploaded.
-func shouldUploadPartReadAt(objPart objectPart, uploadReq uploadPartReq) bool {
+func shouldUploadPartReadAt(objPart ObjectPart, uploadReq uploadPartReq) bool {
 	// If part not found part should be uploaded.
 	if uploadReq.Part == nil {
 		return true
@ -115,7 +115,7 @@ func (c Client) putObjectMultipartFromReadAt(bucketName, objectName string, read
 	close(uploadPartsCh)

 	// Receive each part number from the channel allowing three parallel uploads.
-	for w := 1; w <= 3; w++ {
+	for w := 1; w <= totalWorkers; w++ {
 		go func() {
 			// Read defaults to reading at 5MiB buffer.
 			readAtBuffer := make([]byte, optimalReadBufferSize)
@ -164,7 +164,7 @@ func (c Client) putObjectMultipartFromReadAt(bucketName, objectName string, read
 			}

 			// Verify object if its uploaded.
-			verifyObjPart := objectPart{
+			verifyObjPart := ObjectPart{
 				PartNumber: uploadReq.PartNum,
 				Size:       partSize,
 			}
@ -178,7 +178,7 @@ func (c Client) putObjectMultipartFromReadAt(bucketName, objectName string, read
 			// to update any progress bar.
 			if shouldUploadPartReadAt(verifyObjPart, uploadReq) {
 				// Proceed to upload the part.
-				var objPart objectPart
+				var objPart ObjectPart
 				objPart, err = c.uploadPart(bucketName, objectName, uploadID, tmpBuffer, uploadReq.PartNum, hashSums["md5"], hashSums["sha256"], prtSize)
 				if err != nil {
 					uploadedPartsCh <- uploadedPartRes{

@ -224,7 +224,7 @@ func (c Client) putObjectMultipartFromReadAt(bucketName, objectName string, read
 		}
 	}
 	// Store the parts to be completed in order.
-	complMultipartUpload.Parts = append(complMultipartUpload.Parts, completePart{
+	complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{
 		ETag:       part.ETag,
 		PartNumber: part.PartNumber,
 	})
@ -125,7 +125,7 @@ func getReaderSize(reader io.Reader) (size int64, err error) {

 // completedParts is a collection of parts sortable by their part numbers.
 // used for sorting the uploaded parts before completing the multipart request.
-type completedParts []completePart
+type completedParts []CompletePart

 func (a completedParts) Len() int      { return len(a) }
 func (a completedParts) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
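The `sort.Interface` implementation above is what `completeMultipartUpload` relies on to order parts before the final request. A self-contained sketch with a local stand-in type:

```go
package main

import (
	"fmt"
	"sort"
)

// completePart is a local stand-in for the (now exported) CompletePart.
type completePart struct {
	PartNumber int
	ETag       string
}

type completedParts []completePart

func (a completedParts) Len() int           { return len(a) }
func (a completedParts) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
func (a completedParts) Less(i, j int) bool { return a[i].PartNumber < a[j].PartNumber }

func main() {
	parts := completedParts{{3, "c"}, {1, "a"}, {2, "b"}}
	sort.Sort(parts)
	fmt.Println(parts) // [{1 a} {2 b} {3 c}]
}
```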
@ -143,7 +143,6 @@ func (a completedParts) Less(i, j int) bool { return a[i].PartNumber < a[j].Part
 // NOTE: Google Cloud Storage does not implement Amazon S3 Compatible multipart PUT.
 // So we fall back to single PUT operation with the maximum limit of 5GiB.
 //
-// NOTE: For anonymous requests Amazon S3 doesn't allow multipart upload. So we fall back to single PUT operation.
 func (c Client) PutObject(bucketName, objectName string, reader io.Reader, contentType string) (n int64, err error) {
 	return c.PutObjectWithProgress(bucketName, objectName, reader, contentType, nil)
 }

@ -230,11 +229,9 @@ func (c Client) putObjectSingle(bucketName, objectName string, reader io.Reader,
 		reader = tmpFile
 	}
 	// Return error if its not io.EOF.
-	if err != nil {
-		if err != io.EOF {
+	if err != nil && err != io.EOF {
 		return 0, err
 	}
-	}
 	// Execute put object.
 	st, err := c.putObjectDo(bucketName, objectName, reader, hashSums["md5"], hashSums["sha256"], size, metaData)
 	if err != nil {
@ -36,6 +36,7 @@ func (c Client) RemoveBucket(bucketName string) error {
 	// Execute DELETE on bucket.
 	resp, err := c.executeMethod("DELETE", requestMetadata{
 		bucketName: bucketName,
+		contentSHA256Bytes: emptySHA256,
 	})
 	defer closeResponse(resp)
 	if err != nil {

@ -66,6 +67,7 @@ func (c Client) RemoveObject(bucketName, objectName string) error {
 	resp, err := c.executeMethod("DELETE", requestMetadata{
 		bucketName: bucketName,
 		objectName: objectName,
+		contentSHA256Bytes: emptySHA256,
 	})
 	defer closeResponse(resp)
 	if err != nil {

@ -208,7 +210,6 @@ func (c Client) RemoveObjects(bucketName string, objectsCh <-chan string) <-chan
 }

 // RemoveIncompleteUpload aborts an partially uploaded object.
-// Requires explicit authentication, no anonymous requests are allowed for multipart API.
 func (c Client) RemoveIncompleteUpload(bucketName, objectName string) error {
 	// Input validation.
 	if err := isValidBucketName(bucketName); err != nil {

@ -252,6 +253,7 @@ func (c Client) abortMultipartUpload(bucketName, objectName, uploadID string) er
 		bucketName:  bucketName,
 		objectName:  objectName,
 		queryValues: urlValues,
+		contentSHA256Bytes: emptySHA256,
 	})
 	defer closeResponse(resp)
 	if err != nil {
@ -41,8 +41,8 @@ type commonPrefix struct {
 	Prefix string
 }

-// listBucketResult container for listObjects V2 response.
-type listBucketV2Result struct {
+// ListBucketV2Result container for listObjects response version 2.
+type ListBucketV2Result struct {
 	// A response can contain CommonPrefixes only if you have
 	// specified a delimiter.
 	CommonPrefixes []commonPrefix

@ -70,8 +70,8 @@ type listBucketV2Result struct {
 	StartAfter string
 }

-// listBucketResult container for listObjects response.
-type listBucketResult struct {
+// ListBucketResult container for listObjects response.
+type ListBucketResult struct {
 	// A response can contain CommonPrefixes only if you have
 	// specified a delimiter.
 	CommonPrefixes []commonPrefix

@ -102,8 +102,8 @@ type listBucketResult struct {
 	Prefix string
 }

-// listMultipartUploadsResult container for ListMultipartUploads response
-type listMultipartUploadsResult struct {
+// ListMultipartUploadsResult container for ListMultipartUploads response
+type ListMultipartUploadsResult struct {
 	Bucket         string
 	KeyMarker      string
 	UploadIDMarker string `xml:"UploadIdMarker"`

@ -131,8 +131,8 @@ type copyObjectResult struct {
 	LastModified string // time string format "2006-01-02T15:04:05.000Z"
 }

-// objectPart container for particular part of an object.
-type objectPart struct {
+// ObjectPart container for particular part of an object.
+type ObjectPart struct {
 	// Part number identifies the part.
 	PartNumber int

@ -147,8 +147,8 @@ type objectPart struct {
 	Size int64
 }

-// listObjectPartsResult container for ListObjectParts response.
-type listObjectPartsResult struct {
+// ListObjectPartsResult container for ListObjectParts response.
+type ListObjectPartsResult struct {
 	Bucket   string
 	Key      string
 	UploadID string `xml:"UploadId"`

@ -163,7 +163,7 @@ type listObjectPartsResult struct {
 	// Indicates whether the returned list of parts is truncated.
 	IsTruncated bool
-	ObjectParts []objectPart `xml:"Part"`
+	ObjectParts []ObjectPart `xml:"Part"`

 	EncodingType string
 }

@ -185,9 +185,9 @@ type completeMultipartUploadResult struct {
 	ETag string
 }

-// completePart sub container lists individual part numbers and their
+// CompletePart sub container lists individual part numbers and their
 // md5sum, part of completeMultipartUpload.
-type completePart struct {
+type CompletePart struct {
 	XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Part" json:"-"`

 	// Part number identifies the part.

@ -198,7 +198,7 @@ type completePart struct {
 // completeMultipartUpload container for completing multipart upload.
 type completeMultipartUpload struct {
 	XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CompleteMultipartUpload" json:"-"`
-	Parts   []completePart `xml:"Part"`
+	Parts   []CompletePart `xml:"Part"`
 }

 // createBucketConfiguration container for bucket configuration.
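These renames export the multipart result types. One practical consequence, sketched below with field names taken from the hunks above: callers can now pass these values across their own function boundaries, which the unexported types made impossible:

```go
package main

import (
	"fmt"

	minio "github.com/minio/minio-go"
)

// summarize sums part sizes; possible only now that ObjectPart is exported.
func summarize(parts []minio.ObjectPart) int64 {
	var total int64
	for _, p := range parts {
		total += p.Size
	}
	return total
}

func main() {
	parts := []minio.ObjectPart{
		{PartNumber: 1, Size: 5 << 20},
		{PartNumber: 2, Size: 3 << 20},
	}
	fmt.Println(summarize(parts), "bytes across", len(parts), "parts")
}
```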
vendor/src/github.com/minio/minio-go/api-stat.go (vendored, 23 changes)
@ -35,6 +35,7 @@ func (c Client) BucketExists(bucketName string) (bool, error) {
 	// Execute HEAD on bucketName.
 	resp, err := c.executeMethod("HEAD", requestMetadata{
 		bucketName: bucketName,
+		contentSHA256Bytes: emptySHA256,
 	})
 	defer closeResponse(resp)
 	if err != nil {

@ -85,11 +86,31 @@ func (c Client) StatObject(bucketName, objectName string) (ObjectInfo, error) {
 	if err := isValidObjectName(objectName); err != nil {
 		return ObjectInfo{}, err
 	}
+	reqHeaders := NewHeadReqHeaders()
+	return c.statObject(bucketName, objectName, reqHeaders)
+}
+
+// Lower level API for statObject supporting pre-conditions and range headers.
+func (c Client) statObject(bucketName, objectName string, reqHeaders RequestHeaders) (ObjectInfo, error) {
+	// Input validation.
+	if err := isValidBucketName(bucketName); err != nil {
+		return ObjectInfo{}, err
+	}
+	if err := isValidObjectName(objectName); err != nil {
+		return ObjectInfo{}, err
+	}
+
+	customHeader := make(http.Header)
+	for k, v := range reqHeaders.Header {
+		customHeader[k] = v
+	}
+
 	// Execute HEAD on objectName.
 	resp, err := c.executeMethod("HEAD", requestMetadata{
 		bucketName: bucketName,
 		objectName: objectName,
+		contentSHA256Bytes: emptySHA256,
+		customHeader: customHeader,
 	})
 	defer closeResponse(resp)
 	if err != nil {

@ -122,6 +143,7 @@ func (c Client) StatObject(bucketName, objectName string) (ObjectInfo, error) {
 			}
 		}
 	}

 	// Parse Last-Modified has http time format.
 	date, err := time.Parse(http.TimeFormat, resp.Header.Get("Last-Modified"))
 	if err != nil {

@ -135,6 +157,7 @@ func (c Client) StatObject(bucketName, objectName string) (ObjectInfo, error) {
 			Region: resp.Header.Get("x-amz-bucket-region"),
 		}
 	}

 	// Fetch content type if any present.
 	contentType := strings.TrimSpace(resp.Header.Get("Content-Type"))
 	if contentType == "" {
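The new low-level `statObject` forwards caller-supplied headers (pre-conditions, ranges) via a plain header copy. The same idiom with only the standard library:

```go
package main

import (
	"fmt"
	"net/http"
)

func main() {
	// Headers a caller wants forwarded, e.g. pre-conditions and ranges.
	reqHeaders := http.Header{}
	reqHeaders.Set("Range", "bytes=0-1023")
	reqHeaders.Set("If-Match", `"etag-value"`)

	// The same copy loop statObject uses: each key keeps its value slice.
	customHeader := make(http.Header)
	for k, v := range reqHeaders {
		customHeader[k] = v
	}
	fmt.Println(customHeader)
}
```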
vendor/src/github.com/minio/minio-go/api.go (vendored, 124 changes)
@ -20,6 +20,7 @@ import (
|
|||||||
"bytes"
|
"bytes"
|
||||||
"encoding/base64"
|
"encoding/base64"
|
||||||
"encoding/hex"
|
"encoding/hex"
|
||||||
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
@ -74,6 +75,9 @@ type Client struct {
|
|||||||
// S3 specific accelerated endpoint.
|
// S3 specific accelerated endpoint.
|
||||||
s3AccelerateEndpoint string
|
s3AccelerateEndpoint string
|
||||||
|
|
||||||
|
// Region endpoint
|
||||||
|
region string
|
||||||
|
|
||||||
// Random seed.
|
// Random seed.
|
||||||
random *rand.Rand
|
random *rand.Rand
|
||||||
}
|
}
|
||||||
@ -81,7 +85,7 @@ type Client struct {
|
|||||||
// Global constants.
|
// Global constants.
|
||||||
const (
|
const (
|
||||||
libraryName = "minio-go"
|
libraryName = "minio-go"
|
||||||
libraryVersion = "2.0.4"
|
libraryVersion = "2.1.0"
|
||||||
)
|
)
|
||||||
|
|
||||||
// User Agent should always following the below style.
|
// User Agent should always following the below style.
|
||||||
@ -100,6 +104,7 @@ func NewV2(endpoint string, accessKeyID, secretAccessKey string, secure bool) (*
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
// Set to use signature version '2'.
|
// Set to use signature version '2'.
|
||||||
clnt.signature = SignatureV2
|
clnt.signature = SignatureV2
|
||||||
return clnt, nil
|
return clnt, nil
|
||||||
@ -112,26 +117,40 @@ func NewV4(endpoint string, accessKeyID, secretAccessKey string, secure bool) (*
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
// Set to use signature version '4'.
|
// Set to use signature version '4'.
|
||||||
clnt.signature = SignatureV4
|
clnt.signature = SignatureV4
|
||||||
return clnt, nil
|
return clnt, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// New - instantiate minio client Client, adds automatic verification
|
// New - instantiate minio client Client, adds automatic verification of signature.
|
||||||
// of signature.
|
func New(endpoint, accessKeyID, secretAccessKey string, secure bool) (*Client, error) {
|
||||||
func New(endpoint string, accessKeyID, secretAccessKey string, secure bool) (*Client, error) {
|
return NewWithRegion(endpoint, accessKeyID, secretAccessKey, secure, "")
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewWithRegion - instantiate minio client, with region configured. Unlike New(),
|
||||||
|
// NewWithRegion avoids bucket-location lookup operations and it is slightly faster.
|
||||||
|
// Use this function when if your application deals with single region.
|
||||||
|
func NewWithRegion(endpoint, accessKeyID, secretAccessKey string, secure bool, region string) (*Client, error) {
|
||||||
clnt, err := privateNew(endpoint, accessKeyID, secretAccessKey, secure)
|
clnt, err := privateNew(endpoint, accessKeyID, secretAccessKey, secure)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
// Google cloud storage should be set to signature V2, force it if not.
|
// Google cloud storage should be set to signature V2, force it if not.
|
||||||
if s3utils.IsGoogleEndpoint(clnt.endpointURL) {
|
if s3utils.IsGoogleEndpoint(clnt.endpointURL) {
|
||||||
clnt.signature = SignatureV2
|
clnt.signature = SignatureV2
|
||||||
}
|
}
|
||||||
|
|
||||||
// If Amazon S3 set to signature v2.n
|
// If Amazon S3 set to signature v2.n
|
||||||
if s3utils.IsAmazonEndpoint(clnt.endpointURL) {
|
if s3utils.IsAmazonEndpoint(clnt.endpointURL) {
|
||||||
clnt.signature = SignatureV4
|
clnt.signature = SignatureV4
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Sets custom region, if region is empty bucket location cache is used automatically.
|
||||||
|
clnt.region = region
|
||||||
|
|
||||||
|
// Success..
|
||||||
return clnt, nil
|
return clnt, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -141,8 +160,7 @@ type lockedRandSource struct {
|
|||||||
src rand.Source
|
src rand.Source
|
||||||
}
|
}
|
||||||
|
|
||||||
// Int63 returns a non-negative pseudo-random 63-bit integer as an
|
// Int63 returns a non-negative pseudo-random 63-bit integer as an int64.
|
||||||
// int64.
|
|
||||||
func (r *lockedRandSource) Int63() (n int64) {
|
func (r *lockedRandSource) Int63() (n int64) {
|
||||||
r.lk.Lock()
|
r.lk.Lock()
|
||||||
n = r.src.Int63()
|
n = r.src.Int63()
|
||||||
@ -181,9 +199,6 @@ func privateNew(endpoint, accessKeyID, secretAccessKey string, secure bool) (*Cl
|
|||||||
clnt := new(Client)
|
clnt := new(Client)
|
||||||
clnt.accessKeyID = accessKeyID
|
clnt.accessKeyID = accessKeyID
|
||||||
clnt.secretAccessKey = secretAccessKey
|
clnt.secretAccessKey = secretAccessKey
|
||||||
if clnt.accessKeyID == "" || clnt.secretAccessKey == "" {
|
|
||||||
clnt.anonymous = true
|
|
||||||
}
|
|
||||||
|
|
||||||
// Remember whether we are using https or not
|
// Remember whether we are using https or not
|
||||||
clnt.secure = secure
|
clnt.secure = secure
|
||||||
@ -299,8 +314,7 @@ var regSign = regexp.MustCompile("Signature=([[0-9a-f]+)")
|
|||||||
|
|
||||||
// Filter out signature value from Authorization header.
|
// Filter out signature value from Authorization header.
|
||||||
func (c Client) filterSignature(req *http.Request) {
|
func (c Client) filterSignature(req *http.Request) {
|
||||||
// For anonymous requests, no need to filter.
|
if _, ok := req.Header["Authorization"]; !ok {
|
||||||
if c.anonymous {
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
// Handle if Signature V2.
|
// Handle if Signature V2.
|
||||||
@ -411,7 +425,7 @@ func (c Client) do(req *http.Request) (*http.Response, error) {
|
|||||||
return nil, &url.Error{
|
return nil, &url.Error{
|
||||||
Op: urlErr.Op,
|
Op: urlErr.Op,
|
||||||
URL: urlErr.URL,
|
URL: urlErr.URL,
|
||||||
Err: fmt.Errorf("Connection closed by foreign host %s. Retry again.", urlErr.URL),
|
Err: errors.New("Connection closed by foreign host " + urlErr.URL + ". Retry again."),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return nil, err
|
return nil, err
|
||||||
@ -460,9 +474,13 @@ func (c Client) executeMethod(method string, metadata requestMetadata) (res *htt
|
|||||||
if metadata.contentBody != nil {
|
if metadata.contentBody != nil {
|
||||||
// Check if body is seekable then it is retryable.
|
// Check if body is seekable then it is retryable.
|
||||||
bodySeeker, isRetryable = metadata.contentBody.(io.Seeker)
|
bodySeeker, isRetryable = metadata.contentBody.(io.Seeker)
|
||||||
|
switch bodySeeker {
|
||||||
|
case os.Stdin, os.Stdout, os.Stderr:
|
||||||
|
isRetryable = false
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Create a done channel to control 'ListObjects' go routine.
|
// Create a done channel to control 'newRetryTimer' go routine.
|
||||||
doneCh := make(chan struct{}, 1)
|
doneCh := make(chan struct{}, 1)
|
||||||
|
|
||||||
// Indicate to our routine to exit cleanly upon return.
|
// Indicate to our routine to exit cleanly upon return.
|
||||||
@ -471,7 +489,7 @@ func (c Client) executeMethod(method string, metadata requestMetadata) (res *htt
|
|||||||
// Blank indentifier is kept here on purpose since 'range' without
|
// Blank indentifier is kept here on purpose since 'range' without
|
||||||
// blank identifiers is only supported since go1.4
|
// blank identifiers is only supported since go1.4
|
||||||
// https://golang.org/doc/go1.4#forrange.
|
// https://golang.org/doc/go1.4#forrange.
|
||||||
for _ = range c.newRetryTimer(MaxRetry, time.Second, time.Second*30, MaxJitter, doneCh) {
|
for _ = range c.newRetryTimer(MaxRetry, DefaultRetryUnit, DefaultRetryCap, MaxJitter, doneCh) {
|
||||||
// Retry executes the following function body if request has an
|
// Retry executes the following function body if request has an
|
||||||
// error until maxRetries have been exhausted, retry attempts are
|
// error until maxRetries have been exhausted, retry attempts are
|
||||||
// performed after waiting for a given period of time in a
|
// performed after waiting for a given period of time in a
|
||||||
@ -520,15 +538,22 @@ func (c Client) executeMethod(method string, metadata requestMetadata) (res *htt
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
// Save the body.
|
// Save the body.
|
||||||
errBodySeeker := bytes.NewReader(errBodyBytes)
|
errBodySeeker := bytes.NewReader(errBodyBytes)
|
||||||
res.Body = ioutil.NopCloser(errBodySeeker)
|
res.Body = ioutil.NopCloser(errBodySeeker)
|
||||||
|
|
||||||
// For errors verify if its retryable otherwise fail quickly.
|
// For errors verify if its retryable otherwise fail quickly.
|
||||||
errResponse := ToErrorResponse(httpRespToErrorResponse(res, metadata.bucketName, metadata.objectName))
|
errResponse := ToErrorResponse(httpRespToErrorResponse(res, metadata.bucketName, metadata.objectName))
|
||||||
// Bucket region if set in error response, we can retry the
|
|
||||||
// request with the new region.
|
// Save the body back again.
|
||||||
if errResponse.Region != "" {
|
errBodySeeker.Seek(0, 0) // Seek back to starting point.
|
||||||
|
res.Body = ioutil.NopCloser(errBodySeeker)
|
||||||
|
|
||||||
|
// Bucket region if set in error response and the error
|
||||||
|
// code dictates invalid region, we can retry the request
|
||||||
|
// with the new region.
|
||||||
|
if errResponse.Code == "InvalidRegion" && errResponse.Region != "" {
|
||||||
c.bucketLocCache.Set(metadata.bucketName, errResponse.Region)
|
c.bucketLocCache.Set(metadata.bucketName, errResponse.Region)
|
||||||
continue // Retry.
|
continue // Retry.
|
||||||
}
|
}
|
||||||
@ -543,10 +568,6 @@ func (c Client) executeMethod(method string, metadata requestMetadata) (res *htt
|
|||||||
continue // Retry.
|
continue // Retry.
|
||||||
}
|
}
|
||||||
|
|
||||||
// Save the body back again.
|
|
||||||
-		errBodySeeker.Seek(0, 0) // Seek back to starting point.
-		res.Body = ioutil.NopCloser(errBodySeeker)

 		// For all other cases break out of the retry loop.
 		break
 	}
@@ -588,39 +609,29 @@ func (c Client) newRequest(method string, metadata requestMetadata) (req *http.Request, err error) {
 	}

 	// Initialize a new HTTP request for the method.
-	req, err = http.NewRequest(method, targetURL.String(), nil)
+	req, err = http.NewRequest(method, targetURL.String(), metadata.contentBody)
 	if err != nil {
 		return nil, err
 	}

+	// Anonymous request.
+	anonymous := c.accessKeyID == "" || c.secretAccessKey == ""
+
 	// Generate presign url if needed, return right here.
 	if metadata.expires != 0 && metadata.presignURL {
-		if c.anonymous {
+		if anonymous {
 			return nil, ErrInvalidArgument("Requests cannot be presigned with anonymous credentials.")
 		}
 		if c.signature.isV2() {
 			// Presign URL with signature v2.
 			req = s3signer.PreSignV2(*req, c.accessKeyID, c.secretAccessKey, metadata.expires)
-		} else {
+		} else if c.signature.isV4() {
 			// Presign URL with signature v4.
 			req = s3signer.PreSignV4(*req, c.accessKeyID, c.secretAccessKey, location, metadata.expires)
 		}
 		return req, nil
 	}

-	// Set content body if available.
-	if metadata.contentBody != nil {
-		req.Body = ioutil.NopCloser(metadata.contentBody)
-	}
-
-	// FIXME: Enable this when Google Cloud Storage properly supports 100-continue.
-	// Skip setting 'expect' header for Google Cloud Storage, there
-	// are some known issues - https://github.com/restic/restic/issues/520
-	if !s3utils.IsGoogleEndpoint(c.endpointURL) && c.s3AccelerateEndpoint == "" {
-		// Set 'Expect' header for the request.
-		req.Header.Set("Expect", "100-continue")
-	}
-
 	// Set 'User-Agent' header for the request.
 	c.setUserAgent(req)

@@ -634,38 +645,33 @@ func (c Client) newRequest(method string, metadata requestMetadata) (req *http.Request, err error) {
 		req.ContentLength = metadata.contentLength
 	}

-	// Set sha256 sum only for non anonymous credentials.
-	if !c.anonymous {
-		// set sha256 sum for signature calculation only with
-		// signature version '4'.
-		if c.signature.isV4() {
-			shaHeader := unsignedPayload
-			if !c.secure {
-				if metadata.contentSHA256Bytes == nil {
-					shaHeader = hex.EncodeToString(sum256([]byte{}))
-				} else {
-					shaHeader = hex.EncodeToString(metadata.contentSHA256Bytes)
-				}
-			}
-			req.Header.Set("X-Amz-Content-Sha256", shaHeader)
-		}
-	}
-
 	// set md5Sum for content protection.
 	if metadata.contentMD5Bytes != nil {
 		req.Header.Set("Content-Md5", base64.StdEncoding.EncodeToString(metadata.contentMD5Bytes))
 	}

-	// Sign the request for all authenticated requests.
-	if !c.anonymous {
-		if c.signature.isV2() {
+	if anonymous {
+		return req, nil
+	} // Sign the request for all authenticated requests.

+	switch {
+	case c.signature.isV2():
 		// Add signature version '2' authorization header.
 		req = s3signer.SignV2(*req, c.accessKeyID, c.secretAccessKey)
-		} else if c.signature.isV4() {
+	case c.signature.isStreamingV4() && method == "PUT":
+		req = s3signer.StreamingSignV4(req, c.accessKeyID,
+			c.secretAccessKey, location, metadata.contentLength, time.Now().UTC())
+	default:
+		// Set sha256 sum for signature calculation only with signature version '4'.
+		shaHeader := unsignedPayload
+		if len(metadata.contentSHA256Bytes) > 0 {
+			shaHeader = hex.EncodeToString(metadata.contentSHA256Bytes)
+		}
+		req.Header.Set("X-Amz-Content-Sha256", shaHeader)
+
 		// Add signature version '4' authorization header.
 		req = s3signer.SignV4(*req, c.accessKeyID, c.secretAccessKey, location)
 	}
-	}

 	// Return request.
 	return req, nil
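The hunks above restructure newRequest around an early return for anonymous credentials and a signature-version switch. The following standalone sketch condenses that control flow for orientation; the `creds` type and the `signV2`/`signV4`/`streamingSignV4` stubs are placeholders for the vendored client fields and s3signer calls, not the library's actual API.

```go
package main

import (
	"encoding/hex"
	"fmt"
	"net/http"
)

// creds is a hypothetical stand-in for the Client fields used in the hunk above.
type creds struct {
	accessKeyID, secretAccessKey string
	signatureV2                  bool
	signatureStreamingV4         bool
}

// Stub signers: the real work happens in github.com/minio/minio-go/pkg/s3signer.
func signV2(req *http.Request) *http.Request { req.Header.Set("Authorization", "AWS ..."); return req }
func signV4(req *http.Request) *http.Request { req.Header.Set("Authorization", "AWS4-HMAC-SHA256 ..."); return req }
func streamingSignV4(req *http.Request) *http.Request {
	req.Header.Set("X-Amz-Content-Sha256", "STREAMING-AWS4-HMAC-SHA256-PAYLOAD")
	return req
}

// sign mirrors the dispatch order of the new newRequest body.
func sign(req *http.Request, c creds, method string, sha256Sum []byte) *http.Request {
	// Anonymous credentials: leave the request unsigned.
	if c.accessKeyID == "" || c.secretAccessKey == "" {
		return req
	}
	switch {
	case c.signatureV2:
		return signV2(req)
	case c.signatureStreamingV4 && method == "PUT":
		// Streaming V4 signs chunk by chunk, so a PUT does not need the
		// whole payload buffered up front to compute a SHA-256.
		return streamingSignV4(req)
	default:
		// Plain V4: declare the payload hash, or UNSIGNED-PAYLOAD.
		sha := "UNSIGNED-PAYLOAD"
		if len(sha256Sum) > 0 {
			sha = hex.EncodeToString(sha256Sum)
		}
		req.Header.Set("X-Amz-Content-Sha256", sha)
		return signV4(req)
	}
}

func main() {
	req, _ := http.NewRequest("PUT", "https://play.minio.io/bucket/object", nil)
	c := creds{accessKeyID: "key", secretAccessKey: "secret", signatureStreamingV4: true}
	sign(req, c, "PUT", nil)
	fmt.Println(req.Header.Get("X-Amz-Content-Sha256"))
}
```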
@@ -36,6 +36,9 @@ func TestMakeBucketErrorV2(t *testing.T) {
 	if testing.Short() {
 		t.Skip("skipping functional tests for short runs")
 	}
+	if os.Getenv("S3_ADDRESS") != "s3.amazonaws.com" {
+		t.Skip("skipping region functional tests for non s3 runs")
+	}

 	// Seed random based on current time.
 	rand.Seed(time.Now().Unix())
@@ -198,14 +201,14 @@ func TestRemovePartiallyUploadedV2(t *testing.T) {
 	go func() {
 		i := 0
 		for i < 25 {
-			_, err = io.CopyN(writer, r, 128*1024)
-			if err != nil {
-				t.Fatal("Error:", err, bucketName)
+			_, cerr := io.CopyN(writer, r, 128*1024)
+			if cerr != nil {
+				t.Fatal("Error:", cerr, bucketName)
 			}
 			i++
 			r.Seek(0, 0)
 		}
-		writer.CloseWithError(errors.New("Proactively closed to be verified later."))
+		writer.CloseWithError(errors.New("proactively closed to be verified later"))
 	}()

 	objectName := bucketName + "-resumable"
@@ -213,7 +216,7 @@ func TestRemovePartiallyUploadedV2(t *testing.T) {
 	if err == nil {
 		t.Fatal("Error: PutObject should fail.")
 	}
-	if err.Error() != "Proactively closed to be verified later." {
+	if err.Error() != "proactively closed to be verified later" {
 		t.Fatal("Error:", err)
 	}
 	err = c.RemoveIncompleteUpload(bucketName, objectName)
@@ -571,6 +574,9 @@ func TestMakeBucketRegionsV2(t *testing.T) {
 	if testing.Short() {
 		t.Skip("skipping functional tests for short runs")
 	}
+	if os.Getenv("S3_ADDRESS") != "s3.amazonaws.com" {
+		t.Skip("skipping region functional tests for non s3 runs")
+	}

 	// Seed random based on current time.
 	rand.Seed(time.Now().Unix())
@@ -19,6 +19,7 @@ package minio
 import (
 	"bytes"
 	crand "crypto/rand"
+	"encoding/hex"
 	"errors"
 	"fmt"
 	"io"
@@ -28,9 +29,11 @@ import (
 	"net/url"
 	"os"
 	"strconv"
+	"strings"
 	"testing"
 	"time"

+	"github.com/minio/minio-go/pkg/encrypt"
 	"github.com/minio/minio-go/pkg/policy"
 )

@@ -64,6 +67,9 @@ func TestMakeBucketError(t *testing.T) {
 	if testing.Short() {
 		t.Skip("skipping functional tests for short runs")
 	}
+	if os.Getenv("S3_ADDRESS") != "s3.amazonaws.com" {
+		t.Skip("skipping region functional tests for non s3 runs")
+	}

 	// Seed random based on current time.
 	rand.Seed(time.Now().Unix())
@@ -110,6 +116,9 @@ func TestMakeBucketRegions(t *testing.T) {
 	if testing.Short() {
 		t.Skip("skipping functional tests for short runs")
 	}
+	if os.Getenv("S3_ADDRESS") != "s3.amazonaws.com" {
+		t.Skip("skipping region functional tests for non s3 runs")
+	}

 	// Seed random based on current time.
 	rand.Seed(time.Now().Unix())
@@ -302,7 +311,7 @@ func TestPutObjectWithMetadata(t *testing.T) {
 	// Object custom metadata
 	customContentType := "custom/contenttype"

-	n, err := c.PutObjectWithMetadata(bucketName, objectName, bytes.NewReader(buf), map[string][]string{"Content-Type": []string{customContentType}}, nil)
+	n, err := c.PutObjectWithMetadata(bucketName, objectName, bytes.NewReader(buf), map[string][]string{"Content-Type": {customContentType}}, nil)
 	if err != nil {
 		t.Fatal("Error:", err, bucketName, objectName)
 	}
@@ -346,6 +355,70 @@ func TestPutObjectWithMetadata(t *testing.T) {
 	}
 }

+// Test put object with streaming signature.
+func TestPutObjectStreaming(t *testing.T) {
+	if testing.Short() {
+		t.Skip("skipping function tests for short runs")
+	}
+
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+	c, err := NewV4(
+		os.Getenv("S3_ADDRESS"),
+		os.Getenv("ACCESS_KEY"),
+		os.Getenv("SECRET_KEY"),
+		mustParseBool(os.Getenv("S3_SECURE")),
+	)
+	if err != nil {
+		t.Fatal("Error:", err)
+	}
+
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()),
+		"minio-go-test")
+
+	// Make a new bucket.
+	err = c.MakeBucket(bucketName, "us-east-1")
+	if err != nil {
+		t.Fatal("Error:", err, bucketName)
+	}
+
+	// Upload an object.
+	sizes := []int64{0, 64*1024 - 1, 64 * 1024}
+	objectName := "test-object"
+	for i, size := range sizes {
+		data := bytes.Repeat([]byte("a"), int(size))
+		n, err := c.PutObjectStreaming(bucketName, objectName, bytes.NewReader(data))
+		if err != nil {
+			t.Fatalf("Test %d Error: %v %s %s", i+1, err, bucketName, objectName)
+		}
+
+		if n != size {
+			t.Errorf("Test %d Expected upload object size %d but got %d", i+1, size, n)
+		}
+	}
+
+	// Remove the object.
+	err = c.RemoveObject(bucketName, objectName)
+	if err != nil {
+		t.Fatal("Error:", err)
+	}
+
+	// Remove the bucket.
+	err = c.RemoveBucket(bucketName)
+	if err != nil {
+		t.Fatal("Error:", err)
+	}
+}
+
 // Test listing partially uploaded objects.
 func TestListPartiallyUploaded(t *testing.T) {
 	if testing.Short() {
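The new test drives PutObjectStreaming across the 64 KiB chunk boundary (0, 64K-1 and 64K bytes). Outside the test harness, a minimal caller might look like the sketch below; the endpoint, credentials and bucket name are placeholders.

```go
package main

import (
	"bytes"
	"log"

	minio "github.com/minio/minio-go"
)

func main() {
	// Placeholder endpoint and credentials.
	client, err := minio.NewV4("play.minio.io:9000", "ACCESS", "SECRET", true)
	if err != nil {
		log.Fatalln(err)
	}

	data := bytes.Repeat([]byte("a"), 64*1024)

	// PutObjectStreaming uploads with the streaming V4 signature, so the
	// payload SHA-256 need not be precomputed over the whole body.
	n, err := client.PutObjectStreaming("mybucket", "test-object", bytes.NewReader(data))
	if err != nil {
		log.Fatalln(err)
	}
	log.Printf("uploaded %d bytes", n)
}
```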
@@ -387,17 +460,14 @@ func TestListPartiallyUploaded(t *testing.T) {
 	go func() {
 		i := 0
 		for i < 25 {
-			_, err = io.CopyN(writer, r, (minPartSize*2)/25)
-			if err != nil {
-				t.Fatal("Error:", err, bucketName)
+			_, cerr := io.CopyN(writer, r, (minPartSize*2)/25)
+			if cerr != nil {
+				t.Fatal("Error:", cerr, bucketName)
 			}
 			i++
 			r.Seek(0, 0)
 		}
-		err := writer.CloseWithError(errors.New("Proactively closed to be verified later."))
-		if err != nil {
-			t.Fatal("Error:", err)
-		}
+		writer.CloseWithError(errors.New("proactively closed to be verified later"))
 	}()

 	objectName := bucketName + "-resumable"
@@ -405,7 +475,7 @@ func TestListPartiallyUploaded(t *testing.T) {
 	if err == nil {
 		t.Fatal("Error: PutObject should fail.")
 	}
-	if err.Error() != "Proactively closed to be verified later." {
+	if err.Error() != "proactively closed to be verified later" {
 		t.Fatal("Error:", err)
 	}

@@ -650,7 +720,8 @@ func TestRemoveMultipleObjects(t *testing.T) {
 		objectName := "sample" + strconv.Itoa(i) + ".txt"
 		_, err = c.PutObject(bucketName, objectName, r, "application/octet-stream")
 		if err != nil {
-			t.Fatal("Error: PutObject shouldn't fail.")
+			t.Error("Error: PutObject shouldn't fail.", err)
+			continue
 		}
 		objectsCh <- objectName
 	}
@@ -715,17 +786,14 @@ func TestRemovePartiallyUploaded(t *testing.T) {
 	go func() {
 		i := 0
 		for i < 25 {
-			_, err = io.CopyN(writer, r, 128*1024)
-			if err != nil {
-				t.Fatal("Error:", err, bucketName)
+			_, cerr := io.CopyN(writer, r, 128*1024)
+			if cerr != nil {
+				t.Fatal("Error:", cerr, bucketName)
 			}
 			i++
 			r.Seek(0, 0)
 		}
-		err := writer.CloseWithError(errors.New("Proactively closed to be verified later."))
-		if err != nil {
-			t.Fatal("Error:", err)
-		}
+		writer.CloseWithError(errors.New("proactively closed to be verified later"))
 	}()

 	objectName := bucketName + "-resumable"
@@ -733,7 +801,7 @@ func TestRemovePartiallyUploaded(t *testing.T) {
 	if err == nil {
 		t.Fatal("Error: PutObject should fail.")
 	}
-	if err.Error() != "Proactively closed to be verified later." {
+	if err.Error() != "proactively closed to be verified later" {
 		t.Fatal("Error:", err)
 	}
 	err = c.RemoveIncompleteUpload(bucketName, objectName)
@@ -1784,10 +1852,178 @@ func TestCopyObject(t *testing.T) {
 	}
 }

+// TestEncryptionPutGet tests client side encryption
+func TestEncryptionPutGet(t *testing.T) {
+	if testing.Short() {
+		t.Skip("skipping functional tests for the short runs")
+	}
+
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+	c, err := New(
+		os.Getenv("S3_ADDRESS"),
+		os.Getenv("ACCESS_KEY"),
+		os.Getenv("SECRET_KEY"),
+		mustParseBool(os.Getenv("S3_SECURE")),
+	)
+	if err != nil {
+		t.Fatal("Error:", err)
+	}
+
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+
+	// Make a new bucket.
+	err = c.MakeBucket(bucketName, "us-east-1")
+	if err != nil {
+		t.Fatal("Error:", err, bucketName)
+	}
+
+	// Generate a symmetric key
+	symKey := encrypt.NewSymmetricKey([]byte("my-secret-key-00"))
+
+	// Generate an asymmetric key from predefined public and private certificates
+	privateKey, err := hex.DecodeString(
+		"30820277020100300d06092a864886f70d0101010500048202613082025d" +
+			"0201000281810087b42ea73243a3576dc4c0b6fa245d339582dfdbddc20c" +
+			"bb8ab666385034d997210c54ba79275c51162a1221c3fb1a4c7c61131ca6" +
+			"5563b319d83474ef5e803fbfa7e52b889e1893b02586b724250de7ac6351" +
+			"cc0b7c638c980acec0a07020a78eed7eaa471eca4b92071394e061346c06" +
+			"15ccce2f465dee2080a89e43f29b5702030100010281801dd5770c3af8b3" +
+			"c85cd18cacad81a11bde1acfac3eac92b00866e142301fee565365aa9af4" +
+			"57baebf8bb7711054d071319a51dd6869aef3848ce477a0dc5f0dbc0c336" +
+			"5814b24c820491ae2bb3c707229a654427e03307fec683e6b27856688f08" +
+			"bdaa88054c5eeeb773793ff7543ee0fb0e2ad716856f2777f809ef7e6fa4" +
+			"41024100ca6b1edf89e8a8f93cce4b98c76c6990a09eb0d32ad9d3d04fbf" +
+			"0b026fa935c44f0a1c05dd96df192143b7bda8b110ec8ace28927181fd8c" +
+			"d2f17330b9b63535024100aba0260afb41489451baaeba423bee39bcbd1e" +
+			"f63dd44ee2d466d2453e683bf46d019a8baead3a2c7fca987988eb4d565e" +
+			"27d6be34605953f5034e4faeec9bdb0241009db2cb00b8be8c36710aff96" +
+			"6d77a6dec86419baca9d9e09a2b761ea69f7d82db2ae5b9aae4246599bb2" +
+			"d849684d5ab40e8802cfe4a2b358ad56f2b939561d2902404e0ead9ecafd" +
+			"bb33f22414fa13cbcc22a86bdf9c212ce1a01af894e3f76952f36d6c904c" +
+			"bd6a7e0de52550c9ddf31f1e8bfe5495f79e66a25fca5c20b3af5b870241" +
+			"0083456232aa58a8c45e5b110494599bda8dbe6a094683a0539ddd24e19d" +
+			"47684263bbe285ad953d725942d670b8f290d50c0bca3d1dc9688569f1d5" +
+			"9945cb5c7d")
+
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	publicKey, err := hex.DecodeString("30819f300d06092a864886f70d010101050003818d003081890281810087" +
+		"b42ea73243a3576dc4c0b6fa245d339582dfdbddc20cbb8ab666385034d9" +
+		"97210c54ba79275c51162a1221c3fb1a4c7c61131ca65563b319d83474ef" +
+		"5e803fbfa7e52b889e1893b02586b724250de7ac6351cc0b7c638c980ace" +
+		"c0a07020a78eed7eaa471eca4b92071394e061346c0615ccce2f465dee20" +
+		"80a89e43f29b570203010001")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Generate an asymmetric key
+	asymKey, err := encrypt.NewAsymmetricKey(privateKey, publicKey)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Object custom metadata
+	customContentType := "custom/contenttype"
+
+	testCases := []struct {
+		buf    []byte
+		encKey encrypt.Key
+	}{
+		{encKey: symKey, buf: bytes.Repeat([]byte("F"), 0)},
+		{encKey: symKey, buf: bytes.Repeat([]byte("F"), 1)},
+		{encKey: symKey, buf: bytes.Repeat([]byte("F"), 15)},
+		{encKey: symKey, buf: bytes.Repeat([]byte("F"), 16)},
+		{encKey: symKey, buf: bytes.Repeat([]byte("F"), 17)},
+		{encKey: symKey, buf: bytes.Repeat([]byte("F"), 31)},
+		{encKey: symKey, buf: bytes.Repeat([]byte("F"), 32)},
+		{encKey: symKey, buf: bytes.Repeat([]byte("F"), 33)},
+		{encKey: symKey, buf: bytes.Repeat([]byte("F"), 1024)},
+		{encKey: symKey, buf: bytes.Repeat([]byte("F"), 1024*2)},
+		{encKey: symKey, buf: bytes.Repeat([]byte("F"), 1024*1024)},
+
+		{encKey: asymKey, buf: bytes.Repeat([]byte("F"), 0)},
+		{encKey: asymKey, buf: bytes.Repeat([]byte("F"), 1)},
+		{encKey: asymKey, buf: bytes.Repeat([]byte("F"), 16)},
+		{encKey: asymKey, buf: bytes.Repeat([]byte("F"), 32)},
+		{encKey: asymKey, buf: bytes.Repeat([]byte("F"), 1024)},
+		{encKey: asymKey, buf: bytes.Repeat([]byte("F"), 1024*1024)},
+	}
+
+	for i, testCase := range testCases {
+		// Generate a random object name
+		objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+
+		// Secured object
+		cbcMaterials, err := encrypt.NewCBCSecureMaterials(testCase.encKey)
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		// Put encrypted data
+		_, err = c.PutEncryptedObject(bucketName, objectName, bytes.NewReader(testCase.buf), cbcMaterials, map[string][]string{"Content-Type": {customContentType}}, nil)
+		if err != nil {
+			t.Fatalf("Test %d, error: %v %v %v", i+1, err, bucketName, objectName)
+		}
+
+		// Read the data back
+		r, err := c.GetEncryptedObject(bucketName, objectName, cbcMaterials)
+		if err != nil {
+			t.Fatalf("Test %d, error: %v %v %v", i+1, err, bucketName, objectName)
+		}
+
+		// Compare the sent object with the received one
+		recvBuffer := bytes.NewBuffer([]byte{})
+		if _, err = io.Copy(recvBuffer, r); err != nil {
+			t.Fatalf("Test %d, error: %v", i+1, err)
+		}
+		if recvBuffer.Len() != len(testCase.buf) {
+			t.Fatalf("Test %d, error: number of bytes of received object does not match, want %v, got %v\n",
+				i+1, len(testCase.buf), recvBuffer.Len())
+		}
+		if !bytes.Equal(testCase.buf, recvBuffer.Bytes()) {
+			t.Fatalf("Test %d, error: Encrypted sent is not equal to decrypted, want `%x`, got `%x`", i+1, testCase.buf, recvBuffer.Bytes())
+		}
+
+		// Remove test object
+		err = c.RemoveObject(bucketName, objectName)
+		if err != nil {
+			t.Fatalf("Test %d, error: %v", i+1, err)
+		}
+
+	}
+
+	// Remove test bucket
+	err = c.RemoveBucket(bucketName)
+	if err != nil {
+		t.Fatal("Error:", err)
+	}
+
+}
+
 func TestBucketNotification(t *testing.T) {
 	if testing.Short() {
 		t.Skip("skipping functional tests for the short runs")
 	}
+	if os.Getenv("NOTIFY_BUCKET") == "" ||
+		os.Getenv("NOTIFY_SERVICE") == "" ||
+		os.Getenv("NOTIFY_REGION") == "" ||
+		os.Getenv("NOTIFY_ACCOUNTID") == "" ||
+		os.Getenv("NOTIFY_RESOURCE") == "" {
+		t.Skip("skipping notification test if not configured")
+	}

 	// Seed random based on current time.
 	rand.Seed(time.Now().Unix())
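The test above exercises the new client-side encryption round trip. Stripped of the table-driven harness, the core usage pattern is roughly the sketch below; the endpoint, credentials, bucket, object name and key material are placeholders, and the call shapes follow the test code above.

```go
package main

import (
	"bytes"
	"io"
	"log"
	"os"

	minio "github.com/minio/minio-go"
	"github.com/minio/minio-go/pkg/encrypt"
)

func main() {
	client, err := minio.New("play.minio.io:9000", "ACCESS", "SECRET", true)
	if err != nil {
		log.Fatalln(err)
	}

	// A 16-byte AES key; the key material here is a placeholder.
	key := encrypt.NewSymmetricKey([]byte("my-secret-key-00"))
	materials, err := encrypt.NewCBCSecureMaterials(key)
	if err != nil {
		log.Fatalln(err)
	}

	// Encrypt on the client before upload.
	_, err = client.PutEncryptedObject("mybucket", "secret.txt",
		bytes.NewReader([]byte("hello")), materials, nil, nil)
	if err != nil {
		log.Fatalln(err)
	}

	// Decrypt transparently while reading back.
	obj, err := client.GetEncryptedObject("mybucket", "secret.txt", materials)
	if err != nil {
		log.Fatalln(err)
	}
	io.Copy(os.Stdout, obj)
}
```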
@@ -2187,3 +2423,78 @@ func TestFunctional(t *testing.T) {
 		t.Fatal("Error: ", err)
 	}
 }

+// Test for validating GetObject Reader* methods functioning when the
+// object is modified in the object store.
+func TestGetObjectObjectModified(t *testing.T) {
+	if testing.Short() {
+		t.Skip("skipping functional tests for the short runs")
+	}
+
+	// Instantiate new minio client object.
+	c, err := NewV4(
+		os.Getenv("S3_ADDRESS"),
+		os.Getenv("ACCESS_KEY"),
+		os.Getenv("SECRET_KEY"),
+		mustParseBool(os.Getenv("S3_SECURE")),
+	)
+	if err != nil {
+		t.Fatal("Error:", err)
+	}
+
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+	// Make a new bucket.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+	err = c.MakeBucket(bucketName, "us-east-1")
+	if err != nil {
+		t.Fatal("Error:", err, bucketName)
+	}
+	defer c.RemoveBucket(bucketName)
+
+	// Upload an object.
+	objectName := "myobject"
+	content := "helloworld"
+	_, err = c.PutObject(bucketName, objectName, strings.NewReader(content), "application/text")
+	if err != nil {
+		t.Fatalf("Failed to upload %s/%s: %v", bucketName, objectName, err)
+	}
+
+	defer c.RemoveObject(bucketName, objectName)
+
+	reader, err := c.GetObject(bucketName, objectName)
+	if err != nil {
+		t.Fatalf("Failed to get object %s/%s: %v", bucketName, objectName, err)
+	}
+	defer reader.Close()
+
+	// Read a few bytes of the object.
+	b := make([]byte, 5)
+	n, err := reader.ReadAt(b, 0)
+	if err != nil {
+		t.Fatalf("Failed to read object %s/%s at an offset: %v", bucketName, objectName, err)
+	}
+
+	// Upload different contents to the same object while object is being read.
+	newContent := "goodbyeworld"
+	_, err = c.PutObject(bucketName, objectName, strings.NewReader(newContent), "application/text")
+	if err != nil {
+		t.Fatalf("Failed to upload %s/%s: %v", bucketName, objectName, err)
+	}
+
+	// Confirm that a Stat() call in between doesn't change the Object's cached etag.
+	_, err = reader.Stat()
+	if err.Error() != s3ErrorResponseMap["PreconditionFailed"] {
+		t.Errorf("Expected Stat to fail with error %s but received %s", s3ErrorResponseMap["PreconditionFailed"], err.Error())
+	}
+
+	// Read again only to find object contents have been modified since last read.
+	_, err = reader.ReadAt(b, int64(n))
+	if err.Error() != s3ErrorResponseMap["PreconditionFailed"] {
+		t.Errorf("Expected ReadAt to fail with error %s but received %s", s3ErrorResponseMap["PreconditionFailed"], err.Error())
+	}
+}
@@ -24,7 +24,7 @@ install:
 build_script:
   - go vet ./...
   - gofmt -s -l .
-  - golint github.com/minio/minio-go...
+  - golint -set_exit_status github.com/minio/minio-go...
   - deadcode
   - ineffassign .
  - go test -short -v
@@ -21,7 +21,6 @@ import (
 	"net/http"
 	"net/url"
 	"path"
-	"strings"
 	"sync"

 	"github.com/minio/minio-go/pkg/s3signer"
@@ -84,9 +83,6 @@ func (c Client) getBucketLocation(bucketName string) (string, error) {
 	if err := isValidBucketName(bucketName); err != nil {
 		return "", err
 	}
-	if location, ok := c.bucketLocCache.Get(bucketName); ok {
-		return location, nil
-	}

 	if s3utils.IsAmazonChinaEndpoint(c.endpointURL) {
 		// For china specifically we need to set everything to
@@ -96,6 +92,15 @@ func (c Client) getBucketLocation(bucketName string) (string, error) {
 		return "cn-north-1", nil
 	}

+	// Region set then no need to fetch bucket location.
+	if c.region != "" {
+		return c.region, nil
+	}
+
+	if location, ok := c.bucketLocCache.Get(bucketName); ok {
+		return location, nil
+	}
+
 	// Initialize a new request.
 	req, err := c.getBucketLocationRequest(bucketName)
 	if err != nil {
@@ -125,7 +130,7 @@ func processBucketLocationResponse(resp *http.Response, bucketName string) (bucketLocation string, err error) {
 		// For access denied error, it could be an anonymous
 		// request. Move forward and let the top level callers
 		// succeed if possible based on their policy.
-		if errResp.Code == "AccessDenied" && strings.Contains(errResp.Message, "Access Denied") {
+		if errResp.Code == "AccessDenied" {
 			return "us-east-1", nil
 		}
 		return "", err
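The reordering above gives getBucketLocation a fixed precedence when resolving a bucket's region: a region configured at construction wins without any network call, then the bucket-location cache, then an actual S3 lookup. A minimal compilable sketch of that precedence, with a hypothetical client type standing in for the vendored one:

```go
package main

import "fmt"

// client and its helpers are hypothetical stand-ins for the vendored
// Client fields used in the hunk above.
type client struct {
	region string
	cache  map[string]string
}

func (c *client) queryBucketLocation(bucket string) (string, error) {
	return "us-east-1", nil // placeholder for the real GetBucketLocation request
}

// location mirrors the new precedence order in getBucketLocation.
func (c *client) location(bucket string) (string, error) {
	if c.region != "" {
		return c.region, nil // region fixed at construction: no lookup needed
	}
	if loc, ok := c.cache[bucket]; ok {
		return loc, nil // served from the bucket-location cache
	}
	loc, err := c.queryBucketLocation(bucket) // fall back to a network round trip
	if err == nil {
		c.cache[bucket] = loc
	}
	return loc, err
}

func main() {
	c := &client{region: "eu-west-1", cache: map[string]string{}}
	loc, _ := c.location("mybucket")
	fmt.Println(loc) // eu-west-1, without any network round trip
}
```

This is also why the docs below add NewWithRegion: pinning the region up front short-circuits the lookup entirely.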
@@ -28,10 +28,13 @@ type NotificationEventType string
 // http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html#notification-how-to-event-types-and-destinations
 const (
 	ObjectCreatedAll NotificationEventType = "s3:ObjectCreated:*"
-	ObjectCreatePut                        = "s3:ObjectCreated:Put"
+	ObjectCreatedPut                       = "s3:ObjectCreated:Put"
 	ObjectCreatedPost                      = "s3:ObjectCreated:Post"
 	ObjectCreatedCopy                      = "s3:ObjectCreated:Copy"
-	ObjectCreatedCompleteMultipartUpload   = "sh:ObjectCreated:CompleteMultipartUpload"
+	ObjectCreatedCompleteMultipartUpload   = "s3:ObjectCreated:CompleteMultipartUpload"
+	ObjectAccessedGet                      = "s3:ObjectAccessed:Get"
+	ObjectAccessedHead                     = "s3:ObjectAccessed:Head"
+	ObjectAccessedAll                      = "s3:ObjectAccessed:*"
 	ObjectRemovedAll                       = "s3:ObjectRemoved:*"
 	ObjectRemovedDelete                    = "s3:ObjectRemoved:Delete"
 	ObjectRemovedDeleteMarkerCreated       = "s3:ObjectRemoved:DeleteMarkerCreated"
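Besides fixing the typos in the existing constants ("ObjectCreatePut" and the "sh:" prefix), the hunk adds ObjectAccessed event names, so a listener can subscribe to reads as well as writes. A sketch assuming the notification types already present in this package (the field names on NotificationInfo are taken from the vendored code, the endpoint and bucket are placeholders):

```go
package main

import (
	"log"

	minio "github.com/minio/minio-go"
)

func main() {
	client, err := minio.New("play.minio.io:9000", "ACCESS", "SECRET", true)
	if err != nil {
		log.Fatalln(err)
	}

	doneCh := make(chan struct{})
	defer close(doneCh)

	// Listen for the newly added ObjectAccessed events alongside creations.
	for info := range client.ListenBucketNotification("mybucket", "", "", []string{
		"s3:ObjectCreated:*",
		"s3:ObjectAccessed:*",
	}, doneCh) {
		if info.Err != nil {
			log.Fatalln(info.Err)
		}
		for _, record := range info.Records {
			log.Printf("%s on %s/%s", record.EventName, record.S3.Bucket.Name, record.S3.Object.Key)
		}
	}
}
```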
@@ -45,8 +45,18 @@ const optimalReadBufferSize = 1024 * 1024 * 5
 // we don't want to sign the request payload
 const unsignedPayload = "UNSIGNED-PAYLOAD"

+// Total number of parallel workers used for multipart operation.
+var totalWorkers = 3
+
 // Signature related constants.
 const (
 	signV4Algorithm   = "AWS4-HMAC-SHA256"
 	iso8601DateFormat = "20060102T150405Z"
 )
+
+// Encryption headers stored along with the object.
+const (
+	amzHeaderIV      = "X-Amz-Meta-X-Amz-Iv"
+	amzHeaderKey     = "X-Amz-Meta-X-Amz-Key"
+	amzHeaderMatDesc = "X-Amz-Meta-X-Amz-Matdesc"
+)
vendor/src/github.com/minio/minio-go/core.go (vendored, new file, 113 lines)
@@ -0,0 +1,113 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+	"io"
+
+	"github.com/minio/minio-go/pkg/policy"
+)
+
+// Core - Inherits Client and adds new methods to expose the low level S3 APIs.
+type Core struct {
+	*Client
+}
+
+// NewCore - Returns a new initialized Core client; this Core client should
+// only be used under special conditions such as needing access to lower
+// primitives and being able to use them to write your own wrappers.
+func NewCore(endpoint string, accessKeyID, secretAccessKey string, secure bool) (*Core, error) {
+	var s3Client Core
+	client, err := NewV4(endpoint, accessKeyID, secretAccessKey, secure)
+	if err != nil {
+		return nil, err
+	}
+	s3Client.Client = client
+	return &s3Client, nil
+}
+
+// ListObjects - List all the objects at a prefix, optionally with marker and delimiter
+// you can further filter the results.
+func (c Core) ListObjects(bucket, prefix, marker, delimiter string, maxKeys int) (result ListBucketResult, err error) {
+	return c.listObjectsQuery(bucket, prefix, marker, delimiter, maxKeys)
+}
+
+// ListObjectsV2 - Lists all the objects at a prefix, similar to ListObjects() but uses
+// continuationToken instead of marker to further filter the results.
+func (c Core) ListObjectsV2(bucketName, objectPrefix, continuationToken string, fetchOwner bool, delimiter string, maxkeys int) (ListBucketV2Result, error) {
+	return c.listObjectsV2Query(bucketName, objectPrefix, continuationToken, fetchOwner, delimiter, maxkeys)
+}
+
+// PutObject - Upload object. Uploads using single PUT call.
+func (c Core) PutObject(bucket, object string, size int64, data io.Reader, md5Sum, sha256Sum []byte, metadata map[string][]string) (ObjectInfo, error) {
+	return c.putObjectDo(bucket, object, data, md5Sum, sha256Sum, size, metadata)
+}
+
+// NewMultipartUpload - Initiates new multipart upload and returns the new uploadID.
+func (c Core) NewMultipartUpload(bucket, object string, metadata map[string][]string) (uploadID string, err error) {
+	result, err := c.initiateMultipartUpload(bucket, object, metadata)
+	return result.UploadID, err
+}
+
+// ListMultipartUploads - List incomplete uploads.
+func (c Core) ListMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (result ListMultipartUploadsResult, err error) {
+	return c.listMultipartUploadsQuery(bucket, keyMarker, uploadIDMarker, prefix, delimiter, maxUploads)
+}
+
+// PutObjectPart - Upload an object part.
+func (c Core) PutObjectPart(bucket, object, uploadID string, partID int, size int64, data io.Reader, md5Sum, sha256Sum []byte) (ObjectPart, error) {
+	return c.uploadPart(bucket, object, uploadID, data, partID, md5Sum, sha256Sum, size)
+}
+
+// ListObjectParts - List uploaded parts of an incomplete upload.
+func (c Core) ListObjectParts(bucket, object, uploadID string, partNumberMarker int, maxParts int) (result ListObjectPartsResult, err error) {
+	return c.listObjectPartsQuery(bucket, object, uploadID, partNumberMarker, maxParts)
+}
+
+// CompleteMultipartUpload - Concatenate uploaded parts and commit to an object.
+func (c Core) CompleteMultipartUpload(bucket, object, uploadID string, parts []CompletePart) error {
+	_, err := c.completeMultipartUpload(bucket, object, uploadID, completeMultipartUpload{Parts: parts})
+	return err
+}
+
+// AbortMultipartUpload - Abort an incomplete upload.
+func (c Core) AbortMultipartUpload(bucket, object, uploadID string) error {
+	return c.abortMultipartUpload(bucket, object, uploadID)
+}
+
+// GetBucketPolicy - fetches bucket access policy for a given bucket.
+func (c Core) GetBucketPolicy(bucket string) (policy.BucketAccessPolicy, error) {
+	return c.getBucketPolicy(bucket)
+}
+
+// PutBucketPolicy - applies a new bucket access policy for a given bucket.
+func (c Core) PutBucketPolicy(bucket string, bucketPolicy policy.BucketAccessPolicy) error {
+	return c.putBucketPolicy(bucket, bucketPolicy)
+}
+
+// GetObject is a lower level API implemented to support reading
+// partial objects and also downloading objects with special conditions
+// matching etag, modtime etc.
+func (c Core) GetObject(bucketName, objectName string, reqHeaders RequestHeaders) (io.ReadCloser, ObjectInfo, error) {
+	return c.getObject(bucketName, objectName, reqHeaders)
+}
+
+// StatObject is a lower level API implemented to support special
+// conditions matching etag, modtime on a request.
+func (c Core) StatObject(bucketName, objectName string, reqHeaders RequestHeaders) (ObjectInfo, error) {
+	return c.statObject(bucketName, objectName, reqHeaders)
+}
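The new Core type exposes the multipart primitives directly instead of hiding them behind PutObject. As a rough sketch of how they compose (endpoint, credentials, bucket and object names are placeholders, and error handling is minimal), a single-part multipart upload could be driven like this:

```go
package main

import (
	"bytes"
	"log"

	minio "github.com/minio/minio-go"
)

func main() {
	core, err := minio.NewCore("play.minio.io:9000", "ACCESS", "SECRET", true)
	if err != nil {
		log.Fatalln(err)
	}

	// Drive a multipart upload by hand with the low-level primitives.
	uploadID, err := core.NewMultipartUpload("mybucket", "big-object", nil)
	if err != nil {
		log.Fatalln(err)
	}

	part := bytes.Repeat([]byte("x"), 5*1024*1024) // 5 MiB, the S3 minimum part size
	objPart, err := core.PutObjectPart("mybucket", "big-object", uploadID,
		1, int64(len(part)), bytes.NewReader(part), nil, nil)
	if err != nil {
		// Clean up the dangling upload before bailing out.
		_ = core.AbortMultipartUpload("mybucket", "big-object", uploadID)
		log.Fatalln(err)
	}

	// Commit the uploaded parts into the final object.
	err = core.CompleteMultipartUpload("mybucket", "big-object", uploadID,
		[]minio.CompletePart{{PartNumber: 1, ETag: objPart.ETag}})
	if err != nil {
		log.Fatalln(err)
	}
	log.Println("multipart upload committed")
}
```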
vendor/src/github.com/minio/minio-go/core_test.go (vendored, new file, 377 lines)
@@ -0,0 +1,377 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+	"bytes"
+	"crypto/md5"
+	crand "crypto/rand"
+
+	"io"
+	"math/rand"
+	"os"
+	"reflect"
+	"testing"
+	"time"
+)
+
+// Tests for Core GetObject() function.
+func TestGetObjectCore(t *testing.T) {
+	if testing.Short() {
+		t.Skip("skipping functional tests for the short runs")
+	}
+
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio core client object.
+	c, err := NewCore(
+		os.Getenv("S3_ADDRESS"),
+		os.Getenv("ACCESS_KEY"),
+		os.Getenv("SECRET_KEY"),
+		mustParseBool(os.Getenv("S3_SECURE")),
+	)
+	if err != nil {
+		t.Fatal("Error:", err)
+	}
+
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+
+	// Make a new bucket.
+	err = c.MakeBucket(bucketName, "us-east-1")
+	if err != nil {
+		t.Fatal("Error:", err, bucketName)
+	}
+
+	// Generate data more than 32K
+	buf := bytes.Repeat([]byte("3"), rand.Intn(1<<20)+32*1024)
+
+	// Save the data
+	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+	n, err := c.Client.PutObject(bucketName, objectName, bytes.NewReader(buf), "binary/octet-stream")
+	if err != nil {
+		t.Fatal("Error:", err, bucketName, objectName)
+	}
+
+	if n != int64(len(buf)) {
+		t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", len(buf), n)
+	}
+
+	reqHeaders := NewGetReqHeaders()
+
+	offset := int64(2048)
+
+	// read directly
+	buf1 := make([]byte, 512)
+	buf2 := make([]byte, 512)
+	buf3 := make([]byte, n)
+	buf4 := make([]byte, 1)
+
+	reqHeaders.SetRange(offset, offset+int64(len(buf1))-1)
+	reader, objectInfo, err := c.GetObject(bucketName, objectName, reqHeaders)
+	if err != nil {
+		t.Fatal(err)
+	}
+	m, err := io.ReadFull(reader, buf1)
+	reader.Close()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if objectInfo.Size != int64(m) {
+		t.Fatalf("Error: GetObject read shorter bytes before reaching EOF, want %v, got %v\n", objectInfo.Size, m)
+	}
+	if !bytes.Equal(buf1, buf[offset:offset+512]) {
+		t.Fatal("Error: Incorrect read between two GetObject from same offset.")
+	}
+	offset += 512
+
+	reqHeaders.SetRange(offset, offset+int64(len(buf2))-1)
+	reader, objectInfo, err = c.GetObject(bucketName, objectName, reqHeaders)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	m, err = io.ReadFull(reader, buf2)
+	reader.Close()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if objectInfo.Size != int64(m) {
+		t.Fatalf("Error: GetObject read shorter bytes before reaching EOF, want %v, got %v\n", objectInfo.Size, m)
+	}
+	if !bytes.Equal(buf2, buf[offset:offset+512]) {
+		t.Fatal("Error: Incorrect read between two GetObject from same offset.")
+	}
+
+	reqHeaders.SetRange(0, int64(len(buf3)))
+	reader, objectInfo, err = c.GetObject(bucketName, objectName, reqHeaders)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	m, err = io.ReadFull(reader, buf3)
+	if err != nil {
+		reader.Close()
+		t.Fatal(err)
+	}
+	reader.Close()
+
+	if objectInfo.Size != int64(m) {
+		t.Fatalf("Error: GetObject read shorter bytes before reaching EOF, want %v, got %v\n", objectInfo.Size, m)
+	}
+	if !bytes.Equal(buf3, buf) {
+		t.Fatal("Error: Incorrect data read in GetObject, than what was previously uploaded.")
+	}
+
+	reqHeaders = NewGetReqHeaders()
+	reqHeaders.SetMatchETag("etag")
+	_, _, err = c.GetObject(bucketName, objectName, reqHeaders)
+	if err == nil {
+		t.Fatal("Unexpected GetObject should fail with mismatching etags")
+	}
+	if errResp := ToErrorResponse(err); errResp.Code != "PreconditionFailed" {
+		t.Fatalf("Expected \"PreconditionFailed\" as code, got %s instead", errResp.Code)
+	}
+
+	reqHeaders = NewGetReqHeaders()
+	reqHeaders.SetMatchETagExcept("etag")
+	reader, objectInfo, err = c.GetObject(bucketName, objectName, reqHeaders)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	m, err = io.ReadFull(reader, buf3)
+	reader.Close()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if objectInfo.Size != int64(m) {
+		t.Fatalf("Error: GetObject read shorter bytes before reaching EOF, want %v, got %v\n", objectInfo.Size, m)
+	}
+	if !bytes.Equal(buf3, buf) {
+		t.Fatal("Error: Incorrect data read in GetObject, than what was previously uploaded.")
+	}
+
+	reqHeaders = NewGetReqHeaders()
+	reqHeaders.SetRange(0, 0)
+	reader, objectInfo, err = c.GetObject(bucketName, objectName, reqHeaders)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	m, err = io.ReadFull(reader, buf4)
+	reader.Close()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if objectInfo.Size != int64(m) {
+		t.Fatalf("Error: GetObject read shorter bytes before reaching EOF, want %v, got %v\n", objectInfo.Size, m)
+	}
+
+	err = c.RemoveObject(bucketName, objectName)
+	if err != nil {
+		t.Fatal("Error: ", err)
+	}
+	err = c.RemoveBucket(bucketName)
+	if err != nil {
+		t.Fatal("Error:", err)
+	}
+}
+
+// Tests get bucket policy core API.
+func TestGetBucketPolicy(t *testing.T) {
+	if testing.Short() {
+		t.Skip("skipping functional tests for short runs")
+	}
+
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+	c, err := NewCore(
+		os.Getenv("S3_ADDRESS"),
+		os.Getenv("ACCESS_KEY"),
+		os.Getenv("SECRET_KEY"),
+		mustParseBool(os.Getenv("S3_SECURE")),
+	)
+	if err != nil {
+		t.Fatal("Error:", err)
+	}
+
+	// Enable to debug
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+
+	// Make a new bucket.
+	err = c.MakeBucket(bucketName, "us-east-1")
+	if err != nil {
+		t.Fatal("Error:", err, bucketName)
+	}
+
+	// Verify if bucket exists and you have access.
+	var exists bool
+	exists, err = c.BucketExists(bucketName)
+	if err != nil {
+		t.Fatal("Error:", err, bucketName)
+	}
+	if !exists {
+		t.Fatal("Error: could not find ", bucketName)
+	}
+
+	// Asserting the default bucket policy.
+	bucketPolicy, err := c.GetBucketPolicy(bucketName)
+	if err != nil {
+		errResp := ToErrorResponse(err)
+		if errResp.Code != "NoSuchBucketPolicy" {
+			t.Error("Error:", err, bucketName)
+		}
+	}
+	if !reflect.DeepEqual(bucketPolicy, emptyBucketAccessPolicy) {
+		t.Errorf("Bucket policy expected %#v, got %#v", emptyBucketAccessPolicy, bucketPolicy)
+	}
+
+	err = c.RemoveBucket(bucketName)
+	if err != nil {
+		t.Fatal("Error:", err)
+	}
+}
+
+// Test Core PutObject.
+func TestCorePutObject(t *testing.T) {
+	if testing.Short() {
+		t.Skip("skipping functional tests for short runs")
+	}
+
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+	c, err := NewCore(
+		os.Getenv("S3_ADDRESS"),
+		os.Getenv("ACCESS_KEY"),
+		os.Getenv("SECRET_KEY"),
+		mustParseBool(os.Getenv("S3_SECURE")),
+	)
+	if err != nil {
+		t.Fatal("Error:", err)
+	}
+
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+
+	// Make a new bucket.
+	err = c.MakeBucket(bucketName, "us-east-1")
+	if err != nil {
+		t.Fatal("Error:", err, bucketName)
+	}
+
+	buf := make([]byte, minPartSize)
+
+	size, err := io.ReadFull(crand.Reader, buf)
+	if err != nil {
+		t.Fatal("Error:", err)
+	}
+	if size != minPartSize {
+		t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", minPartSize, size)
+	}
+
+	// Save the data
+	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+	// Object content type
+	objectContentType := "binary/octet-stream"
+	metadata := make(map[string][]string)
+	metadata["Content-Type"] = []string{objectContentType}
+
+	objInfo, err := c.PutObject(bucketName, objectName, int64(len(buf)), bytes.NewReader(buf), md5.New().Sum(nil), nil, metadata)
+	if err == nil {
+		t.Fatal("Error expected: nil, got: ", err)
+	}
+
+	objInfo, err = c.PutObject(bucketName, objectName, int64(len(buf)), bytes.NewReader(buf), nil, sum256(nil), metadata)
+	if err == nil {
+		t.Fatal("Error expected: nil, got: ", err)
+	}
+
+	objInfo, err = c.PutObject(bucketName, objectName, int64(len(buf)), bytes.NewReader(buf), nil, nil, metadata)
+	if err != nil {
+		t.Fatal("Error:", err, bucketName, objectName)
+	}
+
+	if objInfo.Size != int64(len(buf)) {
+		t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", len(buf), objInfo.Size)
+	}
+
+	// Read the data back
+	r, err := c.Client.GetObject(bucketName, objectName)
+	if err != nil {
+		t.Fatal("Error:", err, bucketName, objectName)
+	}
+
+	st, err := r.Stat()
+	if err != nil {
+		t.Fatal("Error:", err, bucketName, objectName)
+	}
+
+	if st.Size != int64(len(buf)) {
+		t.Fatalf("Error: number of bytes in stat does not match, want %v, got %v\n",
+			len(buf), st.Size)
+	}
+
+	if st.ContentType != objectContentType {
+		t.Fatalf("Error: Content types don't match, expected: %+v, found: %+v\n", objectContentType, st.ContentType)
+	}
+
+	if err := r.Close(); err != nil {
+		t.Fatal("Error:", err)
+	}
+
+	if err := r.Close(); err == nil {
+		t.Fatal("Error: object is already closed, should return error")
+	}
+
+	err = c.RemoveObject(bucketName, objectName)
+	if err != nil {
+		t.Fatal("Error: ", err)
+	}
+
+	err = c.RemoveBucket(bucketName)
+	if err != nil {
+		t.Fatal("Error:", err)
+	}
+}
vendor/src/github.com/minio/minio-go/docs/API.md (vendored, 333 changed lines)
@@ -5,7 +5,6 @@
 ## Minio

 ```go
-
 package main

 import (
@@ -25,13 +24,11 @@ func main() {
     return
     }
 }
-
 ```

 ## AWS S3

 ```go
-
 package main

 import (
@@ -51,20 +48,19 @@ func main() {
     return
     }
 }

 ```
-
-| Bucket operations |Object operations | Presigned operations | Bucket Policy/Notification Operations | Client custom settings |
-|:---|:---|:---|:---|:---|
-|[`MakeBucket`](#MakeBucket) |[`GetObject`](#GetObject) | [`PresignedGetObject`](#PresignedGetObject) |[`SetBucketPolicy`](#SetBucketPolicy) | [`SetAppInfo`](#SetAppInfo) |
-|[`ListBuckets`](#ListBuckets) |[`PutObject`](#PutObject) |[`PresignedPutObject`](#PresignedPutObject) | [`GetBucketPolicy`](#GetBucketPolicy) | [`SetCustomTransport`](#SetCustomTransport) |
-|[`BucketExists`](#BucketExists) |[`CopyObject`](#CopyObject) |[`PresignedPostPolicy`](#PresignedPostPolicy) | [`ListBucketPolicies`](#ListBucketPolicies) | [`TraceOn`](#TraceOn) |
-| [`RemoveBucket`](#RemoveBucket) |[`StatObject`](#StatObject) | | [`SetBucketNotification`](#SetBucketNotification) | [`TraceOff`](#TraceOff) |
-|[`ListObjects`](#ListObjects) |[`RemoveObject`](#RemoveObject) | | [`GetBucketNotification`](#GetBucketNotification) | [`SetS3TransferAccelerate`](#SetS3TransferAccelerate) |
-|[`ListObjectsV2`](#ListObjectsV2) | [`RemoveObjects`](#RemoveObjects) | | [`RemoveAllBucketNotification`](#RemoveAllBucketNotification) |
-|[`ListIncompleteUploads`](#ListIncompleteUploads) | [`RemoveIncompleteUpload`](#RemoveIncompleteUpload) | | [`ListenBucketNotification`](#ListenBucketNotification) |
-| | [`FPutObject`](#FPutObject) | | |
-| | [`FGetObject`](#FGetObject) | | |
+| Bucket operations |Object operations | Encrypted Object operations | Presigned operations | Bucket Policy/Notification Operations | Client custom settings |
+|:---|:---|:---|:---|:---|:---|
+|[`MakeBucket`](#MakeBucket) |[`GetObject`](#GetObject) | [`NewSymmetricKey`](#NewSymmetricKey) | [`PresignedGetObject`](#PresignedGetObject) |[`SetBucketPolicy`](#SetBucketPolicy) | [`SetAppInfo`](#SetAppInfo) |
+|[`ListBuckets`](#ListBuckets) |[`PutObject`](#PutObject) | [`NewAsymmetricKey`](#NewAsymmetricKey) |[`PresignedPutObject`](#PresignedPutObject) | [`GetBucketPolicy`](#GetBucketPolicy) | [`SetCustomTransport`](#SetCustomTransport) |
+|[`BucketExists`](#BucketExists) |[`CopyObject`](#CopyObject) | [`GetEncryptedObject`](#GetEncryptedObject) |[`PresignedPostPolicy`](#PresignedPostPolicy) | [`ListBucketPolicies`](#ListBucketPolicies) | [`TraceOn`](#TraceOn) |
+| [`RemoveBucket`](#RemoveBucket) |[`StatObject`](#StatObject) | [`PutObjectStreaming`](#PutObjectStreaming) | | [`SetBucketNotification`](#SetBucketNotification) | [`TraceOff`](#TraceOff) |
+|[`ListObjects`](#ListObjects) |[`RemoveObject`](#RemoveObject) | [`PutEncryptedObject`](#PutEncryptedObject) | | [`GetBucketNotification`](#GetBucketNotification) | [`SetS3TransferAccelerate`](#SetS3TransferAccelerate) |
+|[`ListObjectsV2`](#ListObjectsV2) | [`RemoveObjects`](#RemoveObjects) | | | [`RemoveAllBucketNotification`](#RemoveAllBucketNotification) |
+|[`ListIncompleteUploads`](#ListIncompleteUploads) | [`RemoveIncompleteUpload`](#RemoveIncompleteUpload) | | | [`ListenBucketNotification`](#ListenBucketNotification) |
+| | [`FPutObject`](#FPutObject) | | | |
+| | [`FGetObject`](#FGetObject) | | | |

 ## 1. Constructor
 <a name="Minio"></a>
@@ -74,7 +70,6 @@ Initializes a new client object.

 __Parameters__

-
 |Param |Type |Description |
 |:---|:---| :---|
 |`endpoint` | _string_ |S3 compatible object storage endpoint |
@@ -82,6 +77,18 @@ __Parameters__
 |`secretAccessKey` | _string_ |Secret key for the object storage |
 |`ssl` | _bool_ | If 'true' API requests will be secure (HTTPS), and insecure (HTTP) otherwise |

+### NewWithRegion(endpoint, accessKeyID, secretAccessKey string, ssl bool, region string) (*Client, error)
+Initializes minio client, with region configured. Unlike New(), NewWithRegion avoids bucket-location lookup operations and is slightly faster. Use this function when your application deals with a single region.
+
+__Parameters__
+
+|Param |Type |Description |
+|:---|:---| :---|
+|`endpoint` | _string_ |S3 compatible object storage endpoint |
+|`accessKeyID` |_string_ |Access key for the object storage |
+|`secretAccessKey` | _string_ |Secret key for the object storage |
+|`ssl` | _bool_ | If 'true' API requests will be secure (HTTPS), and insecure (HTTP) otherwise |
+|`region`| _string_ | Region for the object storage |
+
 ## 2. Bucket operations

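The new section documents NewWithRegion without a usage snippet. In the style of the surrounding examples, a call might look like the following (the endpoint, keys and region are placeholders):

```go
s3Client, err := minio.NewWithRegion("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true, "us-east-1")
if err != nil {
    fmt.Println(err)
    return
}
```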
@ -89,7 +96,6 @@ __Parameters__
|
|||||||
### MakeBucket(bucketName, location string) error
|
### MakeBucket(bucketName, location string) error
|
||||||
Creates a new bucket.
|
Creates a new bucket.
|
||||||
|
|
||||||
|
|
||||||
__Parameters__
|
__Parameters__
|
||||||
|
|
||||||
| Param | Type | Description |
|
| Param | Type | Description |
|
||||||
@ -111,14 +117,12 @@ __Example__
|
|||||||
|
|
||||||
|
|
||||||
```go
|
```go
|
||||||
|
|
||||||
err := minioClient.MakeBucket("mybucket", "us-east-1")
|
err := minioClient.MakeBucket("mybucket", "us-east-1")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
fmt.Println(err)
|
fmt.Println(err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
fmt.Println("Successfully created mybucket.")
|
fmt.Println("Successfully created mybucket.")
|
||||||
|
|
||||||
```
|
```
|
||||||
|
|
||||||
<a name="ListBuckets"></a>
|
<a name="ListBuckets"></a>
|
||||||
@ -141,7 +145,6 @@ __Example__

```go
buckets, err := minioClient.ListBuckets()
if err != nil {
    fmt.Println(err)

@ -150,7 +153,6 @@ buckets, err := minioClient.ListBuckets()

for _, bucket := range buckets {
    fmt.Println(bucket)
}
```

<a name="BucketExists"></a>
@ -178,7 +180,6 @@ __Example__

```go
found, err := minioClient.BucketExists("mybucket")
if err != nil {
    fmt.Println(err)

@ -187,7 +188,6 @@ if err != nil {

if found {
    fmt.Println("Bucket found")
}
```

<a name="RemoveBucket"></a>
@ -206,13 +206,11 @@ __Example__

```go
err := minioClient.RemoveBucket("mybucket")
if err != nil {
    fmt.Println(err)
    return
}
```

<a name="ListObjects"></a>
@ -246,7 +244,6 @@ __Return Value__

```go
// Create a done channel to control 'ListObjects' go routine.
doneCh := make(chan struct{})

@ -262,7 +259,6 @@ for object := range objectCh {

    }
    fmt.Println(object)
}
```

@ -297,7 +293,6 @@ __Return Value__

```go
// Create a done channel to control 'ListObjectsV2' go routine.
doneCh := make(chan struct{})

@ -313,7 +308,6 @@ for object := range objectCh {

    }
    fmt.Println(object)
}
```

<a name="ListIncompleteUploads"></a>
@ -351,7 +345,6 @@ __Example__

```go
// Create a done channel to control 'ListObjects' go routine.
doneCh := make(chan struct{})

@ -367,7 +360,6 @@ for multiPartObject := range multiPartObjectCh {

    }
    fmt.Println(multiPartObject)
}
```

## 3. Object operations
@ -375,7 +367,7 @@ for multiPartObject := range multiPartObjectCh {

<a name="GetObject"></a>
### GetObject(bucketName, objectName string) (*Object, error)

Downloads an object.
Returns a stream of the object data. Most of the common errors occur when reading the stream.

__Parameters__
@ -399,7 +391,6 @@ __Example__

```go
object, err := minioClient.GetObject("mybucket", "photo.jpg")
if err != nil {
    fmt.Println(err)

@ -414,7 +405,6 @@ if _, err = io.Copy(localFile, object); err != nil {

    fmt.Println(err)
    return
}
```

<a name="FGetObject"></a>
@ -436,19 +426,19 @@ __Example__

```go
err := minioClient.FGetObject("mybucket", "photo.jpg", "/tmp/photo.jpg")
if err != nil {
    fmt.Println(err)
    return
}
```

<a name="PutObject"></a>
### PutObject(bucketName, objectName string, reader io.Reader, contentType string) (n int, err error)

Uploads an object.
Uploads objects that are less than 64MiB in a single PUT operation. For objects that are greater than 64MiB in size, PutObject seamlessly uploads the object as parts of 64MiB or more depending on the actual file size. The max upload size for an object is 5TB.

In the event that PutObject fails to upload an object, the user may attempt to re-upload the same object. If the same object is being uploaded, PutObject API examines the previous partial attempt to upload this object and resumes automatically from where it left off.

__Parameters__
@ -465,13 +455,7 @@ __Parameters__

__Example__

Uploads objects that are less than 64MiB in a single PUT operation. For objects that are greater than 64MiB in size, PutObject seamlessly uploads the object in chunks of 64MiB or more depending on the actual file size. The max upload size for an object is 5TB.

In the event that PutObject fails to upload an object, the user may attempt to re-upload the same object. If the same object is being uploaded, PutObject API examines the previous partial attempt to upload this object and resumes automatically from where it left off.

```go
file, err := os.Open("my-testfile")
if err != nil {
    fmt.Println(err)

@ -484,7 +468,39 @@ if err != nil {

    fmt.Println(err)
    return
}
```

<a name="PutObjectStreaming"></a>
### PutObjectStreaming(bucketName, objectName string, reader io.Reader) (n int, err error)

Uploads an object as multiple chunks while keeping memory consumption constant. It is similar to PutObject in how objects are broken into multiple parts, and each part in turn is transferred as multiple chunks with constant memory usage. However, resuming previously failed uploads from where they left off is not supported.

__Parameters__

|Param |Type |Description |
|:---|:---|:---|
|`bucketName` | _string_ |Name of the bucket |
|`objectName` | _string_ |Name of the object |
|`reader` | _io.Reader_ |Any Go type that implements io.Reader |

__Example__

```go
file, err := os.Open("my-testfile")
if err != nil {
    fmt.Println(err)
    return
}
defer file.Close()

n, err := minioClient.PutObjectStreaming("mybucket", "myobject", file)
if err != nil {
    fmt.Println(err)
    return
}
```

@ -511,7 +527,7 @@ __Example__

```go
// Use-case-1
// To copy an existing object to a new object with _no_ copy conditions.
copyConditions := minio.CopyConditions{}
copyConds := minio.CopyConditions{}
err := minioClient.CopyObject("mybucket", "myobject", "my-sourcebucketname/my-sourceobjectname", copyConds)
if err != nil {
    fmt.Println(err)
@ -541,14 +557,17 @@ if err != nil {

    fmt.Println(err)
    return
}
```

<a name="FPutObject"></a>
### FPutObject(bucketName, objectName, filePath, contentType string) error
### FPutObject(bucketName, objectName, filePath, contentType string) (length int64, err error)

Uploads contents from a file to objectName.

FPutObject uploads objects that are less than 64MiB in a single PUT operation. For objects that are greater than 64MiB in size, FPutObject seamlessly uploads the object in chunks of 64MiB or more depending on the actual file size. The max upload size for an object is 5TB.

In the event that FPutObject fails to upload an object, the user may attempt to re-upload the same object. If the same object is being uploaded, FPutObject API examines the previous partial attempt to upload this object and resumes automatically from where it left off.

__Parameters__
@ -564,18 +583,12 @@ __Parameters__

__Example__

FPutObject uploads objects that are less than 64MiB in a single PUT operation. For objects that are greater than 64MiB in size, FPutObject seamlessly uploads the object in chunks of 64MiB or more depending on the actual file size. The max upload size for an object is 5TB.

In the event that FPutObject fails to upload an object, the user may attempt to re-upload the same object. If the same object is being uploaded, FPutObject API examines the previous partial attempt to upload this object and resumes automatically from where it left off.

```go
n, err := minioClient.FPutObject("mybucket", "myobject.csv", "/tmp/otherobject.csv", "application/csv")
if err != nil {
    fmt.Println(err)
    return
}
```

<a name="StatObject"></a>
@ -612,14 +625,12 @@ __Return Value__

```go
objInfo, err := minioClient.StatObject("mybucket", "photo.jpg")
if err != nil {
    fmt.Println(err)
    return
}
fmt.Println(objInfo)
```

<a name="RemoveObject"></a>
@ -638,13 +649,11 @@ __Parameters__

```go
err := minioClient.RemoveObject("mybucket", "photo.jpg")
if err != nil {
    fmt.Println(err)
    return
}
```

<a name="RemoveObjects"></a>
### RemoveObjects(bucketName string, objectsCh chan string) errorCh chan minio.RemoveObjectError
@ -669,12 +678,10 @@ __Return Values__

```go
errorCh := minioClient.RemoveObjects("mybucket", objectsCh)
for e := range errorCh {
    fmt.Println("Error detected during deletion: " + e.Err.Error())
}
```

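The snippet above assumes an `objectsCh` that is already populated. A minimal sketch of producing the channel (the object names here are placeholders) could look like:

```go
objectsCh := make(chan string)

// Send the names of the objects to delete, then close the channel so
// RemoveObjects knows the listing is complete.
go func() {
    defer close(objectsCh)
    objectsCh <- "photo1.jpg"
    objectsCh <- "photo2.jpg"
}()

errorCh := minioClient.RemoveObjects("mybucket", objectsCh)
for e := range errorCh {
    fmt.Println("Error detected during deletion: " + e.Err.Error())
}
```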
@ -696,17 +703,187 @@ __Example__

```go
err := minioClient.RemoveIncompleteUpload("mybucket", "photo.jpg")
if err != nil {
    fmt.Println(err)
    return
}
```

## 4. Presigned operations
## 4. Encrypted object operations

<a name="NewSymmetricKey"></a>
### NewSymmetricKey(key []byte) *minio.SymmetricKey

__Parameters__

|Param |Type |Description |
|:---|:---| :---|
|`key` | _[]byte_ |Symmetric master key used to encrypt and decrypt data |

__Return Value__

|Param |Type |Description |
|:---|:---| :---|
|`symmetricKey` | _*minio.SymmetricKey_ |_minio.SymmetricKey_ represents a symmetric key structure which can be used to encrypt and decrypt data. |

```go
symKey := minio.NewSymmetricKey([]byte("my-secret-key-00"))
```

<a name="NewAsymmetricKey"></a>
### NewAsymmetricKey(privateKey []byte, publicKey []byte) (*minio.AsymmetricKey, error)

__Parameters__

|Param |Type |Description |
|:---|:---| :---|
|`privateKey` | _[]byte_ | Private key data |
|`publicKey` | _[]byte_ | Public key data |

__Return Value__

|Param |Type |Description |
|:---|:---| :---|
|`asymmetricKey` | _*minio.AsymmetricKey_ | represents an asymmetric key structure which can be used to encrypt and decrypt data. |
|`err` | _error_ | encountered errors. |

```go
privateKey, err := ioutil.ReadFile("private.key")
if err != nil {
    log.Fatal(err)
}

publicKey, err := ioutil.ReadFile("public.key")
if err != nil {
    log.Fatal(err)
}

// Initialize the asymmetric key
asymmetricKey, err := minio.NewAsymmetricKey(privateKey, publicKey)
if err != nil {
    log.Fatal(err)
}
```

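Note that, as the vendored pkg/encrypt/keys.go later in this diff shows, NewAsymmetricKey parses the private key as PKCS#8 DER and the public key as PKIX DER, so `private.key` and `public.key` above must contain raw DER data, not PEM. A hedged sketch (file names are the same placeholders; requires Go 1.10+ for MarshalPKCS8PrivateKey) that writes a compatible key pair:

```go
package main

import (
    "crypto/rand"
    "crypto/rsa"
    "crypto/x509"
    "io/ioutil"
    "log"
)

func main() {
    priv, err := rsa.GenerateKey(rand.Reader, 2048)
    if err != nil {
        log.Fatal(err)
    }
    // PKCS#8 DER for the private key, as x509.ParsePKCS8PrivateKey expects.
    privDER, err := x509.MarshalPKCS8PrivateKey(priv)
    if err != nil {
        log.Fatal(err)
    }
    // PKIX DER for the public key, as x509.ParsePKIXPublicKey expects.
    pubDER, err := x509.MarshalPKIXPublicKey(&priv.PublicKey)
    if err != nil {
        log.Fatal(err)
    }
    if err := ioutil.WriteFile("private.key", privDER, 0600); err != nil {
        log.Fatal(err)
    }
    if err := ioutil.WriteFile("public.key", pubDER, 0644); err != nil {
        log.Fatal(err)
    }
}
```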
<a name="GetEncryptedObject"></a>
### GetEncryptedObject(bucketName, objectName string, encryptMaterials minio.EncryptionMaterials) (io.Reader, error)

Returns the decrypted stream of the object data based on the given encryption materials. Most of the common errors occur when reading the stream.

__Parameters__

|Param |Type |Description |
|:---|:---| :---|
|`bucketName` | _string_ | Name of the bucket |
|`objectName` | _string_ | Name of the object |
|`encryptMaterials` | _minio.EncryptionMaterials_ | The module to decrypt the object data |

__Return Value__

|Param |Type |Description |
|:---|:---| :---|
|`stream` | _io.Reader_ | Returns the deciphered object reader. |
|`err` | _error_ | Returns errors. |

__Example__

```go
// Generate a master symmetric key
key := minio.NewSymmetricKey([]byte("my-secret-key-00"))

// Build the CBC encryption material
cbcMaterials, err := minio.NewCBCSecureMaterials(key)
if err != nil {
    fmt.Println(err)
    return
}

object, err := minioClient.GetEncryptedObject("mybucket", "photo.jpg", cbcMaterials)
if err != nil {
    fmt.Println(err)
    return
}
localFile, err := os.Create("/tmp/local-file.jpg")
if err != nil {
    fmt.Println(err)
    return
}
if _, err = io.Copy(localFile, object); err != nil {
    fmt.Println(err)
    return
}
```

<a name="PutEncryptedObject"></a>
### PutEncryptedObject(bucketName, objectName string, reader io.Reader, encryptMaterials minio.EncryptionMaterials, metadata map[string][]string, progress io.Reader) (n int, err error)

Encrypts and uploads an object.

__Parameters__

|Param |Type |Description |
|:---|:---| :---|
|`bucketName` | _string_ |Name of the bucket |
|`objectName` | _string_ |Name of the object |
|`reader` | _io.Reader_ |Any Go type that implements io.Reader |
|`encryptMaterials` | _minio.EncryptionMaterials_ | The module that encrypts data |
|`metadata` | _map[string][]string_ | Object metadata to be stored |
|`progress` | _io.Reader_ | A reader to update the upload progress |

__Example__

```go
// Load a private key
privateKey, err := ioutil.ReadFile("private.key")
if err != nil {
    log.Fatal(err)
}

// Load a public key
publicKey, err := ioutil.ReadFile("public.key")
if err != nil {
    log.Fatal(err)
}

// Build an asymmetric key
key, err := minio.NewAsymmetricKey(privateKey, publicKey)
if err != nil {
    log.Fatal(err)
}

// Build the CBC encryption module
cbcMaterials, err := minio.NewCBCSecureMaterials(key)
if err != nil {
    log.Fatal(err)
}

// Open a file to upload
file, err := os.Open("my-testfile")
if err != nil {
    fmt.Println(err)
    return
}
defer file.Close()

// Upload the encrypted form of the file
n, err := minioClient.PutEncryptedObject("mybucket", "myobject", file, cbcMaterials, nil, nil)
if err != nil {
    fmt.Println(err)
    return
}
```

## 5. Presigned operations

<a name="PresignedGetObject"></a>
### PresignedGetObject(bucketName, objectName string, expiry time.Duration, reqParams url.Values) (*url.URL, error)

@ -728,7 +905,6 @@ __Example__

```go
// Set request parameters for content-disposition.
reqParams := make(url.Values)
reqParams.Set("response-content-disposition", "attachment; filename=\"your-filename.txt\"")

@ -739,7 +915,6 @@ if err != nil {

    fmt.Println(err)
    return
}
```

<a name="PresignedPutObject"></a>
@ -765,7 +940,6 @@ __Example__

```go
// Generates a url which expires in a day.
expiry := time.Second * 24 * 60 * 60 // 1 day.
presignedURL, err := minioClient.PresignedPutObject("mybucket", "myobject", expiry)

@ -774,7 +948,6 @@ if err != nil {

    return
}
fmt.Println(presignedURL)
```

<a name="PresignedPostPolicy"></a>
@ -786,16 +959,13 @@ Create policy :

```go
policy := minio.NewPostPolicy()
```

Apply upload policy restrictions:

```go
policy.SetBucket("mybucket")
policy.SetKey("myobject")
policy.SetExpires(time.Now().UTC().AddDate(0, 0, 10)) // expires in 10 days

@ -813,7 +983,6 @@ if err != nil {

    fmt.Println(err)
    return
}
```

@ -829,7 +998,7 @@ fmt.Printf("-F file=@/etc/bash.bashrc ")

fmt.Printf("%s\n", url)
```

## 5. Bucket policy/notification operations
## 6. Bucket policy/notification operations

<a name="SetBucketPolicy"></a>
### SetBucketPolicy(bucketname, objectPrefix string, policy policy.BucketPolicy) error
@ -845,11 +1014,11 @@ __Parameters__

|:---|:---| :---|
|`bucketName` | _string_ |Name of the bucket|
|`objectPrefix` | _string_ |Name of the object prefix|
|`policy` | _policy.BucketPolicy_ |Policy can be one of the following: |
|`policy` | _policy.BucketPolicy_ |Policy can be one of the following, |
|| |policy.BucketPolicyNone|
| | | _policy.BucketPolicyNone_ |
| | |policy.BucketPolicyReadOnly|
| | | _policy.BucketPolicyReadOnly_ |
|| |policy.BucketPolicyReadWrite|
| | | _policy.BucketPolicyReadWrite_ |
| | |policy.BucketPolicyWriteOnly|
| | | _policy.BucketPolicyWriteOnly_ |

__Return Values__
@ -864,13 +1033,11 @@ __Example__

```go
err := minioClient.SetBucketPolicy("mybucket", "myprefix", policy.BucketPolicyReadWrite)
if err != nil {
    fmt.Println(err)
    return
}
```

<a name="GetBucketPolicy"></a>
@ -900,14 +1067,12 @@ __Example__

```go
bucketPolicy, err := minioClient.GetBucketPolicy("mybucket", "")
if err != nil {
    fmt.Println(err)
    return
}
fmt.Println("Access permissions for mybucket is", bucketPolicy)
```

<a name="ListBucketPolicies"></a>
@ -935,7 +1100,6 @@ __Example__

```go
bucketPolicies, err := minioClient.ListBucketPolicies("mybucket", "")
if err != nil {
    fmt.Println(err)

@ -944,7 +1108,6 @@ if err != nil {

for resource, permission := range bucketPolicies {
    fmt.Println(resource, " => ", permission)
}
```

<a name="GetBucketNotification"></a>
@ -1087,7 +1250,6 @@ __Example__

```go
// Create a done channel to control 'ListenBucketNotification' go routine.
doneCh := make(chan struct{})

@ -1097,6 +1259,7 @@ defer close(doneCh)

// Listen for bucket notifications on "mybucket" filtered by prefix, suffix and events.
for notificationInfo := range minioClient.ListenBucketNotification("YOUR-BUCKET", "PREFIX", "SUFFIX", []string{
    "s3:ObjectCreated:*",
    "s3:ObjectAccessed:*",
    "s3:ObjectRemoved:*",
}, doneCh) {
    if notificationInfo.Err != nil {

@ -1106,7 +1269,7 @@ for notificationInfo := range minioClient.ListenBucketNotification("YOUR-BUCKET"

    }
}
```

## 6. Client custom settings
## 7. Client custom settings

<a name="SetAppInfo"></a>
### SetAppInfo(appName, appVersion string)
@ -1124,10 +1287,8 @@ __Example__

```go
// Set Application name and version to be used in subsequent API requests.
minioClient.SetAppInfo("myCloudApp", "1.0.0")
```

<a name="SetCustomTransport"></a>
@ -1170,6 +1331,6 @@ __Parameters__

|`acceleratedEndpoint` | _string_ | Set to new S3 transfer acceleration endpoint.|

## 7. Explore Further
## 8. Explore Further

- [Build your own Go Music Player App example](https://docs.minio.io/docs/go-music-player-app)
@ -49,6 +49,7 @@ func main() {

// Listen for bucket notifications on "mybucket" filtered by prefix, suffix and events.
for notificationInfo := range minioClient.ListenBucketNotification("YOUR-BUCKET", "PREFIX", "SUFFIX", []string{
    "s3:ObjectCreated:*",
    "s3:ObjectAccessed:*",
    "s3:ObjectRemoved:*",
}, doneCh) {
    if notificationInfo.Err != nil {
86 vendor/src/github.com/minio/minio-go/examples/s3/get-encrypted-object.go vendored Normal file
@ -0,0 +1,86 @@
// +build ignore

/*
 * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package main

import (
    "io"
    "log"
    "os"

    "github.com/minio/minio-go"
)

func main() {
    // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname, my-objectname and
    // my-testfile are dummy values, please replace them with original values.

    // Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
    // This boolean value is the last argument for New().

    // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
    // determined based on the Endpoint value.
    s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESS-KEY-HERE", "YOUR-SECRET-KEY-HERE", true)
    if err != nil {
        log.Fatalln(err)
    }

    //// Build an asymmetric key from private and public files
    //
    // privateKey, err := ioutil.ReadFile("private.key")
    // if err != nil {
    //     t.Fatal(err)
    // }
    //
    // publicKey, err := ioutil.ReadFile("public.key")
    // if err != nil {
    //     t.Fatal(err)
    // }
    //
    // asymmetricKey, err := NewAsymmetricKey(privateKey, publicKey)
    // if err != nil {
    //     t.Fatal(err)
    // }
    ////

    // Build a symmetric key
    symmetricKey := minio.NewSymmetricKey([]byte("my-secret-key-00"))

    // Build encryption materials which will encrypt uploaded data
    cbcMaterials, err := minio.NewCBCSecureMaterials(symmetricKey)
    if err != nil {
        log.Fatalln(err)
    }

    // Get a deciphered data from the server, deciphering is assured by cbcMaterials
    reader, err := s3Client.GetEncryptedObject("my-bucketname", "my-objectname", cbcMaterials)
    if err != nil {
        log.Fatalln(err)
    }

    // Local file which holds plain data
    localFile, err := os.Create("my-testfile")
    if err != nil {
        log.Fatalln(err)
    }
    defer localFile.Close()

    if _, err := io.Copy(localFile, reader); err != nil {
        log.Fatalln(err)
    }
}
83 vendor/src/github.com/minio/minio-go/examples/s3/put-encrypted-object.go vendored Normal file
@ -0,0 +1,83 @@
// +build ignore

/*
 * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package main

import (
    "log"
    "os"

    "github.com/minio/minio-go"
)

func main() {
    // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-testfile, my-bucketname and
    // my-objectname are dummy values, please replace them with original values.

    // Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
    // This boolean value is the last argument for New().

    // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
    // determined based on the Endpoint value.
    s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
    if err != nil {
        log.Fatalln(err)
    }

    // Open a local file that we will upload
    file, err := os.Open("my-testfile")
    if err != nil {
        log.Fatalln(err)
    }
    defer file.Close()

    //// Build an asymmetric key from private and public files
    //
    // privateKey, err := ioutil.ReadFile("private.key")
    // if err != nil {
    //     t.Fatal(err)
    // }
    //
    // publicKey, err := ioutil.ReadFile("public.key")
    // if err != nil {
    //     t.Fatal(err)
    // }
    //
    // asymmetricKey, err := NewAsymmetricKey(privateKey, publicKey)
    // if err != nil {
    //     t.Fatal(err)
    // }
    ////

    // Build a symmetric key
    symmetricKey := minio.NewSymmetricKey([]byte("my-secret-key-00"))

    // Build encryption materials which will encrypt uploaded data
    cbcMaterials, err := minio.NewCBCSecureMaterials(symmetricKey)
    if err != nil {
        log.Fatalln(err)
    }

    // Encrypt file content and upload to the server
    n, err := s3Client.PutEncryptedObject("my-bucketname", "my-objectname", file, cbcMaterials, nil, nil)
    if err != nil {
        log.Fatalln(err)
    }

    log.Println("Uploaded", "my-objectname", " of size: ", n, "Successfully.")
}
54 vendor/src/github.com/minio/minio-go/examples/s3/putobject-streaming.go vendored Normal file
@ -0,0 +1,54 @@
// +build ignore

/*
 * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2016 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package main

import (
    "log"
    "os"

    minio "github.com/minio/minio-go"
)

func main() {
    // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-testfile, my-bucketname and
    // my-objectname are dummy values, please replace them with original values.

    // Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
    // This boolean value is the last argument for New().

    // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
    // determined based on the Endpoint value.
    s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
    if err != nil {
        log.Fatalln(err)
    }

    object, err := os.Open("my-testfile")
    if err != nil {
        log.Fatalln(err)
    }
    defer object.Close()

    n, err := s3Client.PutObjectStreaming("my-bucketname", "my-objectname", object)
    if err != nil {
        log.Fatalln(err)
    }

    log.Println("Uploaded", "my-objectname", " of size: ", n, "Successfully.")
}
284 vendor/src/github.com/minio/minio-go/pkg/encrypt/cbc.go vendored Normal file
@ -0,0 +1,284 @@
/*
 * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2017 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package encrypt

import (
    "bytes"
    "crypto/aes"
    "crypto/cipher"
    "crypto/rand"
    "encoding/base64"
    "errors"
    "io"
)

// Crypt mode - encryption or decryption
type cryptMode int

const (
    encryptMode cryptMode = iota
    decryptMode
)

// CBCSecureMaterials encrypts/decrypts data using AES CBC algorithm
type CBCSecureMaterials struct {

    // Data stream to encrypt/decrypt
    stream io.Reader

    // Last internal error
    err error

    // End of file reached
    eof bool

    // Holds initial data
    srcBuf *bytes.Buffer

    // Holds transformed data (encrypted or decrypted)
    dstBuf *bytes.Buffer

    // Encryption algorithm
    encryptionKey Key

    // Key to encrypts/decrypts data
    contentKey []byte

    // Encrypted form of contentKey
    cryptedKey []byte

    // Initialization vector
    iv []byte

    // matDesc - currently unused
    matDesc []byte

    // Indicate if we are going to encrypt or decrypt
    cryptMode cryptMode

    // Helper that encrypts/decrypts data
    blockMode cipher.BlockMode
}

// NewCBCSecureMaterials builds new CBC crypter module with
// the specified encryption key (symmetric or asymmetric)
func NewCBCSecureMaterials(key Key) (*CBCSecureMaterials, error) {
    if key == nil {
        return nil, errors.New("Unable to recognize empty encryption properties")
    }
    return &CBCSecureMaterials{
        srcBuf:        bytes.NewBuffer([]byte{}),
        dstBuf:        bytes.NewBuffer([]byte{}),
        encryptionKey: key,
        matDesc:       []byte("{}"),
    }, nil
}

// SetupEncryptMode - tells CBC that we are going to encrypt data
func (s *CBCSecureMaterials) SetupEncryptMode(stream io.Reader) error {
    // Set mode to encrypt
    s.cryptMode = encryptMode

    // Set underlying reader
    s.stream = stream

    s.eof = false
    s.srcBuf.Reset()
    s.dstBuf.Reset()

    var err error

    // Generate random content key
    s.contentKey = make([]byte, aes.BlockSize*2)
    if _, err := rand.Read(s.contentKey); err != nil {
        return err
    }
    // Encrypt content key
    s.cryptedKey, err = s.encryptionKey.Encrypt(s.contentKey)
    if err != nil {
        return err
    }
    // Generate random IV
    s.iv = make([]byte, aes.BlockSize)
    if _, err = rand.Read(s.iv); err != nil {
        return err
    }
    // New cipher
    encryptContentBlock, err := aes.NewCipher(s.contentKey)
    if err != nil {
        return err
    }

    s.blockMode = cipher.NewCBCEncrypter(encryptContentBlock, s.iv)

    return nil
}

// SetupDecryptMode - tells CBC that we are going to decrypt data
func (s *CBCSecureMaterials) SetupDecryptMode(stream io.Reader, iv string, key string) error {
    // Set mode to decrypt
    s.cryptMode = decryptMode

    // Set underlying reader
    s.stream = stream

    // Reset
    s.eof = false
    s.srcBuf.Reset()
    s.dstBuf.Reset()

    var err error

    // Get IV
    s.iv, err = base64.StdEncoding.DecodeString(iv)
    if err != nil {
        return err
    }

    // Get encrypted content key
    s.cryptedKey, err = base64.StdEncoding.DecodeString(key)
    if err != nil {
        return err
    }

    // Decrypt content key
    s.contentKey, err = s.encryptionKey.Decrypt(s.cryptedKey)
    if err != nil {
        return err
    }

    // New cipher
    decryptContentBlock, err := aes.NewCipher(s.contentKey)
    if err != nil {
        return err
    }

    s.blockMode = cipher.NewCBCDecrypter(decryptContentBlock, s.iv)
    return nil
}

// GetIV - return randomly generated IV (per S3 object), base64 encoded.
func (s *CBCSecureMaterials) GetIV() string {
    return base64.StdEncoding.EncodeToString(s.iv)
}

// GetKey - return content encrypting key (cek) in encrypted form, base64 encoded.
func (s *CBCSecureMaterials) GetKey() string {
    return base64.StdEncoding.EncodeToString(s.cryptedKey)
}

// GetDesc - user provided encryption material description in JSON (UTF8) format.
func (s *CBCSecureMaterials) GetDesc() string {
    return string(s.matDesc)
}

// Fill buf with encrypted/decrypted data
func (s *CBCSecureMaterials) Read(buf []byte) (n int, err error) {
    // Always fill buf from bufChunk at the end of this function
    defer func() {
        if s.err != nil {
            n, err = 0, s.err
        } else {
            n, err = s.dstBuf.Read(buf)
        }
    }()

    // Return
    if s.eof {
        return
    }

    // Fill dest buffer if its length is less than buf
    for !s.eof && s.dstBuf.Len() < len(buf) {

        srcPart := make([]byte, aes.BlockSize)
        dstPart := make([]byte, aes.BlockSize)

        // Fill src buffer
        for s.srcBuf.Len() < aes.BlockSize*2 {
            _, err = io.CopyN(s.srcBuf, s.stream, aes.BlockSize)
            if err != nil {
                break
            }
        }

        // Quit immediately for errors other than io.EOF
        if err != nil && err != io.EOF {
            s.err = err
            return
        }

        // Mark current encrypting/decrypting as finished
        s.eof = (err == io.EOF)

        if s.eof && s.cryptMode == encryptMode {
            if srcPart, err = pkcs5Pad(s.srcBuf.Bytes(), aes.BlockSize); err != nil {
                s.err = err
                return
            }
        } else {
            _, _ = s.srcBuf.Read(srcPart)
        }

        // Crypt srcPart content
        for len(srcPart) > 0 {

            // Crypt current part
            s.blockMode.CryptBlocks(dstPart, srcPart[:aes.BlockSize])

            // Unpad when this is the last part and we are decrypting
            if s.eof && s.cryptMode == decryptMode {
                dstPart, err = pkcs5Unpad(dstPart, aes.BlockSize)
                if err != nil {
                    s.err = err
                    return
                }
            }

            // Send crypted data to dstBuf
            if _, wErr := s.dstBuf.Write(dstPart); wErr != nil {
                s.err = wErr
                return
            }
            // Move to the next part
            srcPart = srcPart[aes.BlockSize:]
        }
    }
    return
}

// Unpad a set of bytes following PKCS5 algorithm
func pkcs5Unpad(buf []byte, blockSize int) ([]byte, error) {
    len := len(buf)
    if len == 0 {
        return nil, errors.New("buffer is empty")
    }
    pad := int(buf[len-1])
    if pad > len || pad > blockSize {
        return nil, errors.New("invalid padding size")
    }
    return buf[:len-pad], nil
}

// Pad a set of bytes following PKCS5 algorithm
func pkcs5Pad(buf []byte, blockSize int) ([]byte, error) {
    len := len(buf)
    pad := blockSize - (len % blockSize)
    padText := bytes.Repeat([]byte{byte(pad)}, pad)
    return append(buf, padText...), nil
}
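As a quick illustration of the PKCS#5 padding scheme the two helpers above implement, here is a standalone sketch (not part of the vendored file) that pads a short message and round-trips it back:

```go
package main

import (
    "bytes"
    "errors"
    "fmt"
)

// Mirrors pkcs5Pad above: input is padded up to a multiple of blockSize,
// and every pad byte's value equals the pad length.
func pkcs5Pad(buf []byte, blockSize int) []byte {
    pad := blockSize - (len(buf) % blockSize)
    return append(buf, bytes.Repeat([]byte{byte(pad)}, pad)...)
}

// Mirrors pkcs5Unpad above: the last byte tells how many bytes to strip.
func pkcs5Unpad(buf []byte, blockSize int) ([]byte, error) {
    if len(buf) == 0 {
        return nil, errors.New("buffer is empty")
    }
    pad := int(buf[len(buf)-1])
    if pad > len(buf) || pad > blockSize {
        return nil, errors.New("invalid padding size")
    }
    return buf[:len(buf)-pad], nil
}

func main() {
    msg := []byte("hello")            // 5 bytes
    padded := pkcs5Pad(msg, 16)       // 16 bytes; the last 11 bytes are 0x0b
    fmt.Printf("%x\n", padded)
    orig, _ := pkcs5Unpad(padded, 16) // round-trips back to "hello"
    fmt.Println(string(orig))
}
```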
50 vendor/src/github.com/minio/minio-go/pkg/encrypt/interface.go vendored Normal file
@ -0,0 +1,50 @@
/*
 * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2017 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// Package encrypt implements a generic interface to encrypt any stream of data.
// Currently this package implements two types of encryption:
// - Symmetric encryption using AES.
// - Asymmetric encryption using RSA.
package encrypt

import "io"

// Materials - provides generic interface to encrypt any stream of data.
type Materials interface {

    // Returns encrypted/decrypted data, io.Reader compatible.
    Read(b []byte) (int, error)

    // Get randomly generated IV, base64 encoded.
    GetIV() (iv string)

    // Get content encrypting key (cek) in encrypted form, base64 encoded.
    GetKey() (key string)

    // Get user provided encryption material description in
    // JSON (UTF8) format. This is not used, kept for future.
    GetDesc() (desc string)

    // Setup encrypt mode, further calls of Read() function
    // will return the encrypted form of data streamed
    // by the passed reader
    SetupEncryptMode(stream io.Reader) error

    // Setup decrypt mode, further calls of Read() function
    // will return the decrypted form of data streamed
    // by the passed reader
    SetupDecryptMode(stream io.Reader, iv string, key string) error
}
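Since cbc.go above implements every method of this interface, the relationship can be documented with a one-line compile-time assertion. A hedged sketch, assuming the vendored import path `github.com/minio/minio-go/pkg/encrypt`:

```go
package main

import "github.com/minio/minio-go/pkg/encrypt"

// Compile-time assertion: *CBCSecureMaterials implements Materials.
// If a method were missing or mismatched, this line would fail to build.
var _ encrypt.Materials = (*encrypt.CBCSecureMaterials)(nil)

func main() {}
```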
165 vendor/src/github.com/minio/minio-go/pkg/encrypt/keys.go vendored Normal file
@ -0,0 +1,165 @@
/*
 * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2017 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package encrypt

import (
    "crypto/aes"
    "crypto/rand"
    "crypto/rsa"
    "crypto/x509"
    "errors"
)

// Key - generic interface to encrypt/decrypt a key.
// We use it to encrypt/decrypt content key which is the key
// that encrypt/decrypt object data.
type Key interface {
    // Encrypt data using to the set encryption key
    Encrypt([]byte) ([]byte, error)
    // Decrypt data using to the set encryption key
    Decrypt([]byte) ([]byte, error)
}

// SymmetricKey - encrypts data with a symmetric master key
type SymmetricKey struct {
    masterKey []byte
}

// Encrypt passed bytes
func (s *SymmetricKey) Encrypt(plain []byte) ([]byte, error) {
    // Initialize an AES encryptor using a master key
    keyBlock, err := aes.NewCipher(s.masterKey)
    if err != nil {
        return []byte{}, err
    }

    // Pad the key before encryption
    plain, _ = pkcs5Pad(plain, aes.BlockSize)

    encKey := []byte{}
    encPart := make([]byte, aes.BlockSize)

    // Encrypt the passed key by block
    for {
        if len(plain) < aes.BlockSize {
            break
        }
        // Encrypt the passed key
        keyBlock.Encrypt(encPart, plain[:aes.BlockSize])
        // Add the encrypted block to the total encrypted key
        encKey = append(encKey, encPart...)
        // Pass to the next plain block
        plain = plain[aes.BlockSize:]
    }
    return encKey, nil
}

// Decrypt passed bytes
func (s *SymmetricKey) Decrypt(cipher []byte) ([]byte, error) {
    // Initialize AES decrypter
    keyBlock, err := aes.NewCipher(s.masterKey)
    if err != nil {
        return nil, err
    }

    var plain []byte
    plainPart := make([]byte, aes.BlockSize)

    // Decrypt the encrypted data block by block
    for {
        if len(cipher) < aes.BlockSize {
            break
        }
        keyBlock.Decrypt(plainPart, cipher[:aes.BlockSize])
        // Add the decrypted block to the total result
        plain = append(plain, plainPart...)
        // Pass to the next cipher block
        cipher = cipher[aes.BlockSize:]
    }

    // Unpad the resulted plain data
    plain, err = pkcs5Unpad(plain, aes.BlockSize)
    if err != nil {
        return nil, err
    }

    return plain, nil
}

// NewSymmetricKey generates a new encrypt/decrypt crypto using
// an AES master key password
func NewSymmetricKey(b []byte) *SymmetricKey {
    return &SymmetricKey{masterKey: b}
}

// AsymmetricKey - struct which encrypts/decrypts data
// using RSA public/private certificates
type AsymmetricKey struct {
    publicKey  *rsa.PublicKey
    privateKey *rsa.PrivateKey
}

// Encrypt data using public key
func (a *AsymmetricKey) Encrypt(plain []byte) ([]byte, error) {
    cipher, err := rsa.EncryptPKCS1v15(rand.Reader, a.publicKey, plain)
    if err != nil {
        return nil, err
    }
    return cipher, nil
}

// Decrypt data using public key
func (a *AsymmetricKey) Decrypt(cipher []byte) ([]byte, error) {
    cipher, err := rsa.DecryptPKCS1v15(rand.Reader, a.privateKey, cipher)
    if err != nil {
        return nil, err
    }
    return cipher, nil
}

// NewAsymmetricKey - generates a crypto module able to encrypt/decrypt
// data using a pair for private and public key
func NewAsymmetricKey(privData []byte, pubData []byte) (*AsymmetricKey, error) {
    // Parse private key from passed data
    priv, err := x509.ParsePKCS8PrivateKey(privData)
    if err != nil {
        return nil, err
    }
    privKey, ok := priv.(*rsa.PrivateKey)
    if !ok {
        return nil, errors.New("not a valid private key")
    }

    // Parse public key from passed data
    pub, err := x509.ParsePKIXPublicKey(pubData)
    if err != nil {
        return nil, err
    }

    pubKey, ok := pub.(*rsa.PublicKey)
    if !ok {
        return nil, errors.New("not a valid public key")
    }

    // Associate the private key with the passed public key
    privKey.PublicKey = *pubKey

    return &AsymmetricKey{
        publicKey:  pubKey,
        privateKey: privKey,
    }, nil
}
@ -583,7 +583,7 @@ func GetPolicies(statements []Statement, bucketName string) map[string]BucketPol

        r = r[:len(r)-1]
        asterisk = "*"
    }
    objectPath := r[len(awsResourcePrefix+bucketName)+1 : len(r)]
    objectPath := r[len(awsResourcePrefix+bucketName)+1:]
    p := GetPolicy(statements, bucketName, objectPath)
    policyRules[bucketName+"/"+objectPath+asterisk] = p
}
285 vendor/src/github.com/minio/minio-go/pkg/s3signer/request-signature-streaming.go vendored Normal file
@ -0,0 +1,285 @@
/*
 * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2017 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package s3signer

import (
    "bytes"
    "encoding/hex"
    "fmt"
    "io"
    "net/http"
    "strconv"
    "strings"
    "time"
)

// Reference for constants used below -
// http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html#example-signature-calculations-streaming
const (
    streamingSignAlgorithm = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD"
    streamingEncoding      = "aws-chunked"
    streamingPayloadHdr    = "AWS4-HMAC-SHA256-PAYLOAD"
    emptySHA256            = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
    payloadChunkSize       = 64 * 1024
    chunkSigConstLen       = 17 // ";chunk-signature="
    signatureStrLen        = 64 // e.g. "f2ca1bb6c7e907d06dafe4687e579fce76b37e4e93b7605022da52e6ccc26fd2"
    crlfLen                = 2  // CRLF
)

// Request headers to be ignored while calculating seed signature for
// a request.
var ignoredStreamingHeaders = map[string]bool{
    "Authorization": true,
    "User-Agent":    true,
    "Content-Type":  true,
}

// getSignedChunkLength - calculates the length of chunk metadata
func getSignedChunkLength(chunkDataSize int64) int64 {
    return int64(len(fmt.Sprintf("%x", chunkDataSize))) +
        chunkSigConstLen +
        signatureStrLen +
        crlfLen +
        chunkDataSize +
        crlfLen
}

// getStreamLength - calculates the length of the overall stream (data + metadata)
func getStreamLength(dataLen, chunkSize int64) int64 {
    if dataLen <= 0 {
        return 0
    }

    chunksCount := int64(dataLen / chunkSize)
    remainingBytes := int64(dataLen % chunkSize)
    streamLen := int64(0)
    streamLen += chunksCount * getSignedChunkLength(chunkSize)
    if remainingBytes > 0 {
        streamLen += getSignedChunkLength(remainingBytes)
    }
    streamLen += getSignedChunkLength(0)
    return streamLen
}

// buildChunkStringToSign - returns the string to sign given chunk data
// and previous signature.
func buildChunkStringToSign(t time.Time, region, previousSig string, chunkData []byte) string {
    stringToSignParts := []string{
        streamingPayloadHdr,
        t.Format(iso8601DateFormat),
        getScope(region, t),
        previousSig,
        emptySHA256,
        hex.EncodeToString(sum256(chunkData)),
    }

    return strings.Join(stringToSignParts, "\n")
}

// prepareStreamingRequest - prepares a request with appropriate
// headers before computing the seed signature.
func prepareStreamingRequest(req *http.Request, dataLen int64, timestamp time.Time) {
    // Set x-amz-content-sha256 header.
    req.Header.Set("X-Amz-Content-Sha256", streamingSignAlgorithm)
    req.Header.Set("Content-Encoding", streamingEncoding)
    req.Header.Set("X-Amz-Date", timestamp.Format(iso8601DateFormat))

    // Set content length with streaming signature for each chunk included.
    req.ContentLength = getStreamLength(dataLen, int64(payloadChunkSize))
    req.Header.Set("x-amz-decoded-content-length", strconv.FormatInt(dataLen, 10))
}

// buildChunkHeader - returns the chunk header.
// e.g string(IntHexBase(chunk-size)) + ";chunk-signature=" + signature + \r\n + chunk-data + \r\n
|
func buildChunkHeader(chunkLen int64, signature string) []byte {
|
||||||
|
return []byte(strconv.FormatInt(chunkLen, 16) + ";chunk-signature=" + signature + "\r\n")
|
||||||
|
}
|
||||||
|
|
||||||
|
// buildChunkSignature - returns chunk signature for a given chunk and previous signature.
|
||||||
|
func buildChunkSignature(chunkData []byte, reqTime time.Time, region,
|
||||||
|
previousSignature, secretAccessKey string) string {
|
||||||
|
|
||||||
|
chunkStringToSign := buildChunkStringToSign(reqTime, region,
|
||||||
|
previousSignature, chunkData)
|
||||||
|
signingKey := getSigningKey(secretAccessKey, region, reqTime)
|
||||||
|
return getSignature(signingKey, chunkStringToSign)
|
||||||
|
}
|
||||||
|
|
||||||
|
// getSeedSignature - returns the seed signature for a given request.
|
||||||
|
func (s *StreamingReader) setSeedSignature(req *http.Request) {
|
||||||
|
// Get canonical request
|
||||||
|
canonicalRequest := getCanonicalRequest(*req, ignoredStreamingHeaders)
|
||||||
|
|
||||||
|
// Get string to sign from canonical request.
|
||||||
|
stringToSign := getStringToSignV4(s.reqTime, s.region, canonicalRequest)
|
||||||
|
|
||||||
|
signingKey := getSigningKey(s.secretAccessKey, s.region, s.reqTime)
|
||||||
|
|
||||||
|
// Calculate signature.
|
||||||
|
s.seedSignature = getSignature(signingKey, stringToSign)
|
||||||
|
}
|
||||||
|
|
||||||
|
// StreamingReader implements chunked upload signature as a reader on
|
||||||
|
// top of req.Body's ReaderCloser chunk header;data;... repeat
|
||||||
|
type StreamingReader struct {
|
||||||
|
accessKeyID string
|
||||||
|
secretAccessKey string
|
||||||
|
region string
|
||||||
|
prevSignature string
|
||||||
|
seedSignature string
|
||||||
|
contentLen int64 // Content-Length from req header
|
||||||
|
baseReadCloser io.ReadCloser // underlying io.Reader
|
||||||
|
bytesRead int64 // bytes read from underlying io.Reader
|
||||||
|
buf bytes.Buffer // holds signed chunk
|
||||||
|
chunkBuf []byte // holds raw data read from req Body
|
||||||
|
chunkBufLen int // no. of bytes read so far into chunkBuf
|
||||||
|
done bool // done reading the underlying reader to EOF
|
||||||
|
reqTime time.Time
|
||||||
|
chunkNum int
|
||||||
|
totalChunks int
|
||||||
|
lastChunkSize int
|
||||||
|
}
|
||||||
|
|
||||||
|
// signChunk - signs a chunk read from s.baseReader of chunkLen size.
|
||||||
|
func (s *StreamingReader) signChunk(chunkLen int) {
|
||||||
|
// Compute chunk signature for next header
|
||||||
|
signature := buildChunkSignature(s.chunkBuf[:chunkLen], s.reqTime,
|
||||||
|
s.region, s.prevSignature, s.secretAccessKey)
|
||||||
|
|
||||||
|
// For next chunk signature computation
|
||||||
|
s.prevSignature = signature
|
||||||
|
|
||||||
|
// Write chunk header into streaming buffer
|
||||||
|
chunkHdr := buildChunkHeader(int64(chunkLen), signature)
|
||||||
|
s.buf.Write(chunkHdr)
|
||||||
|
|
||||||
|
// Write chunk data into streaming buffer
|
||||||
|
s.buf.Write(s.chunkBuf[:chunkLen])
|
||||||
|
|
||||||
|
// Write the chunk trailer.
|
||||||
|
s.buf.Write([]byte("\r\n"))
|
||||||
|
|
||||||
|
// Reset chunkBufLen for next chunk read.
|
||||||
|
s.chunkBufLen = 0
|
||||||
|
s.chunkNum++
|
||||||
|
}
|
||||||
|
|
||||||
|
// setStreamingAuthHeader - builds and sets authorization header value
|
||||||
|
// for streaming signature.
|
||||||
|
func (s *StreamingReader) setStreamingAuthHeader(req *http.Request) {
|
||||||
|
credential := GetCredential(s.accessKeyID, s.region, s.reqTime)
|
||||||
|
authParts := []string{
|
||||||
|
signV4Algorithm + " Credential=" + credential,
|
||||||
|
"SignedHeaders=" + getSignedHeaders(*req, ignoredStreamingHeaders),
|
||||||
|
"Signature=" + s.seedSignature,
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set authorization header.
|
||||||
|
auth := strings.Join(authParts, ",")
|
||||||
|
req.Header.Set("Authorization", auth)
|
||||||
|
}
|
||||||
|
|
||||||
|
// StreamingSignV4 - provides chunked upload signatureV4 support by
|
||||||
|
// implementing io.Reader.
|
||||||
|
func StreamingSignV4(req *http.Request, accessKeyID, secretAccessKey,
|
||||||
|
region string, dataLen int64, reqTime time.Time) *http.Request {
|
||||||
|
|
||||||
|
// Set headers needed for streaming signature.
|
||||||
|
prepareStreamingRequest(req, dataLen, reqTime)
|
||||||
|
|
||||||
|
stReader := &StreamingReader{
|
||||||
|
baseReadCloser: req.Body,
|
||||||
|
accessKeyID: accessKeyID,
|
||||||
|
secretAccessKey: secretAccessKey,
|
||||||
|
region: region,
|
||||||
|
reqTime: reqTime,
|
||||||
|
chunkBuf: make([]byte, payloadChunkSize),
|
||||||
|
contentLen: dataLen,
|
||||||
|
chunkNum: 1,
|
||||||
|
totalChunks: int((dataLen+payloadChunkSize-1)/payloadChunkSize) + 1,
|
||||||
|
lastChunkSize: int(dataLen % payloadChunkSize),
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add the request headers required for chunk upload signing.
|
||||||
|
|
||||||
|
// Compute the seed signature.
|
||||||
|
stReader.setSeedSignature(req)
|
||||||
|
|
||||||
|
// Set the authorization header with the seed signature.
|
||||||
|
stReader.setStreamingAuthHeader(req)
|
||||||
|
|
||||||
|
// Set seed signature as prevSignature for subsequent
|
||||||
|
// streaming signing process.
|
||||||
|
stReader.prevSignature = stReader.seedSignature
|
||||||
|
req.Body = stReader
|
||||||
|
|
||||||
|
return req
|
||||||
|
}
|
||||||
|
|
||||||
|
// Read - this method performs chunk upload signature providing a
|
||||||
|
// io.Reader interface.
|
||||||
|
func (s *StreamingReader) Read(buf []byte) (int, error) {
|
||||||
|
switch {
|
||||||
|
// After the last chunk is read from underlying reader, we
|
||||||
|
// never re-fill s.buf.
|
||||||
|
case s.done:
|
||||||
|
|
||||||
|
// s.buf will be (re-)filled with next chunk when has lesser
|
||||||
|
// bytes than asked for.
|
||||||
|
case s.buf.Len() < len(buf):
|
||||||
|
s.chunkBufLen = 0
|
||||||
|
for {
|
||||||
|
n1, err := s.baseReadCloser.Read(s.chunkBuf[s.chunkBufLen:])
|
||||||
|
if err == nil || err == io.ErrUnexpectedEOF {
|
||||||
|
s.chunkBufLen += n1
|
||||||
|
s.bytesRead += int64(n1)
|
||||||
|
|
||||||
|
if s.chunkBufLen == payloadChunkSize ||
|
||||||
|
(s.chunkNum == s.totalChunks-1 &&
|
||||||
|
s.chunkBufLen == s.lastChunkSize) {
|
||||||
|
// Sign the chunk and write it to s.buf.
|
||||||
|
s.signChunk(s.chunkBufLen)
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
} else if err == io.EOF {
|
||||||
|
// No more data left in baseReader - last chunk.
|
||||||
|
// Done reading the last chunk from baseReader.
|
||||||
|
s.done = true
|
||||||
|
|
||||||
|
// bytes read from baseReader different than
|
||||||
|
// content length provided.
|
||||||
|
if s.bytesRead != s.contentLen {
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sign the chunk and write it to s.buf.
|
||||||
|
s.signChunk(0)
|
||||||
|
break
|
||||||
|
|
||||||
|
} else {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return s.buf.Read(buf)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Close - this method makes underlying io.ReadCloser's Close method available.
|
||||||
|
func (s *StreamingReader) Close() error {
|
||||||
|
return s.baseReadCloser.Close()
|
||||||
|
}
|
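To make the framing arithmetic above concrete, here is a standalone sketch that re-implements the chunk-length formula (the vendored helpers are unexported, so they are mirrored rather than imported) and computes the on-wire `Content-Length` for the 65 KiB payload used in the tests below.

```go
package main

import "fmt"

// signedChunkLength mirrors getSignedChunkLength above:
// hex(size) + ";chunk-signature=" + 64-char signature + CRLF + data + CRLF.
func signedChunkLength(chunkDataSize int64) int64 {
	const (
		chunkSigConstLen = 17 // ";chunk-signature="
		signatureStrLen  = 64 // hex-encoded HMAC-SHA256
		crlfLen          = 2  // "\r\n"
	)
	return int64(len(fmt.Sprintf("%x", chunkDataSize))) +
		chunkSigConstLen + signatureStrLen + crlfLen + chunkDataSize + crlfLen
}

func main() {
	const chunkSize = int64(64 * 1024)
	dataLen := int64(65 * 1024) // 66560 bytes, as in TestGetSeedSignature

	full := dataLen / chunkSize // one full 64 KiB chunk
	rest := dataLen % chunkSize // 1024 trailing bytes

	stream := full * signedChunkLength(chunkSize) // 65536 data + 90 framing
	if rest > 0 {
		stream += signedChunkLength(rest) // 1024 data + 88 framing
	}
	stream += signedChunkLength(0) // terminating zero-length chunk: 86 bytes

	fmt.Println(stream) // 65626 + 1112 + 86 = 66824
}
```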
106 vendor/src/github.com/minio/minio-go/pkg/s3signer/request-signature-streaming_test.go vendored Normal file
@@ -0,0 +1,106 @@
/*
 * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2017 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package s3signer

import (
	"bytes"
	"io/ioutil"
	"testing"
	"time"
)

func TestGetSeedSignature(t *testing.T) {
	accessKeyID := "AKIAIOSFODNN7EXAMPLE"
	secretAccessKeyID := "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"
	dataLen := 66560
	data := bytes.Repeat([]byte("a"), dataLen)
	body := ioutil.NopCloser(bytes.NewReader(data))

	req := NewRequest("PUT", "/examplebucket/chunkObject.txt", body)
	req.Header.Set("x-amz-storage-class", "REDUCED_REDUNDANCY")
	req.URL.Host = "s3.amazonaws.com"

	reqTime, err := time.Parse("20060102T150405Z", "20130524T000000Z")
	if err != nil {
		t.Fatalf("Failed to parse time - %v", err)
	}

	req = StreamingSignV4(req, accessKeyID, secretAccessKeyID, "us-east-1", int64(dataLen), reqTime)
	actualSeedSignature := req.Body.(*StreamingReader).seedSignature

	expectedSeedSignature := "007480502de61457e955731b0f5d191f7e6f54a8a0f6cc7974a5ebd887965686"
	if actualSeedSignature != expectedSeedSignature {
		t.Errorf("Expected %s but received %s", expectedSeedSignature, actualSeedSignature)
	}
}

func TestChunkSignature(t *testing.T) {
	chunkData := bytes.Repeat([]byte("a"), 65536)
	reqTime, _ := time.Parse(iso8601DateFormat, "20130524T000000Z")
	previousSignature := "4f232c4386841ef735655705268965c44a0e4690baa4adea153f7db9fa80a0a9"
	location := "us-east-1"
	secretAccessKeyID := "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"
	expectedSignature := "ad80c730a21e5b8d04586a2213dd63b9a0e99e0e2307b0ade35a65485a288648"
	actualSignature := buildChunkSignature(chunkData, reqTime, location, previousSignature, secretAccessKeyID)
	if actualSignature != expectedSignature {
		t.Errorf("Expected %s but received %s", expectedSignature, actualSignature)
	}
}

func TestSetStreamingAuthorization(t *testing.T) {
	location := "us-east-1"
	secretAccessKeyID := "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"
	accessKeyID := "AKIAIOSFODNN7EXAMPLE"

	req := NewRequest("PUT", "/examplebucket/chunkObject.txt", nil)
	req.Header.Set("x-amz-storage-class", "REDUCED_REDUNDANCY")
	req.URL.Host = "s3.amazonaws.com"

	dataLen := int64(65 * 1024)
	reqTime, _ := time.Parse(iso8601DateFormat, "20130524T000000Z")
	req = StreamingSignV4(req, accessKeyID, secretAccessKeyID, location, dataLen, reqTime)

	expectedAuthorization := "AWS4-HMAC-SHA256 Credential=AKIAIOSFODNN7EXAMPLE/20130524/us-east-1/s3/aws4_request,SignedHeaders=content-encoding;host;x-amz-content-sha256;x-amz-date;x-amz-decoded-content-length;x-amz-storage-class,Signature=007480502de61457e955731b0f5d191f7e6f54a8a0f6cc7974a5ebd887965686"

	actualAuthorization := req.Header.Get("Authorization")
	if actualAuthorization != expectedAuthorization {
		t.Errorf("Expected %s but received %s", expectedAuthorization, actualAuthorization)
	}
}

func TestStreamingReader(t *testing.T) {
	reqTime, _ := time.Parse("20060102T150405Z", "20130524T000000Z")
	location := "us-east-1"
	secretAccessKeyID := "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"
	accessKeyID := "AKIAIOSFODNN7EXAMPLE"
	dataLen := int64(65 * 1024)

	req := NewRequest("PUT", "/examplebucket/chunkObject.txt", nil)
	req.Header.Set("x-amz-storage-class", "REDUCED_REDUNDANCY")
	req.ContentLength = 65 * 1024
	req.URL.Host = "s3.amazonaws.com"

	baseReader := ioutil.NopCloser(bytes.NewReader(bytes.Repeat([]byte("a"), 65*1024)))
	req.Body = baseReader
	req = StreamingSignV4(req, accessKeyID, secretAccessKeyID, location, dataLen, reqTime)

	b, err := ioutil.ReadAll(req.Body)
	if err != nil {
		t.Errorf("Expected no error but received %v %d", err, len(b))
	}
	req.Body.Close()
}
@@ -316,7 +316,7 @@ func writeCanonicalizedResource(buf *bytes.Buffer, req http.Request, isPreSign b
 				// Request parameters
 				if len(vv[0]) > 0 {
 					buf.WriteByte('=')
-					buf.WriteString(strings.Replace(url.QueryEscape(vv[0]), "+", "%20", -1))
+					buf.WriteString(vv[0])
 				}
 			}
 		}
@@ -70,7 +70,7 @@ const (
 ///
 /// Is skipped for obvious reasons
 ///
-var ignoredHeaders = map[string]bool{
+var v4IgnoredHeaders = map[string]bool{
 	"Authorization":  true,
 	"Content-Type":   true,
 	"Content-Length": true,
@@ -122,7 +122,7 @@ func getHashedPayload(req http.Request) string {

 // getCanonicalHeaders generate a list of request headers for
 // signature.
-func getCanonicalHeaders(req http.Request) string {
+func getCanonicalHeaders(req http.Request, ignoredHeaders map[string]bool) string {
 	var headers []string
 	vals := make(map[string][]string)
 	for k, vv := range req.Header {
@@ -161,7 +161,7 @@ func getCanonicalHeaders(req http.Request) string {
 // getSignedHeaders generate all signed request headers.
 // i.e lexically sorted, semicolon-separated list of lowercase
 // request header names.
-func getSignedHeaders(req http.Request) string {
+func getSignedHeaders(req http.Request, ignoredHeaders map[string]bool) string {
 	var headers []string
 	for k := range req.Header {
 		if _, ok := ignoredHeaders[http.CanonicalHeaderKey(k)]; ok {
@@ -183,14 +183,14 @@ func getSignedHeaders(req http.Request) string {
 // <CanonicalHeaders>\n
 // <SignedHeaders>\n
 // <HashedPayload>
-func getCanonicalRequest(req http.Request) string {
+func getCanonicalRequest(req http.Request, ignoredHeaders map[string]bool) string {
 	req.URL.RawQuery = strings.Replace(req.URL.Query().Encode(), "+", "%20", -1)
 	canonicalRequest := strings.Join([]string{
 		req.Method,
 		s3utils.EncodePath(req.URL.Path),
 		req.URL.RawQuery,
-		getCanonicalHeaders(req),
-		getSignedHeaders(req),
+		getCanonicalHeaders(req, ignoredHeaders),
+		getSignedHeaders(req, ignoredHeaders),
 		getHashedPayload(req),
 	}, "\n")
 	return canonicalRequest
@@ -219,7 +219,7 @@ func PreSignV4(req http.Request, accessKeyID, secretAccessKey, location string,
 	credential := GetCredential(accessKeyID, location, t)

 	// Get all signed headers.
-	signedHeaders := getSignedHeaders(req)
+	signedHeaders := getSignedHeaders(req, v4IgnoredHeaders)

 	// Set URL query.
 	query := req.URL.Query()
@@ -231,7 +231,7 @@ func PreSignV4(req http.Request, accessKeyID, secretAccessKey, location string,
 	req.URL.RawQuery = query.Encode()

 	// Get canonical request.
-	canonicalRequest := getCanonicalRequest(req)
+	canonicalRequest := getCanonicalRequest(req, v4IgnoredHeaders)

 	// Get string to sign from canonical request.
 	stringToSign := getStringToSignV4(t, location, canonicalRequest)
@@ -273,7 +273,7 @@ func SignV4(req http.Request, accessKeyID, secretAccessKey, location string) *ht
 	req.Header.Set("X-Amz-Date", t.Format(iso8601DateFormat))

 	// Get canonical request.
-	canonicalRequest := getCanonicalRequest(req)
+	canonicalRequest := getCanonicalRequest(req, v4IgnoredHeaders)

 	// Get string to sign from canonical request.
 	stringToSign := getStringToSignV4(t, location, canonicalRequest)
@@ -285,7 +285,7 @@ func SignV4(req http.Request, accessKeyID, secretAccessKey, location string) *ht
 	credential := GetCredential(accessKeyID, location, t)

 	// Get all signed headers.
-	signedHeaders := getSignedHeaders(req)
+	signedHeaders := getSignedHeaders(req, v4IgnoredHeaders)

 	// Calculate signature.
 	signature := getSignature(signingKey, stringToSign)
103 vendor/src/github.com/minio/minio-go/pkg/s3signer/test-utils_test.go vendored Normal file
@@ -0,0 +1,103 @@
/*
 * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package s3signer

import (
	"bufio"
	"bytes"
	"crypto/tls"
	"io"
	"io/ioutil"
	"net/http"
	"strings"
)

// N.B. minio-go should compile on go1.5.3 onwards, and httptest.NewRequest is
// available only from go1.7.x. The following function is taken from the
// Go httptest package to be able to build on older versions of Go.

// NewRequest returns a new incoming server Request, suitable
// for passing to an http.Handler for testing.
//
// The target is the RFC 7230 "request-target": it may be either a
// path or an absolute URL. If target is an absolute URL, the host name
// from the URL is used. Otherwise, "example.com" is used.
//
// The TLS field is set to a non-nil dummy value if target has scheme
// "https".
//
// The Request.Proto is always HTTP/1.1.
//
// An empty method means "GET".
//
// The provided body may be nil. If the body is of type *bytes.Reader,
// *strings.Reader, or *bytes.Buffer, the Request.ContentLength is
// set.
//
// NewRequest panics on error for ease of use in testing, where a
// panic is acceptable.
func NewRequest(method, target string, body io.Reader) *http.Request {
	if method == "" {
		method = "GET"
	}
	req, err := http.ReadRequest(bufio.NewReader(strings.NewReader(method + " " + target + " HTTP/1.0\r\n\r\n")))
	if err != nil {
		panic("invalid NewRequest arguments; " + err.Error())
	}

	// HTTP/1.0 was used above to avoid needing a Host field. Change it to 1.1 here.
	req.Proto = "HTTP/1.1"
	req.ProtoMinor = 1
	req.Close = false

	if body != nil {
		switch v := body.(type) {
		case *bytes.Buffer:
			req.ContentLength = int64(v.Len())
		case *bytes.Reader:
			req.ContentLength = int64(v.Len())
		case *strings.Reader:
			req.ContentLength = int64(v.Len())
		default:
			req.ContentLength = -1
		}
		if rc, ok := body.(io.ReadCloser); ok {
			req.Body = rc
		} else {
			req.Body = ioutil.NopCloser(body)
		}
	}

	// 192.0.2.0/24 is "TEST-NET" in RFC 5737 for use solely in
	// documentation and example source code and should not be
	// used publicly.
	req.RemoteAddr = "192.0.2.1:1234"

	if req.Host == "" {
		req.Host = "example.com"
	}

	if strings.HasPrefix(target, "https://") {
		req.TLS = &tls.ConnectionState{
			Version:           tls.VersionTLS12,
			HandshakeComplete: true,
			ServerName:        req.Host,
		}
	}

	return req
}
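On Go 1.7 and newer, the standard library's `httptest.NewRequest` provides the same defaults; the backport above exists only so the package keeps building on Go 1.5.3+. A hypothetical sanity check of the shared behavior (not part of the vendored code):

```go
package s3signer

import (
	"net/http/httptest"
	"testing"
)

// TestNewRequestDefaults is a sketch (not in the vendored tests) comparing
// the backported NewRequest against the Go 1.7+ httptest.NewRequest defaults.
func TestNewRequestDefaults(t *testing.T) {
	want := httptest.NewRequest("PUT", "/examplebucket/chunkObject.txt", nil)
	got := NewRequest("PUT", "/examplebucket/chunkObject.txt", nil)
	if got.Host != want.Host || got.Proto != want.Proto || got.Method != want.Method {
		t.Fatalf("backport differs: got %s %s host=%s, want %s %s host=%s",
			got.Method, got.Proto, got.Host, want.Method, want.Proto, want.Host)
	}
}
```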
111 vendor/src/github.com/minio/minio-go/request-headers.go vendored Normal file
@@ -0,0 +1,111 @@
/*
 * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2016-17 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package minio

import (
	"fmt"
	"net/http"
	"time"
)

// RequestHeaders - implement methods for setting special
// request headers for GET, HEAD object operations.
// http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectGET.html
type RequestHeaders struct {
	http.Header
}

// NewGetReqHeaders - initializes a new request headers for GET request.
func NewGetReqHeaders() RequestHeaders {
	return RequestHeaders{
		Header: make(http.Header),
	}
}

// NewHeadReqHeaders - initializes a new request headers for HEAD request.
func NewHeadReqHeaders() RequestHeaders {
	return RequestHeaders{
		Header: make(http.Header),
	}
}

// SetMatchETag - set match etag.
func (c RequestHeaders) SetMatchETag(etag string) error {
	if etag == "" {
		return ErrInvalidArgument("ETag cannot be empty.")
	}
	c.Set("If-Match", etag)
	return nil
}

// SetMatchETagExcept - set match etag except.
func (c RequestHeaders) SetMatchETagExcept(etag string) error {
	if etag == "" {
		return ErrInvalidArgument("ETag cannot be empty.")
	}
	c.Set("If-None-Match", etag)
	return nil
}

// SetUnmodified - set unmodified time since.
func (c RequestHeaders) SetUnmodified(modTime time.Time) error {
	if modTime.IsZero() {
		return ErrInvalidArgument("Modified since cannot be empty.")
	}
	c.Set("If-Unmodified-Since", modTime.Format(http.TimeFormat))
	return nil
}

// SetModified - set modified time since.
func (c RequestHeaders) SetModified(modTime time.Time) error {
	if modTime.IsZero() {
		return ErrInvalidArgument("Modified since cannot be empty.")
	}
	c.Set("If-Modified-Since", modTime.Format(http.TimeFormat))
	return nil
}

// SetRange - set the start and end offset of the object to be read.
// See https://tools.ietf.org/html/rfc7233#section-3.1 for reference.
func (c RequestHeaders) SetRange(start, end int64) error {
	switch {
	case start == 0 && end < 0:
		// Read last '-end' bytes. `bytes=-N`.
		c.Set("Range", fmt.Sprintf("bytes=%d", end))
	case 0 < start && end == 0:
		// Read everything starting from offset
		// 'start'. `bytes=N-`.
		c.Set("Range", fmt.Sprintf("bytes=%d-", start))
	case 0 <= start && start <= end:
		// Read everything starting at 'start' till the
		// 'end'. `bytes=N-M`
		c.Set("Range", fmt.Sprintf("bytes=%d-%d", start, end))
	default:
		// All other cases such as
		// bytes=-3-
		// bytes=5-3
		// bytes=-2-4
		// bytes=-3-0
		// bytes=-3--2
		// are invalid.
		return ErrInvalidArgument(
			fmt.Sprintf(
				"Invalid range specified: start=%d end=%d",
				start, end))
	}
	return nil
}
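Each setter validates its input and returns an error rather than silently writing a bad header. A minimal sketch of the header construction (which client calls accept a `RequestHeaders` value depends on the minio-go version in use, so only the construction itself is shown; the ETag value is a placeholder):

```go
package main

import (
	"fmt"
	"time"

	minio "github.com/minio/minio-go"
)

func main() {
	rh := minio.NewGetReqHeaders()
	// Ask for the first KiB of the object.
	if err := rh.SetRange(0, 1023); err != nil {
		panic(err)
	}
	// Conditional GET: only if the ETag matches (placeholder value).
	if err := rh.SetMatchETag("d41d8cd98f00b204e9800998ecf8427e"); err != nil {
		panic(err)
	}
	// Only if modified since the given time.
	if err := rh.SetModified(time.Date(2017, 1, 1, 0, 0, 0, 0, time.UTC)); err != nil {
		panic(err)
	}

	fmt.Println(rh.Get("Range"))             // bytes=0-1023
	fmt.Println(rh.Get("If-Match"))          // d41d8cd98f00b204e9800998ecf8427e
	fmt.Println(rh.Get("If-Modified-Since")) // Sun, 01 Jan 2017 00:00:00 GMT
}
```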
56 vendor/src/github.com/minio/minio-go/request-headers_test.go vendored Normal file
@@ -0,0 +1,56 @@
/*
 * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2017 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package minio

import (
	"fmt"
	"testing"
)

func TestSetHeader(t *testing.T) {
	testCases := []struct {
		start    int64
		end      int64
		errVal   error
		expected string
	}{
		{0, 10, nil, "bytes=0-10"},
		{1, 10, nil, "bytes=1-10"},
		{5, 0, nil, "bytes=5-"},
		{0, -5, nil, "bytes=-5"},
		{0, 0, nil, "bytes=0-0"},
		{11, 10, fmt.Errorf("Invalid range specified: start=11 end=10"),
			""},
		{-1, 10, fmt.Errorf("Invalid range specified: start=-1 end=10"), ""},
		{-1, 0, fmt.Errorf("Invalid range specified: start=-1 end=0"), ""},
		{1, -5, fmt.Errorf("Invalid range specified: start=1 end=-5"), ""},
	}
	for i, testCase := range testCases {
		rh := NewGetReqHeaders()
		err := rh.SetRange(testCase.start, testCase.end)
		if err == nil && testCase.errVal != nil {
			t.Errorf("Test %d: Expected to fail with '%v' but it passed",
				i+1, testCase.errVal)
		} else if err != nil && testCase.errVal.Error() != err.Error() {
			t.Errorf("Test %d: Expected error '%v' but got error '%v'",
				i+1, testCase.errVal, err)
		} else if err == nil && rh.Get("Range") != testCase.expected {
			t.Errorf("Test %d: Expected range header '%s', but got '%s'",
				i+1, testCase.expected, rh.Get("Range"))
		}
	}
}
18 vendor/src/github.com/minio/minio-go/retry.go vendored
@@ -33,8 +33,16 @@ const MaxJitter = 1.0
 // NoJitter disables the use of jitter for randomizing the exponential backoff time
 const NoJitter = 0.0

-// newRetryTimer creates a timer with exponentially increasing delays
-// until the maximum retry attempts are reached.
+// DefaultRetryUnit - default unit multiplicative per retry.
+// defaults to 1 second.
+const DefaultRetryUnit = time.Second
+
+// DefaultRetryCap - each retry attempt never waits longer than
+// this maximum time duration.
+const DefaultRetryCap = time.Second * 30
+
+// newRetryTimer creates a timer with exponentially increasing
+// delays until the maximum retry attempts are reached.
 func (c Client) newRetryTimer(maxRetry int, unit time.Duration, cap time.Duration, jitter float64, doneCh chan struct{}) <-chan int {
 	attemptCh := make(chan int)

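For intuition, here is a standalone sketch of the capped exponential backoff these constants parameterize. The vendored `newRetryTimer` is unexported and its exact jitter formula may differ, so this only illustrates the shape of the delays.

```go
package main

import (
	"fmt"
	"math/rand"
	"time"
)

// backoff computes a capped exponential delay: unit * 2^attempt, clamped
// to cap, optionally reduced by up to jitter*sleep of random slack.
func backoff(attempt int, unit, cap time.Duration, jitter float64) time.Duration {
	sleep := unit * time.Duration(1<<uint(attempt))
	if sleep > cap {
		sleep = cap
	}
	if jitter > 0 {
		sleep -= time.Duration(rand.Float64() * float64(sleep) * jitter)
	}
	return sleep
}

func main() {
	// With DefaultRetryUnit (1s), DefaultRetryCap (30s) and NoJitter (0.0):
	// 1s, 2s, 4s, 8s, 16s, then clamped at 30s.
	for attempt := 0; attempt < 8; attempt++ {
		fmt.Println(attempt, backoff(attempt, time.Second, 30*time.Second, 0.0))
	}
}
```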
@@ -78,6 +86,9 @@ func (c Client) newRetryTimer(maxRetry int, unit time.Duration, cap time.Duratio

 // isNetErrorRetryable - is network error retryable.
 func isNetErrorRetryable(err error) bool {
+	if err == nil {
+		return false
+	}
 	switch err.(type) {
 	case net.Error:
 		switch err.(type) {
@@ -96,6 +107,9 @@ func isNetErrorRetryable(err error) bool {
 		} else if strings.Contains(err.Error(), "i/o timeout") {
 			// If error is - tcp timeoutError, retry.
 			return true
+		} else if strings.Contains(err.Error(), "connection timed out") {
+			// If err is a net.Dial timeout, retry.
+			return true
 		}
 	}
 }
60 vendor/src/github.com/minio/minio-go/s3-error.go vendored Normal file
@@ -0,0 +1,60 @@
/*
 * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2017 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package minio

// Non exhaustive list of AWS S3 standard error responses -
// http://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html
var s3ErrorResponseMap = map[string]string{
	"AccessDenied":                      "Access Denied.",
	"BadDigest":                         "The Content-Md5 you specified did not match what we received.",
	"EntityTooSmall":                    "Your proposed upload is smaller than the minimum allowed object size.",
	"EntityTooLarge":                    "Your proposed upload exceeds the maximum allowed object size.",
	"IncompleteBody":                    "You did not provide the number of bytes specified by the Content-Length HTTP header.",
	"InternalError":                     "We encountered an internal error, please try again.",
	"InvalidAccessKeyID":                "The access key ID you provided does not exist in our records.",
	"InvalidBucketName":                 "The specified bucket is not valid.",
	"InvalidDigest":                     "The Content-Md5 you specified is not valid.",
	"InvalidRange":                      "The requested range is not satisfiable",
	"MalformedXML":                      "The XML you provided was not well-formed or did not validate against our published schema.",
	"MissingContentLength":              "You must provide the Content-Length HTTP header.",
	"MissingContentMD5":                 "Missing required header for this request: Content-Md5.",
	"MissingRequestBodyError":           "Request body is empty.",
	"NoSuchBucket":                      "The specified bucket does not exist",
	"NoSuchBucketPolicy":                "The bucket policy does not exist",
	"NoSuchKey":                         "The specified key does not exist.",
	"NoSuchUpload":                      "The specified multipart upload does not exist. The upload ID may be invalid, or the upload may have been aborted or completed.",
	"NotImplemented":                    "A header you provided implies functionality that is not implemented",
	"PreconditionFailed":                "At least one of the pre-conditions you specified did not hold",
	"RequestTimeTooSkewed":              "The difference between the request time and the server's time is too large.",
	"SignatureDoesNotMatch":             "The request signature we calculated does not match the signature you provided. Check your key and signing method.",
	"MethodNotAllowed":                  "The specified method is not allowed against this resource.",
	"InvalidPart":                       "One or more of the specified parts could not be found.",
	"InvalidPartOrder":                  "The list of parts was not in ascending order. The parts list must be specified in order by part number.",
	"InvalidObjectState":                "The operation is not valid for the current state of the object.",
	"AuthorizationHeaderMalformed":      "The authorization header is malformed; the region is wrong.",
	"MalformedPOSTRequest":              "The body of your POST request is not well-formed multipart/form-data.",
	"BucketNotEmpty":                    "The bucket you tried to delete is not empty",
	"AllAccessDisabled":                 "All access to this bucket has been disabled.",
	"MalformedPolicy":                   "Policy has invalid resource.",
	"MissingFields":                     "Missing fields in request.",
	"AuthorizationQueryParametersError": "Error parsing the X-Amz-Credential parameter; the Credential is mal-formed; expecting \"<YOUR-AKID>/YYYYMMDD/REGION/SERVICE/aws4_request\".",
	"MalformedDate":                     "Invalid date format header, expected to be in ISO8601, RFC1123 or RFC1123Z time format.",
	"BucketAlreadyOwnedByYou":           "Your previous request to create the named bucket succeeded and you already own it.",
	"InvalidDuration":                   "Duration provided in the request is invalid.",
	"XAmzContentSHA256Mismatch":         "The provided 'x-amz-content-sha256' header does not match what was computed.",
	// Add new API errors here.
}
@@ -24,8 +24,11 @@ const (
 	Latest SignatureType = iota
 	SignatureV4
 	SignatureV2
+	SignatureV4Streaming
 )

+var emptySHA256 = sum256(nil)
+
 // isV2 - is signature SignatureV2?
 func (s SignatureType) isV2() bool {
 	return s == SignatureV2
@@ -35,3 +38,8 @@ func (s SignatureType) isV2() bool {
 func (s SignatureType) isV4() bool {
 	return s == SignatureV4 || s == Latest
 }
+
+// isStreamingV4 - is signature SignatureV4Streaming?
+func (s SignatureType) isStreamingV4() bool {
+	return s == SignatureV4Streaming
+}
@@ -57,13 +57,12 @@ func TestGetEndpointURL(t *testing.T) {
 		{"s3.cn-north-1.amazonaws.com.cn", false, "http://s3.cn-north-1.amazonaws.com.cn", nil, true},
 		{"192.168.1.1:9000", false, "http://192.168.1.1:9000", nil, true},
 		{"192.168.1.1:9000", true, "https://192.168.1.1:9000", nil, true},
-		{"192.168.1.1::9000", false, "", fmt.Errorf("too many colons in address %s", "192.168.1.1::9000"), false},
-		{"13333.123123.-", true, "", fmt.Errorf("Endpoint: %s does not follow ip address or domain name standards.", "13333.123123.-"), false},
-		{"s3.amazonaws.com:443", true, "", fmt.Errorf("Amazon S3 endpoint should be 's3.amazonaws.com'."), false},
-		{"storage.googleapis.com:4000", true, "", fmt.Errorf("Google Cloud Storage endpoint should be 'storage.googleapis.com'."), false},
-		{"s3.aamzza.-", true, "", fmt.Errorf("Endpoint: %s does not follow ip address or domain name standards.", "s3.aamzza.-"), false},
-		{"", true, "", fmt.Errorf("Endpoint: does not follow ip address or domain name standards."), false},
+		{"13333.123123.-", true, "", ErrInvalidArgument(fmt.Sprintf("Endpoint: %s does not follow ip address or domain name standards.", "13333.123123.-")), false},
+		{"s3.amazonaws.com:443", true, "", ErrInvalidArgument("Amazon S3 endpoint should be 's3.amazonaws.com'."), false},
+		{"storage.googleapis.com:4000", true, "", ErrInvalidArgument("Google Cloud Storage endpoint should be 'storage.googleapis.com'."), false},
+		{"s3.aamzza.-", true, "", ErrInvalidArgument(fmt.Sprintf("Endpoint: %s does not follow ip address or domain name standards.", "s3.aamzza.-")), false},
+		{"", true, "", ErrInvalidArgument("Endpoint: does not follow ip address or domain name standards."), false},
 	}

 	for i, testCase := range testCases {
@@ -98,17 +97,17 @@ func TestIsValidEndpointURL(t *testing.T) {
 		// Flag indicating whether the test is expected to pass or not.
 		shouldPass bool
 	}{
-		{"", fmt.Errorf("Endpoint url cannot be empty."), false},
+		{"", ErrInvalidArgument("Endpoint url cannot be empty."), false},
 		{"/", nil, true},
 		{"https://s3.am1;4205;0cazonaws.com", nil, true},
 		{"https://s3.cn-north-1.amazonaws.com.cn", nil, true},
 		{"https://s3.amazonaws.com/", nil, true},
 		{"https://storage.googleapis.com/", nil, true},
-		{"192.168.1.1", fmt.Errorf("Endpoint url cannot have fully qualified paths."), false},
-		{"https://amazon.googleapis.com/", fmt.Errorf("Google Cloud Storage endpoint should be 'storage.googleapis.com'."), false},
-		{"https://storage.googleapis.com/bucket/", fmt.Errorf("Endpoint url cannot have fully qualified paths."), false},
-		{"https://z3.amazonaws.com", fmt.Errorf("Amazon S3 endpoint should be 's3.amazonaws.com'."), false},
-		{"https://s3.amazonaws.com/bucket/object", fmt.Errorf("Endpoint url cannot have fully qualified paths."), false},
+		{"192.168.1.1", ErrInvalidArgument("Endpoint url cannot have fully qualified paths."), false},
+		{"https://amazon.googleapis.com/", ErrInvalidArgument("Google Cloud Storage endpoint should be 'storage.googleapis.com'."), false},
+		{"https://storage.googleapis.com/bucket/", ErrInvalidArgument("Endpoint url cannot have fully qualified paths."), false},
+		{"https://z3.amazonaws.com", ErrInvalidArgument("Amazon S3 endpoint should be 's3.amazonaws.com'."), false},
+		{"https://s3.amazonaws.com/bucket/object", ErrInvalidArgument("Endpoint url cannot have fully qualified paths."), false},
 	}

 	for i, testCase := range testCases {
@@ -149,9 +148,9 @@ func TestIsValidExpiry(t *testing.T) {
 		// Flag to indicate whether the test should pass.
 		shouldPass bool
 	}{
-		{100 * time.Millisecond, fmt.Errorf("Expires cannot be lesser than 1 second."), false},
-		{604801 * time.Second, fmt.Errorf("Expires cannot be greater than 7 days."), false},
-		{0 * time.Second, fmt.Errorf("Expires cannot be lesser than 1 second."), false},
+		{100 * time.Millisecond, ErrInvalidArgument("Expires cannot be lesser than 1 second."), false},
+		{604801 * time.Second, ErrInvalidArgument("Expires cannot be greater than 7 days."), false},
+		{0 * time.Second, ErrInvalidArgument("Expires cannot be lesser than 1 second."), false},
 		{1 * time.Second, nil, true},
 		{10000 * time.Second, nil, true},
 		{999 * time.Second, nil, true},