mirror of https://github.com/octoleo/restic.git (synced 2024-11-02 03:42:31 +00:00)
commit 0d3674245b
@@ -4,8 +4,6 @@ import (
    "context"
    "fmt"
    "io"
    "io/ioutil"
    "net/url"
    "os"
    "path"
    "restic"
@@ -16,7 +14,6 @@ import (
    "restic/errors"

    "github.com/minio/minio-go"
    "github.com/minio/minio-go/pkg/s3utils"

    "restic/debug"
)
@@ -88,7 +85,15 @@ func Open(cfg Config) (restic.Backend, error) {
// IsNotExist returns true if the error is caused by a not existing file.
func (be *Backend) IsNotExist(err error) bool {
    debug.Log("IsNotExist(%T, %#v)", err, err)
    return os.IsNotExist(err)
    if os.IsNotExist(errors.Cause(err)) {
        return true
    }

    if e, ok := errors.Cause(err).(minio.ErrorResponse); ok && e.Code == "NoSuchKey" {
        return true
    }

    return false
}

// Join combines path components with slashes.
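The check above only works because the error is unwrapped first: restic wraps errors in the style of github.com/pkg/errors, and os.IsNotExist does not look through such wrappers. A minimal sketch of that behaviour (illustrative, using github.com/pkg/errors directly rather than restic's errors package):

package main

import (
    "fmt"
    "os"

    "github.com/pkg/errors"
)

func main() {
    _, err := os.Open("/does/not/exist")
    wrapped := errors.Wrap(err, "open repo file")

    fmt.Println(os.IsNotExist(wrapped))               // false: the wrapper hides the *os.PathError
    fmt.Println(os.IsNotExist(errors.Cause(wrapped))) // true: Cause() recovers the original error
}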
@@ -162,17 +167,51 @@ func (be *Backend) Path() string {
    return be.cfg.Prefix
}

func (be *Backend) isGoogleCloudStorage() bool {
    scheme := "https://"
    if be.cfg.UseHTTP {
        scheme = "http://"
    }
    url, err := url.Parse(scheme + be.cfg.Endpoint)
// nopCloserFile wraps *os.File and overwrites the Close() method with method
// that does nothing. In addition, the method Len() is implemented, which
// returns the size of the file (filesize - current offset).
type nopCloserFile struct {
    *os.File
}

func (f nopCloserFile) Close() error {
    debug.Log("prevented Close()")
    return nil
}

// Len returns the remaining length of the file (filesize - current offset).
func (f nopCloserFile) Len() int {
    debug.Log("Len() called")
    fi, err := f.Stat()
    if err != nil {
        panic(err)
    }

    return s3utils.IsGoogleEndpoint(*url)
    pos, err := f.Seek(0, io.SeekCurrent)
    if err != nil {
        panic(err)
    }

    size := fi.Size() - pos
    debug.Log("returning file size %v", size)
    return int(size)
}

type lenner interface {
    Len() int
    io.Reader
}

// nopCloserLenner wraps a lenner and overwrites the Close() method with method
// that does nothing. In addition, the method Size() is implemented, which
// returns the size of the file (filesize - current offset).
type nopCloserLenner struct {
    lenner
}

func (f *nopCloserLenner) Close() error {
    debug.Log("prevented Close()")
    return nil
}

// Save stores data in the backend at the handle.
@@ -192,9 +231,15 @@ func (be *Backend) Save(ctx context.Context, h restic.Handle, rd io.Reader) (err
        return errors.New("key already exists")
    }

    // prevent GCS from closing the file
    if be.isGoogleCloudStorage() {
        rd = ioutil.NopCloser(rd)
    // prevent the HTTP client from closing a file
    if f, ok := rd.(*os.File); ok {
        debug.Log("reader is %#T, using nopCloserFile{}", rd)
        rd = nopCloserFile{f}
    } else if l, ok := rd.(lenner); ok {
        debug.Log("reader is %#T, using nopCloserLenner{}", rd)
        rd = nopCloserLenner{l}
    } else {
        debug.Log("reader is %#T, no specific workaround enabled", rd)
    }

    be.sem.GetToken()
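The wrappers above exist because an io.ReadCloser handed to the HTTP layer may be closed after the first attempt, which would break retries that need to re-read the same *os.File. A minimal sketch of the pattern, with hypothetical names (noCloseFile, remaining) rather than the ones in this commit:

package main

import (
    "fmt"
    "io"
    "os"
)

// noCloseFile embeds *os.File and hides its Close method, so code that closes
// its reader (for example a request body) cannot close the underlying file
// while the caller may still need it.
type noCloseFile struct {
    *os.File
}

func (f noCloseFile) Close() error { return nil }

// remaining reports filesize minus the current offset, mirroring Len() above.
func remaining(f *os.File) (int64, error) {
    fi, err := f.Stat()
    if err != nil {
        return 0, err
    }
    pos, err := f.Seek(0, io.SeekCurrent)
    if err != nil {
        return 0, err
    }
    return fi.Size() - pos, nil
}

func main() {
    f, _ := os.CreateTemp("", "demo")
    defer os.Remove(f.Name())
    f.WriteString("payload")
    f.Seek(0, io.SeekStart)

    var rd io.ReadCloser = noCloseFile{f}
    rd.Close() // no-op: the file stays usable for a retry

    n, _ := remaining(f)
    fmt.Println("bytes left to upload:", n) // 7
}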
@@ -241,8 +241,8 @@ func (s *Suite) TestLoad(t *testing.T) {

type errorCloser struct {
    io.Reader
    size int64
    t testing.TB
    l int
    t testing.TB
}

func (ec errorCloser) Close() error {
@@ -250,8 +250,8 @@ func (ec errorCloser) Close() error {
    return errors.New("forbidden method close was called")
}

func (ec errorCloser) Size() int64 {
    return ec.size
func (ec errorCloser) Len() int {
    return ec.l
}

// TestSave tests saving data in the backend.
@@ -325,7 +325,7 @@ func (s *Suite) TestSave(t *testing.T) {

    // wrap the tempfile in an errorCloser, so we can detect if the backend
    // closes the reader
    err = b.Save(context.TODO(), h, errorCloser{t: t, size: int64(length), Reader: tmpfile})
    err = b.Save(context.TODO(), h, errorCloser{t: t, l: length, Reader: tmpfile})
    if err != nil {
        t.Fatal(err)
    }
vendor/manifest (2 changes, vendored)
@@ -46,7 +46,7 @@
    {
        "importpath": "github.com/minio/minio-go",
        "repository": "https://github.com/minio/minio-go",
        "revision": "f2362d9e7d8daf89594ee0a079be2424eaf360be",
        "revision": "b752793c53c56d2d3f9002dc971e998e08335fc1",
        "branch": "master"
    },
    {
@@ -161,6 +161,9 @@ func httpRespToErrorResponse(resp *http.Response, bucketName, objectName string)
    if errResp.Region == "" {
        errResp.Region = resp.Header.Get("x-amz-bucket-region")
    }
    if errResp.Code == "InvalidRegion" && errResp.Region != "" {
        errResp.Message = fmt.Sprintf("Region does not match, expecting region '%s'.", errResp.Region)
    }

    // Save headers returned in the API XML error
    errResp.Headers = resp.Header
@@ -69,7 +69,7 @@ func (c Client) ListBuckets() ([]BucketInfo, error) {
// // Create a done channel.
// doneCh := make(chan struct{})
// defer close(doneCh)
// // Recurively list all objects in 'mytestbucket'
// // Recursively list all objects in 'mytestbucket'
// recursive := true
// for message := range api.ListObjectsV2("mytestbucket", "starthere", recursive, doneCh) {
//     fmt.Println(message)
@@ -19,19 +19,13 @@ package minio

import (
    "bytes"
    "encoding/base64"
    "encoding/hex"
    "encoding/json"
    "encoding/xml"
    "fmt"
    "io/ioutil"
    "net/http"
    "net/url"
    "path"

    "github.com/minio/minio-go/pkg/credentials"
    "github.com/minio/minio-go/pkg/policy"
    "github.com/minio/minio-go/pkg/s3signer"
)

/// Bucket operations
@@ -59,6 +53,11 @@ func (c Client) MakeBucket(bucketName string, location string) (err error) {
    // If location is empty, treat is a default region 'us-east-1'.
    if location == "" {
        location = "us-east-1"
        // For custom region clients, default
        // to custom region instead not 'us-east-1'.
        if c.region != "" {
            location = c.region
        }
    }

    // Try creating bucket with the provided region, in case of
@@ -71,99 +70,10 @@ func (c Client) MakeBucket(bucketName string, location string) (err error) {
    // Indicate to our routine to exit cleanly upon return.
    defer close(doneCh)

    // Blank indentifier is kept here on purpose since 'range' without
    // blank identifiers is only supported since go1.4
    // https://golang.org/doc/go1.4#forrange.
    for _ = range c.newRetryTimer(MaxRetry, DefaultRetryUnit, DefaultRetryCap, MaxJitter, doneCh) {
        // Initialize the makeBucket request.
        req, err := c.makeBucketRequest(bucketName, location)
        if err != nil {
            return err
        }

        // Execute make bucket request.
        resp, err := c.do(req)
        defer closeResponse(resp)
        if err != nil {
            return err
        }

        if resp.StatusCode != http.StatusOK {
            err := httpRespToErrorResponse(resp, bucketName, "")
            errResp := ToErrorResponse(err)
            if resp.StatusCode == http.StatusBadRequest && errResp.Region != "" {
                // Fetch bucket region found in headers
                // of S3 error response, attempt bucket
                // create again.
                location = errResp.Region
                continue
            }
            // Nothing to retry, fail.
            return err
        }

        // Control reaches here when bucket create was successful,
        // break out.
        break
    }

    // Success.
    return nil
}

// Low level wrapper API For makeBucketRequest.
func (c Client) makeBucketRequest(bucketName string, location string) (*http.Request, error) {
    // Validate input arguments.
    if err := isValidBucketName(bucketName); err != nil {
        return nil, err
    }

    // In case of Amazon S3. The make bucket issued on
    // already existing bucket would fail with
    // 'AuthorizationMalformed' error if virtual style is
    // used. So we default to 'path style' as that is the
    // preferred method here. The final location of the
    // 'bucket' is provided through XML LocationConstraint
    // data with the request.
    targetURL := c.endpointURL
    targetURL.Path = path.Join(bucketName, "") + "/"

    // get a new HTTP request for the method.
    req, err := http.NewRequest("PUT", targetURL.String(), nil)
    if err != nil {
        return nil, err
    }

    // set UserAgent for the request.
    c.setUserAgent(req)

    // Get credentials from the configured credentials provider.
    value, err := c.credsProvider.Get()
    if err != nil {
        return nil, err
    }

    var (
        signerType      = value.SignerType
        accessKeyID     = value.AccessKeyID
        secretAccessKey = value.SecretAccessKey
        sessionToken    = value.SessionToken
    )

    // Custom signer set then override the behavior.
    if c.overrideSignerType != credentials.SignatureDefault {
        signerType = c.overrideSignerType
    }

    // If signerType returned by credentials helper is anonymous,
    // then do not sign regardless of signerType override.
    if value.SignerType == credentials.SignatureAnonymous {
        signerType = credentials.SignatureAnonymous
    }

    // set sha256 sum for signature calculation only with signature version '4'.
    if signerType.IsV4() {
        req.Header.Set("X-Amz-Content-Sha256", hex.EncodeToString(sum256([]byte{})))
    // PUT bucket request metadata.
    reqMetadata := requestMetadata{
        bucketName:     bucketName,
        bucketLocation: location,
    }

    // If location is not 'us-east-1' create bucket location config.
@@ -173,30 +83,29 @@ func (c Client) makeBucketRequest(bucketName string, location string) (*http.Req
        var createBucketConfigBytes []byte
        createBucketConfigBytes, err = xml.Marshal(createBucketConfig)
        if err != nil {
            return nil, err
            return err
        }
        createBucketConfigBuffer := bytes.NewBuffer(createBucketConfigBytes)
        req.Body = ioutil.NopCloser(createBucketConfigBuffer)
        req.ContentLength = int64(len(createBucketConfigBytes))
        // Set content-md5.
        req.Header.Set("Content-Md5", base64.StdEncoding.EncodeToString(sumMD5(createBucketConfigBytes)))
        if signerType.IsV4() {
            // Set sha256.
            req.Header.Set("X-Amz-Content-Sha256", hex.EncodeToString(sum256(createBucketConfigBytes)))
        reqMetadata.contentMD5Bytes = sumMD5(createBucketConfigBytes)
        reqMetadata.contentSHA256Bytes = sum256(createBucketConfigBytes)
        reqMetadata.contentBody = bytes.NewReader(createBucketConfigBytes)
        reqMetadata.contentLength = int64(len(createBucketConfigBytes))
    }

    // Execute PUT to create a new bucket.
    resp, err := c.executeMethod("PUT", reqMetadata)
    defer closeResponse(resp)
    if err != nil {
        return err
    }

    if resp != nil {
        if resp.StatusCode != http.StatusOK {
            return httpRespToErrorResponse(resp, bucketName, "")
        }
    }

    // Sign the request.
    if signerType.IsV4() {
        // Signature calculated for MakeBucket request should be for 'us-east-1',
        // regardless of the bucket's location constraint.
        req = s3signer.SignV4(*req, accessKeyID, secretAccessKey, sessionToken, "us-east-1")
    } else if signerType.IsV2() {
        req = s3signer.SignV2(*req, accessKeyID, secretAccessKey)
    }

    // Return signed request.
    return req, nil
    // Success.
    return nil
}

// SetBucketPolicy set the access permissions on an existing bucket.
@@ -1,299 +0,0 @@
/*
 * Minio Go Library for Amazon S3 Compatible Cloud Storage
 * (C) 2015, 2016, 2017 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package minio

import (
    "bytes"
    "encoding/base64"
    "encoding/hex"
    "encoding/xml"
    "io"
    "io/ioutil"
    "net/http"
    "path"
    "testing"

    "github.com/minio/minio-go/pkg/credentials"
    "github.com/minio/minio-go/pkg/s3signer"
)

// Tests validate http request formulated for creation of bucket.
func TestMakeBucketRequest(t *testing.T) {
    // Generates expected http request for bucket creation.
    // Used for asserting with the actual request generated.
    createExpectedRequest := func(c *Client, bucketName string, location string, req *http.Request) (*http.Request, error) {
        targetURL := c.endpointURL
        targetURL.Path = path.Join(bucketName, "") + "/"

        // get a new HTTP request for the method.
        var err error
        req, err = http.NewRequest("PUT", targetURL.String(), nil)
        if err != nil {
            return nil, err
        }

        // set UserAgent for the request.
        c.setUserAgent(req)

        // Get credentials from the configured credentials provider.
        value, err := c.credsProvider.Get()
        if err != nil {
            return nil, err
        }

        var (
            signerType      = value.SignerType
            accessKeyID     = value.AccessKeyID
            secretAccessKey = value.SecretAccessKey
            sessionToken    = value.SessionToken
        )

        // Custom signer set then override the behavior.
        if c.overrideSignerType != credentials.SignatureDefault {
            signerType = c.overrideSignerType
        }

        // If signerType returned by credentials helper is anonymous,
        // then do not sign regardless of signerType override.
        if value.SignerType == credentials.SignatureAnonymous {
            signerType = credentials.SignatureAnonymous
        }

        // set sha256 sum for signature calculation only with signature version '4'.
        if signerType.IsV4() {
            req.Header.Set("X-Amz-Content-Sha256", hex.EncodeToString(sum256([]byte{})))
        }

        // If location is not 'us-east-1' create bucket location config.
        if location != "us-east-1" && location != "" {
            createBucketConfig := createBucketConfiguration{}
            createBucketConfig.Location = location
            var createBucketConfigBytes []byte
            createBucketConfigBytes, err = xml.Marshal(createBucketConfig)
            if err != nil {
                return nil, err
            }
            createBucketConfigBuffer := bytes.NewBuffer(createBucketConfigBytes)
            req.Body = ioutil.NopCloser(createBucketConfigBuffer)
            req.ContentLength = int64(len(createBucketConfigBytes))
            // Set content-md5.
            req.Header.Set("Content-Md5", base64.StdEncoding.EncodeToString(sumMD5(createBucketConfigBytes)))
            if signerType.IsV4() {
                // Set sha256.
                req.Header.Set("X-Amz-Content-Sha256", hex.EncodeToString(sum256(createBucketConfigBytes)))
            }
        }

        // Sign the request.
        if signerType.IsV4() {
            // Signature calculated for MakeBucket request should be for 'us-east-1',
            // regardless of the bucket's location constraint.
            req = s3signer.SignV4(*req, accessKeyID, secretAccessKey, sessionToken, "us-east-1")
        } else if signerType.IsV2() {
            req = s3signer.SignV2(*req, accessKeyID, secretAccessKey)
        }

        // Return signed request.
        return req, nil
    }

    // Get Request body.
    getReqBody := func(reqBody io.ReadCloser) (string, error) {
        contents, err := ioutil.ReadAll(reqBody)
        if err != nil {
            return "", err
        }
        return string(contents), nil
    }

    // Info for 'Client' creation.
    // Will be used as arguments for 'NewClient'.
    type infoForClient struct {
        endPoint       string
        accessKey      string
        secretKey      string
        enableInsecure bool
    }
    // dataset for 'NewClient' call.
    info := []infoForClient{
        // endpoint localhost.
        // both access-key and secret-key are empty.
        {"localhost:9000", "", "", false},
        // both access-key are secret-key exists.
        {"localhost:9000", "my-access-key", "my-secret-key", false},
        // one of acess-key and secret-key are empty.
        {"localhost:9000", "", "my-secret-key", false},

        // endpoint amazon s3.
        {"s3.amazonaws.com", "", "", false},
        {"s3.amazonaws.com", "my-access-key", "my-secret-key", false},
        {"s3.amazonaws.com", "my-acess-key", "", false},

        // endpoint google cloud storage.
        {"storage.googleapis.com", "", "", false},
        {"storage.googleapis.com", "my-access-key", "my-secret-key", false},
        {"storage.googleapis.com", "", "my-secret-key", false},

        // endpoint custom domain running Minio server.
        {"play.minio.io", "", "", false},
        {"play.minio.io", "my-access-key", "my-secret-key", false},
        {"play.minio.io", "my-acess-key", "", false},
    }

    testCases := []struct {
        bucketName string
        location   string
        // data for new client creation.
        info infoForClient
        // error in the output.
        err error
        // flag indicating whether tests should pass.
        shouldPass bool
    }{
        // Test cases with Invalid bucket name.
        {".mybucket", "", infoForClient{}, ErrInvalidBucketName("Bucket name cannot start or end with a '.' dot."), false},
        {"mybucket.", "", infoForClient{}, ErrInvalidBucketName("Bucket name cannot start or end with a '.' dot."), false},
        {"mybucket-", "", infoForClient{}, ErrInvalidBucketName("Bucket name contains invalid characters."), false},
        {"my", "", infoForClient{}, ErrInvalidBucketName("Bucket name cannot be smaller than 3 characters."), false},
        {"", "", infoForClient{}, ErrInvalidBucketName("Bucket name cannot be empty."), false},
        {"my..bucket", "", infoForClient{}, ErrInvalidBucketName("Bucket name cannot have successive periods."), false},

        // Test case with all valid values for S3 bucket location.
        // Client is constructed using the info struct.
        // case with empty location.
        {"my-bucket", "", info[0], nil, true},
        // case with location set to standard 'us-east-1'.
        {"my-bucket", "us-east-1", info[0], nil, true},
        // case with location set to a value different from 'us-east-1'.
        {"my-bucket", "eu-central-1", info[0], nil, true},

        {"my-bucket", "", info[1], nil, true},
        {"my-bucket", "us-east-1", info[1], nil, true},
        {"my-bucket", "eu-central-1", info[1], nil, true},

        {"my-bucket", "", info[2], nil, true},
        {"my-bucket", "us-east-1", info[2], nil, true},
        {"my-bucket", "eu-central-1", info[2], nil, true},

        {"my-bucket", "", info[3], nil, true},
        {"my-bucket", "us-east-1", info[3], nil, true},
        {"my-bucket", "eu-central-1", info[3], nil, true},

        {"my-bucket", "", info[4], nil, true},
        {"my-bucket", "us-east-1", info[4], nil, true},
        {"my-bucket", "eu-central-1", info[4], nil, true},

        {"my-bucket", "", info[5], nil, true},
        {"my-bucket", "us-east-1", info[5], nil, true},
        {"my-bucket", "eu-central-1", info[5], nil, true},

        {"my-bucket", "", info[6], nil, true},
        {"my-bucket", "us-east-1", info[6], nil, true},
        {"my-bucket", "eu-central-1", info[6], nil, true},

        {"my-bucket", "", info[7], nil, true},
        {"my-bucket", "us-east-1", info[7], nil, true},
        {"my-bucket", "eu-central-1", info[7], nil, true},

        {"my-bucket", "", info[8], nil, true},
        {"my-bucket", "us-east-1", info[8], nil, true},
        {"my-bucket", "eu-central-1", info[8], nil, true},

        {"my-bucket", "", info[9], nil, true},
        {"my-bucket", "us-east-1", info[9], nil, true},
        {"my-bucket", "eu-central-1", info[9], nil, true},

        {"my-bucket", "", info[10], nil, true},
        {"my-bucket", "us-east-1", info[10], nil, true},
        {"my-bucket", "eu-central-1", info[10], nil, true},

        {"my-bucket", "", info[11], nil, true},
        {"my-bucket", "us-east-1", info[11], nil, true},
        {"my-bucket", "eu-central-1", info[11], nil, true},
    }

    for i, testCase := range testCases {
        // cannot create a newclient with empty endPoint value.
        // validates and creates a new client only if the endPoint value is not empty.
        client := &Client{}
        var err error
        if testCase.info.endPoint != "" {

            client, err = New(testCase.info.endPoint, testCase.info.accessKey, testCase.info.secretKey, testCase.info.enableInsecure)
            if err != nil {
                t.Fatalf("Test %d: Failed to create new Client: %s", i+1, err.Error())
            }
        }

        actualReq, err := client.makeBucketRequest(testCase.bucketName, testCase.location)
        if err != nil && testCase.shouldPass {
            t.Errorf("Test %d: Expected to pass, but failed with: <ERROR> %s", i+1, err.Error())
        }
        if err == nil && !testCase.shouldPass {
            t.Errorf("Test %d: Expected to fail with <ERROR> \"%s\", but passed instead", i+1, testCase.err.Error())
        }
        // Failed as expected, but does it fail for the expected reason.
        if err != nil && !testCase.shouldPass {
            if err.Error() != testCase.err.Error() {
                t.Errorf("Test %d: Expected to fail with error \"%s\", but instead failed with error \"%s\" instead", i+1, testCase.err.Error(), err.Error())
            }
        }

        // Test passes as expected, but the output values are verified for correctness here.
        if err == nil && testCase.shouldPass {
            expectedReq := &http.Request{}
            expectedReq, err = createExpectedRequest(client, testCase.bucketName, testCase.location, expectedReq)
            if err != nil {
                t.Fatalf("Test %d: Expected request Creation failed", i+1)
            }
            if expectedReq.Method != actualReq.Method {
                t.Errorf("Test %d: The expected Request method doesn't match with the actual one", i+1)
            }
            if expectedReq.URL.String() != actualReq.URL.String() {
                t.Errorf("Test %d: Expected the request URL to be '%s', but instead found '%s'", i+1, expectedReq.URL.String(), actualReq.URL.String())
            }
            if expectedReq.ContentLength != actualReq.ContentLength {
                t.Errorf("Test %d: Expected the request body Content-Length to be '%d', but found '%d' instead", i+1, expectedReq.ContentLength, actualReq.ContentLength)
            }

            if expectedReq.Header.Get("X-Amz-Content-Sha256") != actualReq.Header.Get("X-Amz-Content-Sha256") {
                t.Errorf("Test %d: 'X-Amz-Content-Sha256' header of the expected request %s doesn't match with that of the actual request %s", i+1, expectedReq.Header.Get("X-Amz-Content-Sha256"), actualReq.Header.Get("X-Amz-Content-Sha256"))
            }
            if expectedReq.Header.Get("User-Agent") != actualReq.Header.Get("User-Agent") {
                t.Errorf("Test %d: Expected 'User-Agent' header to be \"%s\",but found \"%s\" instead", i+1, expectedReq.Header.Get("User-Agent"), actualReq.Header.Get("User-Agent"))
            }

            if testCase.location != "us-east-1" && testCase.location != "" {
                expectedContent, err := getReqBody(expectedReq.Body)
                if err != nil {
                    t.Fatalf("Test %d: Coudln't parse request body", i+1)
                }
                actualContent, err := getReqBody(actualReq.Body)
                if err != nil {
                    t.Fatalf("Test %d: Coudln't parse request body", i+1)
                }
                if expectedContent != actualContent {
                    t.Errorf("Test %d: Expected request body doesn't match actual content body", i+1)
                }
                if expectedReq.Header.Get("Content-Md5") != actualReq.Header.Get("Content-Md5") {
                    t.Errorf("Test %d: Request body Md5 differs from the expected result", i+1)
                }
            }
        }
    }
}
@@ -77,17 +77,8 @@ func (c Client) FPutObject(bucketName, objectName, filePath, contentType string)
    objMetadata["Content-Type"] = []string{contentType}

    // NOTE: Google Cloud Storage multipart Put is not compatible with Amazon S3 APIs.
    // Current implementation will only upload a maximum of 5GiB to Google Cloud Storage servers.
    if s3utils.IsGoogleEndpoint(c.endpointURL) {
        if fileSize > int64(maxSinglePutObjectSize) {
            return 0, ErrorResponse{
                Code:       "NotImplemented",
                Message:    fmt.Sprintf("Invalid Content-Length %d for file uploads to Google Cloud Storage.", fileSize),
                Key:        objectName,
                BucketName: bucketName,
            }
        }
        // Do not compute MD5 for Google Cloud Storage. Uploads up to 5GiB in size.
        // Do not compute MD5 for Google Cloud Storage.
        return c.putObjectNoChecksum(bucketName, objectName, fileReader, fileSize, objMetadata, nil)
    }

@@ -83,20 +83,8 @@ func (c Client) PutObjectWithMetadata(bucketName, objectName string, reader io.R
    }

    // NOTE: Google Cloud Storage does not implement Amazon S3 Compatible multipart PUT.
    // So we fall back to single PUT operation with the maximum limit of 5GiB.
    if s3utils.IsGoogleEndpoint(c.endpointURL) {
        if size <= -1 {
            return 0, ErrorResponse{
                Code:       "NotImplemented",
                Message:    "Content-Length cannot be negative for file uploads to Google Cloud Storage.",
                Key:        objectName,
                BucketName: bucketName,
            }
        }
        if size > maxSinglePutObjectSize {
            return 0, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName)
        }
        // Do not compute MD5 for Google Cloud Storage. Uploads up to 5GiB in size.
        // Do not compute MD5 for Google Cloud Storage.
        return c.putObjectNoChecksum(bucketName, objectName, reader, size, metaData, progress)
    }

@@ -17,7 +17,6 @@
package minio

import (
    "bytes"
    "crypto/md5"
    "crypto/sha256"
    "hash"
@@ -167,8 +166,11 @@ func (c Client) putObjectNoChecksum(bucketName, objectName string, reader io.Rea
    if err := isValidObjectName(objectName); err != nil {
        return 0, err
    }
    if size > maxSinglePutObjectSize {
        return 0, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName)
    if size > 0 {
        readerAt, ok := reader.(io.ReaderAt)
        if ok {
            reader = io.NewSectionReader(readerAt, 0, size)
        }
    }

    // Update progress reader appropriately to the latest offset as we
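The new branch above relies on io.NewSectionReader to bound reads to the declared size while keeping the reader seekable. A small standard-library sketch of what that wrapping does:

package main

import (
    "fmt"
    "io"
    "strings"
)

func main() {
    // strings.Reader implements io.ReaderAt, just as *os.File does.
    src := strings.NewReader("0123456789")

    // Expose only the first 4 bytes; the result is an io.Reader, io.Seeker
    // and io.ReaderAt, so downstream code can re-read it for retries.
    section := io.NewSectionReader(src, 0, 4)

    buf, _ := io.ReadAll(section)
    fmt.Printf("%s (%d bytes)\n", buf, len(buf)) // 0123 (4 bytes)
}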
@@ -214,34 +216,25 @@ func (c Client) putObjectSingle(bucketName, objectName string, reader io.Reader,
        hashAlgos["sha256"] = sha256.New()
    }

    if size <= minPartSize {
        // Initialize a new temporary buffer.
        tmpBuffer := new(bytes.Buffer)
        size, err = hashCopyN(hashAlgos, hashSums, tmpBuffer, reader, size)
        reader = bytes.NewReader(tmpBuffer.Bytes())
        tmpBuffer.Reset()
    } else {
        // Initialize a new temporary file.
        var tmpFile *tempFile
        tmpFile, err = newTempFile("single$-putobject-single")
        if err != nil {
            return 0, err
        }
        defer tmpFile.Close()
        size, err = hashCopyN(hashAlgos, hashSums, tmpFile, reader, size)
        if err != nil {
            return 0, err
        }
        // Seek back to beginning of the temporary file.
        if _, err = tmpFile.Seek(0, 0); err != nil {
            return 0, err
        }
        reader = tmpFile
    // Initialize a new temporary file.
    tmpFile, err := newTempFile("single$-putobject-single")
    if err != nil {
        return 0, err
    }
    defer tmpFile.Close()

    size, err = hashCopyN(hashAlgos, hashSums, tmpFile, reader, size)
    // Return error if its not io.EOF.
    if err != nil && err != io.EOF {
        return 0, err
    }

    // Seek back to beginning of the temporary file.
    if _, err = tmpFile.Seek(0, 0); err != nil {
        return 0, err
    }
    reader = tmpFile

    // Execute put object.
    st, err := c.putObjectDo(bucketName, objectName, reader, hashSums["md5"], hashSums["sha256"], size, metaData)
    if err != nil {
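hashCopyN above fills the "md5" and "sha256" entries of the hash map while spooling the payload to the temporary file in a single pass. A sketch of how such a helper can be built from io.MultiWriter (an illustration, not the vendored implementation):

package main

import (
    "bytes"
    "crypto/md5"
    "crypto/sha256"
    "encoding/hex"
    "fmt"
    "hash"
    "io"
    "strings"
)

// hashCopy copies src to dst while feeding every byte through each hash.
func hashCopy(hashes map[string]hash.Hash, dst io.Writer, src io.Reader) (int64, error) {
    writers := []io.Writer{dst}
    for _, h := range hashes {
        writers = append(writers, h)
    }
    return io.Copy(io.MultiWriter(writers...), src)
}

func main() {
    hashes := map[string]hash.Hash{"md5": md5.New(), "sha256": sha256.New()}
    var spool bytes.Buffer

    n, err := hashCopy(hashes, &spool, strings.NewReader("object payload"))
    if err != nil {
        panic(err)
    }
    fmt.Println(n, "bytes, md5:", hex.EncodeToString(hashes["md5"].Sum(nil)))
}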
@@ -270,14 +263,6 @@ func (c Client) putObjectDo(bucketName, objectName string, reader io.Reader, md5
        return ObjectInfo{}, err
    }

    if size <= -1 {
        return ObjectInfo{}, ErrEntityTooSmall(size, bucketName, objectName)
    }

    if size > maxSinglePutObjectSize {
        return ObjectInfo{}, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName)
    }

    // Set headers.
    customHeader := make(http.Header)

vendor/src/github.com/minio/minio-go/api.go (96 changes, vendored)
@@ -26,11 +26,11 @@ import (
    "io"
    "io/ioutil"
    "math/rand"
    "net"
    "net/http"
    "net/http/httputil"
    "net/url"
    "os"
    "regexp"
    "runtime"
    "strings"
    "sync"
@@ -309,40 +309,6 @@ type requestMetadata struct {
    contentMD5Bytes []byte
}

// regCred matches credential string in HTTP header
var regCred = regexp.MustCompile("Credential=([A-Z0-9]+)/")

// regCred matches signature string in HTTP header
var regSign = regexp.MustCompile("Signature=([[0-9a-f]+)")

// Filter out signature value from Authorization header.
func (c Client) filterSignature(req *http.Request) {
    origAuth := req.Header.Get("Authorization")
    if origAuth != "" {
        return
    }

    if !strings.HasPrefix(origAuth, signV4Algorithm) {
        // Set a temporary redacted auth
        req.Header.Set("Authorization", "AWS **REDACTED**:**REDACTED**")
        return
    }

    /// Signature V4 authorization header.

    // Strip out accessKeyID from:
    // Credential=<access-key-id>/<date>/<aws-region>/<aws-service>/aws4_request
    newAuth := regCred.ReplaceAllString(origAuth, "Credential=**REDACTED**/")

    // Strip out 256-bit signature from: Signature=<256-bit signature>
    newAuth = regSign.ReplaceAllString(newAuth, "Signature=**REDACTED**")

    // Set a temporary redacted auth
    req.Header.Set("Authorization", newAuth)

    return
}

// dumpHTTP - dump HTTP request and response.
func (c Client) dumpHTTP(req *http.Request, resp *http.Response) error {
    // Starts http dump.
@@ -352,7 +318,10 @@ func (c Client) dumpHTTP(req *http.Request, resp *http.Response) error {
    }

    // Filter out Signature field from Authorization header.
    c.filterSignature(req)
    origAuth := req.Header.Get("Authorization")
    if origAuth != "" {
        req.Header.Set("Authorization", redactSignature(origAuth))
    }

    // Only display request header.
    reqTrace, err := httputil.DumpRequestOut(req, false)
@@ -556,9 +525,14 @@ func (c Client) executeMethod(method string, metadata requestMetadata) (res *htt
        // Bucket region if set in error response and the error
        // code dictates invalid region, we can retry the request
        // with the new region.
        if res.StatusCode == http.StatusBadRequest && errResponse.Region != "" {
            c.bucketLocCache.Set(metadata.bucketName, errResponse.Region)
            continue // Retry.
        //
        // Additionally we should only retry if bucketLocation and custom
        // region is empty.
        if metadata.bucketLocation == "" && c.region == "" {
            if res.StatusCode == http.StatusBadRequest && errResponse.Region != "" {
                c.bucketLocCache.Set(metadata.bucketName, errResponse.Region)
                continue // Retry.
            }
        }

        // Verify if error response code is retryable.
@@ -584,29 +558,19 @@ func (c Client) newRequest(method string, metadata requestMetadata) (req *http.R
        method = "POST"
    }

    // Default all requests to "us-east-1" or "cn-north-1" (china region)
    location := "us-east-1"
    if s3utils.IsAmazonChinaEndpoint(c.endpointURL) {
        // For china specifically we need to set everything to
        // cn-north-1 for now, there is no easier way until AWS S3
        // provides a cleaner compatible API across "us-east-1" and
        // China region.
        location = "cn-north-1"
    }

    var location string
    // Gather location only if bucketName is present.
    if metadata.bucketName != "" {
    if metadata.bucketName != "" && metadata.bucketLocation == "" {
        location, err = c.getBucketLocation(metadata.bucketName)
        if err != nil {
            return nil, err
        }
    } else {
        location = metadata.bucketLocation
    }

    // Save location.
    metadata.bucketLocation = location

    // Construct a new target URL.
    targetURL, err := c.makeTargetURL(metadata.bucketName, metadata.objectName, metadata.bucketLocation, metadata.queryValues)
    targetURL, err := c.makeTargetURL(metadata.bucketName, metadata.objectName, location, metadata.queryValues)
    if err != nil {
        return nil, err
    }
@@ -664,9 +628,11 @@ func (c Client) newRequest(method string, metadata requestMetadata) (req *http.R
        req.Header.Set(k, v[0])
    }

    // set incoming content-length.
    if metadata.contentLength > 0 {
        req.ContentLength = metadata.contentLength
    // Set incoming content-length.
    req.ContentLength = metadata.contentLength
    if req.ContentLength <= -1 {
        // For unknown content length, we upload using transfer-encoding: chunked.
        req.TransferEncoding = []string{"chunked"}
    }

    // set md5Sum for content protection.
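The fallback above matches how net/http treats request bodies: when the length is unknown, an HTTP/1.1 request is sent with chunked transfer encoding and the receiving server sees ContentLength -1. A small sketch to observe that behaviour (assumes Go 1.16+ for io.Discard):

package main

import (
    "fmt"
    "io"
    "net/http"
    "net/http/httptest"
    "strings"
)

func main() {
    srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        // A chunked request arrives with an unknown length.
        fmt.Println("server sees ContentLength:", r.ContentLength, "TransferEncoding:", r.TransferEncoding)
        io.Copy(io.Discard, r.Body)
    }))
    defer srv.Close()

    // Hide the concrete reader type so http.NewRequest cannot infer a length.
    body := struct{ io.Reader }{strings.NewReader("payload")}
    req, _ := http.NewRequest(http.MethodPut, srv.URL, body)
    req.ContentLength = -1 // unknown; the transport falls back to chunked encoding

    resp, err := http.DefaultClient.Do(req)
    if err != nil {
        panic(err)
    }
    resp.Body.Close()
}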
@@ -726,14 +692,26 @@ func (c Client) makeTargetURL(bucketName, objectName, bucketLocation string, que
            // http://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html
            host = c.s3AccelerateEndpoint
        } else {
            // Fetch new host based on the bucket location.
            host = getS3Endpoint(bucketLocation)
            // Do not change the host if the endpoint URL is a FIPS S3 endpoint.
            if !s3utils.IsAmazonFIPSGovCloudEndpoint(c.endpointURL) {
                // Fetch new host based on the bucket location.
                host = getS3Endpoint(bucketLocation)
            }
        }
    }

    // Save scheme.
    scheme := c.endpointURL.Scheme

    // Strip port 80 and 443 so we won't send these ports in Host header.
    // The reason is that browsers and curl automatically remove :80 and :443
    // with the generated presigned urls, then a signature mismatch error.
    if h, p, err := net.SplitHostPort(host); err == nil {
        if scheme == "http" && p == "80" || scheme == "https" && p == "443" {
            host = h
        }
    }

    urlStr := scheme + "://" + host + "/"
    // Make URL only if bucketName is available, otherwise use the
    // endpoint URL.
@@ -108,6 +108,20 @@ func TestMakeBucketError(t *testing.T) {
    if err = c.RemoveBucket(bucketName); err != nil {
        t.Fatal("Error:", err, bucketName)
    }
    if err = c.MakeBucket(bucketName+"..-1", "eu-central-1"); err == nil {
        t.Fatal("Error:", err, bucketName+"..-1")
    }
    // Verify valid error response.
    if ToErrorResponse(err).Code != "InvalidBucketName" {
        t.Fatal("Error: Invalid error returned by server", err)
    }
    if err = c.MakeBucket(bucketName+"AAA-1", "eu-central-1"); err == nil {
        t.Fatal("Error:", err, bucketName+"..-1")
    }
    // Verify valid error response.
    if ToErrorResponse(err).Code != "InvalidBucketName" {
        t.Fatal("Error: Invalid error returned by server", err)
    }
}

// Tests various bucket supported formats.
@@ -22,6 +22,7 @@ import (
    "io"
    "io/ioutil"
    "net/http"
    "net/url"
    "os"
    "strings"
    "testing"
@@ -302,3 +303,56 @@ func TestPartSize(t *testing.T) {
        t.Fatalf("Error: expecting last part size of 241172480: got %v instead", lastPartSize)
    }
}

// TestMakeTargetURL - testing makeTargetURL()
func TestMakeTargetURL(t *testing.T) {
    testCases := []struct {
        addr           string
        secure         bool
        bucketName     string
        objectName     string
        bucketLocation string
        queryValues    map[string][]string
        expectedURL    url.URL
        expectedErr    error
    }{
        // Test 1
        {"localhost:9000", false, "", "", "", nil, url.URL{Host: "localhost:9000", Scheme: "http", Path: "/"}, nil},
        // Test 2
        {"localhost", true, "", "", "", nil, url.URL{Host: "localhost", Scheme: "https", Path: "/"}, nil},
        // Test 3
        {"localhost:9000", true, "mybucket", "", "", nil, url.URL{Host: "localhost:9000", Scheme: "https", Path: "/mybucket/"}, nil},
        // Test 4, testing against google storage API
        {"storage.googleapis.com", true, "mybucket", "", "", nil, url.URL{Host: "mybucket.storage.googleapis.com", Scheme: "https", Path: "/"}, nil},
        // Test 5, testing against AWS S3 API
        {"s3.amazonaws.com", true, "mybucket", "myobject", "", nil, url.URL{Host: "mybucket.s3.amazonaws.com", Scheme: "https", Path: "/myobject"}, nil},
        // Test 6
        {"localhost:9000", false, "mybucket", "myobject", "", nil, url.URL{Host: "localhost:9000", Scheme: "http", Path: "/mybucket/myobject"}, nil},
        // Test 7, testing with query
        {"localhost:9000", false, "mybucket", "myobject", "", map[string][]string{"param": []string{"val"}}, url.URL{Host: "localhost:9000", Scheme: "http", Path: "/mybucket/myobject", RawQuery: "param=val"}, nil},
        // Test 8, testing with port 80
        {"localhost:80", false, "mybucket", "myobject", "", nil, url.URL{Host: "localhost", Scheme: "http", Path: "/mybucket/myobject"}, nil},
        // Test 9, testing with port 443
        {"localhost:443", true, "mybucket", "myobject", "", nil, url.URL{Host: "localhost", Scheme: "https", Path: "/mybucket/myobject"}, nil},
    }

    for i, testCase := range testCases {
        // Initialize a Minio client
        c, _ := New(testCase.addr, "foo", "bar", testCase.secure)
        u, err := c.makeTargetURL(testCase.bucketName, testCase.objectName, testCase.bucketLocation, testCase.queryValues)
        // Check the returned error
        if testCase.expectedErr == nil && err != nil {
            t.Fatalf("Test %d: Should succeed but failed with err = %v", i+1, err)
        }
        if testCase.expectedErr != nil && err == nil {
            t.Fatalf("Test %d: Should fail but succeeded", i+1)
        }
        if err == nil {
            // Check if the returned url is equal to what we expect
            if u.String() != testCase.expectedURL.String() {
                t.Fatalf("Test %d: Mismatched target url: expected = `%v`, found = `%v`",
                    i+1, testCase.expectedURL.String(), u.String())
            }
        }
    }
}
@@ -92,6 +92,12 @@ func (c Client) getBucketLocation(bucketName string) (string, error) {
        // provides a cleaner compatible API across "us-east-1" and
        // China region.
        return "cn-north-1", nil
    } else if s3utils.IsAmazonGovCloudEndpoint(c.endpointURL) {
        // For us-gov specifically we need to set everything to
        // us-gov-west-1 for now, there is no easier way until AWS S3
        // provides a cleaner compatible API across "us-east-1" and
        // Gov cloud region.
        return "us-gov-west-1", nil
    }

    // Region set then no need to fetch bucket location.
@@ -84,10 +84,29 @@ func IsAmazonEndpoint(endpointURL url.URL) bool {
    if IsAmazonChinaEndpoint(endpointURL) {
        return true
    }

    if IsAmazonGovCloudEndpoint(endpointURL) {
        return true
    }
    return endpointURL.Host == "s3.amazonaws.com"
}

// IsAmazonGovCloudEndpoint - Match if it is exactly Amazon S3 GovCloud endpoint.
func IsAmazonGovCloudEndpoint(endpointURL url.URL) bool {
    if endpointURL == sentinelURL {
        return false
    }
    return (endpointURL.Host == "s3-us-gov-west-1.amazonaws.com" ||
        IsAmazonFIPSGovCloudEndpoint(endpointURL))
}

// IsAmazonFIPSGovCloudEndpoint - Match if it is exactly Amazon S3 FIPS GovCloud endpoint.
func IsAmazonFIPSGovCloudEndpoint(endpointURL url.URL) bool {
    if endpointURL == sentinelURL {
        return false
    }
    return endpointURL.Host == "s3-fips-us-gov-west-1.amazonaws.com"
}

// IsAmazonChinaEndpoint - Match if it is exactly Amazon S3 China endpoint.
// Customers who wish to use the new Beijing Region are required
// to sign up for a separate set of account credentials unique to
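The new matchers compare exact hosts, and the GovCloud check also accepts the FIPS host. A short usage sketch against this vendored revision of pkg/s3utils:

package main

import (
    "fmt"
    "net/url"

    "github.com/minio/minio-go/pkg/s3utils"
)

func main() {
    fips, _ := url.Parse("https://s3-fips-us-gov-west-1.amazonaws.com")
    govcloud, _ := url.Parse("https://s3-us-gov-west-1.amazonaws.com")
    classic, _ := url.Parse("https://s3.amazonaws.com")

    fmt.Println(s3utils.IsAmazonFIPSGovCloudEndpoint(*fips)) // true
    fmt.Println(s3utils.IsAmazonGovCloudEndpoint(*govcloud)) // true (also true for the FIPS host)
    fmt.Println(s3utils.IsAmazonGovCloudEndpoint(*classic))  // false
}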
@@ -33,6 +33,7 @@ var awsS3EndpointMap = map[string]string{
    "ap-northeast-1": "s3-ap-northeast-1.amazonaws.com",
    "ap-northeast-2": "s3-ap-northeast-2.amazonaws.com",
    "sa-east-1":      "s3-sa-east-1.amazonaws.com",
    "us-gov-west-1":  "s3-us-gov-west-1.amazonaws.com",
    "cn-north-1":     "s3.cn-north-1.amazonaws.com.cn",
}

vendor/src/github.com/minio/minio-go/utils.go (23 changes, vendored)
@@ -227,3 +227,26 @@ func filterHeader(header http.Header, filterKeys []string) (filteredHeader http.
    }
    return filteredHeader
}

// regCred matches credential string in HTTP header
var regCred = regexp.MustCompile("Credential=([A-Z0-9]+)/")

// regCred matches signature string in HTTP header
var regSign = regexp.MustCompile("Signature=([[0-9a-f]+)")

// Redact out signature value from authorization string.
func redactSignature(origAuth string) string {
    if !strings.HasPrefix(origAuth, signV4Algorithm) {
        // Set a temporary redacted auth
        return "AWS **REDACTED**:**REDACTED**"
    }

    /// Signature V4 authorization header.

    // Strip out accessKeyID from:
    // Credential=<access-key-id>/<date>/<aws-region>/<aws-service>/aws4_request
    newAuth := regCred.ReplaceAllString(origAuth, "Credential=**REDACTED**/")

    // Strip out 256-bit signature from: Signature=<256-bit signature>
    return regSign.ReplaceAllString(newAuth, "Signature=**REDACTED**")
}
@@ -23,6 +23,31 @@ import (
    "time"
)

// Tests signature redacting function used
// in filtering on-wire Authorization header.
func TestRedactSignature(t *testing.T) {
    testCases := []struct {
        authValue                 string
        expectedRedactedAuthValue string
    }{
        {
            authValue:                 "AWS 1231313:888x000231==",
            expectedRedactedAuthValue: "AWS **REDACTED**:**REDACTED**",
        },
        {
            authValue:                 "AWS4-HMAC-SHA256 Credential=12312313/20170613/us-east-1/s3/aws4_request, SignedHeaders=host;x-amz-content-sha256;x-amz-date, Signature=02131231312313213",
            expectedRedactedAuthValue: "AWS4-HMAC-SHA256 Credential=**REDACTED**/20170613/us-east-1/s3/aws4_request, SignedHeaders=host;x-amz-content-sha256;x-amz-date, Signature=**REDACTED**",
        },
    }

    for i, testCase := range testCases {
        redactedAuthValue := redactSignature(testCase.authValue)
        if redactedAuthValue != testCase.expectedRedactedAuthValue {
            t.Errorf("Test %d: Expected %s, got %s", i+1, testCase.expectedRedactedAuthValue, redactedAuthValue)
        }
    }
}

// Tests filter header function by filtering out
// some custom header keys.
func TestFilterHeader(t *testing.T) {