Update minio-go library

Alexander Neumann 2015-12-28 16:19:34 +01:00
parent de933a1d48
commit 3d2a714b5a
12 changed files with 961 additions and 537 deletions

Godeps/Godeps.json (generated)

@@ -24,6 +24,7 @@
},
{
"ImportPath": "github.com/minio/minio-go",
"Comment": "v0.2.5-62-g61f6570",
"Rev": "61f6570da0edd761974216c9ed5da485d3cc0c99"
},
{


@@ -209,10 +209,14 @@ func (a apiCore) getBucketACL(bucket string) (accessControlPolicy, error) {
if err != nil {
return accessControlPolicy{}, err
}
// In case of a Google Cloud Storage private bucket, the policy doesn't have any Grant list
if a.config.Region == "google" {
return policy, nil
}
if policy.AccessControlList.Grant == nil {
errorResponse := ErrorResponse{
Code: "InternalError",
Message: "Access control Grant list is empty, please report this at https://github.com/minio/minio-go/issues",
Message: "Access control Grant list is empty, please report this at https://github.com/minio/minio-go/issues.",
Resource: separator + bucket,
RequestID: resp.Header.Get("x-amz-request-id"),
HostID: resp.Header.Get("x-amz-id-2"),
@@ -371,7 +375,7 @@ func (a apiCore) headBucket(bucket string) error {
case http.StatusForbidden:
errorResponse = ErrorResponse{
Code: "AccessDenied",
Message: "Access Denied",
Message: "Access Denied.",
Resource: separator + bucket,
RequestID: resp.Header.Get("x-amz-request-id"),
HostID: resp.Header.Get("x-amz-id-2"),
@@ -434,7 +438,7 @@ func (a apiCore) deleteBucket(bucket string) error {
case http.StatusForbidden:
errorResponse = ErrorResponse{
Code: "AccessDenied",
Message: "Access Denied",
Message: "Access Denied.",
Resource: separator + bucket,
RequestID: resp.Header.Get("x-amz-request-id"),
HostID: resp.Header.Get("x-amz-id-2"),
@@ -442,7 +446,7 @@ func (a apiCore) deleteBucket(bucket string) error {
case http.StatusConflict:
errorResponse = ErrorResponse{
Code: "Conflict",
Message: "Bucket not empty",
Message: "Bucket not empty.",
Resource: separator + bucket,
RequestID: resp.Header.Get("x-amz-request-id"),
HostID: resp.Header.Get("x-amz-id-2"),
@@ -520,7 +524,9 @@ func (a apiCore) putObjectRequest(bucket, object, contentType string, md5SumByte
return nil, err
}
// set Content-MD5 as base64 encoded md5
r.Set("Content-MD5", base64.StdEncoding.EncodeToString(md5SumBytes))
if md5SumBytes != nil {
r.Set("Content-MD5", base64.StdEncoding.EncodeToString(md5SumBytes))
}
r.Set("Content-Type", contentType)
r.req.ContentLength = size
return r, nil
@@ -552,6 +558,13 @@ func (a apiCore) presignedPostPolicy(p *PostPolicy) map[string]string {
t := time.Now().UTC()
r := new(request)
r.config = a.config
if r.config.Signature.isV2() {
policyBase64 := p.base64()
p.formData["policy"] = policyBase64
p.formData["AWSAccessKeyId"] = r.config.AccessKeyID
p.formData["signature"] = r.PostPresignSignatureV2(policyBase64)
return p.formData
}
credential := getCredential(r.config.AccessKeyID, r.config.Region, t)
p.addNewPolicy(policy{"eq", "$x-amz-date", t.Format(iso8601DateFormat)})
p.addNewPolicy(policy{"eq", "$x-amz-algorithm", authHeader})
@@ -562,7 +575,7 @@ func (a apiCore) presignedPostPolicy(p *PostPolicy) map[string]string {
p.formData["x-amz-algorithm"] = authHeader
p.formData["x-amz-credential"] = credential
p.formData["x-amz-date"] = t.Format(iso8601DateFormat)
p.formData["x-amz-signature"] = r.PostPresignSignature(policyBase64, t)
p.formData["x-amz-signature"] = r.PostPresignSignatureV4(policyBase64, t)
return p.formData
}
@@ -572,10 +585,13 @@ func (a apiCore) presignedPutObject(bucket, object string, expires int64) (strin
HTTPMethod: "PUT",
HTTPPath: separator + bucket + separator + object,
}
r, err := newPresignedRequest(op, a.config, strconv.FormatInt(expires, 10))
r, err := newPresignedRequest(op, a.config, expires)
if err != nil {
return "", err
}
if r.config.Signature.isV2() {
return r.PreSignV2()
}
return r.PreSignV4()
}
@@ -585,7 +601,7 @@ func (a apiCore) presignedGetObjectRequest(bucket, object string, expires, offse
HTTPMethod: "GET",
HTTPPath: separator + bucket + separator + object,
}
r, err := newPresignedRequest(op, a.config, strconv.FormatInt(expires, 10))
r, err := newPresignedRequest(op, a.config, expires)
if err != nil {
return nil, err
}
@@ -604,11 +620,14 @@ func (a apiCore) presignedGetObject(bucket, object string, expires, offset, leng
if err := invalidArgumentError(object); err != nil {
return "", err
}
req, err := a.presignedGetObjectRequest(bucket, object, expires, offset, length)
r, err := a.presignedGetObjectRequest(bucket, object, expires, offset, length)
if err != nil {
return "", err
}
return req.PreSignV4()
if r.config.Signature.isV2() {
return r.PreSignV2()
}
return r.PreSignV4()
}
// getObjectRequest wrapper creates a new getObject request
@@ -623,12 +642,13 @@ func (a apiCore) getObjectRequest(bucket, object string, offset, length int64) (
return nil, err
}
switch {
case length > 0 && offset > 0:
case length > 0 && offset >= 0:
r.Set("Range", fmt.Sprintf("bytes=%d-%d", offset, offset+length-1))
case offset > 0 && length == 0:
r.Set("Range", fmt.Sprintf("bytes=%d-", offset))
case length > 0 && offset == 0:
r.Set("Range", fmt.Sprintf("bytes=-%d", length))
// A negative length requests only the last -length bytes of the object
case length < 0 && offset == 0:
r.Set("Range", fmt.Sprintf("bytes=%d", length))
}
return r, nil
}
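
The offset/length cases above map directly onto standard HTTP Range header values. A stand-alone sketch of the same mapping (the rangeHeader helper below is made up for illustration and is not part of minio-go):

package main

import "fmt"

// rangeHeader mirrors the switch in getObjectRequest and returns the
// Range header value each offset/length combination produces.
func rangeHeader(offset, length int64) string {
	switch {
	case length > 0 && offset >= 0:
		// bytes offset through offset+length-1, inclusive
		return fmt.Sprintf("bytes=%d-%d", offset, offset+length-1)
	case offset > 0 && length == 0:
		// everything from offset to the end of the object
		return fmt.Sprintf("bytes=%d-", offset)
	case length < 0 && offset == 0:
		// only the last -length bytes of the object
		return fmt.Sprintf("bytes=%d", length)
	}
	return "" // full object, no Range header set
}

func main() {
	fmt.Println(rangeHeader(100, 50)) // bytes=100-149
	fmt.Println(rangeHeader(100, 0))  // bytes=100-
	fmt.Println(rangeHeader(0, -50))  // bytes=-50
	fmt.Println(rangeHeader(0, 0))    // (empty: full object)
}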
@@ -638,7 +658,8 @@ func (a apiCore) getObjectRequest(bucket, object string, offset, length int64) (
// Additionally this function also takes range arguments to download the specified
// range bytes of an object. Setting offset and length = 0 will download the full object.
//
// For more information about the HTTP Range header, go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35.
// For more information about the HTTP Range header,
// see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35.
func (a apiCore) getObject(bucket, object string, offset, length int64) (io.ReadCloser, ObjectStat, error) {
if err := invalidArgumentError(object); err != nil {
return nil, ObjectStat{}, err
@@ -664,7 +685,7 @@ func (a apiCore) getObject(bucket, object string, offset, length int64) (io.Read
if err != nil {
return nil, ObjectStat{}, ErrorResponse{
Code: "InternalError",
Message: "Last-Modified time format not recognized, please report this issue at https://github.com/minio/minio-go/issues",
Message: "Last-Modified time format not recognized, please report this issue at https://github.com/minio/minio-go/issues.",
RequestID: resp.Header.Get("x-amz-request-id"),
HostID: resp.Header.Get("x-amz-id-2"),
}
@@ -726,7 +747,7 @@ func (a apiCore) deleteObject(bucket, object string) error {
case http.StatusForbidden:
errorResponse = ErrorResponse{
Code: "AccessDenied",
Message: "Access Denied",
Message: "Access Denied.",
Resource: separator + bucket + separator + object,
RequestID: resp.Header.Get("x-amz-request-id"),
HostID: resp.Header.Get("x-amz-id-2"),
@@ -788,7 +809,7 @@ func (a apiCore) headObject(bucket, object string) (ObjectStat, error) {
case http.StatusForbidden:
errorResponse = ErrorResponse{
Code: "AccessDenied",
Message: "Access Denied",
Message: "Access Denied.",
Resource: separator + bucket + separator + object,
RequestID: resp.Header.Get("x-amz-request-id"),
HostID: resp.Header.Get("x-amz-id-2"),
@@ -811,7 +832,7 @@ func (a apiCore) headObject(bucket, object string) (ObjectStat, error) {
if err != nil {
return ObjectStat{}, ErrorResponse{
Code: "InternalError",
Message: "Content-Length not recognized, please report this issue at https://github.com/minio/minio-go/issues",
Message: "Content-Length not recognized, please report this issue at https://github.com/minio/minio-go/issues.",
RequestID: resp.Header.Get("x-amz-request-id"),
HostID: resp.Header.Get("x-amz-id-2"),
}
@@ -820,7 +841,7 @@ func (a apiCore) headObject(bucket, object string) (ObjectStat, error) {
if err != nil {
return ObjectStat{}, ErrorResponse{
Code: "InternalError",
Message: "Last-Modified time format not recognized, please report this issue at https://github.com/minio/minio-go/issues",
Message: "Last-Modified time format not recognized, please report this issue at https://github.com/minio/minio-go/issues.",
RequestID: resp.Header.Get("x-amz-request-id"),
HostID: resp.Header.Get("x-amz-id-2"),
}
@@ -867,7 +888,7 @@ func (a apiCore) listBuckets() (listAllMyBucketsResult, error) {
if resp.StatusCode == http.StatusTemporaryRedirect {
return listAllMyBucketsResult{}, ErrorResponse{
Code: "AccessDenied",
Message: "Anonymous access is forbidden for this operation",
Message: "Anonymous access is forbidden for this operation.",
RequestID: resp.Header.Get("x-amz-request-id"),
HostID: resp.Header.Get("x-amz-id-2"),
}


@@ -221,14 +221,14 @@ func (a apiCore) abortMultipartUpload(bucket, object, uploadID string) error {
case http.StatusForbidden:
errorResponse = ErrorResponse{
Code: "AccessDenied",
Message: "Access Denied",
Message: "Access Denied.",
Resource: separator + bucket + separator + object,
RequestID: resp.Header.Get("x-amz-request-id"),
}
default:
errorResponse = ErrorResponse{
Code: resp.Status,
Message: "",
Message: "Unknown error, please report this at https://github.com/minio/minio-go-legacy/issues.",
Resource: separator + bucket + separator + object,
RequestID: resp.Header.Get("x-amz-request-id"),
}
@@ -299,7 +299,9 @@ func (a apiCore) uploadPartRequest(bucket, object, uploadID string, md5SumBytes
return nil, err
}
// set Content-MD5 as base64 encoded md5
r.Set("Content-MD5", base64.StdEncoding.EncodeToString(md5SumBytes))
if md5SumBytes != nil {
r.Set("Content-MD5", base64.StdEncoding.EncodeToString(md5SumBytes))
}
r.req.ContentLength = size
return r, nil
}


@@ -149,6 +149,9 @@ var regions = map[string]string{
"s3-ap-northeast-1.amazonaws.com": "ap-northeast-1",
"s3-sa-east-1.amazonaws.com": "sa-east-1",
"s3.cn-north-1.amazonaws.com.cn": "cn-north-1",
// Add google cloud storage as one of the regions
"storage.googleapis.com": "google",
}
// getRegion returns a region based on its endpoint mapping.
@@ -161,12 +164,38 @@ func getRegion(host string) (region string) {
return "milkyway"
}
// SignatureType is type of signature to be used for a request
type SignatureType int
// Different types of supported signatures - default is Latest, i.e. SignatureV4
const (
Latest SignatureType = iota
SignatureV4
SignatureV2
)
// isV2 - is signature SignatureV2?
func (s SignatureType) isV2() bool {
return s == SignatureV2
}
// isV4 - is signature SignatureV4?
func (s SignatureType) isV4() bool {
return s == SignatureV4
}
// isLatest - is signature Latest?
func (s SignatureType) isLatest() bool {
return s == Latest
}
// Config - main configuration struct used by all to set endpoint, credentials, and other options for requests.
type Config struct {
// Standard options
AccessKeyID string
SecretAccessKey string
Endpoint string
Signature SignatureType
// Advanced options
// Specify this to get server response in non XML style if server supports it
@@ -234,13 +263,37 @@ func New(config Config) (API, error) {
hostSplits := strings.SplitN(u.Host, ".", 2)
u.Host = hostSplits[1]
}
matchGoogle, _ := filepath.Match("*.storage.googleapis.com", u.Host)
if matchGoogle {
config.isVirtualStyle = true
hostSplits := strings.SplitN(u.Host, ".", 2)
u.Host = hostSplits[1]
}
config.Region = getRegion(u.Host)
if config.Region == "google" {
// Google cloud storage is signature V2
config.Signature = SignatureV2
}
}
config.SetUserAgent(LibraryName, LibraryVersion, runtime.GOOS, runtime.GOARCH)
config.isUserAgentSet = false // default
return api{apiCore{&config}}, nil
}
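
A short usage sketch of the endpoint handling above: pointing Config at storage.googleapis.com makes New() resolve the region to "google" and switch to signature V2 automatically, while other endpoints keep the default (Latest, i.e. V4) unless Signature is set explicitly. The keys below are placeholders.

package main

import (
	"log"

	"github.com/minio/minio-go"
)

func main() {
	// Google Cloud Storage: region becomes "google", signature V2 is forced.
	gcs, err := minio.New(minio.Config{
		AccessKeyID:     "GOOG-ACCESS-KEY", // placeholder
		SecretAccessKey: "GOOG-SECRET-KEY", // placeholder
		Endpoint:        "https://storage.googleapis.com",
	})
	if err != nil {
		log.Fatalln(err)
	}
	_ = gcs

	// Amazon S3: the default signature (Latest, i.e. V4) stays in effect;
	// set Signature: minio.SignatureV2 here to opt into V2 instead.
	s3, err := minio.New(minio.Config{
		AccessKeyID:     "AWS-ACCESS-KEY", // placeholder
		SecretAccessKey: "AWS-SECRET-KEY", // placeholder
		Endpoint:        "https://s3.amazonaws.com",
	})
	if err != nil {
		log.Fatalln(err)
	}
	_ = s3
}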
// PresignedPostPolicy return POST form data that can be used for object upload
func (a api) PresignedPostPolicy(p *PostPolicy) (map[string]string, error) {
if p.expiration.IsZero() {
return nil, errors.New("Expiration time must be specified")
}
if _, ok := p.formData["key"]; !ok {
return nil, errors.New("object key must be specified")
}
if _, ok := p.formData["bucket"]; !ok {
return nil, errors.New("bucket name must be specified")
}
return a.presignedPostPolicy(p), nil
}
/// Object operations
/// Expires maximum is 7 days - i.e. 604800 seconds - and minimum is 1 second
@@ -549,20 +602,6 @@ func (a api) continueObjectUpload(bucket, object, uploadID string, size int64, d
return nil
}
// PresignedPostPolicy return POST form data that can be used for object upload
func (a api) PresignedPostPolicy(p *PostPolicy) (map[string]string, error) {
if p.expiration.IsZero() {
return nil, errors.New("Expiration time must be specified")
}
if _, ok := p.formData["key"]; !ok {
return nil, errors.New("object key must be specified")
}
if _, ok := p.formData["bucket"]; !ok {
return nil, errors.New("bucket name must be specified")
}
return a.presignedPostPolicy(p), nil
}
// PutObject create an object in a bucket
//
// You must have WRITE permissions on a bucket to create an object
@@ -588,6 +627,21 @@ func (a api) PutObject(bucket, object, contentType string, size int64, data io.R
return nil
}
}
// Special handling just for Google Cloud Storage.
// TODO - we should remove this in future when we fully implement Resumable object upload.
if a.config.Region == "google" {
if size > maxPartSize {
return ErrorResponse{
Code: "EntityTooLarge",
Message: "Your proposed upload exceeds the maximum allowed object size.",
Resource: separator + bucket + separator + object,
}
}
if _, err := a.putObject(bucket, object, contentType, nil, size, ReadSeekCloser(data)); err != nil {
return err
}
return nil
}
switch {
case size < minimumPartSize && size > 0:
// Single Part use case, use PutObject directly


@@ -21,6 +21,24 @@ import (
"testing"
)
func TestSignature(t *testing.T) {
conf := new(Config)
if !conf.Signature.isLatest() {
t.Fatalf("Error")
}
conf.Signature = SignatureV2
if !conf.Signature.isV2() {
t.Fatalf("Error")
}
if conf.Signature.isV4() {
t.Fatalf("Error")
}
conf.Signature = SignatureV4
if !conf.Signature.isV4() {
t.Fatalf("Error")
}
}
func TestACLTypes(t *testing.T) {
want := map[string]bool{
"private": true,


@@ -51,7 +51,7 @@ func TestBucketOperations(t *testing.T) {
if err == nil {
t.Fatal("Error")
}
if err.Error() != "Access Denied" {
if err.Error() != "Access Denied." {
t.Fatal("Error")
}


@@ -152,7 +152,7 @@ func invalidObjectError(object string) error {
func invalidArgumentError(arg string) error {
errorResponse := ErrorResponse{
Code: "InvalidArgument",
Message: "Invalid Argument",
Message: "Invalid Argument.",
RequestID: "minio",
}
if strings.TrimSpace(arg) == "" || arg == "" {

Godeps/_workspace/src/github.com/minio/minio-go/io.go (generated, vendored, new file)

@@ -0,0 +1,67 @@
package minio
import "io"
// ReadSeekCloser wraps an io.Reader returning a ReaderSeekerCloser
func ReadSeekCloser(r io.Reader) ReaderSeekerCloser {
return ReaderSeekerCloser{r}
}
// ReaderSeekerCloser represents a reader that can also delegate io.Seeker and
// io.Closer interfaces to the underlying object if available.
type ReaderSeekerCloser struct {
r io.Reader
}
// Read reads up to len(p) bytes into p. It returns the number of bytes
// read (0 <= n <= len(p)) and any error encountered. Even if Read
// returns n < len(p), it may use all of p as scratch space during the call.
// If some data is available but not len(p) bytes, Read conventionally
// returns what is available instead of waiting for more.
//
// When Read encounters an error or end-of-file condition after
// successfully reading n > 0 bytes, it returns the number of
// bytes read. It may return the (non-nil) error from the same call
// or return the error (and n == 0) from a subsequent call.
// An instance of this general case is that a Reader returning
// a non-zero number of bytes at the end of the input stream may
// return either err == EOF or err == nil. The next Read should
// return 0, EOF.
func (r ReaderSeekerCloser) Read(p []byte) (int, error) {
switch t := r.r.(type) {
case io.Reader:
return t.Read(p)
}
return 0, nil
}
// Seek sets the offset for the next Read or Write to offset,
// interpreted according to whence: 0 means relative to the start of
// the file, 1 means relative to the current offset, and 2 means
// relative to the end. Seek returns the new offset relative to the
// start of the file and an error, if any.
//
// Seeking to an offset before the start of the file is an error.
//
// If the ReaderSeekerCloser is not an io.Seeker nothing will be done.
func (r ReaderSeekerCloser) Seek(offset int64, whence int) (int64, error) {
switch t := r.r.(type) {
case io.Seeker:
return t.Seek(offset, whence)
}
return int64(0), nil
}
// Close closes the ReaderSeekerCloser.
//
// The behavior of Close after the first call is undefined.
// Specific implementations may document their own behavior.
//
// If the ReaderSeekerCloser is not an io.Closer nothing will be done.
func (r ReaderSeekerCloser) Close() error {
switch t := r.r.(type) {
case io.Closer:
return t.Close()
}
return nil
}
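
ReadSeekCloser is what lets the PutObject path for Google Cloud Storage (earlier in this diff) hand a plain io.Reader to code that expects an io.ReadSeeker. A small usage sketch showing the delegation and the no-op fallbacks:

package main

import (
	"fmt"
	"strings"

	"github.com/minio/minio-go"
)

func main() {
	// *strings.Reader implements io.Seeker, so Seek is delegated.
	rs := minio.ReadSeekCloser(strings.NewReader("hello world"))
	pos, _ := rs.Seek(6, 0) // seek to "world"
	buf := make([]byte, 5)
	n, _ := rs.Read(buf)
	fmt.Println(pos, string(buf[:n])) // 6 world

	// *strings.Reader is not an io.Closer, so Close is a no-op.
	fmt.Println(rs.Close()) // <nil>
}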


@@ -0,0 +1,283 @@
package minio
import (
"encoding/hex"
"io"
"io/ioutil"
"net/http"
"regexp"
"strings"
"unicode/utf8"
)
// operation - rest operation
type operation struct {
HTTPServer string
HTTPMethod string
HTTPPath string
}
// request - a http request
type request struct {
req *http.Request
config *Config
body io.ReadSeeker
expires int64
}
// Do - start the request
func (r *request) Do() (resp *http.Response, err error) {
if r.config.AccessKeyID != "" && r.config.SecretAccessKey != "" {
if r.config.Signature.isV2() {
r.SignV2()
}
if r.config.Signature.isV4() || r.config.Signature.isLatest() {
r.SignV4()
}
}
transport := http.DefaultTransport
if r.config.Transport != nil {
transport = r.config.Transport
}
// do not use http.Client{}, while it may seem intuitive but the problem seems to be
// that http.Client{} internally follows redirects and there is no easier way to disable
// it from outside using a configuration parameter -
// this auto redirect causes complications in verifying subsequent errors
//
// The best is to use RoundTrip() directly, so the request comes back to the caller where
// we are going to handle such replies. And indeed that is the right thing to do here.
//
return transport.RoundTrip(r.req)
}
// Set - set additional headers if any
func (r *request) Set(key, value string) {
r.req.Header.Set(key, value)
}
// Get - get header values
func (r *request) Get(key string) string {
return r.req.Header.Get(key)
}
func path2BucketAndObject(path string) (bucketName, objectName string) {
pathSplits := strings.SplitN(path, "?", 2)
splits := strings.SplitN(pathSplits[0], separator, 3)
switch len(splits) {
case 0, 1:
bucketName = ""
objectName = ""
case 2:
bucketName = splits[1]
objectName = ""
case 3:
bucketName = splits[1]
objectName = splits[2]
}
return bucketName, objectName
}
// path2Object gives objectName from URL path
func path2Object(path string) (objectName string) {
_, objectName = path2BucketAndObject(path)
return
}
// path2Bucket gives bucketName from URL path
func path2Bucket(path string) (bucketName string) {
bucketName, _ = path2BucketAndObject(path)
return
}
// path2Query gives query part from URL path
func path2Query(path string) (query string) {
pathSplits := strings.SplitN(path, "?", 2)
if len(pathSplits) > 1 {
query = pathSplits[1]
}
return
}
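
A test-style sketch of how the three helpers above carve up an HTTPPath; it lives in package minio because the functions are unexported, and it assumes separator is "/" (the constant itself is defined elsewhere and not shown in this diff):

package minio

import "testing"

func TestPath2Helpers(t *testing.T) {
	const p = "/mybucket/path/to/object?uploads"
	if path2Bucket(p) != "mybucket" {
		t.Fatal("unexpected bucket name")
	}
	if path2Object(p) != "path/to/object" {
		t.Fatal("unexpected object name")
	}
	if path2Query(p) != "uploads" {
		t.Fatal("unexpected query")
	}
	// A bucket-only path yields neither object nor query.
	if path2Object("/mybucket") != "" || path2Query("/mybucket") != "" {
		t.Fatal("expected empty object and query")
	}
}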
// getURLEncodedPath encode the strings from UTF-8 byte representations to HTML hex escape sequences
//
// This is necessary since regular url.Parse() and url.Encode() functions do not support UTF-8;
// non-English characters cannot be parsed due to the nature in which url.Encode() is written.
//
// This function on the other hand is a direct replacement for url.Encode() technique to support
// pretty much every UTF-8 character.
func getURLEncodedPath(pathName string) string {
// if object matches reserved string, no need to encode them
reservedNames := regexp.MustCompile("^[a-zA-Z0-9-_.~/]+$")
if reservedNames.MatchString(pathName) {
return pathName
}
var encodedPathname string
for _, s := range pathName {
if 'A' <= s && s <= 'Z' || 'a' <= s && s <= 'z' || '0' <= s && s <= '9' { // §2.3 Unreserved characters (mark)
encodedPathname = encodedPathname + string(s)
continue
}
switch s {
case '-', '_', '.', '~', '/': // §2.3 Unreserved characters (mark)
encodedPathname = encodedPathname + string(s)
continue
default:
len := utf8.RuneLen(s)
if len < 0 {
// if utf8 cannot convert return the same string as is
return pathName
}
u := make([]byte, len)
utf8.EncodeRune(u, s)
for _, r := range u {
hex := hex.EncodeToString([]byte{r})
encodedPathname = encodedPathname + "%" + strings.ToUpper(hex)
}
}
}
return encodedPathname
}
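
And a companion test-style sketch of what getURLEncodedPath produces: paths made entirely of unreserved characters come back untouched, while everything else (spaces, multi-byte UTF-8 runes) is percent-encoded byte by byte in uppercase hex:

package minio

import "testing"

func TestGetURLEncodedPathSketch(t *testing.T) {
	cases := map[string]string{
		// entirely unreserved characters: returned as is
		"/bucket/simple-object_1.txt": "/bucket/simple-object_1.txt",
		// space is percent-encoded
		"/bucket/my object": "/bucket/my%20object",
		// multi-byte UTF-8 runes become one escape per byte
		"/bucket/säö": "/bucket/s%C3%A4%C3%B6",
	}
	for in, want := range cases {
		if got := getURLEncodedPath(in); got != want {
			t.Fatalf("getURLEncodedPath(%q) = %q, want %q", in, got, want)
		}
	}
}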
func (op *operation) getRequestURL(config Config) (url string) {
// parse URL for the combination of HTTPServer + HTTPPath
url = op.HTTPServer + separator
if !config.isVirtualStyle {
url += path2Bucket(op.HTTPPath)
}
objectName := getURLEncodedPath(path2Object(op.HTTPPath))
queryPath := path2Query(op.HTTPPath)
if objectName == "" && queryPath != "" {
url += "?" + queryPath
return
}
if objectName != "" && queryPath == "" {
if strings.HasSuffix(url, separator) {
url += objectName
} else {
url += separator + objectName
}
return
}
if objectName != "" && queryPath != "" {
if strings.HasSuffix(url, separator) {
url += objectName + "?" + queryPath
} else {
url += separator + objectName + "?" + queryPath
}
}
return
}
func newPresignedRequest(op *operation, config *Config, expires int64) (*request, error) {
// if no method default to POST
method := op.HTTPMethod
if method == "" {
method = "POST"
}
u := op.getRequestURL(*config)
// get a new HTTP request, for the requested method
req, err := http.NewRequest(method, u, nil)
if err != nil {
return nil, err
}
// set UserAgent
req.Header.Set("User-Agent", config.userAgent)
// set Accept header for response encoding style, if available
if config.AcceptType != "" {
req.Header.Set("Accept", config.AcceptType)
}
// save for subsequent use
r := new(request)
r.config = config
r.expires = expires
r.req = req
r.body = nil
return r, nil
}
// newUnauthenticatedRequest - instantiate a new unauthenticated request
func newUnauthenticatedRequest(op *operation, config *Config, body io.Reader) (*request, error) {
// if no method default to POST
method := op.HTTPMethod
if method == "" {
method = "POST"
}
u := op.getRequestURL(*config)
// get a new HTTP request, for the requested method
req, err := http.NewRequest(method, u, nil)
if err != nil {
return nil, err
}
// set UserAgent
req.Header.Set("User-Agent", config.userAgent)
// set Accept header for response encoding style, if available
if config.AcceptType != "" {
req.Header.Set("Accept", config.AcceptType)
}
// add body
switch {
case body == nil:
req.Body = nil
default:
req.Body = ioutil.NopCloser(body)
}
// save for subsequent use
r := new(request)
r.req = req
r.config = config
return r, nil
}
// newRequest - instantiate a new request
func newRequest(op *operation, config *Config, body io.ReadSeeker) (*request, error) {
// if no method default to POST
method := op.HTTPMethod
if method == "" {
method = "POST"
}
u := op.getRequestURL(*config)
// get a new HTTP request, for the requested method
req, err := http.NewRequest(method, u, nil)
if err != nil {
return nil, err
}
// set UserAgent
req.Header.Set("User-Agent", config.userAgent)
// set Accept header for response encoding style, if available
if config.AcceptType != "" {
req.Header.Set("Accept", config.AcceptType)
}
// add body
switch {
case body == nil:
req.Body = nil
default:
req.Body = ioutil.NopCloser(body)
}
// save for subsequent use
r := new(request)
r.config = config
r.req = req
r.body = body
return r, nil
}


@@ -0,0 +1,248 @@
/*
* Minio Go Library for Amazon S3 Legacy v2 Signature Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package minio
import (
"bytes"
"crypto/hmac"
"crypto/sha1"
"encoding/base64"
"errors"
"fmt"
"net/http"
"net/url"
"sort"
"strconv"
"strings"
"time"
)
// https://${S3_BUCKET}.s3.amazonaws.com/${S3_OBJECT}?AWSAccessKeyId=${S3_ACCESS_KEY}&Expires=${TIMESTAMP}&Signature=${SIGNATURE}
func (r *request) PreSignV2() (string, error) {
if r.config.AccessKeyID == "" || r.config.SecretAccessKey == "" {
return "", errors.New("presign requires accesskey and secretkey")
}
// Add date if not present
d := time.Now().UTC()
if date := r.Get("Date"); date == "" {
r.Set("Date", d.Format(http.TimeFormat))
}
epochExpires := d.Unix() + r.expires
var path string
if r.config.isVirtualStyle {
for k, v := range regions {
if v == r.config.Region {
path = "/" + strings.TrimSuffix(r.req.URL.Host, "."+k)
path += r.req.URL.Path
path = getURLEncodedPath(path)
break
}
}
} else {
path = getURLEncodedPath(r.req.URL.Path)
}
signText := fmt.Sprintf("%s\n\n\n%d\n%s", r.req.Method, epochExpires, path)
hm := hmac.New(sha1.New, []byte(r.config.SecretAccessKey))
hm.Write([]byte(signText))
query := r.req.URL.Query()
query.Set("AWSAccessKeyId", r.config.AccessKeyID)
query.Set("Expires", strconv.FormatInt(epochExpires, 10))
query.Set("Signature", base64.StdEncoding.EncodeToString(hm.Sum(nil)))
r.req.URL.RawQuery = query.Encode()
return r.req.URL.String(), nil
}
func (r *request) PostPresignSignatureV2(policyBase64 string) string {
hm := hmac.New(sha1.New, []byte(r.config.SecretAccessKey))
hm.Write([]byte(policyBase64))
signature := base64.StdEncoding.EncodeToString(hm.Sum(nil))
return signature
}
// Authorization = "AWS" + " " + AWSAccessKeyId + ":" + Signature;
// Signature = Base64( HMAC-SHA1( YourSecretAccessKeyID, UTF-8-Encoding-Of( StringToSign ) ) );
//
// StringToSign = HTTP-Verb + "\n" +
// Content-MD5 + "\n" +
// Content-Type + "\n" +
// Date + "\n" +
// CanonicalizedProtocolHeaders +
// CanonicalizedResource;
//
// CanonicalizedResource = [ "/" + Bucket ] +
// <HTTP-Request-URI, from the protocol name up to the query string> +
// [ subresource, if present. For example "?acl", "?location", "?logging", or "?torrent"];
//
// CanonicalizedProtocolHeaders = <described below>
// SignV2 the request before Do() (version 2.0)
func (r *request) SignV2() {
// Add date if not present
if date := r.Get("Date"); date == "" {
r.Set("Date", time.Now().UTC().Format(http.TimeFormat))
}
// Calculate HMAC for secretAccessKey
hm := hmac.New(sha1.New, []byte(r.config.SecretAccessKey))
hm.Write([]byte(r.getStringToSignV2()))
// prepare auth header
authHeader := new(bytes.Buffer)
authHeader.WriteString(fmt.Sprintf("AWS %s:", r.config.AccessKeyID))
encoder := base64.NewEncoder(base64.StdEncoding, authHeader)
encoder.Write(hm.Sum(nil))
encoder.Close()
// Set Authorization header
r.req.Header.Set("Authorization", authHeader.String())
}
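
For reference, a worked example of the V2 signing primitive used above: build the string-to-sign in the documented layout, HMAC-SHA1 it with the secret key, and base64-encode the result. All values are made up for illustration:

package main

import (
	"crypto/hmac"
	"crypto/sha1"
	"encoding/base64"
	"fmt"
)

func main() {
	// StringToSign layout, per the comment block above:
	// HTTP-Verb \n Content-MD5 \n Content-Type \n Date \n
	// CanonicalizedAmzHeaders + CanonicalizedResource
	stringToSign := "GET\n" +
		"\n" + // Content-MD5 (empty)
		"\n" + // Content-Type (empty)
		"Mon, 28 Dec 2015 15:19:34 GMT\n" +
		"x-amz-acl:private\n" + // canonicalized x-amz-* header
		"/mybucket/myobject" // canonicalized resource

	secretAccessKey := "EXAMPLE-SECRET-KEY" // placeholder
	hm := hmac.New(sha1.New, []byte(secretAccessKey))
	hm.Write([]byte(stringToSign))
	signature := base64.StdEncoding.EncodeToString(hm.Sum(nil))

	fmt.Println("Authorization: AWS EXAMPLE-ACCESS-KEY:" + signature)
}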
// From the Amazon docs:
//
// StringToSign = HTTP-Verb + "\n" +
// Content-MD5 + "\n" +
// Content-Type + "\n" +
// Date + "\n" +
// CanonicalizedProtocolHeaders +
// CanonicalizedResource;
func (r *request) getStringToSignV2() string {
buf := new(bytes.Buffer)
// write standard headers
r.writeDefaultHeaders(buf)
// write canonicalized protocol headers if any
r.writeCanonicalizedHeaders(buf)
// write canonicalized Query resources if any
r.writeCanonicalizedResource(buf)
return buf.String()
}
func (r *request) writeDefaultHeaders(buf *bytes.Buffer) {
buf.WriteString(r.req.Method)
buf.WriteByte('\n')
buf.WriteString(r.req.Header.Get("Content-MD5"))
buf.WriteByte('\n')
buf.WriteString(r.req.Header.Get("Content-Type"))
buf.WriteByte('\n')
buf.WriteString(r.req.Header.Get("Date"))
buf.WriteByte('\n')
}
func (r *request) writeCanonicalizedHeaders(buf *bytes.Buffer) {
var protoHeaders []string
vals := make(map[string][]string)
for k, vv := range r.req.Header {
// all the AMZ and GOOG headers should be lowercase
lk := strings.ToLower(k)
if strings.HasPrefix(lk, "x-amz") {
protoHeaders = append(protoHeaders, lk)
vals[lk] = vv
}
}
sort.Strings(protoHeaders)
for _, k := range protoHeaders {
buf.WriteString(k)
buf.WriteByte(':')
for idx, v := range vals[k] {
if idx > 0 {
buf.WriteByte(',')
}
if strings.Contains(v, "\n") {
// TODO: "Unfold" long headers that
// span multiple lines (as allowed by
// RFC 2616, section 4.2) by replacing
// the folding white-space (including
// new-line) by a single space.
buf.WriteString(v)
} else {
buf.WriteString(v)
}
}
buf.WriteByte('\n')
}
}
// Must be sorted:
var resourceList = []string{
"acl",
"location",
"logging",
"notification",
"partNumber",
"policy",
"response-content-type",
"response-content-language",
"response-expires",
"response-cache-control",
"response-content-disposition",
"response-content-encoding",
"requestPayment",
"torrent",
"uploadId",
"uploads",
"versionId",
"versioning",
"versions",
"website",
}
// From the Amazon docs:
//
// CanonicalizedResource = [ "/" + Bucket ] +
// <HTTP-Request-URI, from the protocol name up to the query string> +
// [ sub-resource, if present. For example "?acl", "?location", "?logging", or "?torrent"];
func (r *request) writeCanonicalizedResource(buf *bytes.Buffer) error {
requestURL := r.req.URL
if r.config.isVirtualStyle {
for k, v := range regions {
if v == r.config.Region {
path := "/" + strings.TrimSuffix(requestURL.Host, "."+k)
path += requestURL.Path
buf.WriteString(getURLEncodedPath(path))
break
}
}
} else {
buf.WriteString(getURLEncodedPath(requestURL.Path))
}
sort.Strings(resourceList)
if requestURL.RawQuery != "" {
var n int
vals, _ := url.ParseQuery(requestURL.RawQuery)
// loop through all the supported resourceList
for _, resource := range resourceList {
if vv, ok := vals[resource]; ok && len(vv) > 0 {
n++
// first element
switch n {
case 1:
buf.WriteByte('?')
// the rest
default:
buf.WriteByte('&')
}
buf.WriteString(resource)
// request parameters
if len(vv[0]) > 0 {
buf.WriteByte('=')
buf.WriteString(url.QueryEscape(vv[0]))
}
}
}
}
return nil
}


@@ -0,0 +1,228 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package minio
import (
"bytes"
"encoding/hex"
"errors"
"net/http"
"sort"
"strconv"
"strings"
"time"
)
const (
authHeader = "AWS4-HMAC-SHA256"
iso8601DateFormat = "20060102T150405Z"
yyyymmdd = "20060102"
)
///
/// Excerpts from @lsegal - https://github.com/aws/aws-sdk-js/issues/659#issuecomment-120477258
///
/// User-Agent:
///
/// This is ignored from signing because signing this causes problems with generating pre-signed URLs
/// (that are executed by other agents) or when customers pass requests through proxies, which may
/// modify the user-agent.
///
/// Content-Length:
///
/// This is ignored from signing because generating a pre-signed URL should not provide a content-length
/// constraint, specifically when vending a S3 pre-signed PUT URL. The corollary to this is that when
/// sending regular requests (non-pre-signed), the signature contains a checksum of the body, which
/// implicitly validates the payload length (since changing the number of bytes would change the checksum)
/// and therefore this header is not valuable in the signature.
///
/// Content-Type:
///
/// Signing this header causes quite a number of problems in browser environments, where browsers
/// like to modify and normalize the content-type header in different ways. There is more information
/// on this in https://github.com/aws/aws-sdk-js/issues/244. Avoiding this field simplifies logic
/// and reduces the possibility of future bugs
///
/// Authorization:
///
/// Is skipped for obvious reasons
///
var ignoredHeaders = map[string]bool{
"Authorization": true,
"Content-Type": true,
"Content-Length": true,
"User-Agent": true,
}
// getHashedPayload get the hexadecimal value of the SHA256 hash of the request payload
func (r *request) getHashedPayload() string {
hash := func() string {
switch {
case r.expires != 0:
return "UNSIGNED-PAYLOAD"
case r.body == nil:
return hex.EncodeToString(sum256([]byte{}))
default:
sum256Bytes, _ := sum256Reader(r.body)
return hex.EncodeToString(sum256Bytes)
}
}
hashedPayload := hash()
if hashedPayload != "UNSIGNED-PAYLOAD" {
r.req.Header.Set("X-Amz-Content-Sha256", hashedPayload)
}
return hashedPayload
}
// getCanonicalHeaders generate a list of request headers with their values
func (r *request) getCanonicalHeaders() string {
var headers []string
vals := make(map[string][]string)
for k, vv := range r.req.Header {
if _, ok := ignoredHeaders[http.CanonicalHeaderKey(k)]; ok {
continue // ignored header
}
headers = append(headers, strings.ToLower(k))
vals[strings.ToLower(k)] = vv
}
headers = append(headers, "host")
sort.Strings(headers)
var buf bytes.Buffer
for _, k := range headers {
buf.WriteString(k)
buf.WriteByte(':')
switch {
case k == "host":
buf.WriteString(r.req.URL.Host)
fallthrough
default:
for idx, v := range vals[k] {
if idx > 0 {
buf.WriteByte(',')
}
buf.WriteString(v)
}
buf.WriteByte('\n')
}
}
return buf.String()
}
// getSignedHeaders generate a string i.e alphabetically sorted, semicolon-separated list of lowercase request header names
func (r *request) getSignedHeaders() string {
var headers []string
for k := range r.req.Header {
if _, ok := ignoredHeaders[http.CanonicalHeaderKey(k)]; ok {
continue // ignored header
}
headers = append(headers, strings.ToLower(k))
}
headers = append(headers, "host")
sort.Strings(headers)
return strings.Join(headers, ";")
}
// getCanonicalRequest generate a canonical request of style
//
// canonicalRequest =
// <HTTPMethod>\n
// <CanonicalURI>\n
// <CanonicalQueryString>\n
// <CanonicalHeaders>\n
// <SignedHeaders>\n
// <HashedPayload>
//
func (r *request) getCanonicalRequest(hashedPayload string) string {
r.req.URL.RawQuery = strings.Replace(r.req.URL.Query().Encode(), "+", "%20", -1)
canonicalRequest := strings.Join([]string{
r.req.Method,
getURLEncodedPath(r.req.URL.Path),
r.req.URL.RawQuery,
r.getCanonicalHeaders(),
r.getSignedHeaders(),
hashedPayload,
}, "\n")
return canonicalRequest
}
// getStringToSign a string based on selected query values
func (r *request) getStringToSignV4(canonicalRequest string, t time.Time) string {
stringToSign := authHeader + "\n" + t.Format(iso8601DateFormat) + "\n"
stringToSign = stringToSign + getScope(r.config.Region, t) + "\n"
stringToSign = stringToSign + hex.EncodeToString(sum256([]byte(canonicalRequest)))
return stringToSign
}
// Presign the request, in accordance with - http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html
func (r *request) PreSignV4() (string, error) {
if r.config.AccessKeyID == "" && r.config.SecretAccessKey == "" {
return "", errors.New("presign requires accesskey and secretkey")
}
r.SignV4()
return r.req.URL.String(), nil
}
func (r *request) PostPresignSignatureV4(policyBase64 string, t time.Time) string {
signingkey := getSigningKey(r.config.SecretAccessKey, r.config.Region, t)
signature := getSignature(signingkey, policyBase64)
return signature
}
// SignV4 the request before Do(), in accordance with - http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html
func (r *request) SignV4() {
query := r.req.URL.Query()
if r.expires != 0 {
query.Set("X-Amz-Algorithm", authHeader)
}
t := time.Now().UTC()
// Add date if not present
if r.expires != 0 {
query.Set("X-Amz-Date", t.Format(iso8601DateFormat))
query.Set("X-Amz-Expires", strconv.FormatInt(r.expires, 10))
} else {
r.Set("X-Amz-Date", t.Format(iso8601DateFormat))
}
hashedPayload := r.getHashedPayload()
signedHeaders := r.getSignedHeaders()
if r.expires != 0 {
query.Set("X-Amz-SignedHeaders", signedHeaders)
}
credential := getCredential(r.config.AccessKeyID, r.config.Region, t)
if r.expires != 0 {
query.Set("X-Amz-Credential", credential)
r.req.URL.RawQuery = query.Encode()
}
canonicalRequest := r.getCanonicalRequest(hashedPayload)
stringToSign := r.getStringToSignV4(canonicalRequest, t)
signingKey := getSigningKey(r.config.SecretAccessKey, r.config.Region, t)
signature := getSignature(signingKey, stringToSign)
if r.expires != 0 {
r.req.URL.RawQuery += "&X-Amz-Signature=" + signature
} else {
// final Authorization header
parts := []string{
authHeader + " Credential=" + credential,
"SignedHeaders=" + signedHeaders,
"Signature=" + signature,
}
auth := strings.Join(parts, ", ")
r.Set("Authorization", auth)
}
}
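
And the V4 counterpart: a self-contained sketch of the signing-key derivation that getSigningKey performs (per the AWS documentation linked above) and of how the final signature is computed from the string-to-sign; the secret, region, date and canonical-request hash are placeholders.

package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"time"
)

// hmacSHA256 is a local helper for the key-derivation chain.
func hmacSHA256(key, data []byte) []byte {
	h := hmac.New(sha256.New, key)
	h.Write(data)
	return h.Sum(nil)
}

func main() {
	secret := "EXAMPLE-SECRET-KEY" // placeholder
	region := "us-east-1"
	t := time.Date(2015, 12, 28, 0, 0, 0, 0, time.UTC)

	// signingKey = HMAC(HMAC(HMAC(HMAC("AWS4"+secret, yyyymmdd), region), "s3"), "aws4_request")
	dateKey := hmacSHA256([]byte("AWS4"+secret), []byte(t.Format("20060102")))
	regionKey := hmacSHA256(dateKey, []byte(region))
	serviceKey := hmacSHA256(regionKey, []byte("s3"))
	signingKey := hmacSHA256(serviceKey, []byte("aws4_request"))

	// signature = hex(HMAC(signingKey, stringToSign))
	stringToSign := "AWS4-HMAC-SHA256\n" +
		"20151228T000000Z\n" +
		"20151228/us-east-1/s3/aws4_request\n" +
		"<hex sha256 of the canonical request>" // placeholder
	fmt.Println(hex.EncodeToString(hmacSHA256(signingKey, []byte(stringToSign))))
}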


@@ -1,498 +0,0 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package minio
import (
"bytes"
"encoding/hex"
"errors"
"io"
"io/ioutil"
"net/http"
"regexp"
"sort"
"strings"
"time"
"unicode/utf8"
)
// operation - rest operation
type operation struct {
HTTPServer string
HTTPMethod string
HTTPPath string
}
// request - a http request
type request struct {
req *http.Request
config *Config
body io.ReadSeeker
expires string
}
const (
authHeader = "AWS4-HMAC-SHA256"
iso8601DateFormat = "20060102T150405Z"
yyyymmdd = "20060102"
)
///
/// Excerpts from @lsegal - https://github.com/aws/aws-sdk-js/issues/659#issuecomment-120477258
///
/// User-Agent:
///
/// This is ignored from signing because signing this causes problems with generating pre-signed URLs
/// (that are executed by other agents) or when customers pass requests through proxies, which may
/// modify the user-agent.
///
/// Content-Length:
///
/// This is ignored from signing because generating a pre-signed URL should not provide a content-length
/// constraint, specifically when vending a S3 pre-signed PUT URL. The corollary to this is that when
/// sending regular requests (non-pre-signed), the signature contains a checksum of the body, which
/// implicitly validates the payload length (since changing the number of bytes would change the checksum)
/// and therefore this header is not valuable in the signature.
///
/// Content-Type:
///
/// Signing this header causes quite a number of problems in browser environments, where browsers
/// like to modify and normalize the content-type header in different ways. There is more information
/// on this in https://github.com/aws/aws-sdk-js/issues/244. Avoiding this field simplifies logic
/// and reduces the possibility of future bugs
///
/// Authorization:
///
/// Is skipped for obvious reasons
///
var ignoredHeaders = map[string]bool{
"Authorization": true,
"Content-Type": true,
"Content-Length": true,
"User-Agent": true,
}
// getURLEncodedPath encode the strings from UTF-8 byte representations to HTML hex escape sequences
//
// This is necessary since regular url.Parse() and url.Encode() functions do not support UTF-8
// non english characters cannot be parsed due to the nature in which url.Encode() is written
//
// This function on the other hand is a direct replacement for url.Encode() technique to support
// pretty much every UTF-8 character.
func getURLEncodedPath(pathName string) string {
// if object matches reserved string, no need to encode them
reservedNames := regexp.MustCompile("^[a-zA-Z0-9-_.~/]+$")
if reservedNames.MatchString(pathName) {
return pathName
}
var encodedPathname string
for _, s := range pathName {
if 'A' <= s && s <= 'Z' || 'a' <= s && s <= 'z' || '0' <= s && s <= '9' { // §2.3 Unreserved characters (mark)
encodedPathname = encodedPathname + string(s)
continue
}
switch s {
case '-', '_', '.', '~', '/': // §2.3 Unreserved characters (mark)
encodedPathname = encodedPathname + string(s)
continue
default:
len := utf8.RuneLen(s)
if len < 0 {
// if utf8 cannot convert return the same string as is
return pathName
}
u := make([]byte, len)
utf8.EncodeRune(u, s)
for _, r := range u {
hex := hex.EncodeToString([]byte{r})
encodedPathname = encodedPathname + "%" + strings.ToUpper(hex)
}
}
}
return encodedPathname
}
func path2BucketAndObject(path string) (bucketName, objectName string) {
pathSplits := strings.SplitN(path, "?", 2)
splits := strings.SplitN(pathSplits[0], separator, 3)
switch len(splits) {
case 0, 1:
bucketName = ""
objectName = ""
case 2:
bucketName = splits[1]
objectName = ""
case 3:
bucketName = splits[1]
objectName = splits[2]
}
return bucketName, objectName
}
// path2Object gives objectName from URL path
func path2Object(path string) (objectName string) {
_, objectName = path2BucketAndObject(path)
return
}
// path2Bucket gives bucketName from URL path
func path2Bucket(path string) (bucketName string) {
bucketName, _ = path2BucketAndObject(path)
return
}
// path2Query gives query part from URL path
func path2Query(path string) (query string) {
pathSplits := strings.SplitN(path, "?", 2)
if len(pathSplits) > 1 {
query = pathSplits[1]
}
return
}
func (op *operation) getRequestURL(config Config) (url string) {
// parse URL for the combination of HTTPServer + HTTPPath
url = op.HTTPServer + separator
if !config.isVirtualStyle {
url += path2Bucket(op.HTTPPath)
}
objectName := getURLEncodedPath(path2Object(op.HTTPPath))
queryPath := path2Query(op.HTTPPath)
if objectName == "" && queryPath != "" {
url += "?" + queryPath
return
}
if objectName != "" && queryPath == "" {
if strings.HasSuffix(url, separator) {
url += objectName
} else {
url += separator + objectName
}
return
}
if objectName != "" && queryPath != "" {
if strings.HasSuffix(url, separator) {
url += objectName + "?" + queryPath
} else {
url += separator + objectName + "?" + queryPath
}
}
return
}
func newPresignedRequest(op *operation, config *Config, expires string) (*request, error) {
// if no method default to POST
method := op.HTTPMethod
if method == "" {
method = "POST"
}
u := op.getRequestURL(*config)
// get a new HTTP request, for the requested method
req, err := http.NewRequest(method, u, nil)
if err != nil {
return nil, err
}
// set UserAgent
req.Header.Set("User-Agent", config.userAgent)
// set Accept header for response encoding style, if available
if config.AcceptType != "" {
req.Header.Set("Accept", config.AcceptType)
}
// save for subsequent use
r := new(request)
r.config = config
r.expires = expires
r.req = req
r.body = nil
return r, nil
}
// newUnauthenticatedRequest - instantiate a new unauthenticated request
func newUnauthenticatedRequest(op *operation, config *Config, body io.Reader) (*request, error) {
// if no method default to POST
method := op.HTTPMethod
if method == "" {
method = "POST"
}
u := op.getRequestURL(*config)
// get a new HTTP request, for the requested method
req, err := http.NewRequest(method, u, nil)
if err != nil {
return nil, err
}
// set UserAgent
req.Header.Set("User-Agent", config.userAgent)
// set Accept header for response encoding style, if available
if config.AcceptType != "" {
req.Header.Set("Accept", config.AcceptType)
}
// add body
switch {
case body == nil:
req.Body = nil
default:
req.Body = ioutil.NopCloser(body)
}
// save for subsequent use
r := new(request)
r.req = req
r.config = config
return r, nil
}
// newRequest - instantiate a new request
func newRequest(op *operation, config *Config, body io.ReadSeeker) (*request, error) {
// if no method default to POST
method := op.HTTPMethod
if method == "" {
method = "POST"
}
u := op.getRequestURL(*config)
// get a new HTTP request, for the requested method
req, err := http.NewRequest(method, u, nil)
if err != nil {
return nil, err
}
// set UserAgent
req.Header.Set("User-Agent", config.userAgent)
// set Accept header for response encoding style, if available
if config.AcceptType != "" {
req.Header.Set("Accept", config.AcceptType)
}
// add body
switch {
case body == nil:
req.Body = nil
default:
req.Body = ioutil.NopCloser(body)
}
// save for subsequent use
r := new(request)
r.config = config
r.req = req
r.body = body
return r, nil
}
// Do - start the request
func (r *request) Do() (resp *http.Response, err error) {
if r.config.AccessKeyID != "" && r.config.SecretAccessKey != "" {
r.SignV4()
}
transport := http.DefaultTransport
if r.config.Transport != nil {
transport = r.config.Transport
}
// do not use http.Client{}, while it may seem intuitive but the problem seems to be
// that http.Client{} internally follows redirects and there is no easier way to disable
// it from outside using a configuration parameter -
// this auto redirect causes complications in verifying subsequent errors
//
// The best is to use RoundTrip() directly, so the request comes back to the caller where
// we are going to handle such replies. And indeed that is the right thing to do here.
//
return transport.RoundTrip(r.req)
}
// Set - set additional headers if any
func (r *request) Set(key, value string) {
r.req.Header.Set(key, value)
}
// Get - get header values
func (r *request) Get(key string) string {
return r.req.Header.Get(key)
}
// getHashedPayload get the hexadecimal value of the SHA256 hash of the request payload
func (r *request) getHashedPayload() string {
hash := func() string {
switch {
case r.expires != "":
return "UNSIGNED-PAYLOAD"
case r.body == nil:
return hex.EncodeToString(sum256([]byte{}))
default:
sum256Bytes, _ := sum256Reader(r.body)
return hex.EncodeToString(sum256Bytes)
}
}
hashedPayload := hash()
if hashedPayload != "UNSIGNED-PAYLOAD" {
r.req.Header.Set("X-Amz-Content-Sha256", hashedPayload)
}
return hashedPayload
}
// getCanonicalHeaders generate a list of request headers with their values
func (r *request) getCanonicalHeaders() string {
var headers []string
vals := make(map[string][]string)
for k, vv := range r.req.Header {
if _, ok := ignoredHeaders[http.CanonicalHeaderKey(k)]; ok {
continue // ignored header
}
headers = append(headers, strings.ToLower(k))
vals[strings.ToLower(k)] = vv
}
headers = append(headers, "host")
sort.Strings(headers)
var buf bytes.Buffer
for _, k := range headers {
buf.WriteString(k)
buf.WriteByte(':')
switch {
case k == "host":
buf.WriteString(r.req.URL.Host)
fallthrough
default:
for idx, v := range vals[k] {
if idx > 0 {
buf.WriteByte(',')
}
buf.WriteString(v)
}
buf.WriteByte('\n')
}
}
return buf.String()
}
// getSignedHeaders generate a string i.e alphabetically sorted, semicolon-separated list of lowercase request header names
func (r *request) getSignedHeaders() string {
var headers []string
for k := range r.req.Header {
if _, ok := ignoredHeaders[http.CanonicalHeaderKey(k)]; ok {
continue // ignored header
}
headers = append(headers, strings.ToLower(k))
}
headers = append(headers, "host")
sort.Strings(headers)
return strings.Join(headers, ";")
}
// getCanonicalRequest generate a canonical request of style
//
// canonicalRequest =
// <HTTPMethod>\n
// <CanonicalURI>\n
// <CanonicalQueryString>\n
// <CanonicalHeaders>\n
// <SignedHeaders>\n
// <HashedPayload>
//
func (r *request) getCanonicalRequest(hashedPayload string) string {
r.req.URL.RawQuery = strings.Replace(r.req.URL.Query().Encode(), "+", "%20", -1)
canonicalRequest := strings.Join([]string{
r.req.Method,
getURLEncodedPath(r.req.URL.Path),
r.req.URL.RawQuery,
r.getCanonicalHeaders(),
r.getSignedHeaders(),
hashedPayload,
}, "\n")
return canonicalRequest
}
// getStringToSign a string based on selected query values
func (r *request) getStringToSign(canonicalRequest string, t time.Time) string {
stringToSign := authHeader + "\n" + t.Format(iso8601DateFormat) + "\n"
stringToSign = stringToSign + getScope(r.config.Region, t) + "\n"
stringToSign = stringToSign + hex.EncodeToString(sum256([]byte(canonicalRequest)))
return stringToSign
}
// Presign the request, in accordance with - http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html
func (r *request) PreSignV4() (string, error) {
if r.config.AccessKeyID == "" && r.config.SecretAccessKey == "" {
return "", errors.New("presign requires accesskey and secretkey")
}
r.SignV4()
return r.req.URL.String(), nil
}
func (r *request) PostPresignSignature(policyBase64 string, t time.Time) string {
signingkey := getSigningKey(r.config.SecretAccessKey, r.config.Region, t)
signature := getSignature(signingkey, policyBase64)
return signature
}
// SignV4 the request before Do(), in accordance with - http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html
func (r *request) SignV4() {
query := r.req.URL.Query()
if r.expires != "" {
query.Set("X-Amz-Algorithm", authHeader)
}
t := time.Now().UTC()
// Add date if not present
if r.expires != "" {
query.Set("X-Amz-Date", t.Format(iso8601DateFormat))
query.Set("X-Amz-Expires", r.expires)
} else {
r.Set("X-Amz-Date", t.Format(iso8601DateFormat))
}
hashedPayload := r.getHashedPayload()
signedHeaders := r.getSignedHeaders()
if r.expires != "" {
query.Set("X-Amz-SignedHeaders", signedHeaders)
}
credential := getCredential(r.config.AccessKeyID, r.config.Region, t)
if r.expires != "" {
query.Set("X-Amz-Credential", credential)
r.req.URL.RawQuery = query.Encode()
}
canonicalRequest := r.getCanonicalRequest(hashedPayload)
stringToSign := r.getStringToSign(canonicalRequest, t)
signingKey := getSigningKey(r.config.SecretAccessKey, r.config.Region, t)
signature := getSignature(signingKey, stringToSign)
if r.expires != "" {
r.req.URL.RawQuery += "&X-Amz-Signature=" + signature
} else {
// final Authorization header
parts := []string{
authHeader + " Credential=" + credential,
"SignedHeaders=" + signedHeaders,
"Signature=" + signature,
}
auth := strings.Join(parts, ", ")
r.Set("Authorization", auth)
}
}