
Update minio-go

Alexander Neumann 2016-08-21 16:14:22 +02:00
parent ca14942c80
commit e893be3dec
32 changed files with 4014 additions and 1764 deletions

vendor/manifest
@@ -34,7 +34,7 @@
    {
        "importpath": "github.com/minio/minio-go",
        "repository": "https://github.com/minio/minio-go",
-       "revision": "76b385d8c68e7079c5fe6182570a6bd51cb36905",
+       "revision": "9e734013294ab153b0bdbe182738bcddd46f1947",
        "branch": "master"
    },
    {
@@ -1,21 +1,22 @@
# Minio Golang Library for Amazon S3 Compatible Cloud Storage [![Gitter](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/Minio/minio?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)

The Minio Golang Client SDK provides simple APIs to access any Amazon S3 compatible object storage server.

-**List of supported cloud storage providers.**
+**Supported cloud storage providers:**

- AWS Signature Version 4
  - Amazon S3
  - Minio
- AWS Signature Version 2
  - Google Cloud Storage (Compatibility Mode)
  - Openstack Swift + Swift3 middleware
  - Ceph Object Gateway
  - Riak CS

-This quickstart guide will show you how to install the client SDK and execute an example Golang program. For a complete list of APIs and examples, please take a look at the [Golang Client API Reference](https://docs.minio.io/docs/golang-client-api-reference) documentation.
+This quickstart guide will show you how to install the Minio client SDK, connect to Minio, and provide a walkthrough of a simple file uploader. For a complete list of APIs and examples, please take a look at the [Golang Client API Reference](https://docs.minio.io/docs/golang-client-api-reference).

-This document assumes that you have a working [Golang](https://docs.minio.io/docs/how-to-install-golang) setup in place.
+This document assumes that you have a working [Golang setup](https://docs.minio.io/docs/how-to-install-golang).

## Download from Github

@@ -27,14 +28,14 @@ $ go get -u github.com/minio/minio-go
```

## Initialize Minio Client

-You need four items in order to connect to Minio object storage server.
+You need four items to connect to Minio object storage server.

-| Params | Description|
+| Parameter | Description|
| :--- | :--- |
| endpoint | URL to object storage service. |
-| accessKeyID | Access key is like user ID that uniquely identifies your account. |
+| accessKeyID | Access key is the user ID that uniquely identifies your account. |
| secretAccessKey | Secret key is the password to your account. |
| secure | Set this value to 'true' to enable secure (HTTPS) access. |

@@ -44,23 +45,25 @@ You need four items in order to connect to Minio object storage server.
```go
package main

import (
-    "fmt"
    "github.com/minio/minio-go"
+    "log"
)

func main() {
-    // Use a secure connection.
-    ssl := true
+    endpoint := "play.minio.io:9000"
+    accessKeyID := "Q3AM3UQ867SPQQA43P2F"
+    secretAccessKey := "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG"
+    useSSL := true

    // Initialize minio client object.
-    minioClient, err := minio.New("play.minio.io:9000", "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", ssl)
+    minioClient, err := minio.New(endpoint, accessKeyID, secretAccessKey, useSSL)
    if err != nil {
-        fmt.Println(err)
-        return
+        log.Fatalln(err)
    }
+
+    log.Println("%v", minioClient) // minioClient is now setup
}
```
## Quick Start Example - File Uploader

@@ -75,41 +78,54 @@ We will use the Minio server running at [https://play.minio.io:9000](https://pla

#### FileUploader.go

```go
package main

-import "fmt"
import (
-    "log"
    "github.com/minio/minio-go"
+    "log"
)

func main() {
-    // Use a secure connection.
-    ssl := true
+    endpoint := "play.minio.io:9000"
+    accessKeyID := "Q3AM3UQ867SPQQA43P2F"
+    secretAccessKey := "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG"
+    useSSL := true

    // Initialize minio client object.
-    minioClient, err := minio.New("play.minio.io:9000", "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", ssl)
+    minioClient, err := minio.New(endpoint, accessKeyID, secretAccessKey, useSSL)
    if err != nil {
        log.Fatalln(err)
    }

-    // Make a new bucket called mymusic.
-    err = minioClient.MakeBucket("mymusic", "us-east-1")
+    // Make a new bucked called mymusic.
+    bucketName := "mymusic"
+    location := "us-east-1"
+
+    err = minioClient.MakeBucket(bucketName, location)
+    if err != nil {
+        // Check to see if we already own this bucket (which happens if you run this twice)
+        exists, err := minioClient.BucketExists(bucketName)
+        if err == nil && exists {
+            log.Printf("We already own %s\n", bucketName)
+        } else {
+            log.Fatalln(err)
+        }
+    }
+    log.Printf("Successfully created %s\n", bucketName)
+
+    // Upload the zip file
+    objectName := "golden-oldies.zip"
+    filePath := "/tmp/golden-oldies.zip"
+    contentType := "application/zip"
+
+    // Upload the zip file with FPutObject
+    n, err := minioClient.FPutObject(bucketName, objectName, filePath, contentType)
    if err != nil {
        log.Fatalln(err)
    }
-    fmt.Println("Successfully created mymusic")

-    // Upload the zip file with FPutObject.
-    n, err := minioClient.FPutObject("mymusic", "golden-oldies.zip", "/tmp/golden-oldies.zip", "application/zip")
-    if err != nil {
-        log.Fatalln(err)
-    }
-    log.Printf("Successfully uploaded golden-oldies.zip of size %d\n", n)
+    log.Printf("Successfully uploaded %s of size %d\n", objectName, n)
}
```
#### Run FileUploader

@@ -117,8 +133,8 @@ func main() {

```sh
$ go run file-uploader.go
-$ Successfully created mymusic
-$ Successfully uploaded golden-oldies.zip of size 17MiB
+2016/08/13 17:03:28 Successfully created mymusic
+2016/08/13 17:03:40 Successfully uploaded golden-oldies.zip of size 16253413

$ mc ls play/mymusic/
[2016-05-27 16:02:16 PDT] 17MiB golden-oldies.zip
@@ -150,7 +166,8 @@ The full API Reference is available here.
* [`SetBucketNotification`](https://docs.minio.io/docs/golang-client-api-reference#SetBucketNotification)
* [`GetBucketNotification`](https://docs.minio.io/docs/golang-client-api-reference#GetBucketNotification)
-* [`DeleteBucketNotification`](https://docs.minio.io/docs/golang-client-api-reference#DeleteBucketNotification)
+* [`RemoveAllBucketNotification`](https://docs.minio.io/docs/golang-client-api-reference#RemoveAllBucketNotification)
+* [`ListenBucketNotification`](https://docs.minio.io/docs/golang-client-api-reference#ListenBucketNotification) (Minio Extension)

### API Reference : File Object Operations

@@ -194,7 +211,7 @@ The full API Reference is available here.
* [setbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketnotification.go)
* [getbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketnotification.go)
* [deletebucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/deletebucketnotification.go)
+* [listenbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/minio/listenbucketnotification.go) (Minio Extension)

#### Full Examples : File Object Operations
@@ -97,7 +97,7 @@ func TestHttpRespToErrorResponse(t *testing.T) {
    }

    // Generate http response with empty body.
-   // Set the StatusCode to the arugment supplied.
+   // Set the StatusCode to the argument supplied.
    // Sets common headers.
    genEmptyBodyResponse := func(statusCode int) *http.Response {
        resp := &http.Response{}
@@ -48,7 +48,7 @@ func (c Client) FGetObject(bucketName, objectName, filePath string) error {
        }
    }

-   // Extract top level direcotry.
+   // Extract top level directory.
    objectDir, _ := filepath.Split(filePath)
    if objectDir != "" {
        // Create any missing top level directories.
@@ -243,8 +243,8 @@ func (o *Object) ReadAt(b []byte, offset int64) (n int, err error) {
        return 0, o.prevErr
    }

-   // If offset is negative and offset is greater than or equal to
-   // object size we return EOF.
+   // if offset is greater than or equal to object size we return io.EOF.
+   // If offset is negative then we return io.EOF.
    if offset < 0 || offset >= o.objectInfo.Size {
        return 0, io.EOF
    }

@@ -353,7 +353,12 @@ func (o *Object) Seek(offset int64, whence int) (n int64, err error) {
        if o.objectInfo.Size+offset < 0 {
            return 0, ErrInvalidArgument(fmt.Sprintf("Seeking at negative offset not allowed for %d", whence))
        }
-       o.currOffset += offset
+       o.currOffset = o.objectInfo.Size + offset
+   }
+
+   // Reset the saved error since we successfully seeked, let the Read
+   // and ReadAt decide.
+   if o.prevErr == io.EOF {
+       o.prevErr = nil
    }
    // Return the effective offset.
    return o.currOffset, nil
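
For readers skimming the hunk above, here is a minimal, illustrative sketch (not part of this commit) of what the corrected `whence == 2` branch means for calling code: seeking relative to the end now lands at `size + offset`. The endpoint, credentials, bucket and object names are placeholders.

```go
package main

import (
	"io"
	"log"

	"github.com/minio/minio-go"
)

func main() {
	// Placeholder endpoint and credentials.
	client, err := minio.New("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatalln(err)
	}
	obj, err := client.GetObject("mybucket", "myobject")
	if err != nil {
		log.Fatalln(err)
	}
	// With the fix above, whence=2 positions the reader at size-100
	// instead of just advancing the current offset.
	if _, err = obj.Seek(-100, 2); err != nil {
		log.Fatalln(err)
	}
	tail := make([]byte, 100)
	if _, err = io.ReadFull(obj, tail); err != nil {
		log.Fatalln(err)
	}
	log.Printf("last 100 bytes: %x", tail)
}
```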
@@ -17,31 +17,32 @@
package minio

import (
-   "io"
+   "encoding/json"
    "io/ioutil"
    "net/http"
    "net/url"
-   "sort"
+
+   "github.com/minio/minio-go/pkg/policy"
)

// GetBucketPolicy - get bucket policy at a given path.
-func (c Client) GetBucketPolicy(bucketName, objectPrefix string) (bucketPolicy BucketPolicy, err error) {
+func (c Client) GetBucketPolicy(bucketName, objectPrefix string) (bucketPolicy policy.BucketPolicy, err error) {
    // Input validation.
    if err := isValidBucketName(bucketName); err != nil {
-       return BucketPolicyNone, err
+       return policy.BucketPolicyNone, err
    }
    if err := isValidObjectPrefix(objectPrefix); err != nil {
-       return BucketPolicyNone, err
+       return policy.BucketPolicyNone, err
    }
-   policy, err := c.getBucketPolicy(bucketName, objectPrefix)
+   policyInfo, err := c.getBucketPolicy(bucketName, objectPrefix)
    if err != nil {
-       return BucketPolicyNone, err
+       return policy.BucketPolicyNone, err
    }
-   return identifyPolicyType(policy, bucketName, objectPrefix), nil
+   return policy.GetPolicy(policyInfo.Statements, bucketName, objectPrefix), nil
}

// Request server for policy.
-func (c Client) getBucketPolicy(bucketName string, objectPrefix string) (BucketAccessPolicy, error) {
+func (c Client) getBucketPolicy(bucketName string, objectPrefix string) (policy.BucketAccessPolicy, error) {
    // Get resources properly escaped and lined up before
    // using them in http request.
    urlValues := make(url.Values)

@@ -55,38 +56,24 @@ func (c Client) getBucketPolicy(bucketName string, objectPrefix string) (BucketA
    defer closeResponse(resp)
    if err != nil {
-       return BucketAccessPolicy{}, err
-   }
-   return processBucketPolicyResponse(bucketName, resp)
-}
-
-// processes the GetPolicy http response from the server.
-func processBucketPolicyResponse(bucketName string, resp *http.Response) (BucketAccessPolicy, error) {
+       return policy.BucketAccessPolicy{}, err
+   }
    if resp != nil {
        if resp.StatusCode != http.StatusOK {
            errResponse := httpRespToErrorResponse(resp, bucketName, "")
            if ToErrorResponse(errResponse).Code == "NoSuchBucketPolicy" {
-               return BucketAccessPolicy{Version: "2012-10-17"}, nil
+               return policy.BucketAccessPolicy{Version: "2012-10-17"}, nil
            }
-           return BucketAccessPolicy{}, errResponse
+           return policy.BucketAccessPolicy{}, errResponse
        }
    }
-   // Read access policy up to maxAccessPolicySize.
-   // http://docs.aws.amazon.com/AmazonS3/latest/dev/access-policy-language-overview.html
-   // bucket policies are limited to 20KB in size, using a limit reader.
-   bucketPolicyBuf, err := ioutil.ReadAll(io.LimitReader(resp.Body, maxAccessPolicySize))
+   bucketPolicyBuf, err := ioutil.ReadAll(resp.Body)
    if err != nil {
-       return BucketAccessPolicy{}, err
+       return policy.BucketAccessPolicy{}, err
    }
-   policy, err := unMarshalBucketPolicy(bucketPolicyBuf)
-   if err != nil {
-       return BucketAccessPolicy{}, err
-   }
-   // Sort the policy actions and resources for convenience.
-   for _, statement := range policy.Statements {
-       sort.Strings(statement.Actions)
-       sort.Strings(statement.Resources)
-   }
-   return policy, nil
+
+   policy := policy.BucketAccessPolicy{}
+   err = json.Unmarshal(bucketPolicyBuf, &policy)
+   return policy, err
}
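
A short, illustrative sketch (not part of this commit) of the reworked `GetBucketPolicy`, which now returns a `policy.BucketPolicy` value backed by the new `pkg/policy` sub-package. Endpoint, credentials and the bucket name are placeholders.

```go
package main

import (
	"log"

	"github.com/minio/minio-go"
)

func main() {
	// Placeholder endpoint and credentials.
	client, err := minio.New("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatalln(err)
	}
	// Returns one of policy.BucketPolicyNone, BucketPolicyReadOnly,
	// BucketPolicyWriteOnly or BucketPolicyReadWrite.
	bucketPolicy, err := client.GetBucketPolicy("mybucket", "")
	if err != nil {
		log.Fatalln(err)
	}
	log.Println("current policy:", bucketPolicy)
}
```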
@@ -1,102 +0,0 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package minio
import (
"bytes"
"encoding/json"
"io/ioutil"
"net/http"
"reflect"
"testing"
)
// Mocks valid http response containing bucket policy from server.
func generatePolicyResponse(resp *http.Response, policy BucketAccessPolicy) (*http.Response, error) {
policyBytes, err := json.Marshal(policy)
if err != nil {
return nil, err
}
resp.StatusCode = http.StatusOK
resp.Body = ioutil.NopCloser(bytes.NewBuffer(policyBytes))
return resp, nil
}
// Tests the processing of GetPolicy response from server.
func TestProcessBucketPolicyResopnse(t *testing.T) {
bucketAccesPolicies := []BucketAccessPolicy{
{Version: "1.0"},
{Version: "1.0", Statements: setReadOnlyStatement("minio-bucket", "")},
{Version: "1.0", Statements: setReadWriteStatement("minio-bucket", "Asia/")},
{Version: "1.0", Statements: setWriteOnlyStatement("minio-bucket", "Asia/India/")},
}
APIErrors := []APIError{
{
Code: "NoSuchBucketPolicy",
Description: "The specified bucket does not have a bucket policy.",
HTTPStatusCode: http.StatusNotFound,
},
}
testCases := []struct {
bucketName string
isAPIError bool
apiErr APIError
// expected results.
expectedResult BucketAccessPolicy
err error
// flag indicating whether tests should pass.
shouldPass bool
}{
{"my-bucket", true, APIErrors[0], BucketAccessPolicy{Version: "2012-10-17"}, nil, true},
{"my-bucket", false, APIError{}, bucketAccesPolicies[0], nil, true},
{"my-bucket", false, APIError{}, bucketAccesPolicies[1], nil, true},
{"my-bucket", false, APIError{}, bucketAccesPolicies[2], nil, true},
{"my-bucket", false, APIError{}, bucketAccesPolicies[3], nil, true},
}
for i, testCase := range testCases {
inputResponse := &http.Response{}
var err error
if testCase.isAPIError {
inputResponse = generateErrorResponse(inputResponse, testCase.apiErr, testCase.bucketName)
} else {
inputResponse, err = generatePolicyResponse(inputResponse, testCase.expectedResult)
if err != nil {
t.Fatalf("Test %d: Creation of valid response failed", i+1)
}
}
actualResult, err := processBucketPolicyResponse("my-bucket", inputResponse)
if err != nil && testCase.shouldPass {
t.Errorf("Test %d: Expected to pass, but failed with: <ERROR> %s", i+1, err.Error())
}
if err == nil && !testCase.shouldPass {
t.Errorf("Test %d: Expected to fail with <ERROR> \"%s\", but passed instead", i+1, testCase.err.Error())
}
// Failed as expected, but does it fail for the expected reason.
if err != nil && !testCase.shouldPass {
if err.Error() != testCase.err.Error() {
t.Errorf("Test %d: Expected to fail with error \"%s\", but instead failed with error \"%s\" instead", i+1, testCase.err.Error(), err.Error())
}
}
if err == nil && testCase.shouldPass {
if !reflect.DeepEqual(testCase.expectedResult, actualResult) {
t.Errorf("Test %d: The expected BucketPolicy doesnt match the actual BucketPolicy", i+1)
}
}
}
}
@@ -17,6 +17,9 @@
package minio

import (
+   "bufio"
+   "encoding/json"
+   "io"
    "net/http"
    "net/url"
)

@@ -36,7 +39,6 @@ func (c Client) GetBucketNotification(bucketName string) (bucketNotification Buc
// Request server for notification rules.
func (c Client) getBucketNotification(bucketName string) (BucketNotification, error) {
    urlValues := make(url.Values)
    urlValues.Set("notification", "")

@@ -67,3 +69,137 @@ func processBucketNotificationResponse(bucketName string, resp *http.Response) (
    }
    return bucketNotification, nil
}
// Indentity represents the user id, this is a compliance field.
type identity struct {
PrincipalID string `json:"principalId"`
}
// Notification event bucket metadata.
type bucketMeta struct {
Name string `json:"name"`
OwnerIdentity identity `json:"ownerIdentity"`
ARN string `json:"arn"`
}
// Notification event object metadata.
type objectMeta struct {
Key string `json:"key"`
Size int64 `json:"size,omitempty"`
ETag string `json:"eTag,omitempty"`
VersionID string `json:"versionId,omitempty"`
Sequencer string `json:"sequencer"`
}
// Notification event server specific metadata.
type eventMeta struct {
SchemaVersion string `json:"s3SchemaVersion"`
ConfigurationID string `json:"configurationId"`
Bucket bucketMeta `json:"bucket"`
Object objectMeta `json:"object"`
}
// NotificationEvent represents an Amazon an S3 bucket notification event.
type NotificationEvent struct {
EventVersion string `json:"eventVersion"`
EventSource string `json:"eventSource"`
AwsRegion string `json:"awsRegion"`
EventTime string `json:"eventTime"`
EventName string `json:"eventName"`
UserIdentity identity `json:"userIdentity"`
RequestParameters map[string]string `json:"requestParameters"`
ResponseElements map[string]string `json:"responseElements"`
S3 eventMeta `json:"s3"`
}
// NotificationInfo - represents the collection of notification events, additionally
// also reports errors if any while listening on bucket notifications.
type NotificationInfo struct {
Records []NotificationEvent
Err error
}
// ListenBucketNotification - listen on bucket notifications.
func (c Client) ListenBucketNotification(bucketName string, accountArn Arn, doneCh <-chan struct{}) <-chan NotificationInfo {
notificationInfoCh := make(chan NotificationInfo, 1)
// Only success, start a routine to start reading line by line.
go func(notificationInfoCh chan<- NotificationInfo) {
defer close(notificationInfoCh)
// Validate the bucket name.
if err := isValidBucketName(bucketName); err != nil {
notificationInfoCh <- NotificationInfo{
Err: err,
}
return
}
// Continuously run and listen on bucket notification.
for {
urlValues := make(url.Values)
urlValues.Set("notificationARN", accountArn.String())
// Execute GET on bucket to list objects.
resp, err := c.executeMethod("GET", requestMetadata{
bucketName: bucketName,
queryValues: urlValues,
})
if err != nil {
notificationInfoCh <- NotificationInfo{
Err: err,
}
return
}
// Validate http response, upon error return quickly.
if resp.StatusCode != http.StatusOK {
errResponse := httpRespToErrorResponse(resp, bucketName, "")
notificationInfoCh <- NotificationInfo{
Err: errResponse,
}
return
}
// Initialize a new bufio scanner, to read line by line.
bio := bufio.NewScanner(resp.Body)
// Close the response body.
defer resp.Body.Close()
// Unmarshal each line, returns marshalled values.
for bio.Scan() {
var notificationInfo NotificationInfo
if err = json.Unmarshal(bio.Bytes(), &notificationInfo); err != nil {
notificationInfoCh <- NotificationInfo{
Err: err,
}
return
}
// Send notifications on channel only if there are events received.
if len(notificationInfo.Records) > 0 {
select {
case notificationInfoCh <- notificationInfo:
case <-doneCh:
return
}
}
}
// Look for any underlying errors.
if err = bio.Err(); err != nil {
// For an unexpected connection drop from server, we close the body
// and re-connect.
if err == io.ErrUnexpectedEOF {
resp.Body.Close()
continue
}
notificationInfoCh <- NotificationInfo{
Err: err,
}
return
}
}
}(notificationInfoCh)
// Returns the notification info channel, for caller to start reading from.
return notificationInfoCh
}
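
A minimal, illustrative sketch (not part of this commit) of consuming the new `ListenBucketNotification` API added above. The endpoint, credentials, bucket name and account ARN are placeholders.

```go
package main

import (
	"log"

	"github.com/minio/minio-go"
)

func main() {
	// Placeholder endpoint and credentials.
	client, err := minio.New("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatalln(err)
	}

	// Closing doneCh stops the listener goroutine.
	doneCh := make(chan struct{})
	defer close(doneCh)

	// Placeholder ARN identifying the listener.
	accountArn := minio.NewArn("minio", "sns", "us-east-1", "1", "listen")

	// Block and print events as they arrive; an Err entry ends the stream.
	for info := range client.ListenBucketNotification("mybucket", accountArn, doneCh) {
		if info.Err != nil {
			log.Fatalln(info.Err)
		}
		for _, record := range info.Records {
			log.Println("event:", record.EventName, "key:", record.S3.Object.Key)
		}
	}
}
```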
@@ -26,7 +26,8 @@ import (
    "io/ioutil"
    "net/http"
    "net/url"
-   "reflect"
+
+   "github.com/minio/minio-go/pkg/policy"
)

/// Bucket operations

@@ -149,7 +150,7 @@ func (c Client) makeBucketRequest(bucketName string, location string) (*http.Req
// readonly - anonymous get access for everyone at a given object prefix.
// readwrite - anonymous list/put/delete access to a given object prefix.
// writeonly - anonymous put/delete access to a given object prefix.
-func (c Client) SetBucketPolicy(bucketName string, objectPrefix string, bucketPolicy BucketPolicy) error {
+func (c Client) SetBucketPolicy(bucketName string, objectPrefix string, bucketPolicy policy.BucketPolicy) error {
    // Input validation.
    if err := isValidBucketName(bucketName); err != nil {
        return err

@@ -157,57 +158,35 @@ func (c Client) SetBucketPolicy(bucketName string, objectPrefix string, bucketPo
    if err := isValidObjectPrefix(objectPrefix); err != nil {
        return err
    }
-   if !bucketPolicy.isValidBucketPolicy() {
+   if !bucketPolicy.IsValidBucketPolicy() {
        return ErrInvalidArgument(fmt.Sprintf("Invalid bucket policy provided. %s", bucketPolicy))
    }
-   policy, err := c.getBucketPolicy(bucketName, objectPrefix)
+   policyInfo, err := c.getBucketPolicy(bucketName, objectPrefix)
    if err != nil {
        return err
    }

-   // For bucket policy set to 'none' we need to remove the policy.
-   if bucketPolicy == BucketPolicyNone && policy.Statements == nil {
-       // No policy exists on the given prefix so return with ErrNoSuchBucketPolicy.
-       return ErrNoSuchBucketPolicy(fmt.Sprintf("No policy exists on %s/%s", bucketName, objectPrefix))
-   }
-   // Remove any previous policies at this path.
-   statements := removeBucketPolicyStatement(policy.Statements, bucketName, objectPrefix)
-
-   // generating []Statement for the given bucketPolicy.
-   generatedStatements, err := generatePolicyStatement(bucketPolicy, bucketName, objectPrefix)
-   if err != nil {
-       return err
-   }
-   statements = append(statements, generatedStatements...)
-
-   // No change in the statements indicates either an attempt of setting 'none'
-   // on a prefix which doesn't have a pre-existing policy, or setting a policy
-   // on a prefix which already has the same policy.
-   if reflect.DeepEqual(policy.Statements, statements) {
-       // If policy being set is 'none' return an error, otherwise return nil to
-       // prevent the unnecessary request from being sent
-       var err error
-       if bucketPolicy == BucketPolicyNone {
-           err = ErrNoSuchBucketPolicy(fmt.Sprintf("No policy exists on %s/%s", bucketName, objectPrefix))
-       } else {
-           err = nil
-       }
-       return err
+   if bucketPolicy == policy.BucketPolicyNone && policyInfo.Statements == nil {
+       // As the request is for removing policy and the bucket
+       // has empty policy statements, just return success.
+       return nil
    }

-   policy.Statements = statements
+   policyInfo.Statements = policy.SetPolicy(policyInfo.Statements, bucketPolicy, bucketName, objectPrefix)

    // Save the updated policies.
-   return c.putBucketPolicy(bucketName, policy)
+   return c.putBucketPolicy(bucketName, policyInfo)
}

// Saves a new bucket policy.
-func (c Client) putBucketPolicy(bucketName string, policy BucketAccessPolicy) error {
+func (c Client) putBucketPolicy(bucketName string, policyInfo policy.BucketAccessPolicy) error {
    // Input validation.
    if err := isValidBucketName(bucketName); err != nil {
        return err
    }

    // If there are no policy statements, we should remove entire policy.
-   if len(policy.Statements) == 0 {
+   if len(policyInfo.Statements) == 0 {
        return c.removeBucketPolicy(bucketName)
    }

@@ -216,7 +195,7 @@ func (c Client) putBucketPolicy(bucketName string, policy BucketAccessPolicy) er
    urlValues := make(url.Values)
    urlValues.Set("policy", "")

-   policyBytes, err := json.Marshal(&policy)
+   policyBytes, err := json.Marshal(&policyInfo)
    if err != nil {
        return err
    }

@@ -309,7 +288,7 @@ func (c Client) SetBucketNotification(bucketName string, bucketNotification Buck
    return nil
}

-// DeleteBucketNotification - Remove bucket notification clears all previously specified config
-func (c Client) DeleteBucketNotification(bucketName string) error {
+// RemoveAllBucketNotification - Remove bucket notification clears all previously specified config
+func (c Client) RemoveAllBucketNotification(bucketName string) error {
    return c.SetBucketNotification(bucketName, BucketNotification{})
}
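
An illustrative sketch (not part of this commit) of the updated `SetBucketPolicy` and the renamed `RemoveAllBucketNotification` as seen from calling code. Endpoint, credentials and the bucket name are placeholders.

```go
package main

import (
	"log"

	"github.com/minio/minio-go"
	"github.com/minio/minio-go/pkg/policy"
)

func main() {
	// Placeholder endpoint and credentials.
	client, err := minio.New("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatalln(err)
	}
	// Policy constants now live in the pkg/policy sub-package.
	if err = client.SetBucketPolicy("mybucket", "", policy.BucketPolicyReadOnly); err != nil {
		log.Fatalln(err)
	}
	// Setting 'none' on a prefix without a policy is now a no-op instead of an error.
	if err = client.SetBucketPolicy("mybucket", "unused-prefix", policy.BucketPolicyNone); err != nil {
		log.Fatalln(err)
	}
	// DeleteBucketNotification was renamed; it still clears every notification config.
	if err = client.RemoveAllBucketNotification("mybucket"); err != nil {
		log.Fatalln(err)
	}
}
```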
@@ -24,10 +24,10 @@ import (
)

// BucketExists verify if bucket exists and you have permission to access it.
-func (c Client) BucketExists(bucketName string) error {
+func (c Client) BucketExists(bucketName string) (bool, error) {
    // Input validation.
    if err := isValidBucketName(bucketName); err != nil {
-       return err
+       return false, err
    }

    // Execute HEAD on bucketName.

@@ -36,14 +36,17 @@ func (c Client) BucketExists(bucketName string) error {
    })
    defer closeResponse(resp)
    if err != nil {
-       return err
+       if ToErrorResponse(err).Code == "NoSuchBucket" {
+           return false, nil
+       }
+       return false, err
    }
    if resp != nil {
        if resp.StatusCode != http.StatusOK {
-           return httpRespToErrorResponse(resp, bucketName, "")
+           return false, httpRespToErrorResponse(resp, bucketName, "")
        }
    }
-   return nil
+   return true, nil
}

// StatObject verifies if object exists and you have permission to access.
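
A small, illustrative sketch (not part of this commit) of the new two-value `BucketExists` signature: a missing bucket is now reported as `(false, nil)` rather than through the error. Endpoint, credentials and the bucket name are placeholders.

```go
package main

import (
	"log"

	"github.com/minio/minio-go"
)

func main() {
	// Placeholder endpoint and credentials.
	client, err := minio.New("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatalln(err)
	}
	exists, err := client.BucketExists("mybucket")
	if err != nil {
		log.Fatalln(err)
	}
	if exists {
		log.Println("bucket found")
	} else {
		log.Println("bucket does not exist")
	}
}
```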
@@ -589,7 +589,7 @@ func (c Client) newRequest(method string, metadata requestMetadata) (req *http.R
    // set sha256 sum for signature calculation only with
    // signature version '4'.
    if c.signature.isV4() {
-       shaHeader := "UNSIGNED-PAYLOAD"
+       shaHeader := unsignedPayload
        if !c.secure {
            if metadata.contentSHA256Bytes == nil {
                shaHeader = hex.EncodeToString(sum256([]byte{}))
@@ -28,6 +28,8 @@ import (
    "os"
    "testing"
    "time"
+
+   "github.com/minio/minio-go/pkg/policy"
)

// Tests bucket re-create errors.

@@ -709,8 +711,8 @@ func TestGetObjectReadSeekFunctionalV2(t *testing.T) {
    if err != nil {
        t.Fatal("Error:", err)
    }
-   if n != 0 {
-       t.Fatalf("Error: number of bytes seeked back does not match, want 0, got %v\n", n)
+   if n != st.Size-offset {
+       t.Fatalf("Error: number of bytes seeked back does not match, want %d, got %v\n", st.Size-offset, n)
    }

    var buffer1 bytes.Buffer

@@ -719,7 +721,7 @@ func TestGetObjectReadSeekFunctionalV2(t *testing.T) {
            t.Fatal("Error:", err)
        }
    }
-   if !bytes.Equal(buf, buffer1.Bytes()) {
+   if !bytes.Equal(buf[len(buf)-int(offset):], buffer1.Bytes()) {
        t.Fatal("Error: Incorrect read bytes v/s original buffer.")
    }

@@ -1067,13 +1069,17 @@ func TestFunctionalV2(t *testing.T) {
    file.Close()

    // Verify if bucket exits and you have access.
-   err = c.BucketExists(bucketName)
+   var exists bool
+   exists, err = c.BucketExists(bucketName)
    if err != nil {
        t.Fatal("Error:", err, bucketName)
    }
+   if !exists {
+       t.Fatal("Error: could not find ", bucketName)
+   }

    // Make the bucket 'public read/write'.
-   err = c.SetBucketPolicy(bucketName, "", BucketPolicyReadWrite)
+   err = c.SetBucketPolicy(bucketName, "", policy.BucketPolicyReadWrite)
    if err != nil {
        t.Fatal("Error:", err)
    }
@@ -20,6 +20,7 @@ import (
    "bytes"
    crand "crypto/rand"
    "errors"
+   "fmt"
    "io"
    "io/ioutil"
    "math/rand"

@@ -28,6 +29,8 @@ import (
    "os"
    "testing"
    "time"
+
+   "github.com/minio/minio-go/pkg/policy"
)

const letterBytes = "abcdefghijklmnopqrstuvwxyz01234569"

@@ -307,6 +310,107 @@ func TestListPartiallyUploaded(t *testing.T) {
    }
}
// Test get object seeker from the end, using whence set to '2'.
func TestGetOjectSeekEnd(t *testing.T) {
if testing.Short() {
t.Skip("skipping functional tests for short runs")
}
// Seed random based on current time.
rand.Seed(time.Now().Unix())
// Instantiate new minio client object.
c, err := New(
"s3.amazonaws.com",
os.Getenv("ACCESS_KEY"),
os.Getenv("SECRET_KEY"),
true,
)
if err != nil {
t.Fatal("Error:", err)
}
// Enable tracing, write to stderr.
// c.TraceOn(os.Stderr)
// Set user agent.
c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
// Generate a new random bucket name.
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()))
// Make a new bucket.
err = c.MakeBucket(bucketName, "us-east-1")
if err != nil {
t.Fatal("Error:", err, bucketName)
}
// Generate data more than 32K
buf := make([]byte, rand.Intn(1<<20)+32*1024)
_, err = io.ReadFull(crand.Reader, buf)
if err != nil {
t.Fatal("Error:", err)
}
// Save the data
objectName := randString(60, rand.NewSource(time.Now().UnixNano()))
n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), "binary/octet-stream")
if err != nil {
t.Fatal("Error:", err, bucketName, objectName)
}
if n != int64(len(buf)) {
t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", len(buf), n)
}
// Read the data back
r, err := c.GetObject(bucketName, objectName)
if err != nil {
t.Fatal("Error:", err, bucketName, objectName)
}
st, err := r.Stat()
if err != nil {
t.Fatal("Error:", err, bucketName, objectName)
}
if st.Size != int64(len(buf)) {
t.Fatalf("Error: number of bytes in stat does not match, want %v, got %v\n",
len(buf), st.Size)
}
pos, err := r.Seek(-100, 2)
if err != nil {
t.Fatal("Error:", err, bucketName, objectName)
}
if pos != st.Size-100 {
t.Fatalf("Expected %d, got %d instead", pos, st.Size-100)
}
buf2 := make([]byte, 100)
m, err := io.ReadFull(r, buf2)
if err != nil {
t.Fatal("Error: reading through io.ReadFull", err, bucketName, objectName)
}
if m != len(buf2) {
t.Fatalf("Expected %d bytes, got %d", len(buf2), m)
}
hexBuf1 := fmt.Sprintf("%02x", buf[len(buf)-100:])
hexBuf2 := fmt.Sprintf("%02x", buf2[:m])
if hexBuf1 != hexBuf2 {
t.Fatalf("Expected %s, got %s instead", hexBuf1, hexBuf2)
}
pos, err = r.Seek(-100, 2)
if err != nil {
t.Fatal("Error:", err, bucketName, objectName)
}
if pos != st.Size-100 {
t.Fatalf("Expected %d, got %d instead", pos, st.Size-100)
}
if err = r.Close(); err != nil {
t.Fatal("Error:", err, bucketName, objectName)
}
}
// Test get object reader to not throw error on being closed twice.
func TestGetObjectClosedTwice(t *testing.T) {
    if testing.Short() {

@@ -973,8 +1077,8 @@ func TestGetObjectReadSeekFunctional(t *testing.T) {
    if err != nil {
        t.Fatal("Error:", err)
    }
-   if n != 0 {
-       t.Fatalf("Error: number of bytes seeked back does not match, want 0, got %v\n", n)
+   if n != st.Size-offset {
+       t.Fatalf("Error: number of bytes seeked back does not match, want %d, got %v\n", st.Size-offset, n)
    }

    var buffer1 bytes.Buffer

@@ -983,7 +1087,7 @@ func TestGetObjectReadSeekFunctional(t *testing.T) {
            t.Fatal("Error:", err)
        }
    }
-   if !bytes.Equal(buf, buffer1.Bytes()) {
+   if !bytes.Equal(buf[len(buf)-int(offset):], buffer1.Bytes()) {
        t.Fatal("Error: Incorrect read bytes v/s original buffer.")
    }
@@ -1458,13 +1562,23 @@ func TestBucketNotification(t *testing.T) {
    bucketName := os.Getenv("NOTIFY_BUCKET")

    topicArn := NewArn("aws", os.Getenv("NOTIFY_SERVICE"), os.Getenv("NOTIFY_REGION"), os.Getenv("NOTIFY_ACCOUNTID"), os.Getenv("NOTIFY_RESOURCE"))
+   queueArn := NewArn("aws", "dummy-service", "dummy-region", "dummy-accountid", "dummy-resource")

    topicConfig := NewNotificationConfig(topicArn)
    topicConfig.AddEvents(ObjectCreatedAll, ObjectRemovedAll)
    topicConfig.AddFilterSuffix("jpg")

+   queueConfig := NewNotificationConfig(queueArn)
+   queueConfig.AddEvents(ObjectCreatedAll)
+   queueConfig.AddFilterPrefix("photos/")
+
    bNotification := BucketNotification{}
    bNotification.AddTopic(topicConfig)
+
+   // Add and remove a queue config
+   bNotification.AddQueue(queueConfig)
+   bNotification.RemoveQueueByArn(queueArn)
+
    err = c.SetBucketNotification(bucketName, bNotification)
    if err != nil {
        t.Fatal("Error: ", err)

@@ -1483,7 +1597,7 @@ func TestBucketNotification(t *testing.T) {
        t.Fatal("Error: cannot get the suffix")
    }

-   err = c.DeleteBucketNotification(bucketName)
+   err = c.RemoveAllBucketNotification(bucketName)
    if err != nil {
        t.Fatal("Error: cannot delete bucket notification")
    }
@@ -1539,57 +1653,61 @@ func TestFunctional(t *testing.T) {
    file.Close()

    // Verify if bucket exits and you have access.
-   err = c.BucketExists(bucketName)
+   var exists bool
+   exists, err = c.BucketExists(bucketName)
    if err != nil {
        t.Fatal("Error:", err, bucketName)
    }
+   if !exists {
+       t.Fatal("Error: could not find ", bucketName)
+   }

    // Asserting the default bucket policy.
-   policy, err := c.GetBucketPolicy(bucketName, "")
+   policyAccess, err := c.GetBucketPolicy(bucketName, "")
    if err != nil {
        t.Fatal("Error:", err)
    }
-   if policy != "none" {
+   if policyAccess != "none" {
        t.Fatalf("Default bucket policy incorrect")
    }
    // Set the bucket policy to 'public readonly'.
-   err = c.SetBucketPolicy(bucketName, "", BucketPolicyReadOnly)
+   err = c.SetBucketPolicy(bucketName, "", policy.BucketPolicyReadOnly)
    if err != nil {
        t.Fatal("Error:", err)
    }
    // should return policy `readonly`.
-   policy, err = c.GetBucketPolicy(bucketName, "")
+   policyAccess, err = c.GetBucketPolicy(bucketName, "")
    if err != nil {
        t.Fatal("Error:", err)
    }
-   if policy != "readonly" {
+   if policyAccess != "readonly" {
        t.Fatalf("Expected bucket policy to be readonly")
    }

    // Make the bucket 'public writeonly'.
-   err = c.SetBucketPolicy(bucketName, "", BucketPolicyWriteOnly)
+   err = c.SetBucketPolicy(bucketName, "", policy.BucketPolicyWriteOnly)
    if err != nil {
        t.Fatal("Error:", err)
    }
    // should return policy `writeonly`.
-   policy, err = c.GetBucketPolicy(bucketName, "")
+   policyAccess, err = c.GetBucketPolicy(bucketName, "")
    if err != nil {
        t.Fatal("Error:", err)
    }
-   if policy != "writeonly" {
+   if policyAccess != "writeonly" {
        t.Fatalf("Expected bucket policy to be writeonly")
    }

    // Make the bucket 'public read/write'.
-   err = c.SetBucketPolicy(bucketName, "", BucketPolicyReadWrite)
+   err = c.SetBucketPolicy(bucketName, "", policy.BucketPolicyReadWrite)
    if err != nil {
        t.Fatal("Error:", err)
    }
    // should return policy `readwrite`.
-   policy, err = c.GetBucketPolicy(bucketName, "")
+   policyAccess, err = c.GetBucketPolicy(bucketName, "")
    if err != nil {
        t.Fatal("Error:", err)
    }
-   if policy != "readwrite" {
+   if policyAccess != "readwrite" {
        t.Fatalf("Expected bucket policy to be readwrite")
    }

    // List all buckets.
@@ -26,6 +26,8 @@ import (
    "os"
    "strings"
    "testing"
+
+   "github.com/minio/minio-go/pkg/policy"
)

type customReader struct{}

@@ -325,7 +327,7 @@ func TestBucketPolicyTypes(t *testing.T) {
        "invalid": false,
    }
    for bucketPolicy, ok := range want {
-       if BucketPolicy(bucketPolicy).isValidBucketPolicy() != ok {
+       if policy.BucketPolicy(bucketPolicy).IsValidBucketPolicy() != ok {
            t.Fatal("Error")
        }
    }
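
A tiny, illustrative sketch (not part of this commit) of the relocated policy helpers this test now exercises: the `BucketPolicy` type and its exported validity check live in `pkg/policy`.

```go
package main

import (
	"fmt"

	"github.com/minio/minio-go/pkg/policy"
)

func main() {
	// "invalid" is expected to fail the check; the rest should pass.
	for _, p := range []string{"none", "readonly", "writeonly", "readwrite", "invalid"} {
		fmt.Println(p, "valid:", policy.BucketPolicy(p).IsValidBucketPolicy())
	}
}
```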
@@ -25,7 +25,7 @@ import (
    "sync"
)

-// bucketLocationCache - Provides simple mechansim to hold bucket
+// bucketLocationCache - Provides simple mechanism to hold bucket
// locations in memory.
type bucketLocationCache struct {
    // mutex is used for handling the concurrent

@@ -66,8 +66,21 @@ func (r *bucketLocationCache) Delete(bucketName string) {
    delete(r.items, bucketName)
}

-// getBucketLocation - Get location for the bucketName from location map cache.
+// GetBucketLocation - get location for the bucket name from location cache, if not
+// fetch freshly by making a new request.
+func (c Client) GetBucketLocation(bucketName string) (string, error) {
+   if err := isValidBucketName(bucketName); err != nil {
+       return "", err
+   }
+   return c.getBucketLocation(bucketName)
+}
+
+// getBucketLocation - Get location for the bucketName from location map cache, if not
+// fetch freshly by making a new request.
func (c Client) getBucketLocation(bucketName string) (string, error) {
+   if err := isValidBucketName(bucketName); err != nil {
+       return "", err
+   }
    if location, ok := c.bucketLocCache.Get(bucketName); ok {
        return location, nil
    }

@@ -165,7 +178,13 @@ func (c Client) getBucketLocationRequest(bucketName string) (*http.Request, erro
    // Set sha256 sum for signature calculation only with signature version '4'.
    if c.signature.isV4() {
-       req.Header.Set("X-Amz-Content-Sha256", hex.EncodeToString(sum256([]byte{})))
+       var contentSha256 string
+       if c.secure {
+           contentSha256 = unsignedPayload
+       } else {
+           contentSha256 = hex.EncodeToString(sum256([]byte{}))
+       }
+       req.Header.Set("X-Amz-Content-Sha256", contentSha256)
    }

    // Sign the request.
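
An illustrative sketch (not part of this commit) of the newly exported `GetBucketLocation`, which answers from the location cache when it can and otherwise queries the server. Endpoint, credentials and the bucket name are placeholders.

```go
package main

import (
	"log"

	"github.com/minio/minio-go"
)

func main() {
	// Placeholder endpoint and credentials.
	client, err := minio.New("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatalln(err)
	}
	location, err := client.GetBucketLocation("mybucket")
	if err != nil {
		log.Fatalln(err)
	}
	log.Println("bucket region:", location)
}
```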
@@ -316,7 +316,7 @@ func TestProcessBucketLocationResponse(t *testing.T) {
        }
        if err == nil && testCase.shouldPass {
            if !reflect.DeepEqual(testCase.expectedResult, actualResult) {
-               t.Errorf("Test %d: The expected BucketPolicy doesnt match the actual BucketPolicy", i+1)
+               t.Errorf("Test %d: The expected BucketPolicy doesn't match the actual BucketPolicy", i+1)
            }
        }
    }
@@ -20,11 +20,13 @@ import (
    "encoding/xml"
)

-// S3 notification events
-type Event string
+// NotificationEventType is a S3 notification event associated to the bucket notification configuration
+type NotificationEventType string

+// The role of all event types are described in :
+// http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html#notification-how-to-event-types-and-destinations
const (
-   ObjectCreatedAll Event = "s3:ObjectCreated:*"
+   ObjectCreatedAll NotificationEventType = "s3:ObjectCreated:*"
    ObjectCreatePut = "s3:ObjectCreated:Put"
    ObjectCreatedPost = "s3:ObjectCreated:Post"
    ObjectCreatedCopy = "s3:ObjectCreated:Copy"

@@ -35,20 +37,27 @@ const (
    ObjectReducedRedundancyLostObject = "s3:ReducedRedundancyLostObject"
)

+// FilterRule - child of S3Key, a tag in the notification xml which
+// carries suffix/prefix filters
type FilterRule struct {
    Name string `xml:"Name"`
    Value string `xml:"Value"`
}

+// S3Key - child of Filter, a tag in the notification xml which
+// carries suffix/prefix filters
type S3Key struct {
    FilterRules []FilterRule `xml:"FilterRule,omitempty"`
}

+// Filter - a tag in the notification xml structure which carries
+// suffix/prefix filters
type Filter struct {
    S3Key S3Key `xml:"S3Key,omitempty"`
}

-// Arn - holds ARN information that will be sent to the web service
+// Arn - holds ARN information that will be sent to the web service,
+// ARN desciption can be found in http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html
type Arn struct {
    Partition string
    Service string

@@ -57,6 +66,7 @@ type Arn struct {
    Resource string
}

+// NewArn creates new ARN based on the given partition, service, region, account id and resource
func NewArn(partition, service, region, accountID, resource string) Arn {
    return Arn{Partition: partition,
        Service: service,

@@ -65,6 +75,7 @@ func NewArn(partition, service, region, accountID, resource string) Arn {
        Resource: resource}
}

+// Return the string format of the ARN
func (arn Arn) String() string {
    return "arn:" + arn.Partition + ":" + arn.Service + ":" + arn.Region + ":" + arn.AccountID + ":" + arn.Resource
}

@@ -74,43 +85,65 @@ func (arn Arn) String() string {
type NotificationConfig struct {
    Id string `xml:"Id,omitempty"`
    Arn Arn `xml:"-"`
-   Events []Event `xml:"Event"`
+   Events []NotificationEventType `xml:"Event"`
    Filter *Filter `xml:"Filter,omitempty"`
}

+// NewNotificationConfig creates one notification config and sets the given ARN
func NewNotificationConfig(arn Arn) NotificationConfig {
    return NotificationConfig{Arn: arn}
}

-func (t *NotificationConfig) AddEvents(events ...Event) {
+// AddEvents adds one event to the current notification config
+func (t *NotificationConfig) AddEvents(events ...NotificationEventType) {
    t.Events = append(t.Events, events...)
}

+// AddFilterSuffix sets the suffix configuration to the current notification config
func (t *NotificationConfig) AddFilterSuffix(suffix string) {
    if t.Filter == nil {
        t.Filter = &Filter{}
    }
-   t.Filter.S3Key.FilterRules = append(t.Filter.S3Key.FilterRules, FilterRule{Name: "suffix", Value: suffix})
+   newFilterRule := FilterRule{Name: "suffix", Value: suffix}
+   // Replace any suffix rule if existing and add to the list otherwise
+   for index := range t.Filter.S3Key.FilterRules {
+       if t.Filter.S3Key.FilterRules[index].Name == "suffix" {
+           t.Filter.S3Key.FilterRules[index] = newFilterRule
+           return
+       }
+   }
+   t.Filter.S3Key.FilterRules = append(t.Filter.S3Key.FilterRules, newFilterRule)
}

+// AddFilterPrefix sets the prefix configuration to the current notification config
func (t *NotificationConfig) AddFilterPrefix(prefix string) {
    if t.Filter == nil {
        t.Filter = &Filter{}
    }
-   t.Filter.S3Key.FilterRules = append(t.Filter.S3Key.FilterRules, FilterRule{Name: "prefix", Value: prefix})
+   newFilterRule := FilterRule{Name: "prefix", Value: prefix}
+   // Replace any prefix rule if existing and add to the list otherwise
+   for index := range t.Filter.S3Key.FilterRules {
+       if t.Filter.S3Key.FilterRules[index].Name == "prefix" {
+           t.Filter.S3Key.FilterRules[index] = newFilterRule
+           return
+       }
+   }
+   t.Filter.S3Key.FilterRules = append(t.Filter.S3Key.FilterRules, newFilterRule)
}

-// Topic notification config
+// TopicConfig carries one single topic notification configuration
type TopicConfig struct {
    NotificationConfig
    Topic string `xml:"Topic"`
}

+// QueueConfig carries one single queue notification configuration
type QueueConfig struct {
    NotificationConfig
    Queue string `xml:"Queue"`
}

+// LambdaConfig carries one single cloudfunction notification configuration
type LambdaConfig struct {
    NotificationConfig
    Lambda string `xml:"CloudFunction"`

@@ -124,17 +157,53 @@ type BucketNotification struct {
    QueueConfigs []QueueConfig `xml:"QueueConfiguration"`
}

+// AddTopic adds a given topic config to the general bucket notification config
func (b *BucketNotification) AddTopic(topicConfig NotificationConfig) {
-   config := TopicConfig{NotificationConfig: topicConfig, Topic: topicConfig.Arn.String()}
-   b.TopicConfigs = append(b.TopicConfigs, config)
+   newTopicConfig := TopicConfig{NotificationConfig: topicConfig, Topic: topicConfig.Arn.String()}
+   b.TopicConfigs = append(b.TopicConfigs, newTopicConfig)
}

+// AddQueue adds a given queue config to the general bucket notification config
func (b *BucketNotification) AddQueue(queueConfig NotificationConfig) {
-   config := QueueConfig{NotificationConfig: queueConfig, Queue: queueConfig.Arn.String()}
-   b.QueueConfigs = append(b.QueueConfigs, config)
+   newQueueConfig := QueueConfig{NotificationConfig: queueConfig, Queue: queueConfig.Arn.String()}
+   b.QueueConfigs = append(b.QueueConfigs, newQueueConfig)
}

+// AddLambda adds a given lambda config to the general bucket notification config
func (b *BucketNotification) AddLambda(lambdaConfig NotificationConfig) {
-   config := LambdaConfig{NotificationConfig: lambdaConfig, Lambda: lambdaConfig.Arn.String()}
-   b.LambdaConfigs = append(b.LambdaConfigs, config)
+   newLambdaConfig := LambdaConfig{NotificationConfig: lambdaConfig, Lambda: lambdaConfig.Arn.String()}
+   b.LambdaConfigs = append(b.LambdaConfigs, newLambdaConfig)
}
// RemoveTopicByArn removes all topic configurations that match the exact specified ARN
func (b *BucketNotification) RemoveTopicByArn(arn Arn) {
var topics []TopicConfig
for _, topic := range b.TopicConfigs {
if topic.Topic != arn.String() {
topics = append(topics, topic)
}
}
b.TopicConfigs = topics
}
// RemoveQueueByArn removes all queue configurations that match the exact specified ARN
func (b *BucketNotification) RemoveQueueByArn(arn Arn) {
var queues []QueueConfig
for _, queue := range b.QueueConfigs {
if queue.Queue != arn.String() {
queues = append(queues, queue)
}
}
b.QueueConfigs = queues
}
// RemoveLambdaByArn removes all lambda configurations that match the exact specified ARN
func (b *BucketNotification) RemoveLambdaByArn(arn Arn) {
var lambdas []LambdaConfig
for _, lambda := range b.LambdaConfigs {
if lambda.Lambda != arn.String() {
lambdas = append(lambdas, lambda)
}
}
b.LambdaConfigs = lambdas
} }
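
An illustrative sketch (not part of this commit) that strings the new notification helpers together: building a queue configuration, attaching it to a `BucketNotification`, and removing it again by ARN. The queue ARN, endpoint, credentials and bucket name are placeholders.

```go
package main

import (
	"log"

	"github.com/minio/minio-go"
)

func main() {
	// Placeholder endpoint and credentials.
	client, err := minio.New("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatalln(err)
	}

	// Placeholder queue ARN.
	queueArn := minio.NewArn("aws", "sqs", "us-east-1", "123456789012", "myqueue")
	queueConfig := minio.NewNotificationConfig(queueArn)
	queueConfig.AddEvents(minio.ObjectCreatedAll)
	// AddFilterPrefix/AddFilterSuffix now replace an existing rule instead of duplicating it.
	queueConfig.AddFilterPrefix("photos/")
	queueConfig.AddFilterSuffix(".jpg")

	bNotification := minio.BucketNotification{}
	bNotification.AddQueue(queueConfig)
	// RemoveQueueByArn would drop every queue configuration matching this ARN:
	// bNotification.RemoveQueueByArn(queueArn)

	if err = client.SetBucketNotification("mybucket", bNotification); err != nil {
		log.Fatalln(err)
	}
}
```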
@@ -1,618 +0,0 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package minio
import (
"encoding/json"
"fmt"
"sort"
"strings"
)
// maximum supported access policy size.
const maxAccessPolicySize = 20 * 1024 * 1024 // 20KiB.
// Resource prefix for all aws resources.
const awsResourcePrefix = "arn:aws:s3:::"
// BucketPolicy - Bucket level policy.
type BucketPolicy string
// Different types of Policies currently supported for buckets.
const (
BucketPolicyNone BucketPolicy = "none"
BucketPolicyReadOnly = "readonly"
BucketPolicyReadWrite = "readwrite"
BucketPolicyWriteOnly = "writeonly"
)
// isValidBucketPolicy - Is provided policy value supported.
func (p BucketPolicy) isValidBucketPolicy() bool {
switch p {
case BucketPolicyNone, BucketPolicyReadOnly, BucketPolicyReadWrite, BucketPolicyWriteOnly:
return true
}
return false
}
// User - canonical users list.
type User struct {
AWS []string
}
// Statement - minio policy statement
type Statement struct {
Sid string
Effect string
Principal User `json:"Principal"`
Actions []string `json:"Action"`
Resources []string `json:"Resource"`
Conditions map[string]map[string]string `json:"Condition,omitempty"`
}
// BucketAccessPolicy - minio policy collection
type BucketAccessPolicy struct {
Version string // date in 0000-00-00 format
Statements []Statement `json:"Statement"`
}
// Read write actions.
var (
readWriteBucketActions = []string{
"s3:GetBucketLocation",
"s3:ListBucketMultipartUploads",
// Add more bucket level read-write actions here.
}
readWriteObjectActions = []string{
"s3:AbortMultipartUpload",
"s3:DeleteObject",
"s3:GetObject",
"s3:ListMultipartUploadParts",
"s3:PutObject",
// Add more object level read-write actions here.
}
)
// Write only actions.
var (
writeOnlyBucketActions = []string{
"s3:GetBucketLocation",
"s3:ListBucketMultipartUploads",
// Add more bucket level write actions here.
}
writeOnlyObjectActions = []string{
"s3:AbortMultipartUpload",
"s3:DeleteObject",
"s3:ListMultipartUploadParts",
"s3:PutObject",
// Add more object level write actions here.
}
)
// Read only actions.
var (
readOnlyBucketActions = []string{
"s3:GetBucketLocation",
// Add more bucket level read actions here.
}
readOnlyObjectActions = []string{
"s3:GetObject",
// Add more object level read actions here.
}
)
// subsetActions returns true if the first array is completely
// contained in the second array. There must be at least
// the same number of duplicate values in second as there
// are in first.
func subsetActions(first, second []string) bool {
set := make(map[string]int)
for _, value := range second {
set[value]++
}
for _, value := range first {
if count, found := set[value]; !found {
return false
} else if count < 1 {
return false
} else {
set[value] = count - 1
}
}
return true
}
// Verifies if we have read/write policy set at bucketName, objectPrefix.
func isBucketPolicyReadWrite(statements []Statement, bucketName string, objectPrefix string) bool {
var commonActions, readWrite bool
sort.Strings(readWriteBucketActions)
sort.Strings(readWriteObjectActions)
for _, statement := range statements {
if statement.Principal.AWS[0] != "*" {
continue
}
for _, resource := range statement.Resources {
if resource == awsResourcePrefix+bucketName {
if subsetActions(readWriteBucketActions, statement.Actions) {
commonActions = true
continue
}
} else if resourceMatch(resource, awsResourcePrefix+bucketName+"/"+objectPrefix) {
if subsetActions(readWriteObjectActions, statement.Actions) {
readWrite = true
}
}
}
}
return commonActions && readWrite
}
// Verifies if we have write only policy set at bucketName, objectPrefix.
func isBucketPolicyWriteOnly(statements []Statement, bucketName string, objectPrefix string) bool {
var commonActions, writeOnly bool
sort.Strings(writeOnlyBucketActions)
sort.Strings(writeOnlyObjectActions)
for _, statement := range statements {
if statement.Principal.AWS[0] != "*" {
continue
}
for _, resource := range statement.Resources {
if resource == awsResourcePrefix+bucketName {
if subsetActions(writeOnlyBucketActions, statement.Actions) {
commonActions = true
continue
}
} else if resourceMatch(resource, awsResourcePrefix+bucketName+"/"+objectPrefix) {
if subsetActions(writeOnlyObjectActions, statement.Actions) {
writeOnly = true
}
}
}
}
return commonActions && writeOnly
}
// Verifies if we have read only policy set at bucketName, objectPrefix.
func isBucketPolicyReadOnly(statements []Statement, bucketName string, objectPrefix string) bool {
var commonActions, readOnly bool
sort.Strings(readOnlyBucketActions)
sort.Strings(readOnlyObjectActions)
for _, statement := range statements {
if statement.Principal.AWS[0] != "*" {
continue
}
for _, resource := range statement.Resources {
if resource == awsResourcePrefix+bucketName {
if subsetActions(readOnlyBucketActions, statement.Actions) {
commonActions = true
continue
}
} else if resourceMatch(resource, awsResourcePrefix+bucketName+"/"+objectPrefix) {
if subsetActions(readOnlyObjectActions, statement.Actions) {
readOnly = true
break
}
}
}
}
return commonActions && readOnly
}
// isAction - returns true if action is found among the list of actions.
func isAction(action string, actions []string) bool {
for _, act := range actions {
if action == act {
return true
}
}
return false
}
// removeReadBucketActions - removes readWriteBucket actions if found.
func removeReadBucketActions(statements []Statement, bucketName string) []Statement {
var newStatements []Statement
var bucketActionsRemoved bool
for _, statement := range statements {
for _, resource := range statement.Resources {
if resource == awsResourcePrefix+bucketName && !bucketActionsRemoved {
var newActions []string
for _, action := range statement.Actions {
if isAction(action, readWriteBucketActions) {
continue
}
newActions = append(newActions, action)
}
statement.Actions = newActions
bucketActionsRemoved = true
}
}
if len(statement.Actions) != 0 {
newStatements = append(newStatements, statement)
}
}
return newStatements
}
// removeListBucketActions - removes "s3:ListBucket" action if found.
func removeListBucketAction(statements []Statement, bucketName string) []Statement {
var newStatements []Statement
var listBucketActionsRemoved bool
for _, statement := range statements {
for _, resource := range statement.Resources {
if resource == awsResourcePrefix+bucketName && !listBucketActionsRemoved {
var newActions []string
for _, action := range statement.Actions {
if isAction(action, []string{"s3:ListBucket"}) {
delete(statement.Conditions, "StringEquals")
continue
}
newActions = append(newActions, action)
}
statement.Actions = newActions
listBucketActionsRemoved = true
}
}
if len(statement.Actions) != 0 {
newStatements = append(newStatements, statement)
}
}
return newStatements
}
// removeWriteObjectActions - removes writeOnlyObject actions if found.
func removeWriteObjectActions(statements []Statement, bucketName string, objectPrefix string) []Statement {
var newStatements []Statement
for _, statement := range statements {
for _, resource := range statement.Resources {
if resource == awsResourcePrefix+bucketName+"/"+objectPrefix+"*" {
var newActions []string
for _, action := range statement.Actions {
if isAction(action, writeOnlyObjectActions) {
continue
}
newActions = append(newActions, action)
}
statement.Actions = newActions
}
}
if len(statement.Actions) != 0 {
newStatements = append(newStatements, statement)
}
}
return newStatements
}
// removeReadObjectActions - removes "s3:GetObject" actions if found.
func removeReadObjectActions(statements []Statement, bucketName string, objectPrefix string) []Statement {
var newStatements []Statement
for _, statement := range statements {
for _, resource := range statement.Resources {
if resource == awsResourcePrefix+bucketName+"/"+objectPrefix+"*" {
var newActions []string
for _, action := range statement.Actions {
if isAction(action, []string{"s3:GetObject"}) {
continue
}
newActions = append(newActions, action)
}
statement.Actions = newActions
}
}
if len(statement.Actions) != 0 {
newStatements = append(newStatements, statement)
}
}
return newStatements
}
// removeReadWriteObjectActions - removes readWriteObject actions if found.
func removeReadWriteObjectActions(statements []Statement, bucketName string, objectPrefix string) []Statement {
var newStatements []Statement
for _, statement := range statements {
for _, resource := range statement.Resources {
if resource == awsResourcePrefix+bucketName+"/"+objectPrefix+"*" {
var newActions []string
for _, action := range statement.Actions {
if isAction(action, readWriteObjectActions) {
continue
}
newActions = append(newActions, action)
}
statement.Actions = newActions
}
}
if len(statement.Actions) != 0 {
newStatements = append(newStatements, statement)
}
}
return newStatements
}
// Removes read write bucket policy if found.
func removeBucketPolicyStatementReadWrite(statements []Statement, bucketName string, objectPrefix string) []Statement {
newStatements := removeReadBucketActions(statements, bucketName)
newStatements = removeListBucketAction(newStatements, bucketName)
newStatements = removeReadWriteObjectActions(newStatements, bucketName, objectPrefix)
return newStatements
}
// Removes write only bucket policy if found.
func removeBucketPolicyStatementWriteOnly(statements []Statement, bucketName string, objectPrefix string) []Statement {
newStatements := removeReadBucketActions(statements, bucketName)
newStatements = removeWriteObjectActions(newStatements, bucketName, objectPrefix)
return newStatements
}
// Removes read only bucket policy if found.
func removeBucketPolicyStatementReadOnly(statements []Statement, bucketName string, objectPrefix string) []Statement {
newStatements := removeReadBucketActions(statements, bucketName)
newStatements = removeListBucketAction(newStatements, bucketName)
newStatements = removeReadObjectActions(newStatements, bucketName, objectPrefix)
return newStatements
}
// Remove bucket policies based on the type.
func removeBucketPolicyStatement(statements []Statement, bucketName string, objectPrefix string) []Statement {
// Verify that a policy is defined on the object prefix, otherwise do not remove the policy
if isPolicyDefinedForObjectPrefix(statements, bucketName, objectPrefix) {
// Verify type of policy to be removed.
if isBucketPolicyReadWrite(statements, bucketName, objectPrefix) {
statements = removeBucketPolicyStatementReadWrite(statements, bucketName, objectPrefix)
} else if isBucketPolicyWriteOnly(statements, bucketName, objectPrefix) {
statements = removeBucketPolicyStatementWriteOnly(statements, bucketName, objectPrefix)
} else if isBucketPolicyReadOnly(statements, bucketName, objectPrefix) {
statements = removeBucketPolicyStatementReadOnly(statements, bucketName, objectPrefix)
}
}
return statements
}
// Checks if an access policy is defined for the given object prefix.
func isPolicyDefinedForObjectPrefix(statements []Statement, bucketName string, objectPrefix string) bool {
for _, statement := range statements {
for _, resource := range statement.Resources {
if resource == awsResourcePrefix+bucketName+"/"+objectPrefix+"*" {
return true
}
}
}
return false
}
// Unmarshals bucket policy byte array into a structured bucket access policy.
func unMarshalBucketPolicy(bucketPolicyBuf []byte) (BucketAccessPolicy, error) {
// Untyped lazy JSON struct.
type bucketAccessPolicyUntyped struct {
Version string
Statement []struct {
Sid string
Effect string
Principal struct {
AWS json.RawMessage
}
Action json.RawMessage
Resource json.RawMessage
Condition map[string]map[string]string
}
}
var policyUntyped = bucketAccessPolicyUntyped{}
// Unmarshal incoming policy into an untyped structure, to be
// evaluated lazily later.
err := json.Unmarshal(bucketPolicyBuf, &policyUntyped)
if err != nil {
return BucketAccessPolicy{}, err
}
var policy = BucketAccessPolicy{}
policy.Version = policyUntyped.Version
for _, stmtUntyped := range policyUntyped.Statement {
statement := Statement{}
// These are properly typed messages.
statement.Sid = stmtUntyped.Sid
statement.Effect = stmtUntyped.Effect
statement.Conditions = stmtUntyped.Condition
// The AWS principal can be either a []string or a plain
// 'string' in the JSON. Fall back to the string form when
// unmarshalling into a slice fails.
err = json.Unmarshal(stmtUntyped.Principal.AWS, &statement.Principal.AWS)
if err != nil {
var awsUser string
err = json.Unmarshal(stmtUntyped.Principal.AWS, &awsUser)
if err != nil {
return BucketAccessPolicy{}, err
}
statement.Principal.AWS = []string{awsUser}
}
// Actions can be either a []string or a plain 'string' in
// the JSON. Fall back to the string form when unmarshalling
// into a slice fails.
err = json.Unmarshal(stmtUntyped.Action, &statement.Actions)
if err != nil {
var action string
err = json.Unmarshal(stmtUntyped.Action, &action)
if err != nil {
return BucketAccessPolicy{}, err
}
statement.Actions = []string{action}
}
// Resources can be either a []string or a plain 'string' in
// the JSON. Fall back to the string form when unmarshalling
// into a slice fails.
err = json.Unmarshal(stmtUntyped.Resource, &statement.Resources)
if err != nil {
var resource string
err = json.Unmarshal(stmtUntyped.Resource, &resource)
if err != nil {
return BucketAccessPolicy{}, err
}
statement.Resources = []string{resource}
}
// Append the typed policy.
policy.Statements = append(policy.Statements, statement)
}
return policy, nil
}
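// Illustrative note (added comment, not from the original source): both of the
// following principal encodings decode to Principal.AWS == []string{"*"}:
//
//	"Principal": {"AWS": "*"}
//	"Principal": {"AWS": ["*"]}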
// Identifies the policy type from policy Statements.
func identifyPolicyType(policy BucketAccessPolicy, bucketName, objectPrefix string) (bucketPolicy BucketPolicy) {
if policy.Statements == nil {
return BucketPolicyNone
}
if isBucketPolicyReadWrite(policy.Statements, bucketName, objectPrefix) {
return BucketPolicyReadWrite
} else if isBucketPolicyWriteOnly(policy.Statements, bucketName, objectPrefix) {
return BucketPolicyWriteOnly
} else if isBucketPolicyReadOnly(policy.Statements, bucketName, objectPrefix) {
return BucketPolicyReadOnly
}
return BucketPolicyNone
}
// Generate policy statements for various bucket policies.
// refer to http://docs.aws.amazon.com/AmazonS3/latest/dev/access-policy-language-overview.html
// for more details about statement fields.
func generatePolicyStatement(bucketPolicy BucketPolicy, bucketName, objectPrefix string) ([]Statement, error) {
if !bucketPolicy.isValidBucketPolicy() {
return []Statement{}, ErrInvalidArgument(fmt.Sprintf("Invalid bucket policy provided. %s", bucketPolicy))
}
var statements []Statement
if bucketPolicy == BucketPolicyNone {
return []Statement{}, nil
} else if bucketPolicy == BucketPolicyReadWrite {
// Get read-write policy.
statements = setReadWriteStatement(bucketName, objectPrefix)
} else if bucketPolicy == BucketPolicyReadOnly {
// Get read only policy.
statements = setReadOnlyStatement(bucketName, objectPrefix)
} else if bucketPolicy == BucketPolicyWriteOnly {
// Return Write only policy.
statements = setWriteOnlyStatement(bucketName, objectPrefix)
}
return statements, nil
}
// Obtain statements for read-write BucketPolicy.
func setReadWriteStatement(bucketName, objectPrefix string) []Statement {
bucketResourceStatement := Statement{}
bucketResourceStatement.Effect = "Allow"
bucketResourceStatement.Principal.AWS = []string{"*"}
bucketResourceStatement.Resources = []string{fmt.Sprintf("%s%s", awsResourcePrefix, bucketName)}
bucketResourceStatement.Actions = readWriteBucketActions
bucketListResourceStatement := Statement{}
bucketListResourceStatement.Effect = "Allow"
bucketListResourceStatement.Principal.AWS = []string{"*"}
bucketListResourceStatement.Resources = []string{fmt.Sprintf("%s%s", awsResourcePrefix, bucketName)}
bucketListResourceStatement.Actions = []string{"s3:ListBucket"}
// Object prefix is present, make sure to set the conditions for s3:ListBucket.
if objectPrefix != "" {
bucketListResourceStatement.Conditions = map[string]map[string]string{
"StringEquals": {
"s3:prefix": objectPrefix,
},
}
}
objectResourceStatement := Statement{}
objectResourceStatement.Effect = "Allow"
objectResourceStatement.Principal.AWS = []string{"*"}
objectResourceStatement.Resources = []string{fmt.Sprintf("%s%s", awsResourcePrefix, bucketName+"/"+objectPrefix+"*")}
objectResourceStatement.Actions = readWriteObjectActions
// Save the read write policy.
statements := []Statement{}
statements = append(statements, bucketResourceStatement, bucketListResourceStatement, objectResourceStatement)
return statements
}
// Obtain statements for read only BucketPolicy.
func setReadOnlyStatement(bucketName, objectPrefix string) []Statement {
bucketResourceStatement := Statement{}
bucketResourceStatement.Effect = "Allow"
bucketResourceStatement.Principal.AWS = []string{"*"}
bucketResourceStatement.Resources = []string{fmt.Sprintf("%s%s", awsResourcePrefix, bucketName)}
bucketResourceStatement.Actions = readOnlyBucketActions
bucketListResourceStatement := Statement{}
bucketListResourceStatement.Effect = "Allow"
bucketListResourceStatement.Principal.AWS = []string{"*"}
bucketListResourceStatement.Resources = []string{fmt.Sprintf("%s%s", awsResourcePrefix, bucketName)}
bucketListResourceStatement.Actions = []string{"s3:ListBucket"}
// Object prefix is present, make sure to set the conditions for s3:ListBucket.
if objectPrefix != "" {
bucketListResourceStatement.Conditions = map[string]map[string]string{
"StringEquals": {
"s3:prefix": objectPrefix,
},
}
}
objectResourceStatement := Statement{}
objectResourceStatement.Effect = "Allow"
objectResourceStatement.Principal.AWS = []string{"*"}
objectResourceStatement.Resources = []string{fmt.Sprintf("%s%s", awsResourcePrefix, bucketName+"/"+objectPrefix+"*")}
objectResourceStatement.Actions = readOnlyObjectActions
statements := []Statement{}
// Save the read only policy.
statements = append(statements, bucketResourceStatement, bucketListResourceStatement, objectResourceStatement)
return statements
}
// Obtain statements for write only BucketPolicy.
func setWriteOnlyStatement(bucketName, objectPrefix string) []Statement {
bucketResourceStatement := Statement{}
objectResourceStatement := Statement{}
statements := []Statement{}
// Write only policy.
bucketResourceStatement.Effect = "Allow"
bucketResourceStatement.Principal.AWS = []string{"*"}
bucketResourceStatement.Resources = []string{fmt.Sprintf("%s%s", awsResourcePrefix, bucketName)}
bucketResourceStatement.Actions = writeOnlyBucketActions
objectResourceStatement.Effect = "Allow"
objectResourceStatement.Principal.AWS = []string{"*"}
objectResourceStatement.Resources = []string{fmt.Sprintf("%s%s", awsResourcePrefix, bucketName+"/"+objectPrefix+"*")}
objectResourceStatement.Actions = writeOnlyObjectActions
// Save the write only policy.
statements = append(statements, bucketResourceStatement, objectResourceStatement)
return statements
}
// resourceMatch - matches wildcards in 'pattern' against the given resource.
func resourceMatch(pattern, resource string) bool {
if pattern == "" {
return resource == pattern
}
if pattern == "*" {
return true
}
parts := strings.Split(pattern, "*")
if len(parts) == 1 {
return resource == pattern
}
tGlob := strings.HasSuffix(pattern, "*")
end := len(parts) - 1
if !strings.HasPrefix(resource, parts[0]) {
return false
}
for i := 1; i < end; i++ {
if !strings.Contains(resource, parts[i]) {
return false
}
idx := strings.Index(resource, parts[i]) + len(parts[i])
resource = resource[idx:]
}
return tGlob || strings.HasSuffix(resource, parts[end])
}
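// Illustrative examples (added for clarity, not part of the original source):
//
//	resourceMatch("arn:aws:s3:::my-bucket/*", "arn:aws:s3:::my-bucket/photos/1.jpg")  // true
//	resourceMatch("arn:aws:s3:::my-bucket/oo*", "arn:aws:s3:::my-bucket/output.txt")  // false, "ou" does not match the "oo" prefix
//	resourceMatch("arn:aws:s3:::my-bucket/oo*", "arn:aws:s3:::my-bucket/ootput.txt")  // true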

View File

@ -1,645 +0,0 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package minio
import (
"encoding/json"
"fmt"
"reflect"
"testing"
)
// Validates bucket policy string.
func TestIsValidBucketPolicy(t *testing.T) {
testCases := []struct {
inputPolicy BucketPolicy
expectedResult bool
}{
// valid inputs.
{BucketPolicy("none"), true},
{BucketPolicy("readonly"), true},
{BucketPolicy("readwrite"), true},
{BucketPolicy("writeonly"), true},
// invalid input.
{BucketPolicy("readwriteonly"), false},
{BucketPolicy("writeread"), false},
}
for i, testCase := range testCases {
actualResult := testCase.inputPolicy.isValidBucketPolicy()
if testCase.expectedResult != actualResult {
t.Errorf("Test %d: Expected IsValidBucket policy to be '%v' for policy \"%s\", but instead found it to be '%v'", i+1, testCase.expectedResult, testCase.inputPolicy, actualResult)
}
}
}
// Tests whether the first array is completely contained in the second array.
func TestSubsetActions(t *testing.T) {
testCases := []struct {
firstArray []string
secondArray []string
expectedResult bool
}{
{[]string{"aaa", "bbb"}, []string{"ccc", "bbb"}, false},
{[]string{"aaa", "bbb"}, []string{"aaa", "ccc"}, false},
{[]string{"aaa", "bbb"}, []string{"aaa", "bbb"}, true},
{[]string{"aaa", "bbb"}, []string{"aaa", "bbb", "ccc"}, true},
{[]string{"aaa", "bbb", "aaa"}, []string{"aaa", "bbb", "ccc"}, false},
{[]string{"aaa", "bbb", "aaa"}, []string{"aaa", "bbb", "bbb", "aaa"}, true},
{[]string{"aaa", "bbb", "aaa"}, []string{"aaa", "bbb"}, false},
{[]string{"aaa", "bbb", "aaa"}, []string{"aaa", "bbb", "aaa", "bbb", "ccc"}, true},
}
for i, testCase := range testCases {
actualResult := subsetActions(testCase.firstArray, testCase.secondArray)
if testCase.expectedResult != actualResult {
t.Errorf("Test %d: First array '%v' is not contained in second array '%v'", i+1, testCase.firstArray, testCase.secondArray)
}
}
}
// Tests validate Bucket Policy type identifier.
func TestIdentifyPolicyType(t *testing.T) {
testCases := []struct {
inputPolicy BucketAccessPolicy
bucketName string
objName string
expectedPolicy BucketPolicy
}{
{BucketAccessPolicy{Version: "2012-10-17"}, "my-bucket", "", BucketPolicyNone},
}
for i, testCase := range testCases {
actualBucketPolicy := identifyPolicyType(testCase.inputPolicy, testCase.bucketName, testCase.objName)
if testCase.expectedPolicy != actualBucketPolicy {
t.Errorf("Test %d: Expected bucket policy to be '%v', but instead got '%v'", i+1, testCase.expectedPolicy, actualBucketPolicy)
}
}
}
// Test validate Resource Statement Generator.
func TestGeneratePolicyStatement(t *testing.T) {
testCases := []struct {
bucketPolicy BucketPolicy
bucketName string
objectPrefix string
expectedStatements []Statement
shouldPass bool
err error
}{
{BucketPolicy("my-policy"), "my-bucket", "", []Statement{}, false, ErrInvalidArgument(fmt.Sprintf("Invalid bucket policy provided. %s", BucketPolicy("my-policy")))},
{BucketPolicyNone, "my-bucket", "", []Statement{}, true, nil},
{BucketPolicyReadOnly, "read-only-bucket", "", setReadOnlyStatement("read-only-bucket", ""), true, nil},
{BucketPolicyWriteOnly, "write-only-bucket", "", setWriteOnlyStatement("write-only-bucket", ""), true, nil},
{BucketPolicyReadWrite, "read-write-bucket", "", setReadWriteStatement("read-write-bucket", ""), true, nil},
}
for i, testCase := range testCases {
actualStatements, err := generatePolicyStatement(testCase.bucketPolicy, testCase.bucketName, testCase.objectPrefix)
if err != nil && testCase.shouldPass {
t.Errorf("Test %d: Expected to pass, but failed with: <ERROR> %s", i+1, err.Error())
}
if err == nil && !testCase.shouldPass {
t.Errorf("Test %d: Expected to fail with <ERROR> \"%s\", but passed instead", i+1, testCase.err.Error())
}
// Failed as expected, but does it fail for the expected reason.
if err != nil && !testCase.shouldPass {
if err.Error() != testCase.err.Error() {
t.Errorf("Test %d: Expected to fail with error \"%s\", but instead failed with error \"%s\" instead", i+1, testCase.err.Error(), err.Error())
}
}
// Test passes as expected, but the output values are verified for correctness here.
if err == nil && testCase.shouldPass {
if !reflect.DeepEqual(testCase.expectedStatements, actualStatements) {
t.Errorf("Test %d: The expected statements from resource statement generator doesn't match the actual statements", i+1)
}
}
}
}
// Tests validating read only statement generator.
func TestsetReadOnlyStatement(t *testing.T) {
expectedReadOnlyStatement := func(bucketName, objectPrefix string) []Statement {
bucketResourceStatement := &Statement{}
bucketListResourceStatement := &Statement{}
objectResourceStatement := &Statement{}
statements := []Statement{}
bucketResourceStatement.Effect = "Allow"
bucketResourceStatement.Principal.AWS = []string{"*"}
bucketResourceStatement.Resources = []string{fmt.Sprintf("%s%s", awsResourcePrefix, bucketName)}
bucketResourceStatement.Actions = readOnlyBucketActions
bucketListResourceStatement.Effect = "Allow"
bucketListResourceStatement.Principal.AWS = []string{"*"}
bucketListResourceStatement.Resources = []string{fmt.Sprintf("%s%s", awsResourcePrefix, bucketName)}
bucketListResourceStatement.Actions = []string{"s3:ListBucket"}
if objectPrefix != "" {
bucketListResourceStatement.Conditions = map[string]map[string]string{
"StringEquals": {
"s3:prefix": objectPrefix,
},
}
}
objectResourceStatement.Effect = "Allow"
objectResourceStatement.Principal.AWS = []string{"*"}
objectResourceStatement.Resources = []string{fmt.Sprintf("%s%s", awsResourcePrefix, bucketName+"/"+objectPrefix+"*")}
objectResourceStatement.Actions = readOnlyObjectActions
// Save the read only policy.
statements = append(statements, *bucketResourceStatement, *bucketListResourceStatement, *objectResourceStatement)
return statements
}
testCases := []struct {
// inputs.
bucketName string
objectPrefix string
// expected result.
expectedStatements []Statement
}{
{"my-bucket", "", expectedReadOnlyStatement("my-bucket", "")},
{"my-bucket", "Asia/", expectedReadOnlyStatement("my-bucket", "Asia/")},
{"my-bucket", "Asia/India", expectedReadOnlyStatement("my-bucket", "Asia/India")},
}
for i, testCase := range testCases {
actualStaments := setReadOnlyStatement(testCase.bucketName, testCase.objectPrefix)
if !reflect.DeepEqual(testCase.expectedStatements, actualStaments) {
t.Errorf("Test %d: The expected statements from resource statement generator doesn't match the actual statements", i+1)
}
}
}
// Tests validating write only statement generator.
func TestsetWriteOnlyStatement(t *testing.T) {
expectedWriteOnlyStatement := func(bucketName, objectPrefix string) []Statement {
bucketResourceStatement := &Statement{}
objectResourceStatement := &Statement{}
statements := []Statement{}
// Write only policy.
bucketResourceStatement.Effect = "Allow"
bucketResourceStatement.Principal.AWS = []string{"*"}
bucketResourceStatement.Resources = []string{fmt.Sprintf("%s%s", awsResourcePrefix, bucketName)}
bucketResourceStatement.Actions = writeOnlyBucketActions
objectResourceStatement.Effect = "Allow"
objectResourceStatement.Principal.AWS = []string{"*"}
objectResourceStatement.Resources = []string{fmt.Sprintf("%s%s", awsResourcePrefix, bucketName+"/"+objectPrefix+"*")}
objectResourceStatement.Actions = writeOnlyObjectActions
// Save the write only policy.
statements = append(statements, *bucketResourceStatement, *objectResourceStatement)
return statements
}
testCases := []struct {
// inputs.
bucketName string
objectPrefix string
// expected result.
expectedStatements []Statement
}{
{"my-bucket", "", expectedWriteOnlyStatement("my-bucket", "")},
{"my-bucket", "Asia/", expectedWriteOnlyStatement("my-bucket", "Asia/")},
{"my-bucket", "Asia/India", expectedWriteOnlyStatement("my-bucket", "Asia/India")},
}
for i, testCase := range testCases {
actualStaments := setWriteOnlyStatement(testCase.bucketName, testCase.objectPrefix)
if !reflect.DeepEqual(testCase.expectedStatements, actualStaments) {
t.Errorf("Test %d: The expected statements from resource statement generator doesn't match the actual statements", i+1)
}
}
}
// Tests validating read-write statement generator.
func TestsetReadWriteStatement(t *testing.T) {
// Obtain statements for read-write BucketPolicy.
expectedReadWriteStatement := func(bucketName, objectPrefix string) []Statement {
bucketResourceStatement := &Statement{}
bucketListResourceStatement := &Statement{}
objectResourceStatement := &Statement{}
statements := []Statement{}
bucketResourceStatement.Effect = "Allow"
bucketResourceStatement.Principal.AWS = []string{"*"}
bucketResourceStatement.Resources = []string{fmt.Sprintf("%s%s", awsResourcePrefix, bucketName)}
bucketResourceStatement.Actions = readWriteBucketActions
bucketListResourceStatement.Effect = "Allow"
bucketListResourceStatement.Principal.AWS = []string{"*"}
bucketListResourceStatement.Resources = []string{fmt.Sprintf("%s%s", awsResourcePrefix, bucketName)}
bucketListResourceStatement.Actions = []string{"s3:ListBucket"}
if objectPrefix != "" {
bucketListResourceStatement.Conditions = map[string]map[string]string{
"StringEquals": {
"s3:prefix": objectPrefix,
},
}
}
objectResourceStatement.Effect = "Allow"
objectResourceStatement.Principal.AWS = []string{"*"}
objectResourceStatement.Resources = []string{fmt.Sprintf("%s%s", awsResourcePrefix, bucketName+"/"+objectPrefix+"*")}
objectResourceStatement.Actions = readWriteObjectActions
// Save the read write policy.
statements = append(statements, *bucketResourceStatement, *bucketListResourceStatement, *objectResourceStatement)
return statements
}
testCases := []struct {
// inputs.
bucketName string
objectPrefix string
// expected result.
expectedStatements []Statement
}{
{"my-bucket", "", expectedReadWriteStatement("my-bucket", "")},
{"my-bucket", "Asia/", expectedReadWriteStatement("my-bucket", "Asia/")},
{"my-bucket", "Asia/India", expectedReadWriteStatement("my-bucket", "Asia/India")},
}
for i, testCase := range testCases {
actualStaments := setReadWriteStatement(testCase.bucketName, testCase.objectPrefix)
if !reflect.DeepEqual(testCase.expectedStatements, actualStaments) {
t.Errorf("Test %d: The expected statements from resource statement generator doesn't match the actual statements", i+1)
}
}
}
// Tests validate Unmarshalling of BucketAccessPolicy.
func TestUnMarshalBucketPolicy(t *testing.T) {
bucketAccesPolicies := []BucketAccessPolicy{
{Version: "1.0"},
{Version: "1.0", Statements: setReadOnlyStatement("minio-bucket", "")},
{Version: "1.0", Statements: setReadWriteStatement("minio-bucket", "Asia/")},
{Version: "1.0", Statements: setWriteOnlyStatement("minio-bucket", "Asia/India/")},
}
testCases := []struct {
inputPolicy BucketAccessPolicy
// expected results.
expectedPolicy BucketAccessPolicy
err error
// Flag indicating whether the test should pass.
shouldPass bool
}{
{bucketAccesPolicies[0], bucketAccesPolicies[0], nil, true},
{bucketAccesPolicies[1], bucketAccesPolicies[1], nil, true},
{bucketAccesPolicies[2], bucketAccesPolicies[2], nil, true},
{bucketAccesPolicies[3], bucketAccesPolicies[3], nil, true},
}
for i, testCase := range testCases {
inputPolicyBytes, e := json.Marshal(testCase.inputPolicy)
if e != nil {
t.Fatalf("Test %d: Couldn't Marshal bucket policy", i+1)
}
actualAccessPolicy, err := unMarshalBucketPolicy(inputPolicyBytes)
if err != nil && testCase.shouldPass {
t.Errorf("Test %d: Expected to pass, but failed with: <ERROR> %s", i+1, err.Error())
}
if err == nil && !testCase.shouldPass {
t.Errorf("Test %d: Expected to fail with <ERROR> \"%s\", but passed instead", i+1, testCase.err.Error())
}
// Failed as expected, but does it fail for the expected reason.
if err != nil && !testCase.shouldPass {
if err.Error() != testCase.err.Error() {
t.Errorf("Test %d: Expected to fail with error \"%s\", but instead failed with error \"%s\" instead", i+1, testCase.err.Error(), err.Error())
}
}
// Test passes as expected, but the output values are verified for correctness here.
if err == nil && testCase.shouldPass {
if !reflect.DeepEqual(testCase.expectedPolicy, actualAccessPolicy) {
t.Errorf("Test %d: The expected statements from resource statement generator doesn't match the actual statements", i+1)
}
}
}
}
// Statement.Action, Statement.Resource and Statement.Principal.AWS fields can also be plain strings.
// Set these values to plain strings and test unMarshalBucketPolicy.
func TestUnMarshalBucketPolicyUntyped(t *testing.T) {
obtainRaw := func(v interface{}, t *testing.T) []byte {
rawData, err := json.Marshal(v)
if err != nil {
t.Fatal(err)
}
return rawData
}
type untypedStatement struct {
Sid string
Effect string
Principal struct {
AWS json.RawMessage
}
Action json.RawMessage
Resource json.RawMessage
Condition map[string]map[string]string
}
type bucketAccessPolicyUntyped struct {
Version string
Statement []untypedStatement
}
statements := setReadOnlyStatement("my-bucket", "Asia/")
expectedBucketPolicy := BucketAccessPolicy{Statements: statements}
accessPolicyUntyped := bucketAccessPolicyUntyped{}
accessPolicyUntyped.Statement = make([]untypedStatement, len(statements))
accessPolicyUntyped.Statement[0].Effect = statements[0].Effect
accessPolicyUntyped.Statement[0].Principal.AWS = obtainRaw(statements[0].Principal.AWS[0], t)
accessPolicyUntyped.Statement[0].Action = obtainRaw(statements[0].Actions, t)
accessPolicyUntyped.Statement[0].Resource = obtainRaw(statements[0].Resources, t)
accessPolicyUntyped.Statement[1].Effect = statements[1].Effect
accessPolicyUntyped.Statement[1].Principal.AWS = obtainRaw(statements[1].Principal.AWS[0], t)
accessPolicyUntyped.Statement[1].Action = obtainRaw(statements[1].Actions, t)
accessPolicyUntyped.Statement[1].Resource = obtainRaw(statements[1].Resources, t)
accessPolicyUntyped.Statement[1].Condition = statements[1].Conditions
// Set the values as plain strings.
accessPolicyUntyped.Statement[2].Effect = statements[2].Effect
accessPolicyUntyped.Statement[2].Principal.AWS = obtainRaw(statements[2].Principal.AWS[0], t)
accessPolicyUntyped.Statement[2].Action = obtainRaw(statements[2].Actions[0], t)
accessPolicyUntyped.Statement[2].Resource = obtainRaw(statements[2].Resources[0], t)
inputPolicyBytes := obtainRaw(accessPolicyUntyped, t)
actualAccessPolicy, err := unMarshalBucketPolicy(inputPolicyBytes)
if err != nil {
t.Fatal("Unmarshalling bucket policy from untyped statements failed")
}
if !reflect.DeepEqual(expectedBucketPolicy, actualAccessPolicy) {
t.Errorf("Expected BucketPolicy after unmarshalling untyped statements doesn't match the actual one")
}
}
// Tests validate whether access policy is defined for the given object prefix
func TestIsPolicyDefinedForObjectPrefix(t *testing.T) {
testCases := []struct {
bucketName string
objectPrefix string
inputStatements []Statement
expectedResult bool
}{
{"my-bucket", "abc/", setReadOnlyStatement("my-bucket", "abc/"), true},
{"my-bucket", "abc/", setReadOnlyStatement("my-bucket", "ab/"), false},
{"my-bucket", "abc/", setReadOnlyStatement("my-bucket", "abcde"), false},
{"my-bucket", "abc/", setReadOnlyStatement("my-bucket", "abc/de"), false},
{"my-bucket", "abc", setReadOnlyStatement("my-bucket", "abc"), true},
{"bucket", "", setReadOnlyStatement("bucket", "abc/"), false},
}
for i, testCase := range testCases {
actualResult := isPolicyDefinedForObjectPrefix(testCase.inputStatements, testCase.bucketName, testCase.objectPrefix)
if actualResult != testCase.expectedResult {
t.Errorf("Test %d: Expected isPolicyDefinedForObjectPrefix to '%v', but instead found '%v'", i+1, testCase.expectedResult, actualResult)
}
}
}
// Tests validate removal of policy statement from the list of statements.
func TestRemoveBucketPolicyStatement(t *testing.T) {
var emptyStatement []Statement
testCases := []struct {
bucketName string
objectPrefix string
inputStatements []Statement
expectedStatements []Statement
}{
{"my-bucket", "", nil, emptyStatement},
{"read-only-bucket", "", setReadOnlyStatement("read-only-bucket", ""), emptyStatement},
{"write-only-bucket", "", setWriteOnlyStatement("write-only-bucket", ""), emptyStatement},
{"read-write-bucket", "", setReadWriteStatement("read-write-bucket", ""), emptyStatement},
{"my-bucket", "abcd", setReadOnlyStatement("my-bucket", "abc"), setReadOnlyStatement("my-bucket", "abc")},
{"my-bucket", "abc/de", setReadOnlyStatement("my-bucket", "abc/"), setReadOnlyStatement("my-bucket", "abc/")},
{"my-bucket", "abcd", setWriteOnlyStatement("my-bucket", "abc"), setWriteOnlyStatement("my-bucket", "abc")},
{"my-bucket", "abc/de", setWriteOnlyStatement("my-bucket", "abc/"), setWriteOnlyStatement("my-bucket", "abc/")},
{"my-bucket", "abcd", setReadWriteStatement("my-bucket", "abc"), setReadWriteStatement("my-bucket", "abc")},
{"my-bucket", "abc/de", setReadWriteStatement("my-bucket", "abc/"), setReadWriteStatement("my-bucket", "abc/")},
}
for i, testCase := range testCases {
actualStatements := removeBucketPolicyStatement(testCase.inputStatements, testCase.bucketName, testCase.objectPrefix)
if !reflect.DeepEqual(testCase.expectedStatements, actualStatements) {
t.Errorf("Test %d: The expected statements from resource statement generator doesn't match the actual statements", i+1)
}
}
}
// Tests validate removing of read only bucket statement.
func TestRemoveBucketPolicyStatementReadOnly(t *testing.T) {
var emptyStatement []Statement
testCases := []struct {
bucketName string
objectPrefix string
inputStatements []Statement
expectedStatements []Statement
}{
{"my-bucket", "", []Statement{}, emptyStatement},
{"read-only-bucket", "", setReadOnlyStatement("read-only-bucket", ""), emptyStatement},
{"read-only-bucket", "abc/", setReadOnlyStatement("read-only-bucket", "abc/"), emptyStatement},
{"my-bucket", "abc/", append(setReadOnlyStatement("my-bucket", "abc/"), setReadOnlyStatement("my-bucket", "def/")...), setReadOnlyStatement("my-bucket", "def/")},
}
for i, testCase := range testCases {
actualStatements := removeBucketPolicyStatementReadOnly(testCase.inputStatements, testCase.bucketName, testCase.objectPrefix)
if !reflect.DeepEqual(testCase.expectedStatements, actualStatements) {
t.Errorf("Test %d: Expected policy statements doesn't match the actual one", i+1)
}
}
}
// Tests validate removing of write only bucket statement.
func TestRemoveBucketPolicyStatementWriteOnly(t *testing.T) {
var emptyStatement []Statement
testCases := []struct {
bucketName string
objectPrefix string
inputStatements []Statement
expectedStatements []Statement
}{
{"my-bucket", "", []Statement{}, emptyStatement},
{"write-only-bucket", "", setWriteOnlyStatement("write-only-bucket", ""), emptyStatement},
{"write-only-bucket", "abc/", setWriteOnlyStatement("write-only-bucket", "abc/"), emptyStatement},
{"my-bucket", "abc/", append(setWriteOnlyStatement("my-bucket", "abc/"), setWriteOnlyStatement("my-bucket", "def/")...), setWriteOnlyStatement("my-bucket", "def/")},
}
for i, testCase := range testCases {
actualStatements := removeBucketPolicyStatementWriteOnly(testCase.inputStatements, testCase.bucketName, testCase.objectPrefix)
if !reflect.DeepEqual(testCase.expectedStatements, actualStatements) {
t.Errorf("Test %d: Expected policy statements doesn't match the actual one", i+1)
}
}
}
// Tests validate removing of read-write bucket statement.
func TestRemoveBucketPolicyStatementReadWrite(t *testing.T) {
var emptyStatement []Statement
testCases := []struct {
bucketName string
objectPrefix string
inputStatements []Statement
expectedStatements []Statement
}{
{"my-bucket", "", []Statement{}, emptyStatement},
{"read-write-bucket", "", setReadWriteStatement("read-write-bucket", ""), emptyStatement},
{"read-write-bucket", "abc/", setReadWriteStatement("read-write-bucket", "abc/"), emptyStatement},
{"my-bucket", "abc/", append(setReadWriteStatement("my-bucket", "abc/"), setReadWriteStatement("my-bucket", "def/")...), setReadWriteStatement("my-bucket", "def/")},
}
for i, testCase := range testCases {
actualStatements := removeBucketPolicyStatementReadWrite(testCase.inputStatements, testCase.bucketName, testCase.objectPrefix)
if !reflect.DeepEqual(testCase.expectedStatements, actualStatements) {
t.Errorf("Test %d: Expected policy statements doesn't match the actual one", i+1)
}
}
}
// Tests validate Bucket policy resource matcher.
func TestBucketPolicyResourceMatch(t *testing.T) {
// generates a statement with the given resource.
generateStatement := func(resource string) Statement {
statement := Statement{}
statement.Resources = []string{resource}
return statement
}
// generates resource prefix.
generateResource := func(bucketName, objectName string) string {
return awsResourcePrefix + bucketName + "/" + objectName
}
testCases := []struct {
resourceToMatch string
statement Statement
expectedResourceMatch bool
}{
// Test case 1-4.
// Policy with resource ending with bucket/* allows access to all objects inside the given bucket.
{generateResource("minio-bucket", ""), generateStatement(fmt.Sprintf("%s%s", awsResourcePrefix, "minio-bucket"+"/*")), true},
{generateResource("minio-bucket", ""), generateStatement(fmt.Sprintf("%s%s", awsResourcePrefix, "minio-bucket"+"/*")), true},
{generateResource("minio-bucket", ""), generateStatement(fmt.Sprintf("%s%s", awsResourcePrefix, "minio-bucket"+"/*")), true},
{generateResource("minio-bucket", ""), generateStatement(fmt.Sprintf("%s%s", awsResourcePrefix, "minio-bucket"+"/*")), true},
// Test case - 5.
// Policy with resource ending with bucket/oo* should not allow access to bucket/output.txt.
{generateResource("minio-bucket", "output.txt"), generateStatement(fmt.Sprintf("%s%s", awsResourcePrefix, "minio-bucket"+"/oo*")), false},
// Test case - 6.
// Policy with resource ending with bucket/oo* should allow access to bucket/ootput.txt.
{generateResource("minio-bucket", "ootput.txt"), generateStatement(fmt.Sprintf("%s%s", awsResourcePrefix, "minio-bucket"+"/oo*")), true},
// Test case - 7.
// Policy with resource ending with bucket/oo* allows access to all subfolders starting with "oo" inside given bucket.
{generateResource("minio-bucket", "oop-bucket/my-file"), generateStatement(fmt.Sprintf("%s%s", awsResourcePrefix, "minio-bucket"+"/oo*")), true},
// Test case - 8.
{generateResource("minio-bucket", "Asia/India/1.pjg"), generateStatement(fmt.Sprintf("%s%s", awsResourcePrefix, "minio-bucket"+"/Asia/Japan/*")), false},
// Test case - 9.
{generateResource("minio-bucket", "Asia/India/1.pjg"), generateStatement(fmt.Sprintf("%s%s", awsResourcePrefix, "minio-bucket"+"/Asia/Japan/*")), false},
// Test case - 10.
// Proves that the name space is flat.
{generateResource("minio-bucket", "Africa/Bihar/India/design_info.doc/Bihar"), generateStatement(fmt.Sprintf("%s%s", awsResourcePrefix,
"minio-bucket"+"/*/India/*/Bihar")), true},
// Test case - 11.
// Proves that the name space is flat.
{generateResource("minio-bucket", "Asia/China/India/States/Bihar/output.txt"), generateStatement(fmt.Sprintf("%s%s", awsResourcePrefix,
"minio-bucket"+"/*/India/*/Bihar/*")), true},
}
for i, testCase := range testCases {
actualResourceMatch := resourceMatch(testCase.statement.Resources[0], testCase.resourceToMatch)
if testCase.expectedResourceMatch != actualResourceMatch {
t.Errorf("Test %d: Expected Resource match to be `%v`, but instead found it to be `%v`", i+1, testCase.expectedResourceMatch, actualResourceMatch)
}
}
}
// Tests validate whether the bucket policy is read only.
func TestIsBucketPolicyReadOnly(t *testing.T) {
testCases := []struct {
bucketName string
objectPrefix string
inputStatements []Statement
// expected result.
expectedResult bool
}{
{"my-bucket", "", []Statement{}, false},
{"read-only-bucket", "", setReadOnlyStatement("read-only-bucket", ""), true},
{"write-only-bucket", "", setWriteOnlyStatement("write-only-bucket", ""), false},
{"read-write-bucket", "", setReadWriteStatement("read-write-bucket", ""), true},
{"my-bucket", "abc", setReadOnlyStatement("my-bucket", ""), true},
{"my-bucket", "abc", setReadOnlyStatement("my-bucket", "abc"), true},
{"my-bucket", "abcde", setReadOnlyStatement("my-bucket", "abc"), true},
{"my-bucket", "abc/d", setReadOnlyStatement("my-bucket", "abc/"), true},
{"my-bucket", "abc", setWriteOnlyStatement("my-bucket", ""), false},
}
for i, testCase := range testCases {
actualResult := isBucketPolicyReadOnly(testCase.inputStatements, testCase.bucketName, testCase.objectPrefix)
if testCase.expectedResult != actualResult {
t.Errorf("Test %d: Expected isBucketPolicyReadonly to '%v', but instead found '%v'", i+1, testCase.expectedResult, actualResult)
}
}
}
// Tests validate whether the bucket policy is read-write.
func TestIsBucketPolicyReadWrite(t *testing.T) {
testCases := []struct {
bucketName string
objectPrefix string
inputStatements []Statement
// expected result.
expectedResult bool
}{
{"my-bucket", "", []Statement{}, false},
{"read-only-bucket", "", setReadOnlyStatement("read-only-bucket", ""), false},
{"write-only-bucket", "", setWriteOnlyStatement("write-only-bucket", ""), false},
{"read-write-bucket", "", setReadWriteStatement("read-write-bucket", ""), true},
{"my-bucket", "abc", setReadWriteStatement("my-bucket", ""), true},
{"my-bucket", "abc", setReadWriteStatement("my-bucket", "abc"), true},
{"my-bucket", "abcde", setReadWriteStatement("my-bucket", "abc"), true},
{"my-bucket", "abc/d", setReadWriteStatement("my-bucket", "abc/"), true},
}
for i, testCase := range testCases {
actualResult := isBucketPolicyReadWrite(testCase.inputStatements, testCase.bucketName, testCase.objectPrefix)
if testCase.expectedResult != actualResult {
t.Errorf("Test %d: Expected isBucketPolicyReadonly to '%v', but instead found '%v'", i+1, testCase.expectedResult, actualResult)
}
}
}
// Tests validate whether the bucket policy is write only.
func TestIsBucketPolicyWriteOnly(t *testing.T) {
testCases := []struct {
bucketName string
objectPrefix string
inputStatements []Statement
// expected result.
expectedResult bool
}{
{"my-bucket", "", []Statement{}, false},
{"read-only-bucket", "", setReadOnlyStatement("read-only-bucket", ""), false},
{"write-only-bucket", "", setWriteOnlyStatement("write-only-bucket", ""), true},
{"read-write-bucket", "", setReadWriteStatement("read-write-bucket", ""), true},
{"my-bucket", "abc", setWriteOnlyStatement("my-bucket", ""), true},
{"my-bucket", "abc", setWriteOnlyStatement("my-bucket", "abc"), true},
{"my-bucket", "abcde", setWriteOnlyStatement("my-bucket", "abc"), true},
{"my-bucket", "abc/d", setWriteOnlyStatement("my-bucket", "abc/"), true},
{"my-bucket", "abc", setReadOnlyStatement("my-bucket", ""), false},
}
for i, testCase := range testCases {
actualResult := isBucketPolicyWriteOnly(testCase.inputStatements, testCase.bucketName, testCase.objectPrefix)
if testCase.expectedResult != actualResult {
t.Errorf("Test %d: Expected isBucketPolicyReadonly to '%v', but instead found '%v'", i+1, testCase.expectedResult, actualResult)
}
}
}

View File

@ -40,3 +40,7 @@ const maxMultipartPutObjectSize = 1024 * 1024 * 1024 * 1024 * 5
// optimalReadBufferSize - optimal buffer 5MiB used for reading
// through Read operation.
const optimalReadBufferSize = 1024 * 1024 * 5
// unsignedPayload - value to be set to X-Amz-Content-Sha256 header when
// we don't want to sign the request payload
const unsignedPayload = "UNSIGNED-PAYLOAD"

View File

@ -91,33 +91,19 @@ Creates a new bucket.
__Parameters__

| Param | Type | Description |
|---|---|---|
|`bucketName` | _string_ | Name of the bucket. |
|`location` | _string_ | Region where the bucket is created. Default value is us-east-1. Valid values are listed below: |
| | |us-east-1 |
| | |us-west-1 |
| | |us-west-2 |
| | |eu-west-1 |
| | |eu-central-1|
| | |ap-southeast-1|
| | |ap-northeast-1|
| | |ap-southeast-2|
| | |sa-east-1|

__Example__
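The example body itself falls outside this hunk; as a rough sketch (not the documentation's own example, and assuming an already initialized `minioClient`), creating a bucket might look like this:

```go
// Create a bucket named "mybucket" in the default us-east-1 region.
err := minioClient.MakeBucket("mybucket", "us-east-1")
if err != nil {
	log.Fatalln(err)
}
log.Println("Successfully created mybucket.")
```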
@ -139,30 +125,15 @@ fmt.Println("Successfully created mybucket.")
Lists all buckets.

| Param | Type | Description |
|---|---|---|
|`bucketList` | _[]BucketInfo_ | Lists buckets in the format shown below: |

| Param | Type | Description |
|---|---|---|
|`bucket.Name` | _string_ | bucket name. |
|`bucket.CreationDate` | _time.Time_ | date when bucket was created. |

__Example__
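The original example is not shown in this hunk; a minimal sketch of the call, assuming an initialized `minioClient`, might look like:

```go
// List all buckets and print their names and creation dates.
buckets, err := minioClient.ListBuckets()
if err != nil {
	log.Fatalln(err)
}
for _, bucket := range buckets {
	log.Println(bucket.Name, bucket.CreationDate)
}
```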
@ -250,32 +221,16 @@ __Parameters__
__Return Value__

|Param |Type |Description |
|:---|:---| :---|
|`chan ObjectInfo` | _chan ObjectInfo_ |Read channel for all the objects in the bucket; the objects are of the format listed below: |

|Param |Type |Description |
|:---|:---| :---|
|`objectInfo.Key` | _string_ |name of the object. |
|`objectInfo.Size` | _int64_ |size of the object. |
|`objectInfo.ETag` | _string_ |etag of the object. |
|`objectInfo.LastModified` | _time.Time_ |modified time stamp. |

```go
@ -317,32 +272,16 @@ __Parameters__
__Return Value__

|Param |Type |Description |
|:---|:---| :---|
|`chan ObjectInfo` | _chan ObjectInfo_ |Read channel for all the objects in the bucket; the objects are of the format listed below: |

|Param |Type |Description |
|:---|:---| :---|
|`objectInfo.Key` | _string_ |name of the object. |
|`objectInfo.Size` | _int64_ |size of the object. |
|`objectInfo.ETag` | _string_ |etag of the object. |
|`objectInfo.LastModified` | _time.Time_ |modified time stamp. |

```go
@ -384,32 +323,17 @@ __Parameters__
__Return Value__

|Param |Type |Description |
|:---|:---| :---|
|`chan ObjectMultipartInfo` | _chan ObjectMultipartInfo_ |emits multipart objects of the format listed below: |

|Param |Type |Description |
|:---|:---| :---|
|`multiPartObjInfo.Key` | _string_ |name of the incomplete object. |
|`multiPartObjInfo.UploadID` | _string_ |upload ID of the incomplete object.|
|`multiPartObjInfo.Size` | _int64_ |size of the incompletely uploaded object.|

__Example__
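The example itself lies outside this hunk; as a hedged sketch (assuming an initialized `minioClient`, and that the call takes a bucket name, object prefix, recursive flag and done channel as described in these docs), usage might look like:

```go
// Create a done channel to stop listing once we are finished.
doneCh := make(chan struct{})
defer close(doneCh)

// List incomplete multipart uploads below the "photos/" prefix, recursively.
for multiPartObject := range minioClient.ListIncompleteUploads("mybucket", "photos/", true, doneCh) {
	fmt.Println(multiPartObject.Key, multiPartObject.UploadID, multiPartObject.Size)
}
```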
@ -489,7 +413,6 @@ if _, err = io.Copy(localFile, object); err != nil {
__Parameters__

|Param |Type |Description |
|:---|:---| :---|
|`bucketName` | _string_ |name of the bucket. |
@ -650,32 +573,17 @@ __Parameters__
__Return Value__

|Param |Type |Description |
|:---|:---| :---|
|`objInfo` | _ObjectInfo_ |object stat info in the format listed below: |

|Param |Type |Description |
|:---|:---| :---|
|`objInfo.LastModified` | _time.Time_ |modified time stamp. |
|`objInfo.ETag` | _string_ |etag of the object.|
|`objInfo.ContentType` | _string_ |Content-Type of the object.|
|`objInfo.Size` | _int64_ |size of the object.|

__Example__
@ -860,14 +768,12 @@ POST your content from the command line using `curl`:
```go
fmt.Printf("curl ")
for k, v := range formData {
	fmt.Printf("-F %s=%s ", k, v)
}
fmt.Printf("-F file=@/etc/bash.bashrc ")
fmt.Printf("%s\n", url)
```
## 5. Bucket policy/notification operations
@ -880,41 +786,15 @@ Set access permissions on bucket or an object prefix.
__Parameters__

|Param |Type |Description |
|:---|:---| :---|
|`bucketName` | _string_ |name of the bucket.|
|`objectPrefix` | _string_ |name of the object prefix.|
|`policy` | _BucketPolicy_ |policy can be:|
| | |BucketPolicyNone|
| | |BucketPolicyReadOnly|
| | |BucketPolicyReadWrite|
| | |BucketPolicyWriteOnly|

__Return Values__
@ -1034,19 +914,20 @@ __Example__
topicArn := NewArn("aws", "s3", "us-east-1", "804605494417", "PhotoUpdate")

topicConfig := NewNotificationConfig(topicArn)
topicConfig.AddEvents(minio.ObjectCreatedAll, minio.ObjectRemovedAll)
topicConfig.AddFilterPrefix("photos/")
topicConfig.AddFilterSuffix(".jpg")

bucketNotification := BucketNotification{}
bucketNotification.AddTopic(topicConfig)
err := c.SetBucketNotification(bucketName, bucketNotification)
if err != nil {
	fmt.Println("Cannot set the bucket notification: " + err.Error())
}
```
<a name="DeleteBucketNotification"></a> <a name="RemoveAllBucketNotification"></a>
### DeleteBucketNotification(bucketName string) error ### RemoveAllBucketNotification(bucketName string) error
Remove all configured bucket notifications on a bucket. Remove all configured bucket notifications on a bucket.
@ -1068,32 +949,88 @@ __Example__
```go
err := c.RemoveAllBucketNotification(bucketName)
if err != nil {
	fmt.Println("Cannot remove bucket notifications.")
}
```
<a name="ListenBucketNotification"></a>
### ListenBucketNotification(bucketName string, accountArn Arn, doneCh chan<- struct{}) <-chan NotificationInfo
ListenBucketNotification API receives bucket notification events through the
notification channel. The returned notification channel has two fields
'Records' and 'Err'.
- 'Records' holds the notifications received from the server.
- 'Err' indicates any error while processing the received notifications.
NOTE: Notification channel is closed at the first occurrence of an error.
__Parameters__
|Param |Type |Description |
|:---|:---| :---|
|`bucketName` | _string_ | Bucket to listen notifications from. |
|`accountArn` | _Arn_ | Unique account ID to listen notifications for. |
|`doneCh` | _chan struct{}_ | A message on this channel ends the ListenBucketNotification loop. |
__Return Values__
|Param |Type |Description |
|:---|:---| :---|
|`chan NotificationInfo` | _chan_ | Read channel for all notifications on the bucket. |
|`NotificationInfo` | _object_ | Notification object representing event information. |
|`notificationInfo.Records` | _[]NotificationEvent_ | Collection of notification events. |
|`notificationInfo.Err` | _error_ | Carries any error that occurred during the operation. |
__Example__
```go
// Create a done channel to control 'ListenBucketNotification' go routine.
doneCh := make(chan struct{})
// Signal the background go-routine to exit cleanly upon return.
defer close(doneCh)
// Fetch the bucket location.
location, err := minioClient.GetBucketLocation("YOUR-BUCKET")
if err != nil {
log.Fatalln(err)
}
// Construct a new account Arn.
accountArn := minio.NewArn("minio", "sns", location, "your-account-id", "listen")
topicConfig := minio.NewNotificationConfig(accountArn)
topicConfig.AddEvents(minio.ObjectCreatedAll, minio.ObjectRemovedAll)
topicConfig.AddFilterPrefix("photos/")
topicConfig.AddFilterSuffix(".jpg")
// Now, set all previously created notification configs
bucketNotification := minio.BucketNotification{}
bucketNotification.AddTopic(topicConfig)
err = minioClient.SetBucketNotification("YOUR-BUCKET", bucketNotification)
if err != nil {
log.Fatalln("Error: " + err.Error())
}
log.Println("Success")
// Listen for bucket notifications on "mybucket" filtered by accountArn "arn:minio:sns:<location>:<your-account-id>:listen".
for notificationInfo := range s3Client.ListenBucketNotification("mybucket", accountArn, doneCh) {
if notificationInfo.Err != nil {
fmt.Println(notificationInfo.Err)
return
}
fmt.Println(notificationInfo)
}
```
## 6. Explore Further

- [Build your own Go Music Player App example](https://docs.minio.io/docs/go-music-player-app)

View File

@ -0,0 +1,78 @@
// +build ignore
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"log"
"github.com/minio/minio-go"
)
func main() {
// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY and YOUR-BUCKET are
// dummy values; please replace them with your own values.
// Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
// This boolean value is the last argument for New().
// New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
// determined based on the Endpoint value.
minioClient, err := minio.New("play.minio.io:9000", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
if err != nil {
log.Fatalln(err)
}
// s3Client.TraceOn(os.Stderr)
// Create a done channel to control 'ListenBucketNotification' go routine.
doneCh := make(chan struct{})
// Signal the background go-routine to exit cleanly upon return.
defer close(doneCh)
// Fetch the bucket location.
location, err := minioClient.GetBucketLocation("YOUR-BUCKET")
if err != nil {
log.Fatalln(err)
}
// Construct a new account Arn.
accountArn := minio.NewArn("minio", "sns", location, "your-account-id", "listen")
topicConfig := minio.NewNotificationConfig(accountArn)
topicConfig.AddEvents(minio.ObjectCreatedAll, minio.ObjectRemovedAll)
topicConfig.AddFilterPrefix("photos/")
topicConfig.AddFilterSuffix(".jpg")
// Now, set all previously created notification configs
bucketNotification := minio.BucketNotification{}
bucketNotification.AddTopic(topicConfig)
err = minioClient.SetBucketNotification("YOUR-BUCKET", bucketNotification)
if err != nil {
log.Fatalln("Error: " + err.Error())
}
log.Println("Success")
// Listen for bucket notifications on "YOUR-BUCKET" filtered by accountArn "arn:minio:sns:<location>:<your-account-id>:listen".
for notificationInfo := range minioClient.ListenBucketNotification("YOUR-BUCKET", accountArn, doneCh) {
if notificationInfo.Err != nil {
log.Fatalln(notificationInfo.Err)
}
log.Println(notificationInfo)
}
}

View File

@ -21,8 +21,8 @@ package main
import (
	"log"
-	"github.com/cheggaaa/pb"
	"github.com/minio/minio-go"
+	"github.com/minio/pb"
)

func main() {

View File

@ -40,7 +40,7 @@ func main() {
	// s3Client.TraceOn(os.Stderr)
-	err = s3Client.DeleteBucketNotification("my-bucketname")
+	err = s3Client.RemoveAllBucketNotification("my-bucketname")
	if err != nil {
		log.Fatalln(err)
	}

View File

@ -0,0 +1,115 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package policy
import "github.com/minio/minio-go/pkg/set"
// ConditionKeyMap - map of policy condition key and value.
type ConditionKeyMap map[string]set.StringSet
// Add - adds key and value. The value is appended if the key already exists.
func (ckm ConditionKeyMap) Add(key string, value set.StringSet) {
if v, ok := ckm[key]; ok {
ckm[key] = v.Union(value)
} else {
ckm[key] = set.CopyStringSet(value)
}
}
// Remove - removes the value from the given key. If the key's value set is empty after removal, the key is also removed.
func (ckm ConditionKeyMap) Remove(key string, value set.StringSet) {
if v, ok := ckm[key]; ok {
if value != nil {
ckm[key] = v.Difference(value)
}
if ckm[key].IsEmpty() {
delete(ckm, key)
}
}
}
// RemoveKey - removes key and its value.
func (ckm ConditionKeyMap) RemoveKey(key string) {
if _, ok := ckm[key]; ok {
delete(ckm, key)
}
}
// CopyConditionKeyMap - returns new copy of given ConditionKeyMap.
func CopyConditionKeyMap(condKeyMap ConditionKeyMap) ConditionKeyMap {
out := make(ConditionKeyMap)
for k, v := range condKeyMap {
out[k] = set.CopyStringSet(v)
}
return out
}
// mergeConditionKeyMap - returns a new ConditionKeyMap which contains merged key/value of given two ConditionKeyMap.
func mergeConditionKeyMap(condKeyMap1 ConditionKeyMap, condKeyMap2 ConditionKeyMap) ConditionKeyMap {
out := CopyConditionKeyMap(condKeyMap1)
for k, v := range condKeyMap2 {
if ev, ok := out[k]; ok {
out[k] = ev.Union(v)
} else {
out[k] = set.CopyStringSet(v)
}
}
return out
}
// ConditionMap - map of condition and conditional values.
type ConditionMap map[string]ConditionKeyMap
// Add - adds condition key and condition value. The value is appended if key already exists.
func (cond ConditionMap) Add(condKey string, condKeyMap ConditionKeyMap) {
if v, ok := cond[condKey]; ok {
cond[condKey] = mergeConditionKeyMap(v, condKeyMap)
} else {
cond[condKey] = CopyConditionKeyMap(condKeyMap)
}
}
// Remove - removes condition key and its value.
func (cond ConditionMap) Remove(condKey string) {
if _, ok := cond[condKey]; ok {
delete(cond, condKey)
}
}
// mergeConditionMap - returns new ConditionMap which contains merged key/value of two ConditionMap.
func mergeConditionMap(condMap1 ConditionMap, condMap2 ConditionMap) ConditionMap {
out := make(ConditionMap)
for k, v := range condMap1 {
out[k] = CopyConditionKeyMap(v)
}
for k, v := range condMap2 {
if ev, ok := out[k]; ok {
out[k] = mergeConditionKeyMap(ev, v)
} else {
out[k] = CopyConditionKeyMap(v)
}
}
return out
}
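To make the relationship between `ConditionKeyMap` and `ConditionMap` concrete, here is a small illustrative sketch. It is not part of the vendored sources; the `github.com/minio/minio-go/pkg/policy` import path is assumed from the package's own import of `github.com/minio/minio-go/pkg/set`:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/minio/minio-go/pkg/policy"
	"github.com/minio/minio-go/pkg/set"
)

func main() {
	// Collect allowed object prefixes under the "s3:prefix" condition key.
	condKeyMap := make(policy.ConditionKeyMap)
	condKeyMap.Add("s3:prefix", set.CreateStringSet("photos/"))
	// Values for an existing key are merged, not replaced.
	condKeyMap.Add("s3:prefix", set.CreateStringSet("videos/"))

	// Attach the key map to a "StringEquals" condition.
	condMap := make(policy.ConditionMap)
	condMap.Add("StringEquals", condKeyMap)

	data, err := json.Marshal(condMap)
	if err != nil {
		fmt.Println("marshal error:", err)
		return
	}
	fmt.Println(string(data)) // {"StringEquals":{"s3:prefix":["photos/","videos/"]}}
}
```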

View File

@ -0,0 +1,289 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package policy
import (
"encoding/json"
"testing"
"github.com/minio/minio-go/pkg/set"
)
// ConditionKeyMap.Add() is called and the result is validated.
func TestConditionKeyMapAdd(t *testing.T) {
condKeyMap := make(ConditionKeyMap)
testCases := []struct {
key string
value set.StringSet
expectedResult string
}{
// Add new key and value.
{"s3:prefix", set.CreateStringSet("hello"), `{"s3:prefix":["hello"]}`},
// Add existing key and value.
{"s3:prefix", set.CreateStringSet("hello"), `{"s3:prefix":["hello"]}`},
// Add existing key with a new value.
{"s3:prefix", set.CreateStringSet("world"), `{"s3:prefix":["hello","world"]}`},
}
for _, testCase := range testCases {
condKeyMap.Add(testCase.key, testCase.value)
if data, err := json.Marshal(condKeyMap); err != nil {
t.Fatalf("Unable to marshal ConditionKeyMap to JSON, %s", err)
} else {
if string(data) != testCase.expectedResult {
t.Fatalf("case: %+v: expected: %s, got: %s", testCase, testCase.expectedResult, string(data))
}
}
}
}
// ConditionKeyMap.Remove() is called and the result is validated.
func TestConditionKeyMapRemove(t *testing.T) {
condKeyMap := make(ConditionKeyMap)
condKeyMap.Add("s3:prefix", set.CreateStringSet("hello", "world"))
testCases := []struct {
key string
value set.StringSet
expectedResult string
}{
// Remove non-existent key and value.
{"s3:myprefix", set.CreateStringSet("hello"), `{"s3:prefix":["hello","world"]}`},
// Remove existing key and value.
{"s3:prefix", set.CreateStringSet("hello"), `{"s3:prefix":["world"]}`},
// Remove the remaining value so that the key itself is removed.
{"s3:prefix", set.CreateStringSet("world"), `{}`},
}
for _, testCase := range testCases {
condKeyMap.Remove(testCase.key, testCase.value)
if data, err := json.Marshal(condKeyMap); err != nil {
t.Fatalf("Unable to marshal ConditionKeyMap to JSON, %s", err)
} else {
if string(data) != testCase.expectedResult {
t.Fatalf("case: %+v: expected: %s, got: %s", testCase, testCase.expectedResult, string(data))
}
}
}
}
// ConditionKeyMap.RemoveKey() is called and the result is validated.
func TestConditionKeyMapRemoveKey(t *testing.T) {
condKeyMap := make(ConditionKeyMap)
condKeyMap.Add("s3:prefix", set.CreateStringSet("hello", "world"))
testCases := []struct {
key string
expectedResult string
}{
// Remove non-existent key.
{"s3:myprefix", `{"s3:prefix":["hello","world"]}`},
// Remove existing key.
{"s3:prefix", `{}`},
}
for _, testCase := range testCases {
condKeyMap.RemoveKey(testCase.key)
if data, err := json.Marshal(condKeyMap); err != nil {
t.Fatalf("Unable to marshal ConditionKeyMap to JSON, %s", err)
} else {
if string(data) != testCase.expectedResult {
t.Fatalf("case: %+v: expected: %s, got: %s", testCase, testCase.expectedResult, string(data))
}
}
}
}
// CopyConditionKeyMap() is called and the result is validated.
func TestCopyConditionKeyMap(t *testing.T) {
emptyCondKeyMap := make(ConditionKeyMap)
nonEmptyCondKeyMap := make(ConditionKeyMap)
nonEmptyCondKeyMap.Add("s3:prefix", set.CreateStringSet("hello", "world"))
testCases := []struct {
condKeyMap ConditionKeyMap
expectedResult string
}{
// To test empty ConditionKeyMap.
{emptyCondKeyMap, `{}`},
// To test non-empty ConditionKeyMap.
{nonEmptyCondKeyMap, `{"s3:prefix":["hello","world"]}`},
}
for _, testCase := range testCases {
condKeyMap := CopyConditionKeyMap(testCase.condKeyMap)
if data, err := json.Marshal(condKeyMap); err != nil {
t.Fatalf("Unable to marshal ConditionKeyMap to JSON, %s", err)
} else {
if string(data) != testCase.expectedResult {
t.Fatalf("case: %+v: expected: %s, got: %s", testCase, testCase.expectedResult, string(data))
}
}
}
}
// mergeConditionKeyMap() is called and the result is validated.
func TestMergeConditionKeyMap(t *testing.T) {
condKeyMap1 := make(ConditionKeyMap)
condKeyMap1.Add("s3:prefix", set.CreateStringSet("hello"))
condKeyMap2 := make(ConditionKeyMap)
condKeyMap2.Add("s3:prefix", set.CreateStringSet("world"))
condKeyMap3 := make(ConditionKeyMap)
condKeyMap3.Add("s3:myprefix", set.CreateStringSet("world"))
testCases := []struct {
condKeyMap1 ConditionKeyMap
condKeyMap2 ConditionKeyMap
expectedResult string
}{
// Both arguments are empty.
{make(ConditionKeyMap), make(ConditionKeyMap), `{}`},
// First argument is empty.
{make(ConditionKeyMap), condKeyMap1, `{"s3:prefix":["hello"]}`},
// Second argument is empty.
{condKeyMap1, make(ConditionKeyMap), `{"s3:prefix":["hello"]}`},
// Both arguments are same value.
{condKeyMap1, condKeyMap1, `{"s3:prefix":["hello"]}`},
// Value of second argument will be merged.
{condKeyMap1, condKeyMap2, `{"s3:prefix":["hello","world"]}`},
// second argument will be added.
{condKeyMap1, condKeyMap3, `{"s3:myprefix":["world"],"s3:prefix":["hello"]}`},
}
for _, testCase := range testCases {
condKeyMap := mergeConditionKeyMap(testCase.condKeyMap1, testCase.condKeyMap2)
if data, err := json.Marshal(condKeyMap); err != nil {
t.Fatalf("Unable to marshal ConditionKeyMap to JSON, %s", err)
} else {
if string(data) != testCase.expectedResult {
t.Fatalf("case: %+v: expected: %s, got: %s", testCase, testCase.expectedResult, string(data))
}
}
}
}
// ConditionMap.Add() is called and the result is validated.
func TestConditionMapAdd(t *testing.T) {
condMap := make(ConditionMap)
condKeyMap1 := make(ConditionKeyMap)
condKeyMap1.Add("s3:prefix", set.CreateStringSet("hello"))
condKeyMap2 := make(ConditionKeyMap)
condKeyMap2.Add("s3:prefix", set.CreateStringSet("hello", "world"))
testCases := []struct {
key string
value ConditionKeyMap
expectedResult string
}{
// Add new key and value.
{"StringEquals", condKeyMap1, `{"StringEquals":{"s3:prefix":["hello"]}}`},
// Add existing key and value.
{"StringEquals", condKeyMap1, `{"StringEquals":{"s3:prefix":["hello"]}}`},
// Add existing key with a new value.
{"StringEquals", condKeyMap2, `{"StringEquals":{"s3:prefix":["hello","world"]}}`},
}
for _, testCase := range testCases {
condMap.Add(testCase.key, testCase.value)
if data, err := json.Marshal(condMap); err != nil {
t.Fatalf("Unable to marshal ConditionKeyMap to JSON, %s", err)
} else {
if string(data) != testCase.expectedResult {
t.Fatalf("case: %+v: expected: %s, got: %s", testCase, testCase.expectedResult, string(data))
}
}
}
}
// ConditionMap.Remove() is called and the result is validated.
func TestConditionMapRemove(t *testing.T) {
condMap := make(ConditionMap)
condKeyMap := make(ConditionKeyMap)
condKeyMap.Add("s3:prefix", set.CreateStringSet("hello", "world"))
condMap.Add("StringEquals", condKeyMap)
testCases := []struct {
key string
expectedResult string
}{
// Remove non-existent key.
{"StringNotEquals", `{"StringEquals":{"s3:prefix":["hello","world"]}}`},
// Remove existing key.
{"StringEquals", `{}`},
}
for _, testCase := range testCases {
condMap.Remove(testCase.key)
if data, err := json.Marshal(condMap); err != nil {
t.Fatalf("Unable to marshal ConditionKeyMap to JSON, %s", err)
} else {
if string(data) != testCase.expectedResult {
t.Fatalf("case: %+v: expected: %s, got: %s", testCase, testCase.expectedResult, string(data))
}
}
}
}
// mergeConditionMap() is called and the result is validated.
func TestMergeConditionMap(t *testing.T) {
condKeyMap1 := make(ConditionKeyMap)
condKeyMap1.Add("s3:prefix", set.CreateStringSet("hello"))
condMap1 := make(ConditionMap)
condMap1.Add("StringEquals", condKeyMap1)
condKeyMap2 := make(ConditionKeyMap)
condKeyMap2.Add("s3:prefix", set.CreateStringSet("world"))
condMap2 := make(ConditionMap)
condMap2.Add("StringEquals", condKeyMap2)
condMap3 := make(ConditionMap)
condMap3.Add("StringNotEquals", condKeyMap2)
testCases := []struct {
condMap1 ConditionMap
condMap2 ConditionMap
expectedResult string
}{
// Both arguments are empty.
{make(ConditionMap), make(ConditionMap), `{}`},
// First argument is empty.
{make(ConditionMap), condMap1, `{"StringEquals":{"s3:prefix":["hello"]}}`},
// Second argument is empty.
{condMap1, make(ConditionMap), `{"StringEquals":{"s3:prefix":["hello"]}}`},
// Both arguments are same value.
{condMap1, condMap1, `{"StringEquals":{"s3:prefix":["hello"]}}`},
// Value of second argument will be merged.
{condMap1, condMap2, `{"StringEquals":{"s3:prefix":["hello","world"]}}`},
// second argument will be added.
{condMap1, condMap3, `{"StringEquals":{"s3:prefix":["hello"]},"StringNotEquals":{"s3:prefix":["world"]}}`},
}
for _, testCase := range testCases {
condMap := mergeConditionMap(testCase.condMap1, testCase.condMap2)
if data, err := json.Marshal(condMap); err != nil {
t.Fatalf("Unable to marshal ConditionKeyMap to JSON, %s", err)
} else {
if string(data) != testCase.expectedResult {
t.Fatalf("case: %+v: expected: %s, got: %s", testCase, testCase.expectedResult, string(data))
}
}
}
}

View File

@ -0,0 +1,608 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package policy
import (
"reflect"
"strings"
"github.com/minio/minio-go/pkg/set"
)
// BucketPolicy - Bucket level policy.
type BucketPolicy string
// Different types of Policies currently supported for buckets.
const (
BucketPolicyNone BucketPolicy = "none"
BucketPolicyReadOnly = "readonly"
BucketPolicyReadWrite = "readwrite"
BucketPolicyWriteOnly = "writeonly"
)
// IsValidBucketPolicy - returns whether the provided policy value is supported.
func (p BucketPolicy) IsValidBucketPolicy() bool {
switch p {
case BucketPolicyNone, BucketPolicyReadOnly, BucketPolicyReadWrite, BucketPolicyWriteOnly:
return true
}
return false
}
// Resource prefix for all aws resources.
const awsResourcePrefix = "arn:aws:s3:::"
// Common bucket actions for both read and write policies.
var commonBucketActions = set.CreateStringSet("s3:GetBucketLocation")
// Read only bucket actions.
var readOnlyBucketActions = set.CreateStringSet("s3:ListBucket")
// Write only bucket actions.
var writeOnlyBucketActions = set.CreateStringSet("s3:ListBucketMultipartUploads")
// Read only object actions.
var readOnlyObjectActions = set.CreateStringSet("s3:GetObject")
// Write only object actions.
var writeOnlyObjectActions = set.CreateStringSet("s3:AbortMultipartUpload", "s3:DeleteObject", "s3:ListMultipartUploadParts", "s3:PutObject")
// Read and write object actions.
var readWriteObjectActions = readOnlyObjectActions.Union(writeOnlyObjectActions)
// All valid bucket and object actions.
var validActions = commonBucketActions.
Union(readOnlyBucketActions).
Union(writeOnlyBucketActions).
Union(readOnlyObjectActions).
Union(writeOnlyObjectActions)
var startsWithFunc = func(resource string, resourcePrefix string) bool {
return strings.HasPrefix(resource, resourcePrefix)
}
// User - canonical users list.
type User struct {
AWS set.StringSet `json:"AWS,omitempty"`
CanonicalUser set.StringSet `json:"CanonicalUser,omitempty"`
}
// Statement - minio policy statement
type Statement struct {
Actions set.StringSet `json:"Action"`
Conditions ConditionMap `json:"Condition,omitempty"`
Effect string
Principal User `json:"Principal"`
Resources set.StringSet `json:"Resource"`
Sid string
}
// BucketAccessPolicy - minio policy collection
type BucketAccessPolicy struct {
Version string // date in YYYY-MM-DD format
Statements []Statement `json:"Statement"`
}
// isValidStatement - returns whether given statement is valid to process for given bucket name.
func isValidStatement(statement Statement, bucketName string) bool {
if statement.Actions.Intersection(validActions).IsEmpty() {
return false
}
if statement.Effect != "Allow" {
return false
}
if statement.Principal.AWS == nil || !statement.Principal.AWS.Contains("*") {
return false
}
bucketResource := awsResourcePrefix + bucketName
if statement.Resources.Contains(bucketResource) {
return true
}
if statement.Resources.FuncMatch(startsWithFunc, bucketResource+"/").IsEmpty() {
return false
}
return true
}
// Returns new statements with bucket actions for given policy.
func newBucketStatement(policy BucketPolicy, bucketName string, prefix string) (statements []Statement) {
statements = []Statement{}
if policy == BucketPolicyNone || bucketName == "" {
return statements
}
bucketResource := set.CreateStringSet(awsResourcePrefix + bucketName)
statement := Statement{
Actions: commonBucketActions,
Effect: "Allow",
Principal: User{AWS: set.CreateStringSet("*")},
Resources: bucketResource,
Sid: "",
}
statements = append(statements, statement)
if policy == BucketPolicyReadOnly || policy == BucketPolicyReadWrite {
statement = Statement{
Actions: readOnlyBucketActions,
Effect: "Allow",
Principal: User{AWS: set.CreateStringSet("*")},
Resources: bucketResource,
Sid: "",
}
if prefix != "" {
condKeyMap := make(ConditionKeyMap)
condKeyMap.Add("s3:prefix", set.CreateStringSet(prefix))
condMap := make(ConditionMap)
condMap.Add("StringEquals", condKeyMap)
statement.Conditions = condMap
}
statements = append(statements, statement)
}
if policy == BucketPolicyWriteOnly || policy == BucketPolicyReadWrite {
statement = Statement{
Actions: writeOnlyBucketActions,
Effect: "Allow",
Principal: User{AWS: set.CreateStringSet("*")},
Resources: bucketResource,
Sid: "",
}
statements = append(statements, statement)
}
return statements
}
// Returns new statements containing object actions for given policy.
func newObjectStatement(policy BucketPolicy, bucketName string, prefix string) (statements []Statement) {
statements = []Statement{}
if policy == BucketPolicyNone || bucketName == "" {
return statements
}
statement := Statement{
Effect: "Allow",
Principal: User{AWS: set.CreateStringSet("*")},
Resources: set.CreateStringSet(awsResourcePrefix + bucketName + "/" + prefix + "*"),
Sid: "",
}
if policy == BucketPolicyReadOnly {
statement.Actions = readOnlyObjectActions
} else if policy == BucketPolicyWriteOnly {
statement.Actions = writeOnlyObjectActions
} else if policy == BucketPolicyReadWrite {
statement.Actions = readWriteObjectActions
}
statements = append(statements, statement)
return statements
}
// Returns new statements for given policy, bucket and prefix.
func newStatements(policy BucketPolicy, bucketName string, prefix string) (statements []Statement) {
statements = []Statement{}
ns := newBucketStatement(policy, bucketName, prefix)
statements = append(statements, ns...)
ns = newObjectStatement(policy, bucketName, prefix)
statements = append(statements, ns...)
return statements
}
// Returns whether read-only and/or write-only object actions are in use by statements for prefixes other than the given prefix.
func getInUsePolicy(statements []Statement, bucketName string, prefix string) (readOnlyInUse, writeOnlyInUse bool) {
resourcePrefix := awsResourcePrefix + bucketName + "/"
objectResource := awsResourcePrefix + bucketName + "/" + prefix + "*"
for _, s := range statements {
if !s.Resources.Contains(objectResource) && !s.Resources.FuncMatch(startsWithFunc, resourcePrefix).IsEmpty() {
if s.Actions.Intersection(readOnlyObjectActions).Equals(readOnlyObjectActions) {
readOnlyInUse = true
}
if s.Actions.Intersection(writeOnlyObjectActions).Equals(writeOnlyObjectActions) {
writeOnlyInUse = true
}
}
if readOnlyInUse && writeOnlyInUse {
break
}
}
return readOnlyInUse, writeOnlyInUse
}
// Removes object actions in given statement.
func removeObjectActions(statement Statement, objectResource string) Statement {
if statement.Conditions == nil {
if len(statement.Resources) > 1 {
statement.Resources.Remove(objectResource)
} else {
statement.Actions = statement.Actions.Difference(readOnlyObjectActions)
statement.Actions = statement.Actions.Difference(writeOnlyObjectActions)
}
}
return statement
}
// Removes bucket actions for given policy in given statement.
func removeBucketActions(statement Statement, prefix string, bucketResource string, readOnlyInUse, writeOnlyInUse bool) Statement {
removeReadOnly := func() {
if !statement.Actions.Intersection(readOnlyBucketActions).Equals(readOnlyBucketActions) {
return
}
if statement.Conditions == nil {
statement.Actions = statement.Actions.Difference(readOnlyBucketActions)
return
}
if prefix != "" {
stringEqualsValue := statement.Conditions["StringEquals"]
values := set.NewStringSet()
if stringEqualsValue != nil {
values = stringEqualsValue["s3:prefix"]
if values == nil {
values = set.NewStringSet()
}
}
values.Remove(prefix)
if stringEqualsValue != nil {
if values.IsEmpty() {
delete(stringEqualsValue, "s3:prefix")
}
if len(stringEqualsValue) == 0 {
delete(statement.Conditions, "StringEquals")
}
}
if len(statement.Conditions) == 0 {
statement.Conditions = nil
statement.Actions = statement.Actions.Difference(readOnlyBucketActions)
}
}
}
removeWriteOnly := func() {
if statement.Conditions == nil {
statement.Actions = statement.Actions.Difference(writeOnlyBucketActions)
}
}
if len(statement.Resources) > 1 {
statement.Resources.Remove(bucketResource)
} else {
if !readOnlyInUse {
removeReadOnly()
}
if !writeOnlyInUse {
removeWriteOnly()
}
}
return statement
}
// Returns statements with the actions/statements for the given
// bucket name and prefix removed.
func removeStatements(statements []Statement, bucketName string, prefix string) []Statement {
bucketResource := awsResourcePrefix + bucketName
objectResource := awsResourcePrefix + bucketName + "/" + prefix + "*"
readOnlyInUse, writeOnlyInUse := getInUsePolicy(statements, bucketName, prefix)
out := []Statement{}
readOnlyBucketStatements := []Statement{}
s3PrefixValues := set.NewStringSet()
for _, statement := range statements {
if !isValidStatement(statement, bucketName) {
out = append(out, statement)
continue
}
if statement.Resources.Contains(bucketResource) {
if statement.Conditions != nil {
statement = removeBucketActions(statement, prefix, bucketResource, false, false)
} else {
statement = removeBucketActions(statement, prefix, bucketResource, readOnlyInUse, writeOnlyInUse)
}
} else if statement.Resources.Contains(objectResource) {
statement = removeObjectActions(statement, objectResource)
}
if !statement.Actions.IsEmpty() {
if statement.Resources.Contains(bucketResource) &&
statement.Actions.Intersection(readOnlyBucketActions).Equals(readOnlyBucketActions) &&
statement.Effect == "Allow" &&
statement.Principal.AWS.Contains("*") {
if statement.Conditions != nil {
stringEqualsValue := statement.Conditions["StringEquals"]
values := set.NewStringSet()
if stringEqualsValue != nil {
values = stringEqualsValue["s3:prefix"]
if values == nil {
values = set.NewStringSet()
}
}
s3PrefixValues = s3PrefixValues.Union(values.ApplyFunc(func(v string) string {
return bucketResource + "/" + v + "*"
}))
} else if !s3PrefixValues.IsEmpty() {
readOnlyBucketStatements = append(readOnlyBucketStatements, statement)
continue
}
}
out = append(out, statement)
}
}
skipBucketStatement := true
resourcePrefix := awsResourcePrefix + bucketName + "/"
for _, statement := range out {
if !statement.Resources.FuncMatch(startsWithFunc, resourcePrefix).IsEmpty() &&
s3PrefixValues.Intersection(statement.Resources).IsEmpty() {
skipBucketStatement = false
break
}
}
for _, statement := range readOnlyBucketStatements {
if skipBucketStatement &&
statement.Resources.Contains(bucketResource) &&
statement.Effect == "Allow" &&
statement.Principal.AWS.Contains("*") &&
statement.Conditions == nil {
continue
}
out = append(out, statement)
}
if len(out) == 1 {
statement := out[0]
if statement.Resources.Contains(bucketResource) &&
statement.Actions.Intersection(commonBucketActions).Equals(commonBucketActions) &&
statement.Effect == "Allow" &&
statement.Principal.AWS.Contains("*") &&
statement.Conditions == nil {
out = []Statement{}
}
}
return out
}
// Appends given statement into statement list to have unique statements.
// - If the statement already exists in the statement list, it is ignored.
// - If statement exists with different conditions, they are merged.
// - Else the statement is appended to statement list.
func appendStatement(statements []Statement, statement Statement) []Statement {
for i, s := range statements {
if s.Actions.Equals(statement.Actions) &&
s.Effect == statement.Effect &&
s.Principal.AWS.Equals(statement.Principal.AWS) &&
reflect.DeepEqual(s.Conditions, statement.Conditions) {
statements[i].Resources = s.Resources.Union(statement.Resources)
return statements
} else if s.Resources.Equals(statement.Resources) &&
s.Effect == statement.Effect &&
s.Principal.AWS.Equals(statement.Principal.AWS) &&
reflect.DeepEqual(s.Conditions, statement.Conditions) {
statements[i].Actions = s.Actions.Union(statement.Actions)
return statements
}
if s.Resources.Intersection(statement.Resources).Equals(statement.Resources) &&
s.Actions.Intersection(statement.Actions).Equals(statement.Actions) &&
s.Effect == statement.Effect &&
s.Principal.AWS.Intersection(statement.Principal.AWS).Equals(statement.Principal.AWS) {
if reflect.DeepEqual(s.Conditions, statement.Conditions) {
return statements
}
if s.Conditions != nil && statement.Conditions != nil {
if s.Resources.Equals(statement.Resources) {
statements[i].Conditions = mergeConditionMap(s.Conditions, statement.Conditions)
return statements
}
}
}
}
if !(statement.Actions.IsEmpty() && statement.Resources.IsEmpty()) {
return append(statements, statement)
}
return statements
}
// Appends two statement lists.
func appendStatements(statements []Statement, appendStatements []Statement) []Statement {
for _, s := range appendStatements {
statements = appendStatement(statements, s)
}
return statements
}
// Returns policy of given bucket statement.
func getBucketPolicy(statement Statement, prefix string) (commonFound, readOnly, writeOnly bool) {
if !(statement.Effect == "Allow" && statement.Principal.AWS.Contains("*")) {
return commonFound, readOnly, writeOnly
}
if statement.Actions.Intersection(commonBucketActions).Equals(commonBucketActions) &&
statement.Conditions == nil {
commonFound = true
}
if statement.Actions.Intersection(writeOnlyBucketActions).Equals(writeOnlyBucketActions) &&
statement.Conditions == nil {
writeOnly = true
}
if statement.Actions.Intersection(readOnlyBucketActions).Equals(readOnlyBucketActions) {
if prefix != "" && statement.Conditions != nil {
if stringEqualsValue, ok := statement.Conditions["StringEquals"]; ok {
if s3PrefixValues, ok := stringEqualsValue["s3:prefix"]; ok {
if s3PrefixValues.Contains(prefix) {
readOnly = true
}
}
} else if stringNotEqualsValue, ok := statement.Conditions["StringNotEquals"]; ok {
if s3PrefixValues, ok := stringNotEqualsValue["s3:prefix"]; ok {
if !s3PrefixValues.Contains(prefix) {
readOnly = true
}
}
}
} else if prefix == "" && statement.Conditions == nil {
readOnly = true
} else if prefix != "" && statement.Conditions == nil {
readOnly = true
}
}
return commonFound, readOnly, writeOnly
}
// Returns policy of given object statement.
func getObjectPolicy(statement Statement) (readOnly bool, writeOnly bool) {
if statement.Effect == "Allow" &&
statement.Principal.AWS.Contains("*") &&
statement.Conditions == nil {
if statement.Actions.Intersection(readOnlyObjectActions).Equals(readOnlyObjectActions) {
readOnly = true
}
if statement.Actions.Intersection(writeOnlyObjectActions).Equals(writeOnlyObjectActions) {
writeOnly = true
}
}
return readOnly, writeOnly
}
// Returns policy of given bucket name, prefix in given statements.
func GetPolicy(statements []Statement, bucketName string, prefix string) BucketPolicy {
bucketResource := awsResourcePrefix + bucketName
objectResource := awsResourcePrefix + bucketName + "/" + prefix + "*"
bucketCommonFound := false
bucketReadOnly := false
bucketWriteOnly := false
matchedResource := ""
objReadOnly := false
objWriteOnly := false
for _, s := range statements {
matchedObjResources := set.NewStringSet()
if s.Resources.Contains(objectResource) {
matchedObjResources.Add(objectResource)
} else {
matchedObjResources = s.Resources.FuncMatch(resourceMatch, objectResource)
}
if !matchedObjResources.IsEmpty() {
readOnly, writeOnly := getObjectPolicy(s)
for resource := range matchedObjResources {
if len(matchedResource) < len(resource) {
objReadOnly = readOnly
objWriteOnly = writeOnly
matchedResource = resource
} else if len(matchedResource) == len(resource) {
objReadOnly = objReadOnly || readOnly
objWriteOnly = objWriteOnly || writeOnly
matchedResource = resource
}
}
} else if s.Resources.Contains(bucketResource) {
commonFound, readOnly, writeOnly := getBucketPolicy(s, prefix)
bucketCommonFound = bucketCommonFound || commonFound
bucketReadOnly = bucketReadOnly || readOnly
bucketWriteOnly = bucketWriteOnly || writeOnly
}
}
policy := BucketPolicyNone
if bucketCommonFound {
if bucketReadOnly && bucketWriteOnly && objReadOnly && objWriteOnly {
policy = BucketPolicyReadWrite
} else if bucketReadOnly && objReadOnly {
policy = BucketPolicyReadOnly
} else if bucketWriteOnly && objWriteOnly {
policy = BucketPolicyWriteOnly
}
}
return policy
}
// Returns new statements with the policy for the given bucket name and
// prefix appended.
func SetPolicy(statements []Statement, policy BucketPolicy, bucketName string, prefix string) []Statement {
out := removeStatements(statements, bucketName, prefix)
// fmt.Println("out = ")
// printstatement(out)
ns := newStatements(policy, bucketName, prefix)
// fmt.Println("ns = ")
// printstatement(ns)
rv := appendStatements(out, ns)
// fmt.Println("rv = ")
// printstatement(rv)
return rv
}
// resourceMatch - matches wildcards in 'pattern' against the given resource.
func resourceMatch(pattern, resource string) bool {
if pattern == "" {
return resource == pattern
}
if pattern == "*" {
return true
}
parts := strings.Split(pattern, "*")
if len(parts) == 1 {
return resource == pattern
}
tGlob := strings.HasSuffix(pattern, "*")
end := len(parts) - 1
if !strings.HasPrefix(resource, parts[0]) {
return false
}
for i := 1; i < end; i++ {
if !strings.Contains(resource, parts[i]) {
return false
}
idx := strings.Index(resource, parts[i]) + len(parts[i])
resource = resource[idx:]
}
return tGlob || strings.HasSuffix(resource, parts[end])
}
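As a rough end-to-end illustration of `SetPolicy` and `GetPolicy` (again not part of the vendored file; the import path is an assumption based on the package layout shown above):

```go
package main

import (
	"fmt"

	"github.com/minio/minio-go/pkg/policy"
)

func main() {
	// Start from an empty statement list and grant anonymous read-write
	// access on "mybucket" for all object prefixes.
	statements := policy.SetPolicy(nil, policy.BucketPolicyReadWrite, "mybucket", "")

	// Reading the policy back for the same bucket and prefix should
	// report "readwrite".
	fmt.Println(policy.GetPolicy(statements, "mybucket", ""))
}
```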

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,196 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package set
import (
"encoding/json"
"fmt"
"sort"
)
// StringSet - uses map as set of strings.
type StringSet map[string]struct{}
// keys - returns StringSet keys.
func (set StringSet) keys() []string {
keys := make([]string, 0, len(set))
for k := range set {
keys = append(keys, k)
}
sort.Strings(keys)
return keys
}
// IsEmpty - returns whether the set is empty or not.
func (set StringSet) IsEmpty() bool {
return len(set) == 0
}
// Add - adds string to the set.
func (set StringSet) Add(s string) {
set[s] = struct{}{}
}
// Remove - removes the string from the set. It does nothing if the string does not exist in the set.
func (set StringSet) Remove(s string) {
delete(set, s)
}
// Contains - checks if string is in the set.
func (set StringSet) Contains(s string) bool {
_, ok := set[s]
return ok
}
// FuncMatch - returns a new set containing each value that passes the match function.
// A 'matchFn' should accept an element of the set as its first argument and
// 'matchString' as its second argument. The function can apply any logic to
// compare the two arguments; it should return true to include the element in
// the output set, otherwise the element is ignored.
func (set StringSet) FuncMatch(matchFn func(string, string) bool, matchString string) StringSet {
nset := NewStringSet()
for k := range set {
if matchFn(k, matchString) {
nset.Add(k)
}
}
return nset
}
// ApplyFunc - returns a new set containing each value processed by 'applyFn'.
// An 'applyFn' should accept an element of the set as its argument and return
// a processed string. The function can apply any logic to produce the
// processed string.
func (set StringSet) ApplyFunc(applyFn func(string) string) StringSet {
nset := NewStringSet()
for k := range set {
nset.Add(applyFn(k))
}
return nset
}
// Equals - checks whether given set is equal to current set or not.
func (set StringSet) Equals(sset StringSet) bool {
// If length of set is not equal to length of given set, the
// set is not equal to given set.
if len(set) != len(sset) {
return false
}
// As both sets are equal in length, check each elements are equal.
for k := range set {
if _, ok := sset[k]; !ok {
return false
}
}
return true
}
// Intersection - returns the intersection with given set as new set.
func (set StringSet) Intersection(sset StringSet) StringSet {
nset := NewStringSet()
for k := range set {
if _, ok := sset[k]; ok {
nset.Add(k)
}
}
return nset
}
// Difference - returns the difference with given set as new set.
func (set StringSet) Difference(sset StringSet) StringSet {
nset := NewStringSet()
for k := range set {
if _, ok := sset[k]; !ok {
nset.Add(k)
}
}
return nset
}
// Union - returns the union with given set as new set.
func (set StringSet) Union(sset StringSet) StringSet {
nset := NewStringSet()
for k := range set {
nset.Add(k)
}
for k := range sset {
nset.Add(k)
}
return nset
}
// MarshalJSON - converts to JSON data.
func (set StringSet) MarshalJSON() ([]byte, error) {
return json.Marshal(set.keys())
}
// UnmarshalJSON - parses JSON data and creates new set with it.
// If 'data' contains a JSON string array, the set contains each string.
// If 'data' contains a JSON string, the set contains the string as one element.
// If 'data' contains any other JSON type, a JSON parse error is returned.
func (set *StringSet) UnmarshalJSON(data []byte) error {
sl := []string{}
var err error
if err = json.Unmarshal(data, &sl); err == nil {
*set = make(StringSet)
for _, s := range sl {
set.Add(s)
}
} else {
var s string
if err = json.Unmarshal(data, &s); err == nil {
*set = make(StringSet)
set.Add(s)
}
}
return err
}
// String - returns printable string of the set.
func (set StringSet) String() string {
return fmt.Sprintf("%s", set.keys())
}
// NewStringSet - creates new string set.
func NewStringSet() StringSet {
return make(StringSet)
}
// CreateStringSet - creates new string set with given string values.
func CreateStringSet(sl ...string) StringSet {
set := make(StringSet)
for _, k := range sl {
set.Add(k)
}
return set
}
// CopyStringSet - returns copy of given set.
func CopyStringSet(set StringSet) StringSet {
nset := NewStringSet()
for k, v := range set {
nset[k] = v
}
return nset
}
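For readers unfamiliar with this helper package, a brief illustrative sketch of the basic `StringSet` operations the policy code relies on (import path taken from the package's usage above):

```go
package main

import (
	"fmt"

	"github.com/minio/minio-go/pkg/set"
)

func main() {
	readOnly := set.CreateStringSet("s3:GetObject")
	writeOnly := set.CreateStringSet("s3:PutObject", "s3:DeleteObject")

	// Union combines both action sets; Intersection and Difference work analogously.
	readWrite := readOnly.Union(writeOnly)

	fmt.Println(readWrite.Contains("s3:PutObject")) // true
	fmt.Println(readWrite)                          // [s3:DeleteObject s3:GetObject s3:PutObject]
}
```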

View File

@ -0,0 +1,322 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package set
import (
"strings"
"testing"
)
// NewStringSet() is called and the result is validated.
func TestNewStringSet(t *testing.T) {
if ss := NewStringSet(); !ss.IsEmpty() {
t.Fatalf("expected: true, got: false")
}
}
// CreateStringSet() is called and the result is validated.
func TestCreateStringSet(t *testing.T) {
ss := CreateStringSet("foo")
if str := ss.String(); str != `[foo]` {
t.Fatalf("expected: %s, got: %s", `["foo"]`, str)
}
}
// CopyStringSet() is called and the result is validated.
func TestCopyStringSet(t *testing.T) {
ss := CreateStringSet("foo")
sscopy := CopyStringSet(ss)
if !ss.Equals(sscopy) {
t.Fatalf("expected: %s, got: %s", ss, sscopy)
}
}
// StringSet.Add() is called with series of cases for valid and erroneous inputs and the result is validated.
func TestStringSetAdd(t *testing.T) {
testCases := []struct {
value string
expectedResult string
}{
// Test first addition.
{"foo", `[foo]`},
// Test duplicate addition.
{"foo", `[foo]`},
// Test new addition.
{"bar", `[bar foo]`},
}
ss := NewStringSet()
for _, testCase := range testCases {
ss.Add(testCase.value)
if str := ss.String(); str != testCase.expectedResult {
t.Fatalf("expected: %s, got: %s", testCase.expectedResult, str)
}
}
}
// StringSet.Remove() is called with series of cases for valid and erroneous inputs and the result is validated.
func TestStringSetRemove(t *testing.T) {
ss := CreateStringSet("foo", "bar")
testCases := []struct {
value string
expectedResult string
}{
// Test removing non-existent item.
{"baz", `[bar foo]`},
// Test remove existing item.
{"foo", `[bar]`},
// Test remove existing item again.
{"foo", `[bar]`},
// Test removing the last item to make the set empty.
{"bar", `[]`},
}
for _, testCase := range testCases {
ss.Remove(testCase.value)
if str := ss.String(); str != testCase.expectedResult {
t.Fatalf("expected: %s, got: %s", testCase.expectedResult, str)
}
}
}
// StringSet.Contains() is called with series of cases for valid and erroneous inputs and the result is validated.
func TestStringSetContains(t *testing.T) {
ss := CreateStringSet("foo")
testCases := []struct {
value string
expectedResult bool
}{
// Test to check non-existent item.
{"bar", false},
// Test to check existent item.
{"foo", true},
// Test to verify case sensitivity.
{"Foo", false},
}
for _, testCase := range testCases {
if result := ss.Contains(testCase.value); result != testCase.expectedResult {
t.Fatalf("expected: %t, got: %t", testCase.expectedResult, result)
}
}
}
// StringSet.FuncMatch() is called with series of cases for valid and erroneous inputs and the result is validated.
func TestStringSetFuncMatch(t *testing.T) {
ss := CreateStringSet("foo", "bar")
testCases := []struct {
matchFn func(string, string) bool
value string
expectedResult string
}{
// Test to check match function doing case-insensitive compare.
{func(setValue string, compareValue string) bool {
return strings.ToUpper(setValue) == strings.ToUpper(compareValue)
}, "Bar", `[bar]`},
// Test to check match function doing prefix check.
{func(setValue string, compareValue string) bool {
return strings.HasPrefix(compareValue, setValue)
}, "foobar", `[foo]`},
}
for _, testCase := range testCases {
s := ss.FuncMatch(testCase.matchFn, testCase.value)
if result := s.String(); result != testCase.expectedResult {
t.Fatalf("expected: %s, got: %s", testCase.expectedResult, result)
}
}
}
// StringSet.ApplyFunc() is called with series of cases for valid and erroneous inputs and the result is validated.
func TestStringSetApplyFunc(t *testing.T) {
ss := CreateStringSet("foo", "bar")
testCases := []struct {
applyFn func(string) string
expectedResult string
}{
// Test to apply function prepending a known string.
{func(setValue string) string { return "mybucket/" + setValue }, `[mybucket/bar mybucket/foo]`},
// Test to apply function modifying values.
{func(setValue string) string { return setValue[1:] }, `[ar oo]`},
}
for _, testCase := range testCases {
s := ss.ApplyFunc(testCase.applyFn)
if result := s.String(); result != testCase.expectedResult {
t.Fatalf("expected: %s, got: %s", testCase.expectedResult, result)
}
}
}
// StringSet.Equals() is called with series of cases for valid and erroneous inputs and the result is validated.
func TestStringSetEquals(t *testing.T) {
testCases := []struct {
set1 StringSet
set2 StringSet
expectedResult bool
}{
// Test equal set
{CreateStringSet("foo", "bar"), CreateStringSet("foo", "bar"), true},
// Test second set with more items
{CreateStringSet("foo", "bar"), CreateStringSet("foo", "bar", "baz"), false},
// Test second set with less items
{CreateStringSet("foo", "bar"), CreateStringSet("bar"), false},
}
for _, testCase := range testCases {
if result := testCase.set1.Equals(testCase.set2); result != testCase.expectedResult {
t.Fatalf("expected: %t, got: %t", testCase.expectedResult, result)
}
}
}
// StringSet.Intersection() is called with series of cases for valid and erroneous inputs and the result is validated.
func TestStringSetIntersection(t *testing.T) {
testCases := []struct {
set1 StringSet
set2 StringSet
expectedResult StringSet
}{
// Test intersecting all values.
{CreateStringSet("foo", "bar"), CreateStringSet("foo", "bar"), CreateStringSet("foo", "bar")},
// Test intersecting all values in second set.
{CreateStringSet("foo", "bar", "baz"), CreateStringSet("foo", "bar"), CreateStringSet("foo", "bar")},
// Test intersecting different values in second set.
{CreateStringSet("foo", "baz"), CreateStringSet("baz", "bar"), CreateStringSet("baz")},
// Test intersecting none.
{CreateStringSet("foo", "baz"), CreateStringSet("poo", "bar"), NewStringSet()},
}
for _, testCase := range testCases {
if result := testCase.set1.Intersection(testCase.set2); !result.Equals(testCase.expectedResult) {
t.Fatalf("expected: %s, got: %s", testCase.expectedResult, result)
}
}
}
// StringSet.Difference() is called with series of cases for valid and erroneous inputs and the result is validated.
func TestStringSetDifference(t *testing.T) {
testCases := []struct {
set1 StringSet
set2 StringSet
expectedResult StringSet
}{
// Test differing none.
{CreateStringSet("foo", "bar"), CreateStringSet("foo", "bar"), NewStringSet()},
// Test differing in first set.
{CreateStringSet("foo", "bar", "baz"), CreateStringSet("foo", "bar"), CreateStringSet("baz")},
// Test differing values in both set.
{CreateStringSet("foo", "baz"), CreateStringSet("baz", "bar"), CreateStringSet("foo")},
// Test differing all values.
{CreateStringSet("foo", "baz"), CreateStringSet("poo", "bar"), CreateStringSet("foo", "baz")},
}
for _, testCase := range testCases {
if result := testCase.set1.Difference(testCase.set2); !result.Equals(testCase.expectedResult) {
t.Fatalf("expected: %s, got: %s", testCase.expectedResult, result)
}
}
}
// StringSet.Union() is called with series of cases for valid and erroneous inputs and the result is validated.
func TestStringSetUnion(t *testing.T) {
testCases := []struct {
set1 StringSet
set2 StringSet
expectedResult StringSet
}{
// Test union same values.
{CreateStringSet("foo", "bar"), CreateStringSet("foo", "bar"), CreateStringSet("foo", "bar")},
// Test union same values in second set.
{CreateStringSet("foo", "bar", "baz"), CreateStringSet("foo", "bar"), CreateStringSet("foo", "bar", "baz")},
// Test union different values in both set.
{CreateStringSet("foo", "baz"), CreateStringSet("baz", "bar"), CreateStringSet("foo", "baz", "bar")},
// Test union all different values.
{CreateStringSet("foo", "baz"), CreateStringSet("poo", "bar"), CreateStringSet("foo", "baz", "poo", "bar")},
}
for _, testCase := range testCases {
if result := testCase.set1.Union(testCase.set2); !result.Equals(testCase.expectedResult) {
t.Fatalf("expected: %s, got: %s", testCase.expectedResult, result)
}
}
}
// StringSet.MarshalJSON() is called with series of cases for valid and erroneous inputs and the result is validated.
func TestStringSetMarshalJSON(t *testing.T) {
testCases := []struct {
set StringSet
expectedResult string
}{
// Test set with values.
{CreateStringSet("foo", "bar"), `["bar","foo"]`},
// Test empty set.
{NewStringSet(), "[]"},
}
for _, testCase := range testCases {
if result, _ := testCase.set.MarshalJSON(); string(result) != testCase.expectedResult {
t.Fatalf("expected: %s, got: %s", testCase.expectedResult, string(result))
}
}
}
// StringSet.UnmarshalJSON() is called with series of cases for valid and erroneous inputs and the result is validated.
func TestStringSetUnmarshalJSON(t *testing.T) {
testCases := []struct {
data []byte
expectedResult string
}{
// Test to convert JSON array to set.
{[]byte(`["bar","foo"]`), `[bar foo]`},
// Test to convert JSON string to set.
{[]byte(`"bar"`), `[bar]`},
// Test to convert JSON empty array to set.
{[]byte(`[]`), `[]`},
// Test to convert JSON empty string to set.
{[]byte(`""`), `[]`},
}
for _, testCase := range testCases {
var set StringSet
set.UnmarshalJSON(testCase.data)
if result := set.String(); result != testCase.expectedResult {
t.Fatalf("expected: %s, got: %s", testCase.expectedResult, result)
}
}
}
// StringSet.String() is called with series of cases for valid and erroneous inputs and the result is validated.
func TestStringSetString(t *testing.T) {
testCases := []struct {
set StringSet
expectedResult string
}{
// Test empty set.
{NewStringSet(), `[]`},
// Test set with empty value.
{CreateStringSet(""), `[]`},
// Test set with value.
{CreateStringSet("foo"), `[foo]`},
}
for _, testCase := range testCases {
if str := testCase.set.String(); str != testCase.expectedResult {
t.Fatalf("expected: %s, got: %s", testCase.expectedResult, str)
}
}
}

View File

@ -113,7 +113,7 @@ func getHashedPayload(req http.Request) string {
	hashedPayload := req.Header.Get("X-Amz-Content-Sha256")
	if hashedPayload == "" {
		// Presign does not have a payload, use S3 recommended value.
-		hashedPayload = "UNSIGNED-PAYLOAD"
+		hashedPayload = unsignedPayload
	}
	return hashedPayload
}

View File

@ -24,7 +24,9 @@ var awsS3EndpointMap = map[string]string{
"us-west-1": "s3-us-west-1.amazonaws.com", "us-west-1": "s3-us-west-1.amazonaws.com",
"eu-west-1": "s3-eu-west-1.amazonaws.com", "eu-west-1": "s3-eu-west-1.amazonaws.com",
"eu-central-1": "s3-eu-central-1.amazonaws.com", "eu-central-1": "s3-eu-central-1.amazonaws.com",
"ap-south-1": "s3-ap-south-1.amazonaws.com",
"ap-southeast-1": "s3-ap-southeast-1.amazonaws.com", "ap-southeast-1": "s3-ap-southeast-1.amazonaws.com",
"ap-southeast-2": "s3-ap-southeast-2.amazonaws.com",
"ap-northeast-1": "s3-ap-northeast-1.amazonaws.com", "ap-northeast-1": "s3-ap-northeast-1.amazonaws.com",
"ap-northeast-2": "s3-ap-northeast-2.amazonaws.com", "ap-northeast-2": "s3-ap-northeast-2.amazonaws.com",
"sa-east-1": "s3-sa-east-1.amazonaws.com", "sa-east-1": "s3-sa-east-1.amazonaws.com",