
Add Godeps for s3 backend.

Chris Howey 2015-05-13 13:11:07 -05:00
parent bfe221e71c
commit dc6d92a076
24 changed files with 5434 additions and 0 deletions

Godeps/Godeps.json generated
View File

@@ -18,10 +18,22 @@
"ImportPath": "github.com/kr/fs",
"Rev": "2788f0dbd16903de03cb8186e5c7d97b69ad387b"
},
{
"ImportPath": "github.com/mitchellh/goamz/aws",
"Rev": "caaaea8b30ee15616494ee68abd5d8ebbbef05cf"
},
{
"ImportPath": "github.com/mitchellh/goamz/s3",
"Rev": "caaaea8b30ee15616494ee68abd5d8ebbbef05cf"
},
{
"ImportPath": "github.com/pkg/sftp",
"Rev": "506297c9013d2893d5c5daaa9155e7333a1c58de"
},
{
"ImportPath": "github.com/vaughan0/go-ini",
"Rev": "a98ad7ee00ec53921f08832bc06ecf7fd600e6a1"
},
{
"ImportPath": "golang.org/x/crypto/pbkdf2",
"Rev": "24ffb5feb3312a39054178a4b0a4554fc2201248"

View File

@@ -0,0 +1,74 @@
package aws
import (
"time"
)
// AttemptStrategy represents a strategy for waiting for an action
// to complete successfully. This is an internal type used by the
// implementation of other goamz packages.
type AttemptStrategy struct {
Total time.Duration // total duration of attempt.
Delay time.Duration // interval between each try in the burst.
Min int // minimum number of retries; overrides Total
}
type Attempt struct {
strategy AttemptStrategy
last time.Time
end time.Time
force bool
count int
}
// Start begins a new sequence of attempts for the given strategy.
func (s AttemptStrategy) Start() *Attempt {
now := time.Now()
return &Attempt{
strategy: s,
last: now,
end: now.Add(s.Total),
force: true,
}
}
// Next waits until it is time to perform the next attempt or returns
// false if it is time to stop trying.
func (a *Attempt) Next() bool {
now := time.Now()
sleep := a.nextSleep(now)
if !a.force && !now.Add(sleep).Before(a.end) && a.strategy.Min <= a.count {
return false
}
a.force = false
if sleep > 0 && a.count > 0 {
time.Sleep(sleep)
now = time.Now()
}
a.count++
a.last = now
return true
}
func (a *Attempt) nextSleep(now time.Time) time.Duration {
sleep := a.strategy.Delay - now.Sub(a.last)
if sleep < 0 {
return 0
}
return sleep
}
// HasNext returns whether another attempt will be made if the current
// one fails. If it returns true, the following call to Next is
// guaranteed to return true.
func (a *Attempt) HasNext() bool {
if a.force || a.strategy.Min > a.count {
return true
}
now := time.Now()
if now.Add(a.nextSleep(now)).Before(a.end) {
a.force = true
return true
}
return false
}
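For context, a minimal sketch of how a caller drives this retry loop; the HTTP request and URL are hypothetical placeholders, not part of this commit:

package main

import (
	"log"
	"net/http"
	"time"

	"github.com/mitchellh/goamz/aws"
)

func main() {
	// Retry for up to five seconds, pausing 200ms between tries.
	strategy := aws.AttemptStrategy{
		Total: 5 * time.Second,
		Delay: 200 * time.Millisecond,
	}
	var resp *http.Response
	var err error
	for attempt := strategy.Start(); attempt.Next(); {
		resp, err = http.Get("http://example.com/") // hypothetical operation to retry
		if err == nil {
			break
		}
	}
	if err != nil {
		log.Fatalf("all attempts failed: %v", err)
	}
	resp.Body.Close()
	log.Println("succeeded:", resp.Status)
}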

View File

@@ -0,0 +1,57 @@
package aws_test
import (
"github.com/mitchellh/goamz/aws"
. "github.com/motain/gocheck"
"time"
)
func (S) TestAttemptTiming(c *C) {
testAttempt := aws.AttemptStrategy{
Total: 0.25e9,
Delay: 0.1e9,
}
want := []time.Duration{0, 0.1e9, 0.2e9, 0.2e9}
got := make([]time.Duration, 0, len(want)) // avoid allocation when testing timing
t0 := time.Now()
for a := testAttempt.Start(); a.Next(); {
got = append(got, time.Now().Sub(t0))
}
got = append(got, time.Now().Sub(t0))
c.Assert(got, HasLen, len(want))
const margin = 0.01e9
for i, got := range got {
lo := want[i] - margin
hi := want[i] + margin
if got < lo || got > hi {
c.Errorf("attempt %d want %g got %g", i, want[i].Seconds(), got.Seconds())
}
}
}
func (S) TestAttemptNextHasNext(c *C) {
a := aws.AttemptStrategy{}.Start()
c.Assert(a.Next(), Equals, true)
c.Assert(a.Next(), Equals, false)
a = aws.AttemptStrategy{}.Start()
c.Assert(a.Next(), Equals, true)
c.Assert(a.HasNext(), Equals, false)
c.Assert(a.Next(), Equals, false)
a = aws.AttemptStrategy{Total: 2e8}.Start()
c.Assert(a.Next(), Equals, true)
c.Assert(a.HasNext(), Equals, true)
time.Sleep(2e8)
c.Assert(a.HasNext(), Equals, true)
c.Assert(a.Next(), Equals, true)
c.Assert(a.Next(), Equals, false)
a = aws.AttemptStrategy{Total: 1e8, Min: 2}.Start()
time.Sleep(1e8)
c.Assert(a.Next(), Equals, true)
c.Assert(a.HasNext(), Equals, true)
c.Assert(a.Next(), Equals, true)
c.Assert(a.HasNext(), Equals, false)
c.Assert(a.Next(), Equals, false)
}

View File

@@ -0,0 +1,445 @@
//
// goamz - Go packages to interact with the Amazon Web Services.
//
// https://wiki.ubuntu.com/goamz
//
// Copyright (c) 2011 Canonical Ltd.
//
// Written by Gustavo Niemeyer <gustavo.niemeyer@canonical.com>
//
package aws
import (
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"os"
"github.com/vaughan0/go-ini"
)
// Region defines the URLs where AWS services may be accessed.
//
// See http://goo.gl/d8BP1 for more details.
type Region struct {
Name string // the canonical name of this region.
EC2Endpoint string
S3Endpoint string
S3BucketEndpoint string // Not needed by AWS S3. Use ${bucket} for bucket name.
S3LocationConstraint bool // true if this region requires a LocationConstraint declaration.
S3LowercaseBucket bool // true if the region requires bucket names to be lower case.
SDBEndpoint string
SNSEndpoint string
SQSEndpoint string
IAMEndpoint string
ELBEndpoint string
AutoScalingEndpoint string
RdsEndpoint string
Route53Endpoint string
}
var USGovWest = Region{
"us-gov-west-1",
"https://ec2.us-gov-west-1.amazonaws.com",
"https://s3-fips-us-gov-west-1.amazonaws.com",
"",
true,
true,
"",
"https://sns.us-gov-west-1.amazonaws.com",
"https://sqs.us-gov-west-1.amazonaws.com",
"https://iam.us-gov.amazonaws.com",
"https://elasticloadbalancing.us-gov-west-1.amazonaws.com",
"https://autoscaling.us-gov-west-1.amazonaws.com",
"https://rds.us-gov-west-1.amazonaws.com",
"https://route53.amazonaws.com",
}
var USEast = Region{
"us-east-1",
"https://ec2.us-east-1.amazonaws.com",
"https://s3.amazonaws.com",
"",
false,
false,
"https://sdb.amazonaws.com",
"https://sns.us-east-1.amazonaws.com",
"https://sqs.us-east-1.amazonaws.com",
"https://iam.amazonaws.com",
"https://elasticloadbalancing.us-east-1.amazonaws.com",
"https://autoscaling.us-east-1.amazonaws.com",
"https://rds.us-east-1.amazonaws.com",
"https://route53.amazonaws.com",
}
var USWest = Region{
"us-west-1",
"https://ec2.us-west-1.amazonaws.com",
"https://s3-us-west-1.amazonaws.com",
"",
true,
true,
"https://sdb.us-west-1.amazonaws.com",
"https://sns.us-west-1.amazonaws.com",
"https://sqs.us-west-1.amazonaws.com",
"https://iam.amazonaws.com",
"https://elasticloadbalancing.us-west-1.amazonaws.com",
"https://autoscaling.us-west-1.amazonaws.com",
"https://rds.us-west-1.amazonaws.com",
"https://route53.amazonaws.com",
}
var USWest2 = Region{
"us-west-2",
"https://ec2.us-west-2.amazonaws.com",
"https://s3-us-west-2.amazonaws.com",
"",
true,
true,
"https://sdb.us-west-2.amazonaws.com",
"https://sns.us-west-2.amazonaws.com",
"https://sqs.us-west-2.amazonaws.com",
"https://iam.amazonaws.com",
"https://elasticloadbalancing.us-west-2.amazonaws.com",
"https://autoscaling.us-west-2.amazonaws.com",
"https://rds.us-west-2.amazonaws.com",
"https://route53.amazonaws.com",
}
var EUWest = Region{
"eu-west-1",
"https://ec2.eu-west-1.amazonaws.com",
"https://s3-eu-west-1.amazonaws.com",
"",
true,
true,
"https://sdb.eu-west-1.amazonaws.com",
"https://sns.eu-west-1.amazonaws.com",
"https://sqs.eu-west-1.amazonaws.com",
"https://iam.amazonaws.com",
"https://elasticloadbalancing.eu-west-1.amazonaws.com",
"https://autoscaling.eu-west-1.amazonaws.com",
"https://rds.eu-west-1.amazonaws.com",
"https://route53.amazonaws.com",
}
var EUCentral = Region{
"eu-central-1",
"https://ec2.eu-central-1.amazonaws.com",
"https://s3-eu-central-1.amazonaws.com",
"",
true,
true,
"",
"https://sns.eu-central-1.amazonaws.com",
"https://sqs.eu-central-1.amazonaws.com",
"https://iam.amazonaws.com",
"https://elasticloadbalancing.eu-central-1.amazonaws.com",
"https://autoscaling.eu-central-1.amazonaws.com",
"https://rds.eu-central-1.amazonaws.com",
"https://route53.amazonaws.com",
}
var APSoutheast = Region{
"ap-southeast-1",
"https://ec2.ap-southeast-1.amazonaws.com",
"https://s3-ap-southeast-1.amazonaws.com",
"",
true,
true,
"https://sdb.ap-southeast-1.amazonaws.com",
"https://sns.ap-southeast-1.amazonaws.com",
"https://sqs.ap-southeast-1.amazonaws.com",
"https://iam.amazonaws.com",
"https://elasticloadbalancing.ap-southeast-1.amazonaws.com",
"https://autoscaling.ap-southeast-1.amazonaws.com",
"https://rds.ap-southeast-1.amazonaws.com",
"https://route53.amazonaws.com",
}
var APSoutheast2 = Region{
"ap-southeast-2",
"https://ec2.ap-southeast-2.amazonaws.com",
"https://s3-ap-southeast-2.amazonaws.com",
"",
true,
true,
"https://sdb.ap-southeast-2.amazonaws.com",
"https://sns.ap-southeast-2.amazonaws.com",
"https://sqs.ap-southeast-2.amazonaws.com",
"https://iam.amazonaws.com",
"https://elasticloadbalancing.ap-southeast-2.amazonaws.com",
"https://autoscaling.ap-southeast-2.amazonaws.com",
"https://rds.ap-southeast-2.amazonaws.com",
"https://route53.amazonaws.com",
}
var APNortheast = Region{
"ap-northeast-1",
"https://ec2.ap-northeast-1.amazonaws.com",
"https://s3-ap-northeast-1.amazonaws.com",
"",
true,
true,
"https://sdb.ap-northeast-1.amazonaws.com",
"https://sns.ap-northeast-1.amazonaws.com",
"https://sqs.ap-northeast-1.amazonaws.com",
"https://iam.amazonaws.com",
"https://elasticloadbalancing.ap-northeast-1.amazonaws.com",
"https://autoscaling.ap-northeast-1.amazonaws.com",
"https://rds.ap-northeast-1.amazonaws.com",
"https://route53.amazonaws.com",
}
var SAEast = Region{
"sa-east-1",
"https://ec2.sa-east-1.amazonaws.com",
"https://s3-sa-east-1.amazonaws.com",
"",
true,
true,
"https://sdb.sa-east-1.amazonaws.com",
"https://sns.sa-east-1.amazonaws.com",
"https://sqs.sa-east-1.amazonaws.com",
"https://iam.amazonaws.com",
"https://elasticloadbalancing.sa-east-1.amazonaws.com",
"https://autoscaling.sa-east-1.amazonaws.com",
"https://rds.sa-east-1.amazonaws.com",
"https://route53.amazonaws.com",
}
var CNNorth = Region{
"cn-north-1",
"https://ec2.cn-north-1.amazonaws.com.cn",
"https://s3.cn-north-1.amazonaws.com.cn",
"",
true,
true,
"",
"https://sns.cn-north-1.amazonaws.com.cn",
"https://sqs.cn-north-1.amazonaws.com.cn",
"https://iam.cn-north-1.amazonaws.com.cn",
"https://elasticloadbalancing.cn-north-1.amazonaws.com.cn",
"https://autoscaling.cn-north-1.amazonaws.com.cn",
"https://rds.cn-north-1.amazonaws.com.cn",
"https://route53.amazonaws.com",
}
var Regions = map[string]Region{
APNortheast.Name: APNortheast,
APSoutheast.Name: APSoutheast,
APSoutheast2.Name: APSoutheast2,
EUWest.Name: EUWest,
EUCentral.Name: EUCentral,
USEast.Name: USEast,
USWest.Name: USWest,
USWest2.Name: USWest2,
SAEast.Name: SAEast,
USGovWest.Name: USGovWest,
CNNorth.Name: CNNorth,
}
type Auth struct {
AccessKey, SecretKey, Token string
}
var unreserved = make([]bool, 128)
var hex = "0123456789ABCDEF"
func init() {
// RFC3986
u := "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz01234567890-_.~"
for _, c := range u {
unreserved[c] = true
}
}
type credentials struct {
Code string
LastUpdated string
Type string
AccessKeyId string
SecretAccessKey string
Token string
Expiration string
}
// GetMetaData retrieves instance metadata about the current machine.
//
// See http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AESDG-chapter-instancedata.html for more details.
func GetMetaData(path string) (contents []byte, err error) {
url := "http://169.254.169.254/latest/meta-data/" + path
resp, err := RetryingClient.Get(url)
if err != nil {
return
}
defer resp.Body.Close()
if resp.StatusCode != 200 {
err = fmt.Errorf("Code %d returned for url %s", resp.StatusCode, url)
return
}
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return
}
return body, err
}
func getInstanceCredentials() (cred credentials, err error) {
credentialPath := "iam/security-credentials/"
// Get the instance role
role, err := GetMetaData(credentialPath)
if err != nil {
return
}
// Get the instance role credentials
credentialJSON, err := GetMetaData(credentialPath + string(role))
if err != nil {
return
}
err = json.Unmarshal([]byte(credentialJSON), &cred)
return
}
// GetAuth creates an Auth based on either passed in credentials,
// environment information or instance based role credentials.
func GetAuth(accessKey string, secretKey string) (auth Auth, err error) {
// First try passed in credentials
if accessKey != "" && secretKey != "" {
return Auth{accessKey, secretKey, ""}, nil
}
// Next try to get auth from the shared credentials file
auth, err = SharedAuth()
if err == nil {
// Found auth, return
return
}
// Next try to get auth from the environment
auth, err = EnvAuth()
if err == nil {
// Found auth, return
return
}
// Next try getting auth from the instance role
cred, err := getInstanceCredentials()
if err == nil {
// Found auth, return
auth.AccessKey = cred.AccessKeyId
auth.SecretKey = cred.SecretAccessKey
auth.Token = cred.Token
return
}
err = errors.New("No valid AWS authentication found")
return
}
// SharedAuth creates an Auth based on shared credentials stored in
// $HOME/.aws/credentials. The AWS_PROFILE environment variables is used to
// select the profile.
func SharedAuth() (auth Auth, err error) {
var profileName = os.Getenv("AWS_PROFILE")
if profileName == "" {
profileName = "default"
}
var credentialsFile = os.Getenv("AWS_CREDENTIAL_FILE")
if credentialsFile == "" {
var homeDir = os.Getenv("HOME")
if homeDir == "" {
err = errors.New("Could not get HOME")
return
}
credentialsFile = homeDir + "/.aws/credentials"
}
file, err := ini.LoadFile(credentialsFile)
if err != nil {
err = errors.New("Couldn't parse AWS credentials file")
return
}
var profile = file[profileName]
if profile == nil {
err = errors.New("Couldn't find profile in AWS credentials file")
return
}
auth.AccessKey = profile["aws_access_key_id"]
auth.SecretKey = profile["aws_secret_access_key"]
if auth.AccessKey == "" {
err = errors.New("AWS_ACCESS_KEY_ID not found in environment in credentials file")
}
if auth.SecretKey == "" {
err = errors.New("AWS_SECRET_ACCESS_KEY not found in credentials file")
}
return
}
// EnvAuth creates an Auth based on environment information.
// The AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY environment
// variables are used.
// For accounts that require a security token, it is read from AWS_SECURITY_TOKEN.
func EnvAuth() (auth Auth, err error) {
auth.AccessKey = os.Getenv("AWS_ACCESS_KEY_ID")
if auth.AccessKey == "" {
auth.AccessKey = os.Getenv("AWS_ACCESS_KEY")
}
auth.SecretKey = os.Getenv("AWS_SECRET_ACCESS_KEY")
if auth.SecretKey == "" {
auth.SecretKey = os.Getenv("AWS_SECRET_KEY")
}
auth.Token = os.Getenv("AWS_SECURITY_TOKEN")
if auth.AccessKey == "" {
err = errors.New("AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY not found in environment")
}
if auth.SecretKey == "" {
err = errors.New("AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY not found in environment")
}
return
}
// Encode takes a string and URI-encodes it in a way suitable
// to be used in AWS signatures.
func Encode(s string) string {
encode := false
for i := 0; i != len(s); i++ {
c := s[i]
if c > 127 || !unreserved[c] {
encode = true
break
}
}
if !encode {
return s
}
e := make([]byte, len(s)*3)
ei := 0
for i := 0; i != len(s); i++ {
c := s[i]
if c > 127 || !unreserved[c] {
e[ei] = '%'
e[ei+1] = hex[c>>4]
e[ei+2] = hex[c&0xF]
ei += 3
} else {
e[ei] = c
ei += 1
}
}
return string(e[:ei])
}
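To illustrate the credential resolution order above (explicit keys, then the shared credentials file, then environment variables, then the instance role), a hedged usage sketch; the empty strings force GetAuth to fall through, and the region name is just an example:

package main

import (
	"fmt"
	"log"

	"github.com/mitchellh/goamz/aws"
)

func main() {
	// Empty keys make GetAuth fall back to SharedAuth, EnvAuth,
	// and finally the EC2 instance role, in that order.
	auth, err := aws.GetAuth("", "")
	if err != nil {
		log.Fatal(err)
	}
	// Regions are looked up by canonical name in the Regions map.
	region, ok := aws.Regions["us-east-1"]
	if !ok {
		log.Fatal("unknown region")
	}
	fmt.Println(auth.AccessKey != "", region.S3Endpoint)
	// Encode percent-escapes everything outside the RFC 3986 unreserved set.
	fmt.Println(aws.Encode("a/b c")) // prints "a%2Fb%20c"
}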

View File

@@ -0,0 +1,203 @@
package aws_test
import (
"github.com/mitchellh/goamz/aws"
. "github.com/motain/gocheck"
"io/ioutil"
"os"
"strings"
"testing"
)
func Test(t *testing.T) {
TestingT(t)
}
var _ = Suite(&S{})
type S struct {
environ []string
}
func (s *S) SetUpSuite(c *C) {
s.environ = os.Environ()
}
func (s *S) TearDownTest(c *C) {
os.Clearenv()
for _, kv := range s.environ {
l := strings.SplitN(kv, "=", 2)
os.Setenv(l[0], l[1])
}
}
func (s *S) TestSharedAuthNoHome(c *C) {
os.Clearenv()
os.Setenv("AWS_PROFILE", "foo")
_, err := aws.SharedAuth()
c.Assert(err, ErrorMatches, "Could not get HOME")
}
func (s *S) TestSharedAuthNoCredentialsFile(c *C) {
os.Clearenv()
os.Setenv("AWS_PROFILE", "foo")
os.Setenv("HOME", "/tmp")
_, err := aws.SharedAuth()
c.Assert(err, ErrorMatches, "Couldn't parse AWS credentials file")
}
func (s *S) TestSharedAuthNoProfileInFile(c *C) {
os.Clearenv()
os.Setenv("AWS_PROFILE", "foo")
d, err := ioutil.TempDir("", "")
if err != nil {
panic(err)
}
defer os.RemoveAll(d)
err = os.Mkdir(d+"/.aws", 0755)
if err != nil {
panic(err)
}
ioutil.WriteFile(d+"/.aws/credentials", []byte("[bar]\n"), 0644)
os.Setenv("HOME", d)
_, err = aws.SharedAuth()
c.Assert(err, ErrorMatches, "Couldn't find profile in AWS credentials file")
}
func (s *S) TestSharedAuthNoKeysInProfile(c *C) {
os.Clearenv()
os.Setenv("AWS_PROFILE", "bar")
d, err := ioutil.TempDir("", "")
if err != nil {
panic(err)
}
defer os.RemoveAll(d)
err = os.Mkdir(d+"/.aws", 0755)
if err != nil {
panic(err)
}
ioutil.WriteFile(d+"/.aws/credentials", []byte("[bar]\nawsaccesskeyid = AK.."), 0644)
os.Setenv("HOME", d)
_, err = aws.SharedAuth()
c.Assert(err, ErrorMatches, "AWS_SECRET_ACCESS_KEY not found in credentials file")
}
func (s *S) TestSharedAuthDefaultCredentials(c *C) {
os.Clearenv()
d, err := ioutil.TempDir("", "")
if err != nil {
panic(err)
}
defer os.RemoveAll(d)
err = os.Mkdir(d+"/.aws", 0755)
if err != nil {
panic(err)
}
ioutil.WriteFile(d+"/.aws/credentials", []byte("[default]\naws_access_key_id = access\naws_secret_access_key = secret\n"), 0644)
os.Setenv("HOME", d)
auth, err := aws.SharedAuth()
c.Assert(err, IsNil)
c.Assert(auth, Equals, aws.Auth{SecretKey: "secret", AccessKey: "access"})
}
func (s *S) TestSharedAuth(c *C) {
os.Clearenv()
os.Setenv("AWS_PROFILE", "bar")
d, err := ioutil.TempDir("", "")
if err != nil {
panic(err)
}
defer os.RemoveAll(d)
err = os.Mkdir(d+"/.aws", 0755)
if err != nil {
panic(err)
}
ioutil.WriteFile(d+"/.aws/credentials", []byte("[bar]\naws_access_key_id = access\naws_secret_access_key = secret\n"), 0644)
os.Setenv("HOME", d)
auth, err := aws.SharedAuth()
c.Assert(err, IsNil)
c.Assert(auth, Equals, aws.Auth{SecretKey: "secret", AccessKey: "access"})
}
func (s *S) TestEnvAuthNoSecret(c *C) {
os.Clearenv()
_, err := aws.EnvAuth()
c.Assert(err, ErrorMatches, "AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY not found in environment")
}
func (s *S) TestEnvAuthNoAccess(c *C) {
os.Clearenv()
os.Setenv("AWS_SECRET_ACCESS_KEY", "foo")
_, err := aws.EnvAuth()
c.Assert(err, ErrorMatches, "AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY not found in environment")
}
func (s *S) TestEnvAuth(c *C) {
os.Clearenv()
os.Setenv("AWS_SECRET_ACCESS_KEY", "secret")
os.Setenv("AWS_ACCESS_KEY_ID", "access")
auth, err := aws.EnvAuth()
c.Assert(err, IsNil)
c.Assert(auth, Equals, aws.Auth{SecretKey: "secret", AccessKey: "access"})
}
func (s *S) TestEnvAuthWithToken(c *C) {
os.Clearenv()
os.Setenv("AWS_SECRET_ACCESS_KEY", "secret")
os.Setenv("AWS_ACCESS_KEY_ID", "access")
os.Setenv("AWS_SECURITY_TOKEN", "token")
auth, err := aws.EnvAuth()
c.Assert(err, IsNil)
c.Assert(auth, Equals, aws.Auth{SecretKey: "secret", AccessKey: "access", Token: "token"})
}
func (s *S) TestEnvAuthAlt(c *C) {
os.Clearenv()
os.Setenv("AWS_SECRET_KEY", "secret")
os.Setenv("AWS_ACCESS_KEY", "access")
auth, err := aws.EnvAuth()
c.Assert(err, IsNil)
c.Assert(auth, Equals, aws.Auth{SecretKey: "secret", AccessKey: "access"})
}
func (s *S) TestGetAuthStatic(c *C) {
auth, err := aws.GetAuth("access", "secret")
c.Assert(err, IsNil)
c.Assert(auth, Equals, aws.Auth{SecretKey: "secret", AccessKey: "access"})
}
func (s *S) TestGetAuthEnv(c *C) {
os.Clearenv()
os.Setenv("AWS_SECRET_ACCESS_KEY", "secret")
os.Setenv("AWS_ACCESS_KEY_ID", "access")
auth, err := aws.GetAuth("", "")
c.Assert(err, IsNil)
c.Assert(auth, Equals, aws.Auth{SecretKey: "secret", AccessKey: "access"})
}
func (s *S) TestEncode(c *C) {
c.Assert(aws.Encode("foo"), Equals, "foo")
c.Assert(aws.Encode("/"), Equals, "%2F")
}
func (s *S) TestRegionsAreNamed(c *C) {
for n, r := range aws.Regions {
c.Assert(n, Equals, r.Name)
}
}

View File

@@ -0,0 +1,125 @@
package aws
import (
"math"
"net"
"net/http"
"time"
)
type RetryableFunc func(*http.Request, *http.Response, error) bool
type WaitFunc func(try int)
type DeadlineFunc func() time.Time
type ResilientTransport struct {
// Timeout is the maximum amount of time a dial will wait for
// a connect to complete.
//
// The default is no timeout.
//
// With or without a timeout, the operating system may impose
// its own earlier timeout. For instance, TCP timeouts are
// often around 3 minutes.
DialTimeout time.Duration
// MaxTries, if non-zero, specifies the number of times we will retry on
// failure. Retries are only attempted for temporary network errors or known
// safe failures.
MaxTries int
Deadline DeadlineFunc
ShouldRetry RetryableFunc
Wait WaitFunc
transport *http.Transport
}
// NewClient is a convenience function for creating an http.Client backed by rt.
func NewClient(rt *ResilientTransport) *http.Client {
rt.transport = &http.Transport{
Dial: func(netw, addr string) (net.Conn, error) {
c, err := net.DialTimeout(netw, addr, rt.DialTimeout)
if err != nil {
return nil, err
}
c.SetDeadline(rt.Deadline())
return c, nil
},
DisableKeepAlives: true,
Proxy: http.ProxyFromEnvironment,
}
// TODO: Would be nice if ResilientTransport allowed clients to initialize
// with http.Transport attributes.
return &http.Client{
Transport: rt,
}
}
var retryingTransport = &ResilientTransport{
Deadline: func() time.Time {
return time.Now().Add(5 * time.Second)
},
DialTimeout: 10 * time.Second,
MaxTries: 3,
ShouldRetry: awsRetry,
Wait: ExpBackoff,
}
// Exported default client
var RetryingClient = NewClient(retryingTransport)
func (t *ResilientTransport) RoundTrip(req *http.Request) (*http.Response, error) {
return t.tries(req)
}
// Retry a request a maximum of t.MaxTries times.
// We'll only retry if the proper criteria are met.
// If a wait function is specified, wait that amount of time
// in between requests.
func (t *ResilientTransport) tries(req *http.Request) (res *http.Response, err error) {
for try := 0; try < t.MaxTries; try += 1 {
res, err = t.transport.RoundTrip(req)
if !t.ShouldRetry(req, res, err) {
break
}
if res != nil {
res.Body.Close()
}
if t.Wait != nil {
t.Wait(try)
}
}
return
}
func ExpBackoff(try int) {
time.Sleep(100 * time.Millisecond *
time.Duration(math.Exp2(float64(try))))
}
func LinearBackoff(try int) {
time.Sleep(time.Duration(try*100) * time.Millisecond)
}
// Decide if we should retry a request.
// In general, the criteria for retrying a request are described here:
// http://docs.aws.amazon.com/general/latest/gr/api-retries.html
func awsRetry(req *http.Request, res *http.Response, err error) bool {
retry := false
// Retry if there's a temporary network error.
if neterr, ok := err.(net.Error); ok {
if neterr.Temporary() {
retry = true
}
}
// Retry if we get a 5xx series error.
if res != nil {
if res.StatusCode >= 500 && res.StatusCode < 600 {
retry = true
}
}
return retry
}
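A sketch of using the exported RetryingClient, and of building a client with a custom strategy via NewClient; the URL is a placeholder, and the retry predicate here is a simplified stand-in, not the package's unexported awsRetry:

package main

import (
	"log"
	"net/http"
	"time"

	"github.com/mitchellh/goamz/aws"
)

func main() {
	// The package-level client retries up to 3 times with exponential backoff.
	resp, err := aws.RetryingClient.Get("http://example.com/") // placeholder URL
	if err != nil {
		log.Fatal(err)
	}
	resp.Body.Close()

	// A custom transport: more tries, linear backoff, retrying on any
	// error or 5xx response.
	custom := aws.NewClient(&aws.ResilientTransport{
		DialTimeout: 5 * time.Second,
		MaxTries:    5,
		Deadline:    func() time.Time { return time.Now().Add(10 * time.Second) },
		ShouldRetry: func(_ *http.Request, res *http.Response, err error) bool {
			return err != nil || (res != nil && res.StatusCode >= 500)
		},
		Wait: aws.LinearBackoff,
	})
	_ = custom
}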

View File

@@ -0,0 +1,121 @@
package aws_test
import (
"fmt"
"github.com/mitchellh/goamz/aws"
"io/ioutil"
"net/http"
"net/http/httptest"
"strings"
"testing"
"time"
)
// Retrieve the response from handler using aws.RetryingClient
func serveAndGet(handler http.HandlerFunc) (body string, err error) {
ts := httptest.NewServer(handler)
defer ts.Close()
resp, err := aws.RetryingClient.Get(ts.URL)
if err != nil {
return
}
if resp.StatusCode != 200 {
return "", fmt.Errorf("Bad status code: %d", resp.StatusCode)
}
greeting, err := ioutil.ReadAll(resp.Body)
resp.Body.Close()
if err != nil {
return
}
return strings.TrimSpace(string(greeting)), nil
}
func TestClient_expected(t *testing.T) {
body := "foo bar"
resp, err := serveAndGet(func(w http.ResponseWriter, r *http.Request) {
fmt.Fprintln(w, body)
})
if err != nil {
t.Fatal(err)
}
if resp != body {
t.Fatal("Body not as expected.")
}
}
func TestClient_delay(t *testing.T) {
body := "baz"
wait := 4
resp, err := serveAndGet(func(w http.ResponseWriter, r *http.Request) {
if wait < 0 {
// If we dipped to zero delay and still failed.
t.Fatal("Never succeeded.")
}
wait -= 1
time.Sleep(time.Second * time.Duration(wait))
fmt.Fprintln(w, body)
})
if err != nil {
t.Fatal(err)
}
if resp != body {
t.Fatal("Body not as expected.", resp)
}
}
func TestClient_no4xxRetry(t *testing.T) {
tries := 0
// Respond with a 404 error; 4xx responses should not be retried.
_, err := serveAndGet(func(w http.ResponseWriter, r *http.Request) {
tries += 1
http.Error(w, "error", 404)
})
if err == nil {
t.Fatal("should have error")
}
if tries != 1 {
t.Fatalf("should only try once: %d", tries)
}
}
func TestClient_retries(t *testing.T) {
body := "biz"
failed := false
// Fail once before succeeding.
resp, err := serveAndGet(func(w http.ResponseWriter, r *http.Request) {
if !failed {
http.Error(w, "error", 500)
failed = true
} else {
fmt.Fprintln(w, body)
}
})
if failed != true {
t.Error("We didn't retry!")
}
if err != nil {
t.Fatal(err)
}
if resp != body {
t.Fatal("Body not as expected.")
}
}
func TestClient_fails(t *testing.T) {
tries := 0
// Fail 3 times and return the last error.
_, err := serveAndGet(func(w http.ResponseWriter, r *http.Request) {
tries += 1
http.Error(w, "error", 500)
})
if err == nil {
t.Fatal("expected an error, got nil")
}
if tries != 3 {
t.Fatal("Didn't retry enough")
}
}

View File

@@ -0,0 +1,27 @@
package s3
import (
"github.com/mitchellh/goamz/aws"
)
var originalStrategy = attempts
func SetAttemptStrategy(s *aws.AttemptStrategy) {
if s == nil {
attempts = originalStrategy
} else {
attempts = *s
}
}
func Sign(auth aws.Auth, method, path string, params, headers map[string][]string) {
sign(auth, method, path, params, headers)
}
func SetListPartsMax(n int) {
listPartsMax = n
}
func SetListMultiMax(n int) {
listMultiMax = n
}
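These test-only hooks let the package's own tests swap the retry strategy; a sketch of how a test might use them, assuming a file in the s3 package's external test package (the hooks are only compiled during that package's tests):

package s3_test

import (
	"testing"
	"time"

	"github.com/mitchellh/goamz/aws"
	"github.com/mitchellh/goamz/s3"
)

func TestWithFastRetries(t *testing.T) {
	// Shrink the retry window so failures surface immediately.
	fast := aws.AttemptStrategy{Total: time.Millisecond, Delay: time.Millisecond}
	s3.SetAttemptStrategy(&fast)
	defer s3.SetAttemptStrategy(nil) // nil restores the original strategy

	// ... exercise code that would otherwise retry for seconds ...
}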

View File

@@ -0,0 +1,409 @@
package s3
import (
"bytes"
"crypto/md5"
"encoding/base64"
"encoding/hex"
"encoding/xml"
"errors"
"io"
"sort"
"strconv"
)
// Multi represents an unfinished multipart upload.
//
// Multipart uploads allow sending big objects in smaller chunks.
// After all parts have been sent, the upload must be explicitly
// completed by calling Complete with the list of parts.
//
// See http://goo.gl/vJfTG for an overview of multipart uploads.
type Multi struct {
Bucket *Bucket
Key string
UploadId string
}
// That's the default. Here just for testing.
var listMultiMax = 1000
type listMultiResp struct {
NextKeyMarker string
NextUploadIdMarker string
IsTruncated bool
Upload []Multi
CommonPrefixes []string `xml:"CommonPrefixes>Prefix"`
}
// ListMulti returns the list of unfinished multipart uploads in b.
//
// The prefix parameter limits the response to keys that begin with the
// specified prefix. You can use prefixes to separate a bucket into different
// groupings of keys (to get the feeling of folders, for example).
//
// The delim parameter causes the response to group all of the keys that
// share a common prefix up to the next delimiter in a single entry within
// the CommonPrefixes field. You can use delimiters to separate a bucket
// into different groupings of keys, similar to how folders would work.
//
// See http://goo.gl/ePioY for details.
func (b *Bucket) ListMulti(prefix, delim string) (multis []*Multi, prefixes []string, err error) {
params := map[string][]string{
"uploads": {""},
"max-uploads": {strconv.FormatInt(int64(listMultiMax), 10)},
"prefix": {prefix},
"delimiter": {delim},
}
for attempt := attempts.Start(); attempt.Next(); {
req := &request{
method: "GET",
bucket: b.Name,
params: params,
}
var resp listMultiResp
err := b.S3.query(req, &resp)
if shouldRetry(err) && attempt.HasNext() {
continue
}
if err != nil {
return nil, nil, err
}
for i := range resp.Upload {
multi := &resp.Upload[i]
multi.Bucket = b
multis = append(multis, multi)
}
prefixes = append(prefixes, resp.CommonPrefixes...)
if !resp.IsTruncated {
return multis, prefixes, nil
}
params["key-marker"] = []string{resp.NextKeyMarker}
params["upload-id-marker"] = []string{resp.NextUploadIdMarker}
attempt = attempts.Start() // Last request worked.
}
panic("unreachable")
}
// Multi returns a multipart upload handler for the provided key
// inside b. If a multipart upload exists for key, it is returned,
// otherwise a new multipart upload is initiated with contType and perm.
func (b *Bucket) Multi(key, contType string, perm ACL) (*Multi, error) {
multis, _, err := b.ListMulti(key, "")
if err != nil && !hasCode(err, "NoSuchUpload") {
return nil, err
}
for _, m := range multis {
if m.Key == key {
return m, nil
}
}
return b.InitMulti(key, contType, perm)
}
// InitMulti initializes a new multipart upload at the provided
// key inside b and returns a value for manipulating it.
//
// See http://goo.gl/XP8kL for details.
func (b *Bucket) InitMulti(key string, contType string, perm ACL) (*Multi, error) {
headers := map[string][]string{
"Content-Type": {contType},
"Content-Length": {"0"},
"x-amz-acl": {string(perm)},
}
params := map[string][]string{
"uploads": {""},
}
req := &request{
method: "POST",
bucket: b.Name,
path: key,
headers: headers,
params: params,
}
var err error
var resp struct {
UploadId string `xml:"UploadId"`
}
for attempt := attempts.Start(); attempt.Next(); {
err = b.S3.query(req, &resp)
if !shouldRetry(err) {
break
}
}
if err != nil {
return nil, err
}
return &Multi{Bucket: b, Key: key, UploadId: resp.UploadId}, nil
}
// PutPart sends part n of the multipart upload, reading all the content from r.
// Each part, except for the last one, must be at least 5MB in size.
//
// See http://goo.gl/pqZer for details.
func (m *Multi) PutPart(n int, r io.ReadSeeker) (Part, error) {
partSize, _, md5b64, err := seekerInfo(r)
if err != nil {
return Part{}, err
}
return m.putPart(n, r, partSize, md5b64)
}
func (m *Multi) putPart(n int, r io.ReadSeeker, partSize int64, md5b64 string) (Part, error) {
headers := map[string][]string{
"Content-Length": {strconv.FormatInt(partSize, 10)},
"Content-MD5": {md5b64},
}
params := map[string][]string{
"uploadId": {m.UploadId},
"partNumber": {strconv.FormatInt(int64(n), 10)},
}
for attempt := attempts.Start(); attempt.Next(); {
_, err := r.Seek(0, 0)
if err != nil {
return Part{}, err
}
req := &request{
method: "PUT",
bucket: m.Bucket.Name,
path: m.Key,
headers: headers,
params: params,
payload: r,
}
err = m.Bucket.S3.prepare(req)
if err != nil {
return Part{}, err
}
resp, err := m.Bucket.S3.run(req, nil)
if shouldRetry(err) && attempt.HasNext() {
continue
}
if err != nil {
return Part{}, err
}
etag := resp.Header.Get("ETag")
if etag == "" {
return Part{}, errors.New("part upload succeeded with no ETag")
}
return Part{n, etag, partSize}, nil
}
panic("unreachable")
}
func seekerInfo(r io.ReadSeeker) (size int64, md5hex string, md5b64 string, err error) {
_, err = r.Seek(0, 0)
if err != nil {
return 0, "", "", err
}
digest := md5.New()
size, err = io.Copy(digest, r)
if err != nil {
return 0, "", "", err
}
sum := digest.Sum(nil)
md5hex = hex.EncodeToString(sum)
md5b64 = base64.StdEncoding.EncodeToString(sum)
return size, md5hex, md5b64, nil
}
type Part struct {
N int `xml:"PartNumber"`
ETag string
Size int64
}
type partSlice []Part
func (s partSlice) Len() int { return len(s) }
func (s partSlice) Less(i, j int) bool { return s[i].N < s[j].N }
func (s partSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
type listPartsResp struct {
NextPartNumberMarker string
IsTruncated bool
Part []Part
}
// That's the default. Here just for testing.
var listPartsMax = 1000
// ListParts returns the list of previously uploaded parts in m,
// ordered by part number.
//
// See http://goo.gl/ePioY for details.
func (m *Multi) ListParts() ([]Part, error) {
params := map[string][]string{
"uploadId": {m.UploadId},
"max-parts": {strconv.FormatInt(int64(listPartsMax), 10)},
}
var parts partSlice
for attempt := attempts.Start(); attempt.Next(); {
req := &request{
method: "GET",
bucket: m.Bucket.Name,
path: m.Key,
params: params,
}
var resp listPartsResp
err := m.Bucket.S3.query(req, &resp)
if shouldRetry(err) && attempt.HasNext() {
continue
}
if err != nil {
return nil, err
}
parts = append(parts, resp.Part...)
if !resp.IsTruncated {
sort.Sort(parts)
return parts, nil
}
params["part-number-marker"] = []string{resp.NextPartNumberMarker}
attempt = attempts.Start() // Last request worked.
}
panic("unreachable")
}
type ReaderAtSeeker interface {
io.ReaderAt
io.ReadSeeker
}
// PutAll sends all of r via a multipart upload with parts no larger
// than partSize bytes, which must be set to at least 5MB.
// Parts previously uploaded are either reused if their checksum
// and size match the new part, or otherwise overwritten with the
// new content.
// PutAll returns all the parts of m (reused or not).
func (m *Multi) PutAll(r ReaderAtSeeker, partSize int64) ([]Part, error) {
old, err := m.ListParts()
if err != nil && !hasCode(err, "NoSuchUpload") {
return nil, err
}
reuse := 0 // Index of next old part to consider reusing.
current := 1 // Part number of latest good part handled.
totalSize, err := r.Seek(0, 2)
if err != nil {
return nil, err
}
first := true // Must send at least one empty part if the file is empty.
var result []Part
NextSection:
for offset := int64(0); offset < totalSize || first; offset += partSize {
first = false
if offset+partSize > totalSize {
partSize = totalSize - offset
}
section := io.NewSectionReader(r, offset, partSize)
_, md5hex, md5b64, err := seekerInfo(section)
if err != nil {
return nil, err
}
for reuse < len(old) && old[reuse].N <= current {
// Looks like this part was already sent.
part := &old[reuse]
etag := `"` + md5hex + `"`
if part.N == current && part.Size == partSize && part.ETag == etag {
// Checksum matches. Reuse the old part.
result = append(result, *part)
current++
continue NextSection
}
reuse++
}
// Part wasn't found or doesn't match. Send it.
part, err := m.putPart(current, section, partSize, md5b64)
if err != nil {
return nil, err
}
result = append(result, part)
current++
}
return result, nil
}
type completeUpload struct {
XMLName xml.Name `xml:"CompleteMultipartUpload"`
Parts completeParts `xml:"Part"`
}
type completePart struct {
PartNumber int
ETag string
}
type completeParts []completePart
func (p completeParts) Len() int { return len(p) }
func (p completeParts) Less(i, j int) bool { return p[i].PartNumber < p[j].PartNumber }
func (p completeParts) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
// Complete assembles the given previously uploaded parts into the
// final object. This operation may take several minutes.
//
// See http://goo.gl/2Z7Tw for details.
func (m *Multi) Complete(parts []Part) error {
params := map[string][]string{
"uploadId": {m.UploadId},
}
c := completeUpload{}
for _, p := range parts {
c.Parts = append(c.Parts, completePart{p.N, p.ETag})
}
sort.Sort(c.Parts)
data, err := xml.Marshal(&c)
if err != nil {
return err
}
for attempt := attempts.Start(); attempt.Next(); {
req := &request{
method: "POST",
bucket: m.Bucket.Name,
path: m.Key,
params: params,
payload: bytes.NewReader(data),
}
err := m.Bucket.S3.query(req, nil)
if shouldRetry(err) && attempt.HasNext() {
continue
}
return err
}
panic("unreachable")
}
// Abort deletes an unfinished multipart upload and any previously
// uploaded parts for it.
//
// After a multipart upload is aborted, no additional parts can be
// uploaded using it. However, if any part uploads are currently in
// progress, those part uploads might or might not succeed. As a result,
// it might be necessary to abort a given multipart upload multiple
// times in order to completely free all storage consumed by all parts.
//
// NOTE: If the described scenario happens to you, please report back to
// the goamz authors with details. In the future such retrying should be
// handled internally, but it's not clear what happens precisely (Is an
// error returned? Is the issue completely undetectable?).
//
// See http://goo.gl/dnyJw for details.
func (m *Multi) Abort() error {
params := map[string][]string{
"uploadId": {m.UploadId},
}
for attempt := attempts.Start(); attempt.Next(); {
req := &request{
method: "DELETE",
bucket: m.Bucket.Name,
path: m.Key,
params: params,
}
err := m.Bucket.S3.query(req, nil)
if shouldRetry(err) && attempt.HasNext() {
continue
}
return err
}
panic("unreachable")
}
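Putting the pieces together, a hedged sketch of a complete multipart upload; the credentials, bucket name, key, and data source are placeholders:

package main

import (
	"log"
	"os"

	"github.com/mitchellh/goamz/aws"
	"github.com/mitchellh/goamz/s3"
)

func main() {
	auth, err := aws.GetAuth("", "") // placeholder: resolve from environment
	if err != nil {
		log.Fatal(err)
	}
	b := s3.New(auth, aws.USEast).Bucket("example-bucket") // placeholder name

	f, err := os.Open("/tmp/payload.bin") // placeholder data source
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// Multi resumes an unfinished upload for the key, or initiates a new one.
	m, err := b.Multi("payload.bin", "application/octet-stream", s3.Private)
	if err != nil {
		log.Fatal(err)
	}
	// PutAll splits f into 5MB parts, reusing any matching parts already sent.
	parts, err := m.PutAll(f, 5*1024*1024)
	if err != nil {
		m.Abort() // best effort; see the caveats in Abort's doc comment above
		log.Fatal(err)
	}
	if err := m.Complete(parts); err != nil {
		log.Fatal(err)
	}
}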

View File

@@ -0,0 +1,370 @@
package s3_test
import (
"encoding/xml"
"github.com/mitchellh/goamz/s3"
. "github.com/motain/gocheck"
"io"
"io/ioutil"
"strings"
)
func (s *S) TestInitMulti(c *C) {
testServer.Response(200, nil, InitMultiResultDump)
b := s.s3.Bucket("sample")
multi, err := b.InitMulti("multi", "text/plain", s3.Private)
c.Assert(err, IsNil)
req := testServer.WaitRequest()
c.Assert(req.Method, Equals, "POST")
c.Assert(req.URL.Path, Equals, "/sample/multi")
c.Assert(req.Header["Content-Type"], DeepEquals, []string{"text/plain"})
c.Assert(req.Header["X-Amz-Acl"], DeepEquals, []string{"private"})
c.Assert(req.Form["uploads"], DeepEquals, []string{""})
c.Assert(multi.UploadId, Matches, "JNbR_[A-Za-z0-9.]+QQ--")
}
func (s *S) TestMultiNoPreviousUpload(c *C) {
// Don't retry the NoSuchUpload error.
s.DisableRetries()
testServer.Response(404, nil, NoSuchUploadErrorDump)
testServer.Response(200, nil, InitMultiResultDump)
b := s.s3.Bucket("sample")
multi, err := b.Multi("multi", "text/plain", s3.Private)
c.Assert(err, IsNil)
req := testServer.WaitRequest()
c.Assert(req.Method, Equals, "GET")
c.Assert(req.URL.Path, Equals, "/sample/")
c.Assert(req.Form["uploads"], DeepEquals, []string{""})
c.Assert(req.Form["prefix"], DeepEquals, []string{"multi"})
req = testServer.WaitRequest()
c.Assert(req.Method, Equals, "POST")
c.Assert(req.URL.Path, Equals, "/sample/multi")
c.Assert(req.Form["uploads"], DeepEquals, []string{""})
c.Assert(multi.UploadId, Matches, "JNbR_[A-Za-z0-9.]+QQ--")
}
func (s *S) TestMultiReturnOld(c *C) {
testServer.Response(200, nil, ListMultiResultDump)
b := s.s3.Bucket("sample")
multi, err := b.Multi("multi1", "text/plain", s3.Private)
c.Assert(err, IsNil)
c.Assert(multi.Key, Equals, "multi1")
c.Assert(multi.UploadId, Equals, "iUVug89pPvSswrikD")
req := testServer.WaitRequest()
c.Assert(req.Method, Equals, "GET")
c.Assert(req.URL.Path, Equals, "/sample/")
c.Assert(req.Form["uploads"], DeepEquals, []string{""})
c.Assert(req.Form["prefix"], DeepEquals, []string{"multi1"})
}
func (s *S) TestListParts(c *C) {
testServer.Response(200, nil, InitMultiResultDump)
testServer.Response(200, nil, ListPartsResultDump1)
testServer.Response(404, nil, NoSuchUploadErrorDump) // :-(
testServer.Response(200, nil, ListPartsResultDump2)
b := s.s3.Bucket("sample")
multi, err := b.InitMulti("multi", "text/plain", s3.Private)
c.Assert(err, IsNil)
parts, err := multi.ListParts()
c.Assert(err, IsNil)
c.Assert(parts, HasLen, 3)
c.Assert(parts[0].N, Equals, 1)
c.Assert(parts[0].Size, Equals, int64(5))
c.Assert(parts[0].ETag, Equals, `"ffc88b4ca90a355f8ddba6b2c3b2af5c"`)
c.Assert(parts[1].N, Equals, 2)
c.Assert(parts[1].Size, Equals, int64(5))
c.Assert(parts[1].ETag, Equals, `"d067a0fa9dc61a6e7195ca99696b5a89"`)
c.Assert(parts[2].N, Equals, 3)
c.Assert(parts[2].Size, Equals, int64(5))
c.Assert(parts[2].ETag, Equals, `"49dcd91231f801159e893fb5c6674985"`)
testServer.WaitRequest()
req := testServer.WaitRequest()
c.Assert(req.Method, Equals, "GET")
c.Assert(req.URL.Path, Equals, "/sample/multi")
c.Assert(req.Form.Get("uploadId"), Matches, "JNbR_[A-Za-z0-9.]+QQ--")
c.Assert(req.Form["max-parts"], DeepEquals, []string{"1000"})
testServer.WaitRequest() // The internal error.
req = testServer.WaitRequest()
c.Assert(req.Method, Equals, "GET")
c.Assert(req.URL.Path, Equals, "/sample/multi")
c.Assert(req.Form.Get("uploadId"), Matches, "JNbR_[A-Za-z0-9.]+QQ--")
c.Assert(req.Form["max-parts"], DeepEquals, []string{"1000"})
c.Assert(req.Form["part-number-marker"], DeepEquals, []string{"2"})
}
func (s *S) TestPutPart(c *C) {
headers := map[string]string{
"ETag": `"26f90efd10d614f100252ff56d88dad8"`,
}
testServer.Response(200, nil, InitMultiResultDump)
testServer.Response(200, headers, "")
b := s.s3.Bucket("sample")
multi, err := b.InitMulti("multi", "text/plain", s3.Private)
c.Assert(err, IsNil)
part, err := multi.PutPart(1, strings.NewReader("<part 1>"))
c.Assert(err, IsNil)
c.Assert(part.N, Equals, 1)
c.Assert(part.Size, Equals, int64(8))
c.Assert(part.ETag, Equals, headers["ETag"])
testServer.WaitRequest()
req := testServer.WaitRequest()
c.Assert(req.Method, Equals, "PUT")
c.Assert(req.URL.Path, Equals, "/sample/multi")
c.Assert(req.Form.Get("uploadId"), Matches, "JNbR_[A-Za-z0-9.]+QQ--")
c.Assert(req.Form["partNumber"], DeepEquals, []string{"1"})
c.Assert(req.Header["Content-Length"], DeepEquals, []string{"8"})
c.Assert(req.Header["Content-Md5"], DeepEquals, []string{"JvkO/RDWFPEAJS/1bYja2A=="})
}
func readAll(r io.Reader) string {
data, err := ioutil.ReadAll(r)
if err != nil {
panic(err)
}
return string(data)
}
func (s *S) TestPutAllNoPreviousUpload(c *C) {
// Don't retry the NoSuchUpload error.
s.DisableRetries()
etag1 := map[string]string{"ETag": `"etag1"`}
etag2 := map[string]string{"ETag": `"etag2"`}
etag3 := map[string]string{"ETag": `"etag3"`}
testServer.Response(200, nil, InitMultiResultDump)
testServer.Response(404, nil, NoSuchUploadErrorDump)
testServer.Response(200, etag1, "")
testServer.Response(200, etag2, "")
testServer.Response(200, etag3, "")
b := s.s3.Bucket("sample")
multi, err := b.InitMulti("multi", "text/plain", s3.Private)
c.Assert(err, IsNil)
parts, err := multi.PutAll(strings.NewReader("part1part2last"), 5)
c.Assert(parts, HasLen, 3)
c.Assert(parts[0].ETag, Equals, `"etag1"`)
c.Assert(parts[1].ETag, Equals, `"etag2"`)
c.Assert(parts[2].ETag, Equals, `"etag3"`)
c.Assert(err, IsNil)
// Init
testServer.WaitRequest()
// List old parts. Won't find anything.
req := testServer.WaitRequest()
c.Assert(req.Method, Equals, "GET")
c.Assert(req.URL.Path, Equals, "/sample/multi")
// Send part 1.
req = testServer.WaitRequest()
c.Assert(req.Method, Equals, "PUT")
c.Assert(req.URL.Path, Equals, "/sample/multi")
c.Assert(req.Form["partNumber"], DeepEquals, []string{"1"})
c.Assert(req.Header["Content-Length"], DeepEquals, []string{"5"})
c.Assert(readAll(req.Body), Equals, "part1")
// Send part 2.
req = testServer.WaitRequest()
c.Assert(req.Method, Equals, "PUT")
c.Assert(req.URL.Path, Equals, "/sample/multi")
c.Assert(req.Form["partNumber"], DeepEquals, []string{"2"})
c.Assert(req.Header["Content-Length"], DeepEquals, []string{"5"})
c.Assert(readAll(req.Body), Equals, "part2")
// Send part 3 with shorter body.
req = testServer.WaitRequest()
c.Assert(req.Method, Equals, "PUT")
c.Assert(req.URL.Path, Equals, "/sample/multi")
c.Assert(req.Form["partNumber"], DeepEquals, []string{"3"})
c.Assert(req.Header["Content-Length"], DeepEquals, []string{"4"})
c.Assert(readAll(req.Body), Equals, "last")
}
func (s *S) TestPutAllZeroSizeFile(c *C) {
// Don't retry the NoSuchUpload error.
s.DisableRetries()
etag1 := map[string]string{"ETag": `"etag1"`}
testServer.Response(200, nil, InitMultiResultDump)
testServer.Response(404, nil, NoSuchUploadErrorDump)
testServer.Response(200, etag1, "")
b := s.s3.Bucket("sample")
multi, err := b.InitMulti("multi", "text/plain", s3.Private)
c.Assert(err, IsNil)
// Must send at least one part, so that completing it will work.
parts, err := multi.PutAll(strings.NewReader(""), 5)
c.Assert(parts, HasLen, 1)
c.Assert(parts[0].ETag, Equals, `"etag1"`)
c.Assert(err, IsNil)
// Init
testServer.WaitRequest()
// List old parts. Won't find anything.
req := testServer.WaitRequest()
c.Assert(req.Method, Equals, "GET")
c.Assert(req.URL.Path, Equals, "/sample/multi")
// Send empty part.
req = testServer.WaitRequest()
c.Assert(req.Method, Equals, "PUT")
c.Assert(req.URL.Path, Equals, "/sample/multi")
c.Assert(req.Form["partNumber"], DeepEquals, []string{"1"})
c.Assert(req.Header["Content-Length"], DeepEquals, []string{"0"})
c.Assert(readAll(req.Body), Equals, "")
}
func (s *S) TestPutAllResume(c *C) {
etag2 := map[string]string{"ETag": `"etag2"`}
testServer.Response(200, nil, InitMultiResultDump)
testServer.Response(200, nil, ListPartsResultDump1)
testServer.Response(200, nil, ListPartsResultDump2)
testServer.Response(200, etag2, "")
b := s.s3.Bucket("sample")
multi, err := b.InitMulti("multi", "text/plain", s3.Private)
c.Assert(err, IsNil)
// "part1" and "part3" match the checksums in ResultDump1.
// The middle one is a mismatch (it refers to "part2").
parts, err := multi.PutAll(strings.NewReader("part1partXpart3"), 5)
c.Assert(parts, HasLen, 3)
c.Assert(parts[0].N, Equals, 1)
c.Assert(parts[0].Size, Equals, int64(5))
c.Assert(parts[0].ETag, Equals, `"ffc88b4ca90a355f8ddba6b2c3b2af5c"`)
c.Assert(parts[1].N, Equals, 2)
c.Assert(parts[1].Size, Equals, int64(5))
c.Assert(parts[1].ETag, Equals, `"etag2"`)
c.Assert(parts[2].N, Equals, 3)
c.Assert(parts[2].Size, Equals, int64(5))
c.Assert(parts[2].ETag, Equals, `"49dcd91231f801159e893fb5c6674985"`)
c.Assert(err, IsNil)
// Init
testServer.WaitRequest()
// List old parts, broken in two requests.
for i := 0; i < 2; i++ {
req := testServer.WaitRequest()
c.Assert(req.Method, Equals, "GET")
c.Assert(req.URL.Path, Equals, "/sample/multi")
}
// Send part 2, as it didn't match the checksum.
req := testServer.WaitRequest()
c.Assert(req.Method, Equals, "PUT")
c.Assert(req.URL.Path, Equals, "/sample/multi")
c.Assert(req.Form["partNumber"], DeepEquals, []string{"2"})
c.Assert(req.Header["Content-Length"], DeepEquals, []string{"5"})
c.Assert(readAll(req.Body), Equals, "partX")
}
func (s *S) TestMultiComplete(c *C) {
testServer.Response(200, nil, InitMultiResultDump)
// Note the 200 response. Completing will hold the connection on some
// kind of long poll, and may return a late error even after a 200.
testServer.Response(200, nil, InternalErrorDump)
testServer.Response(200, nil, "")
b := s.s3.Bucket("sample")
multi, err := b.InitMulti("multi", "text/plain", s3.Private)
c.Assert(err, IsNil)
err = multi.Complete([]s3.Part{{2, `"ETag2"`, 32}, {1, `"ETag1"`, 64}})
c.Assert(err, IsNil)
testServer.WaitRequest()
req := testServer.WaitRequest()
c.Assert(req.Method, Equals, "POST")
c.Assert(req.URL.Path, Equals, "/sample/multi")
c.Assert(req.Form.Get("uploadId"), Matches, "JNbR_[A-Za-z0-9.]+QQ--")
var payload struct {
XMLName xml.Name
Part []struct {
PartNumber int
ETag string
}
}
dec := xml.NewDecoder(req.Body)
err = dec.Decode(&payload)
c.Assert(err, IsNil)
c.Assert(payload.XMLName.Local, Equals, "CompleteMultipartUpload")
c.Assert(len(payload.Part), Equals, 2)
c.Assert(payload.Part[0].PartNumber, Equals, 1)
c.Assert(payload.Part[0].ETag, Equals, `"ETag1"`)
c.Assert(payload.Part[1].PartNumber, Equals, 2)
c.Assert(payload.Part[1].ETag, Equals, `"ETag2"`)
}
func (s *S) TestMultiAbort(c *C) {
testServer.Response(200, nil, InitMultiResultDump)
testServer.Response(200, nil, "")
b := s.s3.Bucket("sample")
multi, err := b.InitMulti("multi", "text/plain", s3.Private)
c.Assert(err, IsNil)
err = multi.Abort()
c.Assert(err, IsNil)
testServer.WaitRequest()
req := testServer.WaitRequest()
c.Assert(req.Method, Equals, "DELETE")
c.Assert(req.URL.Path, Equals, "/sample/multi")
c.Assert(req.Form.Get("uploadId"), Matches, "JNbR_[A-Za-z0-9.]+QQ--")
}
func (s *S) TestListMulti(c *C) {
testServer.Response(200, nil, ListMultiResultDump)
b := s.s3.Bucket("sample")
multis, prefixes, err := b.ListMulti("", "/")
c.Assert(err, IsNil)
c.Assert(prefixes, DeepEquals, []string{"a/", "b/"})
c.Assert(multis, HasLen, 2)
c.Assert(multis[0].Key, Equals, "multi1")
c.Assert(multis[0].UploadId, Equals, "iUVug89pPvSswrikD")
c.Assert(multis[1].Key, Equals, "multi2")
c.Assert(multis[1].UploadId, Equals, "DkirwsSvPp98guVUi")
req := testServer.WaitRequest()
c.Assert(req.Method, Equals, "GET")
c.Assert(req.URL.Path, Equals, "/sample/")
c.Assert(req.Form["uploads"], DeepEquals, []string{""})
c.Assert(req.Form["prefix"], DeepEquals, []string{""})
c.Assert(req.Form["delimiter"], DeepEquals, []string{"/"})
c.Assert(req.Form["max-uploads"], DeepEquals, []string{"1000"})
}

View File

@@ -0,0 +1,241 @@
package s3_test
var GetObjectErrorDump = `
<?xml version="1.0" encoding="UTF-8"?>
<Error><Code>NoSuchBucket</Code><Message>The specified bucket does not exist</Message>
<BucketName>non-existent-bucket</BucketName><RequestId>3F1B667FAD71C3D8</RequestId>
<HostId>L4ee/zrm1irFXY5F45fKXIRdOf9ktsKY/8TDVawuMK2jWRb1RF84i1uBzkdNqS5D</HostId></Error>
`
var GetListResultDump1 = `
<?xml version="1.0" encoding="UTF-8"?>
<ListBucketResult xmlns="http://s3.amazonaws.com/doc/2006-03-01">
<Name>quotes</Name>
<Prefix>N</Prefix>
<IsTruncated>false</IsTruncated>
<Contents>
<Key>Nelson</Key>
<LastModified>2006-01-01T12:00:00.000Z</LastModified>
<ETag>&quot;828ef3fdfa96f00ad9f27c383fc9ac7f&quot;</ETag>
<Size>5</Size>
<StorageClass>STANDARD</StorageClass>
<Owner>
<ID>bcaf161ca5fb16fd081034f</ID>
<DisplayName>webfile</DisplayName>
</Owner>
</Contents>
<Contents>
<Key>Neo</Key>
<LastModified>2006-01-01T12:00:00.000Z</LastModified>
<ETag>&quot;828ef3fdfa96f00ad9f27c383fc9ac7f&quot;</ETag>
<Size>4</Size>
<StorageClass>STANDARD</StorageClass>
<Owner>
<ID>bcaf1ffd86a5fb16fd081034f</ID>
<DisplayName>webfile</DisplayName>
</Owner>
</Contents>
</ListBucketResult>
`
var GetListResultDump2 = `
<ListBucketResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Name>example-bucket</Name>
<Prefix>photos/2006/</Prefix>
<Marker>some-marker</Marker>
<MaxKeys>1000</MaxKeys>
<Delimiter>/</Delimiter>
<IsTruncated>false</IsTruncated>
<CommonPrefixes>
<Prefix>photos/2006/feb/</Prefix>
</CommonPrefixes>
<CommonPrefixes>
<Prefix>photos/2006/jan/</Prefix>
</CommonPrefixes>
</ListBucketResult>
`
var InitMultiResultDump = `
<?xml version="1.0" encoding="UTF-8"?>
<InitiateMultipartUploadResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Bucket>sample</Bucket>
<Key>multi</Key>
<UploadId>JNbR_cMdwnGiD12jKAd6WK2PUkfj2VxA7i4nCwjE6t71nI9Tl3eVDPFlU0nOixhftH7I17ZPGkV3QA.l7ZD.QQ--</UploadId>
</InitiateMultipartUploadResult>
`
var ListPartsResultDump1 = `
<?xml version="1.0" encoding="UTF-8"?>
<ListPartsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Bucket>sample</Bucket>
<Key>multi</Key>
<UploadId>JNbR_cMdwnGiD12jKAd6WK2PUkfj2VxA7i4nCwjE6t71nI9Tl3eVDPFlU0nOixhftH7I17ZPGkV3QA.l7ZD.QQ--</UploadId>
<Initiator>
<ID>bb5c0f63b0b25f2d099c</ID>
<DisplayName>joe</DisplayName>
</Initiator>
<Owner>
<ID>bb5c0f63b0b25f2d099c</ID>
<DisplayName>joe</DisplayName>
</Owner>
<StorageClass>STANDARD</StorageClass>
<PartNumberMarker>0</PartNumberMarker>
<NextPartNumberMarker>2</NextPartNumberMarker>
<MaxParts>2</MaxParts>
<IsTruncated>true</IsTruncated>
<Part>
<PartNumber>1</PartNumber>
<LastModified>2013-01-30T13:45:51.000Z</LastModified>
<ETag>&quot;ffc88b4ca90a355f8ddba6b2c3b2af5c&quot;</ETag>
<Size>5</Size>
</Part>
<Part>
<PartNumber>2</PartNumber>
<LastModified>2013-01-30T13:45:52.000Z</LastModified>
<ETag>&quot;d067a0fa9dc61a6e7195ca99696b5a89&quot;</ETag>
<Size>5</Size>
</Part>
</ListPartsResult>
`
var ListPartsResultDump2 = `
<?xml version="1.0" encoding="UTF-8"?>
<ListPartsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Bucket>sample</Bucket>
<Key>multi</Key>
<UploadId>JNbR_cMdwnGiD12jKAd6WK2PUkfj2VxA7i4nCwjE6t71nI9Tl3eVDPFlU0nOixhftH7I17ZPGkV3QA.l7ZD.QQ--</UploadId>
<Initiator>
<ID>bb5c0f63b0b25f2d099c</ID>
<DisplayName>joe</DisplayName>
</Initiator>
<Owner>
<ID>bb5c0f63b0b25f2d099c</ID>
<DisplayName>joe</DisplayName>
</Owner>
<StorageClass>STANDARD</StorageClass>
<PartNumberMarker>2</PartNumberMarker>
<NextPartNumberMarker>3</NextPartNumberMarker>
<MaxParts>2</MaxParts>
<IsTruncated>false</IsTruncated>
<Part>
<PartNumber>3</PartNumber>
<LastModified>2013-01-30T13:46:50.000Z</LastModified>
<ETag>&quot;49dcd91231f801159e893fb5c6674985&quot;</ETag>
<Size>5</Size>
</Part>
</ListPartsResult>
`
var ListMultiResultDump = `
<?xml version="1.0"?>
<ListMultipartUploadsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Bucket>goamz-test-bucket-us-east-1-akiajk3wyewhctyqbf7a</Bucket>
<KeyMarker/>
<UploadIdMarker/>
<NextKeyMarker>multi1</NextKeyMarker>
<NextUploadIdMarker>iUVug89pPvSswrikD72p8uO62EzhNtpDxRmwC5WSiWDdK9SfzmDqe3xpP1kMWimyimSnz4uzFc3waVM5ufrKYQ--</NextUploadIdMarker>
<Delimiter>/</Delimiter>
<MaxUploads>1000</MaxUploads>
<IsTruncated>false</IsTruncated>
<Upload>
<Key>multi1</Key>
<UploadId>iUVug89pPvSswrikD</UploadId>
<Initiator>
<ID>bb5c0f63b0b25f2d0</ID>
<DisplayName>gustavoniemeyer</DisplayName>
</Initiator>
<Owner>
<ID>bb5c0f63b0b25f2d0</ID>
<DisplayName>gustavoniemeyer</DisplayName>
</Owner>
<StorageClass>STANDARD</StorageClass>
<Initiated>2013-01-30T18:15:47.000Z</Initiated>
</Upload>
<Upload>
<Key>multi2</Key>
<UploadId>DkirwsSvPp98guVUi</UploadId>
<Initiator>
<ID>bb5c0f63b0b25f2d0</ID>
<DisplayName>joe</DisplayName>
</Initiator>
<Owner>
<ID>bb5c0f63b0b25f2d0</ID>
<DisplayName>joe</DisplayName>
</Owner>
<StorageClass>STANDARD</StorageClass>
<Initiated>2013-01-30T18:15:47.000Z</Initiated>
</Upload>
<CommonPrefixes>
<Prefix>a/</Prefix>
</CommonPrefixes>
<CommonPrefixes>
<Prefix>b/</Prefix>
</CommonPrefixes>
</ListMultipartUploadsResult>
`
var NoSuchUploadErrorDump = `
<?xml version="1.0" encoding="UTF-8"?>
<Error>
<Code>NoSuchUpload</Code>
<Message>Not relevant</Message>
<BucketName>sample</BucketName>
<RequestId>3F1B667FAD71C3D8</RequestId>
<HostId>kjhwqk</HostId>
</Error>
`
var InternalErrorDump = `
<?xml version="1.0" encoding="UTF-8"?>
<Error>
<Code>InternalError</Code>
<Message>Not relevant</Message>
<BucketName>sample</BucketName>
<RequestId>3F1B667FAD71C3D8</RequestId>
<HostId>kjhwqk</HostId>
</Error>
`
var GetKeyHeaderDump = map[string]string{
"x-amz-id-2": "ef8yU9AS1ed4OpIszj7UDNEHGran",
"x-amz-request-id": "318BC8BC143432E5",
"x-amz-version-id": "3HL4kqtJlcpXroDTDmjVBH40Nrjfkd",
"Date": "Wed, 28 Oct 2009 22:32:00 GMT",
"Last-Modified": "Sun, 1 Jan 2006 12:00:00 GMT",
"ETag": "fba9dede5f27731c9771645a39863328",
"Content-Length": "434234",
"Content-Type": "text/plain",
}
var GetListBucketsDump = `
<?xml version="1.0" encoding="UTF-8"?>
<ListAllMyBucketsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Owner>
<ID>bb5c0f63b0b25f2d0</ID>
<DisplayName>joe</DisplayName>
</Owner>
<Buckets>
<Bucket>
<Name>bucket1</Name>
<CreationDate>2012-01-01T02:03:04.000Z</CreationDate>
</Bucket>
<Bucket>
<Name>bucket2</Name>
<CreationDate>2014-01-11T02:03:04.000Z</CreationDate>
</Bucket>
</Buckets>
</ListAllMyBucketsResult>
`
var MultiDelDump = `
<?xml version="1.0" encoding="UTF-8"?>
<DeleteResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Deleted>
<Key>a.go</Key>
</Deleted>
<Deleted>
<Key>b.go</Key>
</Deleted>
</DeleteResult>
`

View File

@@ -0,0 +1,893 @@
//
// goamz - Go packages to interact with the Amazon Web Services.
//
// https://wiki.ubuntu.com/goamz
//
// Copyright (c) 2011 Canonical Ltd.
//
// Written by Gustavo Niemeyer <gustavo.niemeyer@canonical.com>
//
package s3
import (
"bytes"
"crypto/md5"
"encoding/base64"
"encoding/xml"
"fmt"
"github.com/mitchellh/goamz/aws"
"io"
"io/ioutil"
"log"
"net"
"net/http"
"net/http/httputil"
"net/url"
"strconv"
"strings"
"time"
)
const debug = false
// The S3 type encapsulates operations with an S3 region.
type S3 struct {
aws.Auth
aws.Region
HTTPClient func() *http.Client
private byte // Reserve the right of using private data.
}
// The Bucket type encapsulates operations with an S3 bucket.
type Bucket struct {
*S3
Name string
}
// The Owner type represents the owner of the object in an S3 bucket.
type Owner struct {
ID string
DisplayName string
}
var attempts = aws.AttemptStrategy{
Min: 5,
Total: 5 * time.Second,
Delay: 200 * time.Millisecond,
}
// New creates a new S3.
func New(auth aws.Auth, region aws.Region) *S3 {
return &S3{
Auth: auth,
Region: region,
HTTPClient: func() *http.Client {
return http.DefaultClient
},
private: 0}
}
// Bucket returns a Bucket with the given name.
func (s3 *S3) Bucket(name string) *Bucket {
if s3.Region.S3BucketEndpoint != "" || s3.Region.S3LowercaseBucket {
name = strings.ToLower(name)
}
return &Bucket{s3, name}
}
var createBucketConfiguration = `<CreateBucketConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<LocationConstraint>%s</LocationConstraint>
</CreateBucketConfiguration>`
// locationConstraint returns an io.Reader specifying a LocationConstraint if
// required for the region.
//
// See http://goo.gl/bh9Kq for details.
func (s3 *S3) locationConstraint() io.Reader {
constraint := ""
if s3.Region.S3LocationConstraint {
constraint = fmt.Sprintf(createBucketConfiguration, s3.Region.Name)
}
return strings.NewReader(constraint)
}
type ACL string
const (
Private = ACL("private")
PublicRead = ACL("public-read")
PublicReadWrite = ACL("public-read-write")
AuthenticatedRead = ACL("authenticated-read")
BucketOwnerRead = ACL("bucket-owner-read")
BucketOwnerFull = ACL("bucket-owner-full-control")
)
// The ListBucketsResp type holds the results of a List buckets operation.
type ListBucketsResp struct {
Buckets []Bucket `xml:">Bucket"`
}
// ListBuckets lists all buckets
//
// See: http://goo.gl/NqlyMN
func (s3 *S3) ListBuckets() (result *ListBucketsResp, err error) {
req := &request{
path: "/",
}
result = &ListBucketsResp{}
for attempt := attempts.Start(); attempt.Next(); {
err = s3.query(req, result)
if !shouldRetry(err) {
break
}
}
if err != nil {
return nil, err
}
// set S3 instance on buckets
for i := range result.Buckets {
result.Buckets[i].S3 = s3
}
return result, nil
}
// PutBucket creates a new bucket.
//
// See http://goo.gl/ndjnR for details.
func (b *Bucket) PutBucket(perm ACL) error {
headers := map[string][]string{
"x-amz-acl": {string(perm)},
}
req := &request{
method: "PUT",
bucket: b.Name,
path: "/",
headers: headers,
payload: b.locationConstraint(),
}
return b.S3.query(req, nil)
}
// DelBucket removes an existing S3 bucket. All objects in the bucket must
// be removed before the bucket itself can be removed.
//
// See http://goo.gl/GoBrY for details.
func (b *Bucket) DelBucket() (err error) {
req := &request{
method: "DELETE",
bucket: b.Name,
path: "/",
}
for attempt := attempts.Start(); attempt.Next(); {
err = b.S3.query(req, nil)
if !shouldRetry(err) {
break
}
}
return err
}
// Get retrieves an object from an S3 bucket.
//
// See http://goo.gl/isCO7 for details.
func (b *Bucket) Get(path string) (data []byte, err error) {
body, err := b.GetReader(path)
if err != nil {
return nil, err
}
data, err = ioutil.ReadAll(body)
body.Close()
return data, err
}
// GetReader retrieves an object from an S3 bucket.
// It is the caller's responsibility to call Close on rc when
// finished reading.
func (b *Bucket) GetReader(path string) (rc io.ReadCloser, err error) {
resp, err := b.GetResponse(path)
if resp != nil {
return resp.Body, err
}
return nil, err
}
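// A usage sketch for GetReader (the object name is a placeholder):
//
//	rc, err := b.GetReader("path/to/object")
//	if err != nil {
//		return err
//	}
//	defer rc.Close()
//	data, err := ioutil.ReadAll(rc)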
// GetResponse retrieves an object from an S3 bucket, returning the HTTP
// response. It is the caller's responsibility to call Close on the
// response body when finished reading.
func (b *Bucket) GetResponse(path string) (*http.Response, error) {
return b.getResponseParams(path, nil)
}
// GetTorrentReader retrieves a torrent representation of the object at
// path from an S3 bucket, as an io.ReadCloser.
// It is the caller's responsibility to call Close on rc when finished reading.
func (b *Bucket) GetTorrentReader(path string) (io.ReadCloser, error) {
resp, err := b.getResponseParams(path, url.Values{"torrent": {""}})
if err != nil {
return nil, err
}
return resp.Body, nil
}
// GetTorrent retrieves a torrent representation of the object at path
// from S3, returning the torrent as a []byte.
func (b *Bucket) GetTorrent(path string) ([]byte, error) {
body, err := b.GetTorrentReader(path)
if err != nil {
return nil, err
}
defer body.Close()
return ioutil.ReadAll(body)
}
func (b *Bucket) getResponseParams(path string, params url.Values) (*http.Response, error) {
req := &request{
bucket: b.Name,
path: path,
params: params,
}
err := b.S3.prepare(req)
if err != nil {
return nil, err
}
for attempt := attempts.Start(); attempt.Next(); {
resp, err := b.S3.run(req, nil)
if shouldRetry(err) && attempt.HasNext() {
continue
}
if err != nil {
return nil, err
}
return resp, nil
}
panic("unreachable")
}
func (b *Bucket) Head(path string) (*http.Response, error) {
req := &request{
method: "HEAD",
bucket: b.Name,
path: path,
}
err := b.S3.prepare(req)
if err != nil {
return nil, err
}
for attempt := attempts.Start(); attempt.Next(); {
resp, err := b.S3.run(req, nil)
if shouldRetry(err) && attempt.HasNext() {
continue
}
if err != nil {
return nil, err
}
return resp, nil
}
panic("unreachable")
}
// Put inserts an object into the S3 bucket.
//
// See http://goo.gl/FEBPD for details.
func (b *Bucket) Put(path string, data []byte, contType string, perm ACL) error {
body := bytes.NewBuffer(data)
return b.PutReader(path, body, int64(len(data)), contType, perm)
}
/*
PutHeader - like Put, inserts an object into the S3 bucket.
Instead of Content-Type string, pass in custom headers to override defaults.
*/
func (b *Bucket) PutHeader(path string, data []byte, customHeaders map[string][]string, perm ACL) error {
body := bytes.NewBuffer(data)
return b.PutReaderHeader(path, body, int64(len(data)), customHeaders, perm)
}
// PutReader inserts an object into the S3 bucket by consuming data
// from r until EOF.
func (b *Bucket) PutReader(path string, r io.Reader, length int64, contType string, perm ACL) error {
headers := map[string][]string{
"Content-Length": {strconv.FormatInt(length, 10)},
"Content-Type": {contType},
"x-amz-acl": {string(perm)},
}
req := &request{
method: "PUT",
bucket: b.Name,
path: path,
headers: headers,
payload: r,
}
return b.S3.query(req, nil)
}
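// A usage sketch for PutReader, streaming a local file without buffering
// it in memory (the file name and content type are placeholders):
//
//	f, err := os.Open("backup.tar")
//	if err != nil {
//		return err
//	}
//	defer f.Close()
//	fi, err := f.Stat()
//	if err != nil {
//		return err
//	}
//	err = b.PutReader("backups/backup.tar", f, fi.Size(), "application/x-tar", Private)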
/*
PutReaderHeader - like PutReader, inserts an object into S3 from a reader.
Instead of Content-Type string, pass in custom headers to override defaults.
*/
func (b *Bucket) PutReaderHeader(path string, r io.Reader, length int64, customHeaders map[string][]string, perm ACL) error {
// Default headers
headers := map[string][]string{
"Content-Length": {strconv.FormatInt(length, 10)},
"Content-Type": {"application/text"},
"x-amz-acl": {string(perm)},
}
// Override with custom headers
for key, value := range customHeaders {
headers[key] = value
}
req := &request{
method: "PUT",
bucket: b.Name,
path: path,
headers: headers,
payload: r,
}
return b.S3.query(req, nil)
}
/*
Copy - copies an object inside the bucket from oldPath to newPath.
*/
func (b *Bucket) Copy(oldPath, newPath string, perm ACL) error {
if !strings.HasPrefix(oldPath, "/") {
oldPath = "/" + oldPath
}
req := &request{
method: "PUT",
bucket: b.Name,
path: newPath,
headers: map[string][]string{
"x-amz-copy-source": {amazonEscape("/" + b.Name + oldPath)},
"x-amz-acl": {string(perm)},
},
}
err := b.S3.prepare(req)
if err != nil {
return err
}
for attempt := attempts.Start(); attempt.Next(); {
_, err = b.S3.run(req, nil)
if shouldRetry(err) && attempt.HasNext() {
continue
}
if err != nil {
return err
}
return nil
}
panic("unreachable")
}
// Del removes an object from the S3 bucket.
//
// See http://goo.gl/APeTt for details.
func (b *Bucket) Del(path string) error {
req := &request{
method: "DELETE",
bucket: b.Name,
path: path,
}
return b.S3.query(req, nil)
}
type Object struct {
Key string
}
type MultiObjectDeleteBody struct {
XMLName xml.Name `xml:"Delete"`
Quiet bool
Object []Object
}
func base64md5(data []byte) string {
h := md5.New()
h.Write(data)
return base64.StdEncoding.EncodeToString(h.Sum(nil))
}
// MultiDel removes multiple objects from the S3 bucket efficiently.
// A maximum of 1000 keys at once may be specified.
//
// See http://goo.gl/WvA5sj for details.
func (b *Bucket) MultiDel(paths []string) error {
// create XML payload
v := MultiObjectDeleteBody{}
v.Object = make([]Object, len(paths))
for i, path := range paths {
v.Object[i] = Object{path}
}
data, _ := xml.Marshal(v)
// Content-MD5 is required
md5hash := base64md5(data)
req := &request{
method: "POST",
bucket: b.Name,
path: "/",
params: url.Values{"delete": {""}},
headers: http.Header{"Content-MD5": {md5hash}},
payload: bytes.NewReader(data),
}
return b.S3.query(req, nil)
}
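// Since MultiDel accepts at most 1000 keys per call, deleting a larger
// set can be sketched by batching (illustrative only):
//
//	for len(paths) > 0 {
//		n := len(paths)
//		if n > 1000 {
//			n = 1000
//		}
//		if err := b.MultiDel(paths[:n]); err != nil {
//			return err
//		}
//		paths = paths[n:]
//	}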
// The ListResp type holds the results of a List bucket operation.
type ListResp struct {
Name string
Prefix string
Delimiter string
Marker string
NextMarker string
MaxKeys int
// IsTruncated is true if the results have been truncated because
// there are more keys and prefixes than can fit in MaxKeys.
// N.B. this is the opposite sense to that documented (incorrectly) in
// http://goo.gl/YjQTc
IsTruncated bool
Contents []Key
CommonPrefixes []string `xml:">Prefix"`
}
// The Key type represents an item stored in an S3 bucket.
type Key struct {
Key string
LastModified string
Size int64
// ETag gives the hex-encoded MD5 sum of the contents,
// surrounded with double-quotes.
ETag string
StorageClass string
Owner Owner
}
// List returns information about objects in an S3 bucket.
//
// The prefix parameter limits the response to keys that begin with the
// specified prefix.
//
// The delim parameter causes the response to group all of the keys that
// share a common prefix up to the next delimiter in a single entry within
// the CommonPrefixes field. You can use delimiters to separate a bucket
// into different groupings of keys, similar to how folders would work.
//
// The marker parameter specifies the key to start with when listing objects
// in a bucket. Amazon S3 lists objects in alphabetical order and
// will return keys alphabetically greater than the marker.
//
// The max parameter specifies how many keys + common prefixes to return in
// the response. The default is 1000.
//
// For example, given these keys in a bucket:
//
// index.html
// index2.html
// photos/2006/January/sample.jpg
// photos/2006/February/sample2.jpg
// photos/2006/February/sample3.jpg
// photos/2006/February/sample4.jpg
//
// Listing this bucket with delimiter set to "/" would yield the
// following result:
//
// &ListResp{
// Name: "sample-bucket",
// MaxKeys: 1000,
// Delimiter: "/",
// Contents: []Key{
//         {Key: "index.html"}, {Key: "index2.html"},
// },
// CommonPrefixes: []string{
// "photos/",
// },
// }
//
// Listing the same bucket with delimiter set to "/" and prefix set to
// "photos/2006/" would yield the following result:
//
// &ListResp{
// Name: "sample-bucket",
// MaxKeys: 1000,
// Delimiter: "/",
// Prefix: "photos/2006/",
// CommonPrefixes: []string{
// "photos/2006/February/",
// "photos/2006/January/",
// },
// }
//
// See http://goo.gl/YjQTc for details.
func (b *Bucket) List(prefix, delim, marker string, max int) (result *ListResp, err error) {
params := map[string][]string{
"prefix": {prefix},
"delimiter": {delim},
"marker": {marker},
}
if max != 0 {
params["max-keys"] = []string{strconv.FormatInt(int64(max), 10)}
}
req := &request{
bucket: b.Name,
params: params,
}
result = &ListResp{}
for attempt := attempts.Start(); attempt.Next(); {
err = b.S3.query(req, result)
if !shouldRetry(err) {
break
}
}
if err != nil {
return nil, err
}
return result, nil
}
// GetBucketContents returns a mapping of all key names in this bucket to Key objects.
func (b *Bucket) GetBucketContents() (*map[string]Key, error) {
bucketContents := map[string]Key{}
prefix := ""
pathSeparator := ""
marker := ""
for {
contents, err := b.List(prefix, pathSeparator, marker, 1000)
if err != nil {
return &bucketContents, err
}
lastKey := ""
for _, key := range contents.Contents {
bucketContents[key.Key] = key
lastKey = key.Key
}
if contents.IsTruncated {
marker = contents.NextMarker
if marker == "" {
// From the s3 docs: If response does not include the
// NextMarker and it is truncated, you can use the value of the
// last Key in the response as the marker in the subsequent
// request to get the next set of object keys.
marker = lastKey
}
} else {
break
}
}
return &bucketContents, nil
}
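// A usage sketch (note that the result is a pointer to a map):
//
//	contents, err := b.GetBucketContents()
//	if err != nil {
//		return err
//	}
//	for name, key := range *contents {
//		fmt.Println(name, key.Size)
//	}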
// GetKey returns metadata for the key at path without retrieving its content.
func (b *Bucket) GetKey(path string) (*Key, error) {
req := &request{
bucket: b.Name,
path: path,
method: "HEAD",
}
err := b.S3.prepare(req)
if err != nil {
return nil, err
}
key := &Key{}
for attempt := attempts.Start(); attempt.Next(); {
resp, err := b.S3.run(req, nil)
if shouldRetry(err) && attempt.HasNext() {
continue
}
if err != nil {
return nil, err
}
key.Key = path
key.LastModified = resp.Header.Get("Last-Modified")
key.ETag = resp.Header.Get("ETag")
contentLength := resp.Header.Get("Content-Length")
size, err := strconv.ParseInt(contentLength, 10, 64)
if err != nil {
return key, fmt.Errorf("bad s3 content-length %v: %v",
contentLength, err)
}
key.Size = size
return key, nil
}
panic("unreachable")
}
// URL returns a non-signed URL that allows retrieving the
// object at path. It only works if the object is publicly
// readable (see SignedURL).
func (b *Bucket) URL(path string) string {
req := &request{
bucket: b.Name,
path: path,
}
err := b.S3.prepare(req)
if err != nil {
panic(err)
}
u, err := req.url(true)
if err != nil {
panic(err)
}
u.RawQuery = ""
return u.String()
}
// SignedURL returns a signed URL that allows anyone holding the URL
// to retrieve the object at path. The signature is valid until expires.
func (b *Bucket) SignedURL(path string, expires time.Time) string {
req := &request{
bucket: b.Name,
path: path,
params: url.Values{"Expires": {strconv.FormatInt(expires.Unix(), 10)}},
}
err := b.S3.prepare(req)
if err != nil {
panic(err)
}
u, err := req.url(true)
if err != nil {
panic(err)
}
return u.String()
}
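// A usage sketch: granting read access to a private object for one hour
// (the object name is a placeholder):
//
//	url := b.SignedURL("private/report.pdf", time.Now().Add(time.Hour))
//	resp, err := http.Get(url)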
type request struct {
method string
bucket string
path string
signpath string
params url.Values
headers http.Header
baseurl string
payload io.Reader
prepared bool
}
// amazonShouldEscape returns true if byte should be escaped
func amazonShouldEscape(c byte) bool {
return !((c >= 'A' && c <= 'Z') || (c >= 'a' && c <= 'z') ||
(c >= '0' && c <= '9') || c == '_' || c == '-' || c == '~' || c == '.' || c == '/' || c == ':')
}
// amazonEscape does uri escaping exactly as Amazon does
func amazonEscape(s string) string {
hexCount := 0
for i := 0; i < len(s); i++ {
if amazonShouldEscape(s[i]) {
hexCount++
}
}
if hexCount == 0 {
return s
}
t := make([]byte, len(s)+2*hexCount)
j := 0
for i := 0; i < len(s); i++ {
if c := s[i]; amazonShouldEscape(c) {
t[j] = '%'
t[j+1] = "0123456789ABCDEF"[c>>4]
t[j+2] = "0123456789ABCDEF"[c&15]
j += 3
} else {
t[j] = s[i]
j++
}
}
return string(t)
}
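// For instance, under the rules above '+' and ' ' are escaped while '/'
// and ':' are left alone:
//
//	amazonEscape("dir/a+b c") // "dir/a%2Bb%20c"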
// url returns the URL for the resource, either full (with host/scheme) or
// partial, for use in an HTTP request.
func (req *request) url(full bool) (*url.URL, error) {
u, err := url.Parse(req.baseurl)
if err != nil {
return nil, fmt.Errorf("bad S3 endpoint URL %q: %v", req.baseurl, err)
}
u.Opaque = amazonEscape(req.path)
if full {
u.Opaque = "//" + u.Host + u.Opaque
}
u.RawQuery = req.params.Encode()
return u, nil
}
// query prepares and runs the req request.
// If resp is not nil, the XML data contained in the response
// body will be unmarshalled into it.
func (s3 *S3) query(req *request, resp interface{}) error {
err := s3.prepare(req)
if err == nil {
var httpResponse *http.Response
httpResponse, err = s3.run(req, resp)
if resp == nil && httpResponse != nil {
httpResponse.Body.Close()
}
}
return err
}
// prepare sets up req to be delivered to S3.
func (s3 *S3) prepare(req *request) error {
if !req.prepared {
req.prepared = true
if req.method == "" {
req.method = "GET"
}
// Copy so they can be mutated without affecting retries.
params := make(url.Values)
headers := make(http.Header)
for k, v := range req.params {
params[k] = v
}
for k, v := range req.headers {
headers[k] = v
}
req.params = params
req.headers = headers
if !strings.HasPrefix(req.path, "/") {
req.path = "/" + req.path
}
req.signpath = req.path
if req.bucket != "" {
req.baseurl = s3.Region.S3BucketEndpoint
if req.baseurl == "" {
// Use the path method to address the bucket.
req.baseurl = s3.Region.S3Endpoint
req.path = "/" + req.bucket + req.path
} else {
// Just in case, prevent injection.
if strings.IndexAny(req.bucket, "/:@") >= 0 {
return fmt.Errorf("bad S3 bucket: %q", req.bucket)
}
req.baseurl = strings.Replace(req.baseurl, "${bucket}", req.bucket, -1)
}
req.signpath = "/" + req.bucket + req.signpath
} else {
req.baseurl = s3.Region.S3Endpoint
}
}
// Always sign again as it's not clear how far the
// server has handled a previous attempt.
u, err := url.Parse(req.baseurl)
if err != nil {
return fmt.Errorf("bad S3 endpoint URL %q: %v", req.baseurl, err)
}
req.headers["Host"] = []string{u.Host}
req.headers["Date"] = []string{time.Now().In(time.UTC).Format(time.RFC1123)}
sign(s3.Auth, req.method, amazonEscape(req.signpath), req.params, req.headers)
return nil
}
// run sends req and returns the http response from the server.
// If resp is not nil, the XML data contained in the response
// body will be unmarshalled into it.
func (s3 *S3) run(req *request, resp interface{}) (*http.Response, error) {
if debug {
log.Printf("Running S3 request: %#v", req)
}
u, err := req.url(false)
if err != nil {
return nil, err
}
hreq := http.Request{
URL: u,
Method: req.method,
ProtoMajor: 1,
ProtoMinor: 1,
Close: true,
Header: req.headers,
}
if v, ok := req.headers["Content-Length"]; ok {
hreq.ContentLength, _ = strconv.ParseInt(v[0], 10, 64)
delete(req.headers, "Content-Length")
}
if req.payload != nil {
hreq.Body = ioutil.NopCloser(req.payload)
}
hresp, err := s3.HTTPClient().Do(&hreq)
if err != nil {
return nil, err
}
if debug {
dump, _ := httputil.DumpResponse(hresp, true)
log.Printf("} -> %s\n", dump)
}
if hresp.StatusCode != 200 && hresp.StatusCode != 204 {
defer hresp.Body.Close()
return nil, buildError(hresp)
}
if resp != nil {
err = xml.NewDecoder(hresp.Body).Decode(resp)
hresp.Body.Close()
}
return hresp, err
}
// Error represents an error in an operation with S3.
type Error struct {
StatusCode int // HTTP status code (200, 403, ...)
Code string // EC2 error code ("UnsupportedOperation", ...)
Message string // The human-oriented error message
BucketName string
RequestId string
HostId string
}
func (e *Error) Error() string {
return e.Message
}
func buildError(r *http.Response) error {
if debug {
log.Printf("got error (status code %v)", r.StatusCode)
data, err := ioutil.ReadAll(r.Body)
if err != nil {
log.Printf("\tread error: %v", err)
} else {
log.Printf("\tdata:\n%s\n\n", data)
}
r.Body = ioutil.NopCloser(bytes.NewBuffer(data))
}
err := Error{}
// TODO return error if Unmarshal fails?
xml.NewDecoder(r.Body).Decode(&err)
r.Body.Close()
err.StatusCode = r.StatusCode
if err.Message == "" {
err.Message = r.Status
}
if debug {
log.Printf("err: %#v\n", err)
}
return &err
}
func shouldRetry(err error) bool {
if err == nil {
return false
}
switch err {
case io.ErrUnexpectedEOF, io.EOF:
return true
}
switch e := err.(type) {
case *net.DNSError:
return true
case *net.OpError:
switch e.Op {
case "read", "write":
return true
}
case *Error:
switch e.Code {
case "InternalError", "NoSuchUpload", "NoSuchBucket":
return true
}
}
return false
}
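// shouldRetry is used together with the package-level attempt strategy;
// the retry loops in this file all follow this shape (doRequest stands
// in for the actual call):
//
//	for attempt := attempts.Start(); attempt.Next(); {
//		err = doRequest()
//		if !shouldRetry(err) {
//			break
//		}
//	}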
func hasCode(err error, code string) bool {
s3err, ok := err.(*Error)
return ok && s3err.Code == code
}

View File

@ -0,0 +1,435 @@
package s3_test
import (
"bytes"
"io/ioutil"
"net/http"
"testing"
"time"
"github.com/mitchellh/goamz/aws"
"github.com/mitchellh/goamz/s3"
"github.com/mitchellh/goamz/testutil"
. "github.com/motain/gocheck"
)
func Test(t *testing.T) {
TestingT(t)
}
type S struct {
s3 *s3.S3
}
var _ = Suite(&S{})
var testServer = testutil.NewHTTPServer()
func (s *S) SetUpSuite(c *C) {
testServer.Start()
auth := aws.Auth{"abc", "123", ""}
s.s3 = s3.New(auth, aws.Region{Name: "faux-region-1", S3Endpoint: testServer.URL})
}
func (s *S) TearDownSuite(c *C) {
s3.SetAttemptStrategy(nil)
}
func (s *S) SetUpTest(c *C) {
attempts := aws.AttemptStrategy{
Total: 300 * time.Millisecond,
Delay: 100 * time.Millisecond,
}
s3.SetAttemptStrategy(&attempts)
}
func (s *S) TearDownTest(c *C) {
testServer.Flush()
}
func (s *S) DisableRetries() {
s3.SetAttemptStrategy(&aws.AttemptStrategy{})
}
// PutBucket docs: http://goo.gl/kBTCu
func (s *S) TestPutBucket(c *C) {
testServer.Response(200, nil, "")
b := s.s3.Bucket("bucket")
err := b.PutBucket(s3.Private)
c.Assert(err, IsNil)
req := testServer.WaitRequest()
c.Assert(req.Method, Equals, "PUT")
c.Assert(req.URL.Path, Equals, "/bucket/")
c.Assert(req.Header["Date"], Not(Equals), "")
}
// DeleteBucket docs: http://goo.gl/GoBrY
func (s *S) TestDelBucket(c *C) {
testServer.Response(204, nil, "")
b := s.s3.Bucket("bucket")
err := b.DelBucket()
c.Assert(err, IsNil)
req := testServer.WaitRequest()
c.Assert(req.Method, Equals, "DELETE")
c.Assert(req.URL.Path, Equals, "/bucket/")
c.Assert(req.Header["Date"], Not(Equals), "")
}
// ListBuckets: http://goo.gl/NqlyMN
func (s *S) TestListBuckets(c *C) {
testServer.Response(200, nil, GetListBucketsDump)
buckets, err := s.s3.ListBuckets()
c.Assert(err, IsNil)
c.Assert(len(buckets.Buckets), Equals, 2)
c.Assert(buckets.Buckets[0].Name, Equals, "bucket1")
c.Assert(buckets.Buckets[1].Name, Equals, "bucket2")
req := testServer.WaitRequest()
c.Assert(req.Method, Equals, "GET")
c.Assert(req.URL.Path, Equals, "/")
}
// GetObject docs: http://goo.gl/isCO7
func (s *S) TestGet(c *C) {
testServer.Response(200, nil, "content")
b := s.s3.Bucket("bucket")
data, err := b.Get("name")
req := testServer.WaitRequest()
c.Assert(req.Method, Equals, "GET")
c.Assert(req.URL.Path, Equals, "/bucket/name")
c.Assert(req.Header["Date"], Not(Equals), "")
c.Assert(err, IsNil)
c.Assert(string(data), Equals, "content")
}
func (s *S) TestHead(c *C) {
testServer.Response(200, nil, "")
b := s.s3.Bucket("bucket")
resp, err := b.Head("name")
req := testServer.WaitRequest()
c.Assert(req.Method, Equals, "HEAD")
c.Assert(req.URL.Path, Equals, "/bucket/name")
c.Assert(req.Header["Date"], Not(Equals), "")
c.Assert(err, IsNil)
body, err := ioutil.ReadAll(resp.Body)
c.Assert(err, IsNil)
c.Assert(len(body), Equals, 0)
}
func (s *S) TestURL(c *C) {
testServer.Response(200, nil, "content")
b := s.s3.Bucket("bucket")
url := b.URL("name")
r, err := http.Get(url)
c.Assert(err, IsNil)
data, err := ioutil.ReadAll(r.Body)
r.Body.Close()
c.Assert(err, IsNil)
c.Assert(string(data), Equals, "content")
req := testServer.WaitRequest()
c.Assert(req.Method, Equals, "GET")
c.Assert(req.URL.Path, Equals, "/bucket/name")
}
func (s *S) TestGetReader(c *C) {
testServer.Response(200, nil, "content")
b := s.s3.Bucket("bucket")
rc, err := b.GetReader("name")
c.Assert(err, IsNil)
data, err := ioutil.ReadAll(rc)
rc.Close()
c.Assert(err, IsNil)
c.Assert(string(data), Equals, "content")
req := testServer.WaitRequest()
c.Assert(req.Method, Equals, "GET")
c.Assert(req.URL.Path, Equals, "/bucket/name")
c.Assert(req.Header["Date"], Not(Equals), "")
}
func (s *S) TestGetNotFound(c *C) {
for i := 0; i < 10; i++ {
testServer.Response(404, nil, GetObjectErrorDump)
}
b := s.s3.Bucket("non-existent-bucket")
data, err := b.Get("non-existent")
req := testServer.WaitRequest()
c.Assert(req.Method, Equals, "GET")
c.Assert(req.URL.Path, Equals, "/non-existent-bucket/non-existent")
c.Assert(req.Header["Date"], Not(Equals), "")
s3err, _ := err.(*s3.Error)
c.Assert(s3err, NotNil)
c.Assert(s3err.StatusCode, Equals, 404)
c.Assert(s3err.BucketName, Equals, "non-existent-bucket")
c.Assert(s3err.RequestId, Equals, "3F1B667FAD71C3D8")
c.Assert(s3err.HostId, Equals, "L4ee/zrm1irFXY5F45fKXIRdOf9ktsKY/8TDVawuMK2jWRb1RF84i1uBzkdNqS5D")
c.Assert(s3err.Code, Equals, "NoSuchBucket")
c.Assert(s3err.Message, Equals, "The specified bucket does not exist")
c.Assert(s3err.Error(), Equals, "The specified bucket does not exist")
c.Assert(data, IsNil)
}
// PutObject docs: http://goo.gl/FEBPD
func (s *S) TestPutObject(c *C) {
testServer.Response(200, nil, "")
b := s.s3.Bucket("bucket")
err := b.Put("name", []byte("content"), "content-type", s3.Private)
c.Assert(err, IsNil)
req := testServer.WaitRequest()
c.Assert(req.Method, Equals, "PUT")
c.Assert(req.URL.Path, Equals, "/bucket/name")
c.Assert(req.Header["Date"], Not(DeepEquals), []string{""})
c.Assert(req.Header["Content-Type"], DeepEquals, []string{"content-type"})
c.Assert(req.Header["Content-Length"], DeepEquals, []string{"7"})
//c.Assert(req.Header["Content-MD5"], DeepEquals, "...")
c.Assert(req.Header["X-Amz-Acl"], DeepEquals, []string{"private"})
}
func (s *S) TestPutObjectHeader(c *C) {
testServer.Response(200, nil, "")
b := s.s3.Bucket("bucket")
err := b.PutHeader(
"name",
[]byte("content"),
map[string][]string{"Content-Type": {"content-type"}},
s3.Private,
)
c.Assert(err, IsNil)
req := testServer.WaitRequest()
c.Assert(req.Method, Equals, "PUT")
c.Assert(req.URL.Path, Equals, "/bucket/name")
c.Assert(req.Header["Date"], Not(DeepEquals), []string{""})
c.Assert(req.Header["Content-Type"], DeepEquals, []string{"content-type"})
c.Assert(req.Header["Content-Length"], DeepEquals, []string{"7"})
//c.Assert(req.Header["Content-MD5"], DeepEquals, "...")
c.Assert(req.Header["X-Amz-Acl"], DeepEquals, []string{"private"})
}
func (s *S) TestPutReader(c *C) {
testServer.Response(200, nil, "")
b := s.s3.Bucket("bucket")
buf := bytes.NewBufferString("content")
err := b.PutReader("name", buf, int64(buf.Len()), "content-type", s3.Private)
c.Assert(err, IsNil)
req := testServer.WaitRequest()
c.Assert(req.Method, Equals, "PUT")
c.Assert(req.URL.Path, Equals, "/bucket/name")
c.Assert(req.Header["Date"], Not(DeepEquals), []string{""})
c.Assert(req.Header["Content-Type"], DeepEquals, []string{"content-type"})
c.Assert(req.Header["Content-Length"], DeepEquals, []string{"7"})
//c.Assert(req.Header["Content-MD5"], Equals, "...")
c.Assert(req.Header["X-Amz-Acl"], DeepEquals, []string{"private"})
}
func (s *S) TestPutReaderHeader(c *C) {
testServer.Response(200, nil, "")
b := s.s3.Bucket("bucket")
buf := bytes.NewBufferString("content")
err := b.PutReaderHeader(
"name",
buf,
int64(buf.Len()),
map[string][]string{"Content-Type": {"content-type"}},
s3.Private,
)
c.Assert(err, IsNil)
req := testServer.WaitRequest()
c.Assert(req.Method, Equals, "PUT")
c.Assert(req.URL.Path, Equals, "/bucket/name")
c.Assert(req.Header["Date"], Not(DeepEquals), []string{""})
c.Assert(req.Header["Content-Type"], DeepEquals, []string{"content-type"})
c.Assert(req.Header["Content-Length"], DeepEquals, []string{"7"})
//c.Assert(req.Header["Content-MD5"], Equals, "...")
c.Assert(req.Header["X-Amz-Acl"], DeepEquals, []string{"private"})
}
func (s *S) TestCopy(c *C) {
testServer.Response(200, nil, "")
b := s.s3.Bucket("bucket")
err := b.Copy(
"old/file",
"new/file",
s3.Private,
)
c.Assert(err, IsNil)
req := testServer.WaitRequest()
c.Assert(req.Method, Equals, "PUT")
c.Assert(req.URL.Path, Equals, "/bucket/new/file")
c.Assert(req.Header["X-Amz-Copy-Source"], DeepEquals, []string{"/bucket/old/file"})
c.Assert(req.Header["X-Amz-Acl"], DeepEquals, []string{"private"})
}
func (s *S) TestPlusInURL(c *C) {
testServer.Response(200, nil, "")
b := s.s3.Bucket("bucket")
err := b.Copy(
"dir/old+f?le",
"dir/new+f?le",
s3.Private,
)
c.Assert(err, IsNil)
req := testServer.WaitRequest()
c.Assert(req.Method, Equals, "PUT")
c.Assert(req.RequestURI, Equals, "/bucket/dir/new%2Bf%3Fle")
c.Assert(req.Header["X-Amz-Copy-Source"], DeepEquals, []string{"/bucket/dir/old%2Bf%3Fle"})
c.Assert(req.Header["X-Amz-Acl"], DeepEquals, []string{"private"})
}
// DelObject docs: http://goo.gl/APeTt
func (s *S) TestDelObject(c *C) {
testServer.Response(200, nil, "")
b := s.s3.Bucket("bucket")
err := b.Del("name")
c.Assert(err, IsNil)
req := testServer.WaitRequest()
c.Assert(req.Method, Equals, "DELETE")
c.Assert(req.URL.Path, Equals, "/bucket/name")
c.Assert(req.Header["Date"], Not(Equals), "")
}
// Delete Multiple Objects docs: http://goo.gl/WvA5sj
func (s *S) TestMultiDelObject(c *C) {
testServer.Response(200, nil, "")
b := s.s3.Bucket("bucket")
err := b.MultiDel([]string{"a", "b"})
c.Assert(err, IsNil)
req := testServer.WaitRequest()
c.Assert(req.Method, Equals, "POST")
c.Assert(req.URL.Path, Equals, "/bucket/")
c.Assert(req.RequestURI, Equals, "/bucket/?delete=")
c.Assert(req.Header["Content-Md5"], DeepEquals, []string{"nos/vZNvjGs17xIyjEFlwQ=="})
data, err := ioutil.ReadAll(req.Body)
req.Body.Close()
c.Assert(err, IsNil)
c.Assert(string(data), Equals, "<Delete><Quiet>false</Quiet><Object><Key>a</Key></Object><Object><Key>b</Key></Object></Delete>")
}
// Bucket List Objects docs: http://goo.gl/YjQTc
func (s *S) TestList(c *C) {
testServer.Response(200, nil, GetListResultDump1)
b := s.s3.Bucket("quotes")
data, err := b.List("N", "", "", 0)
c.Assert(err, IsNil)
req := testServer.WaitRequest()
c.Assert(req.Method, Equals, "GET")
c.Assert(req.URL.Path, Equals, "/quotes/")
c.Assert(req.Header["Date"], Not(Equals), "")
c.Assert(req.Form["prefix"], DeepEquals, []string{"N"})
c.Assert(req.Form["delimiter"], DeepEquals, []string{""})
c.Assert(req.Form["marker"], DeepEquals, []string{""})
c.Assert(req.Form["max-keys"], DeepEquals, []string(nil))
c.Assert(data.Name, Equals, "quotes")
c.Assert(data.Prefix, Equals, "N")
c.Assert(data.IsTruncated, Equals, false)
c.Assert(len(data.Contents), Equals, 2)
c.Assert(data.Contents[0].Key, Equals, "Nelson")
c.Assert(data.Contents[0].LastModified, Equals, "2006-01-01T12:00:00.000Z")
c.Assert(data.Contents[0].ETag, Equals, `"828ef3fdfa96f00ad9f27c383fc9ac7f"`)
c.Assert(data.Contents[0].Size, Equals, int64(5))
c.Assert(data.Contents[0].StorageClass, Equals, "STANDARD")
c.Assert(data.Contents[0].Owner.ID, Equals, "bcaf161ca5fb16fd081034f")
c.Assert(data.Contents[0].Owner.DisplayName, Equals, "webfile")
c.Assert(data.Contents[1].Key, Equals, "Neo")
c.Assert(data.Contents[1].LastModified, Equals, "2006-01-01T12:00:00.000Z")
c.Assert(data.Contents[1].ETag, Equals, `"828ef3fdfa96f00ad9f27c383fc9ac7f"`)
c.Assert(data.Contents[1].Size, Equals, int64(4))
c.Assert(data.Contents[1].StorageClass, Equals, "STANDARD")
c.Assert(data.Contents[1].Owner.ID, Equals, "bcaf1ffd86a5fb16fd081034f")
c.Assert(data.Contents[1].Owner.DisplayName, Equals, "webfile")
}
func (s *S) TestListWithDelimiter(c *C) {
testServer.Response(200, nil, GetListResultDump2)
b := s.s3.Bucket("quotes")
data, err := b.List("photos/2006/", "/", "some-marker", 1000)
c.Assert(err, IsNil)
req := testServer.WaitRequest()
c.Assert(req.Method, Equals, "GET")
c.Assert(req.URL.Path, Equals, "/quotes/")
c.Assert(req.Header["Date"], Not(Equals), "")
c.Assert(req.Form["prefix"], DeepEquals, []string{"photos/2006/"})
c.Assert(req.Form["delimiter"], DeepEquals, []string{"/"})
c.Assert(req.Form["marker"], DeepEquals, []string{"some-marker"})
c.Assert(req.Form["max-keys"], DeepEquals, []string{"1000"})
c.Assert(data.Name, Equals, "example-bucket")
c.Assert(data.Prefix, Equals, "photos/2006/")
c.Assert(data.Delimiter, Equals, "/")
c.Assert(data.Marker, Equals, "some-marker")
c.Assert(data.IsTruncated, Equals, false)
c.Assert(len(data.Contents), Equals, 0)
c.Assert(data.CommonPrefixes, DeepEquals, []string{"photos/2006/feb/", "photos/2006/jan/"})
}
func (s *S) TestGetKey(c *C) {
testServer.Response(200, GetKeyHeaderDump, "")
b := s.s3.Bucket("bucket")
key, err := b.GetKey("name")
req := testServer.WaitRequest()
c.Assert(req.Method, Equals, "HEAD")
c.Assert(req.URL.Path, Equals, "/bucket/name")
c.Assert(req.Header["Date"], Not(Equals), "")
c.Assert(err, IsNil)
c.Assert(key.Key, Equals, "name")
c.Assert(key.LastModified, Equals, GetKeyHeaderDump["Last-Modified"])
c.Assert(key.Size, Equals, int64(434234))
c.Assert(key.ETag, Equals, GetKeyHeaderDump["ETag"])
}
func (s *S) TestUnescapedColon(c *C) {
b := s.s3.Bucket("bucket")
u := b.URL("foo:bar")
c.Assert(u, Equals, "http://localhost:4444/bucket/foo:bar")
}

View File

@ -0,0 +1,616 @@
package s3_test
import (
"bytes"
"crypto/md5"
"fmt"
"io/ioutil"
"net/http"
"strings"
"github.com/mitchellh/goamz/aws"
"github.com/mitchellh/goamz/s3"
"github.com/mitchellh/goamz/testutil"
. "github.com/motain/gocheck"
"net"
"sort"
"time"
)
// AmazonServer represents an Amazon S3 server.
type AmazonServer struct {
auth aws.Auth
}
func (s *AmazonServer) SetUp(c *C) {
auth, err := aws.EnvAuth()
if err != nil {
c.Fatal(err.Error())
}
s.auth = auth
}
var _ = Suite(&AmazonClientSuite{Region: aws.USEast})
var _ = Suite(&AmazonClientSuite{Region: aws.EUWest})
var _ = Suite(&AmazonClientSuite{Region: aws.EUCentral})
var _ = Suite(&AmazonDomainClientSuite{Region: aws.USEast})
// AmazonClientSuite tests the client against a live S3 server.
type AmazonClientSuite struct {
aws.Region
srv AmazonServer
ClientTests
}
func (s *AmazonClientSuite) SetUpSuite(c *C) {
if !testutil.Amazon {
c.Skip("live tests against AWS disabled (no -amazon)")
}
s.srv.SetUp(c)
s.s3 = s3.New(s.srv.auth, s.Region)
// In case a previous test run was interrupted partway through.
s.ClientTests.Cleanup()
}
func (s *AmazonClientSuite) TearDownTest(c *C) {
s.ClientTests.Cleanup()
}
// AmazonDomainClientSuite tests the client against a live S3
// server using bucket names in the endpoint domain name rather
// than the request path.
type AmazonDomainClientSuite struct {
aws.Region
srv AmazonServer
ClientTests
}
func (s *AmazonDomainClientSuite) SetUpSuite(c *C) {
if !testutil.Amazon {
c.Skip("live tests against AWS disabled (no -amazon)")
}
s.srv.SetUp(c)
region := s.Region
region.S3BucketEndpoint = "https://${bucket}.s3.amazonaws.com"
s.s3 = s3.New(s.srv.auth, region)
s.ClientTests.Cleanup()
}
func (s *AmazonDomainClientSuite) TearDownTest(c *C) {
s.ClientTests.Cleanup()
}
// ClientTests defines integration tests designed to test the client.
// It is not used as a test suite in itself, but embedded within
// another type.
type ClientTests struct {
s3 *s3.S3
authIsBroken bool
}
func (s *ClientTests) Cleanup() {
killBucket(testBucket(s.s3))
}
func testBucket(s *s3.S3) *s3.Bucket {
// Watch out! If this function is corrupted and made to match with something
// people own, killBucket will happily remove *everything* inside the bucket.
key := s.Auth.AccessKey
if len(key) >= 8 {
key = s.Auth.AccessKey[:8]
}
return s.Bucket(fmt.Sprintf("goamz-%s-%s", s.Region.Name, key))
}
var attempts = aws.AttemptStrategy{
Min: 5,
Total: 20 * time.Second,
Delay: 100 * time.Millisecond,
}
func killBucket(b *s3.Bucket) {
var err error
for attempt := attempts.Start(); attempt.Next(); {
err = b.DelBucket()
if err == nil {
return
}
if _, ok := err.(*net.DNSError); ok {
return
}
e, ok := err.(*s3.Error)
if ok && e.Code == "NoSuchBucket" {
return
}
if ok && e.Code == "BucketNotEmpty" {
// Errors are ignored here. Just retry.
resp, err := b.List("", "", "", 1000)
if err == nil {
for _, key := range resp.Contents {
_ = b.Del(key.Key)
}
}
multis, _, _ := b.ListMulti("", "")
for _, m := range multis {
_ = m.Abort()
}
}
}
message := "cannot delete test bucket"
if err != nil {
message += ": " + err.Error()
}
panic(message)
}
func get(url string) ([]byte, error) {
for attempt := attempts.Start(); attempt.Next(); {
resp, err := http.Get(url)
if err != nil {
if attempt.HasNext() {
continue
}
return nil, err
}
data, err := ioutil.ReadAll(resp.Body)
resp.Body.Close()
if err != nil {
if attempt.HasNext() {
continue
}
return nil, err
}
return data, err
}
panic("unreachable")
}
func (s *ClientTests) TestBasicFunctionality(c *C) {
b := testBucket(s.s3)
err := b.PutBucket(s3.PublicRead)
c.Assert(err, IsNil)
err = b.Put("name", []byte("yo!"), "text/plain", s3.PublicRead)
c.Assert(err, IsNil)
defer b.Del("name")
data, err := b.Get("name")
c.Assert(err, IsNil)
c.Assert(string(data), Equals, "yo!")
data, err = get(b.URL("name"))
c.Assert(err, IsNil)
c.Assert(string(data), Equals, "yo!")
buf := bytes.NewBufferString("hey!")
err = b.PutReader("name2", buf, int64(buf.Len()), "text/plain", s3.Private)
c.Assert(err, IsNil)
defer b.Del("name2")
rc, err := b.GetReader("name2")
c.Assert(err, IsNil)
data, err = ioutil.ReadAll(rc)
c.Check(err, IsNil)
c.Check(string(data), Equals, "hey!")
rc.Close()
data, err = get(b.SignedURL("name2", time.Now().Add(time.Hour)))
c.Assert(err, IsNil)
c.Assert(string(data), Equals, "hey!")
if !s.authIsBroken {
data, err = get(b.SignedURL("name2", time.Now().Add(-time.Hour)))
c.Assert(err, IsNil)
c.Assert(string(data), Matches, "(?s).*AccessDenied.*")
}
err = b.DelBucket()
c.Assert(err, NotNil)
s3err, ok := err.(*s3.Error)
c.Assert(ok, Equals, true)
c.Assert(s3err.Code, Equals, "BucketNotEmpty")
c.Assert(s3err.BucketName, Equals, b.Name)
c.Assert(s3err.Message, Equals, "The bucket you tried to delete is not empty")
err = b.Del("name")
c.Assert(err, IsNil)
err = b.Del("name2")
c.Assert(err, IsNil)
err = b.DelBucket()
c.Assert(err, IsNil)
}
func (s *ClientTests) TestCopy(c *C) {
b := testBucket(s.s3)
err := b.PutBucket(s3.PublicRead)
c.Assert(err, IsNil)
err = b.Put("name+1", []byte("yo!"), "text/plain", s3.PublicRead)
c.Assert(err, IsNil)
defer b.Del("name+1")
err = b.Copy("name+1", "name+2", s3.PublicRead)
c.Assert(err, IsNil)
defer b.Del("name+2")
data, err := b.Get("name+2")
c.Assert(err, IsNil)
c.Assert(string(data), Equals, "yo!")
err = b.Del("name+1")
c.Assert(err, IsNil)
err = b.Del("name+2")
c.Assert(err, IsNil)
err = b.DelBucket()
c.Assert(err, IsNil)
}
func (s *ClientTests) TestGetNotFound(c *C) {
b := s.s3.Bucket("goamz-" + s.s3.Auth.AccessKey)
data, err := b.Get("non-existent")
s3err, _ := err.(*s3.Error)
c.Assert(s3err, NotNil)
c.Assert(s3err.StatusCode, Equals, 404)
c.Assert(s3err.Code, Equals, "NoSuchBucket")
c.Assert(s3err.Message, Equals, "The specified bucket does not exist")
c.Assert(data, IsNil)
}
// Communicate with all endpoints to see if they are alive.
func (s *ClientTests) TestRegions(c *C) {
errs := make(chan error, len(aws.Regions))
for _, region := range aws.Regions {
go func(r aws.Region) {
s := s3.New(s.s3.Auth, r)
b := s.Bucket("goamz-" + s.Auth.AccessKey)
_, err := b.Get("non-existent")
errs <- err
}(region)
}
for range aws.Regions {
err := <-errs
if err != nil {
s3err, ok := err.(*s3.Error)
if ok {
c.Check(s3err.Code, Matches, "NoSuchBucket")
} else if _, ok = err.(*net.DNSError); ok {
// Okay as well.
} else {
c.Errorf("Non-S3 error: %s", err)
}
} else {
c.Errorf("Test should have errored but it seems to have succeeded")
}
}
}
var objectNames = []string{
"index.html",
"index2.html",
"photos/2006/February/sample2.jpg",
"photos/2006/February/sample3.jpg",
"photos/2006/February/sample4.jpg",
"photos/2006/January/sample.jpg",
"test/bar",
"test/foo",
}
func keys(names ...string) []s3.Key {
ks := make([]s3.Key, len(names))
for i, name := range names {
ks[i].Key = name
}
return ks
}
// As the ListResp specifies all the parameters to the
// request too, we use it to specify request parameters
// and expected results. The Contents field is
// used only for the key names inside it.
var listTests = []s3.ListResp{
// normal list.
{
Contents: keys(objectNames...),
}, {
Marker: objectNames[0],
Contents: keys(objectNames[1:]...),
}, {
Marker: objectNames[0] + "a",
Contents: keys(objectNames[1:]...),
}, {
Marker: "z",
},
// limited results.
{
MaxKeys: 2,
Contents: keys(objectNames[0:2]...),
IsTruncated: true,
}, {
MaxKeys: 2,
Marker: objectNames[0],
Contents: keys(objectNames[1:3]...),
IsTruncated: true,
}, {
MaxKeys: 2,
Marker: objectNames[len(objectNames)-2],
Contents: keys(objectNames[len(objectNames)-1:]...),
},
// with delimiter
{
Delimiter: "/",
CommonPrefixes: []string{"photos/", "test/"},
Contents: keys("index.html", "index2.html"),
}, {
Delimiter: "/",
Prefix: "photos/2006/",
CommonPrefixes: []string{"photos/2006/February/", "photos/2006/January/"},
}, {
Delimiter: "/",
Prefix: "t",
CommonPrefixes: []string{"test/"},
}, {
Delimiter: "/",
MaxKeys: 1,
Contents: keys("index.html"),
IsTruncated: true,
}, {
Delimiter: "/",
MaxKeys: 1,
Marker: "index2.html",
CommonPrefixes: []string{"photos/"},
IsTruncated: true,
}, {
Delimiter: "/",
MaxKeys: 1,
Marker: "photos/",
CommonPrefixes: []string{"test/"},
IsTruncated: false,
}, {
Delimiter: "Feb",
CommonPrefixes: []string{"photos/2006/Feb"},
Contents: keys("index.html", "index2.html", "photos/2006/January/sample.jpg", "test/bar", "test/foo"),
},
}
func (s *ClientTests) TestDoublePutBucket(c *C) {
b := testBucket(s.s3)
err := b.PutBucket(s3.PublicRead)
c.Assert(err, IsNil)
err = b.PutBucket(s3.PublicRead)
if err != nil {
c.Assert(err, FitsTypeOf, new(s3.Error))
c.Assert(err.(*s3.Error).Code, Equals, "BucketAlreadyOwnedByYou")
}
}
func (s *ClientTests) TestBucketList(c *C) {
b := testBucket(s.s3)
err := b.PutBucket(s3.Private)
c.Assert(err, IsNil)
objData := make(map[string][]byte)
for i, path := range objectNames {
data := []byte(strings.Repeat("a", i))
err := b.Put(path, data, "text/plain", s3.Private)
c.Assert(err, IsNil)
defer b.Del(path)
objData[path] = data
}
for i, t := range listTests {
c.Logf("test %d", i)
resp, err := b.List(t.Prefix, t.Delimiter, t.Marker, t.MaxKeys)
c.Assert(err, IsNil)
c.Check(resp.Name, Equals, b.Name)
c.Check(resp.Delimiter, Equals, t.Delimiter)
c.Check(resp.IsTruncated, Equals, t.IsTruncated)
c.Check(resp.CommonPrefixes, DeepEquals, t.CommonPrefixes)
checkContents(c, resp.Contents, objData, t.Contents)
}
}
func etag(data []byte) string {
sum := md5.New()
sum.Write(data)
return fmt.Sprintf(`"%x"`, sum.Sum(nil))
}
func checkContents(c *C, contents []s3.Key, data map[string][]byte, expected []s3.Key) {
c.Assert(contents, HasLen, len(expected))
for i, k := range contents {
c.Check(k.Key, Equals, expected[i].Key)
// TODO mtime
c.Check(k.Size, Equals, int64(len(data[k.Key])))
c.Check(k.ETag, Equals, etag(data[k.Key]))
}
}
func (s *ClientTests) TestMultiInitPutList(c *C) {
b := testBucket(s.s3)
err := b.PutBucket(s3.Private)
c.Assert(err, IsNil)
multi, err := b.InitMulti("multi", "text/plain", s3.Private)
c.Assert(err, IsNil)
c.Assert(multi.UploadId, Matches, ".+")
defer multi.Abort()
var sent []s3.Part
for i := 0; i < 5; i++ {
p, err := multi.PutPart(i+1, strings.NewReader(fmt.Sprintf("<part %d>", i+1)))
c.Assert(err, IsNil)
c.Assert(p.N, Equals, i+1)
c.Assert(p.Size, Equals, int64(8))
c.Assert(p.ETag, Matches, ".+")
sent = append(sent, p)
}
s3.SetListPartsMax(2)
parts, err := multi.ListParts()
c.Assert(err, IsNil)
c.Assert(parts, HasLen, len(sent))
for i := range parts {
c.Assert(parts[i].N, Equals, sent[i].N)
c.Assert(parts[i].Size, Equals, sent[i].Size)
c.Assert(parts[i].ETag, Equals, sent[i].ETag)
}
err = multi.Complete(parts)
s3err, failed := err.(*s3.Error)
c.Assert(failed, Equals, true)
c.Assert(s3err.Code, Equals, "EntityTooSmall")
err = multi.Abort()
c.Assert(err, IsNil)
_, err = multi.ListParts()
s3err, ok := err.(*s3.Error)
c.Assert(ok, Equals, true)
c.Assert(s3err.Code, Equals, "NoSuchUpload")
}
// This may take a minute or more due to the minimum part size S3
// accepts on multipart uploads.
func (s *ClientTests) TestMultiComplete(c *C) {
b := testBucket(s.s3)
err := b.PutBucket(s3.Private)
c.Assert(err, IsNil)
multi, err := b.InitMulti("multi", "text/plain", s3.Private)
c.Assert(err, IsNil)
c.Assert(multi.UploadId, Matches, ".+")
defer multi.Abort()
// Minimum size S3 accepts for all but the last part is 5MB.
data1 := make([]byte, 5*1024*1024)
data2 := []byte("<part 2>")
part1, err := multi.PutPart(1, bytes.NewReader(data1))
c.Assert(err, IsNil)
part2, err := multi.PutPart(2, bytes.NewReader(data2))
c.Assert(err, IsNil)
// Purposefully reversed. The order requirement must be handled.
err = multi.Complete([]s3.Part{part2, part1})
c.Assert(err, IsNil)
data, err := b.Get("multi")
c.Assert(err, IsNil)
c.Assert(len(data), Equals, len(data1)+len(data2))
for i := range data1 {
if data[i] != data1[i] {
c.Fatalf("uploaded object at byte %d: want %d, got %d", data1[i], data[i])
}
}
c.Assert(string(data[len(data1):]), Equals, string(data2))
}
type multiList []*s3.Multi
func (l multiList) Len() int { return len(l) }
func (l multiList) Less(i, j int) bool { return l[i].Key < l[j].Key }
func (l multiList) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
func (s *ClientTests) TestListMulti(c *C) {
b := testBucket(s.s3)
err := b.PutBucket(s3.Private)
c.Assert(err, IsNil)
// Ensure an empty state before testing its behavior.
multis, _, err := b.ListMulti("", "")
c.Assert(err, IsNil)
for _, m := range multis {
err := m.Abort()
c.Assert(err, IsNil)
}
keys := []string{
"a/multi2",
"a/multi3",
"b/multi4",
"multi1",
}
for _, key := range keys {
m, err := b.InitMulti(key, "", s3.Private)
c.Assert(err, IsNil)
defer m.Abort()
}
// Amazon's implementation of the multiple-request listing for
// multipart uploads in progress seems broken in multiple ways.
// (next tokens are not provided, etc).
//s3.SetListMultiMax(2)
multis, prefixes, err := b.ListMulti("", "")
c.Assert(err, IsNil)
for attempt := attempts.Start(); attempt.Next() && len(multis) < len(keys); {
multis, prefixes, err = b.ListMulti("", "")
c.Assert(err, IsNil)
}
sort.Sort(multiList(multis))
c.Assert(prefixes, IsNil)
var gotKeys []string
for _, m := range multis {
gotKeys = append(gotKeys, m.Key)
}
c.Assert(gotKeys, DeepEquals, keys)
for _, m := range multis {
c.Assert(m.Bucket, Equals, b)
c.Assert(m.UploadId, Matches, ".+")
}
multis, prefixes, err = b.ListMulti("", "/")
for attempt := attempts.Start(); attempt.Next() && len(prefixes) < 2; {
multis, prefixes, err = b.ListMulti("", "/")
c.Assert(err, IsNil)
}
c.Assert(err, IsNil)
c.Assert(prefixes, DeepEquals, []string{"a/", "b/"})
c.Assert(multis, HasLen, 1)
c.Assert(multis[0].Bucket, Equals, b)
c.Assert(multis[0].Key, Equals, "multi1")
c.Assert(multis[0].UploadId, Matches, ".+")
for attempt := attempts.Start(); attempt.Next() && len(multis) < 2; {
multis, prefixes, err = b.ListMulti("", "")
c.Assert(err, IsNil)
}
multis, prefixes, err = b.ListMulti("a/", "/")
c.Assert(err, IsNil)
c.Assert(prefixes, IsNil)
c.Assert(multis, HasLen, 2)
c.Assert(multis[0].Bucket, Equals, b)
c.Assert(multis[0].Key, Equals, "a/multi2")
c.Assert(multis[0].UploadId, Matches, ".+")
c.Assert(multis[1].Bucket, Equals, b)
c.Assert(multis[1].Key, Equals, "a/multi3")
c.Assert(multis[1].UploadId, Matches, ".+")
}
func (s *ClientTests) TestMultiPutAllZeroLength(c *C) {
b := testBucket(s.s3)
err := b.PutBucket(s3.Private)
c.Assert(err, IsNil)
multi, err := b.InitMulti("multi", "text/plain", s3.Private)
c.Assert(err, IsNil)
defer multi.Abort()
// This tests an edge case. Amazon requires at least one
// part for multipart uploads to work, even if the part is empty.
parts, err := multi.PutAll(strings.NewReader(""), 5*1024*1024)
c.Assert(err, IsNil)
c.Assert(parts, HasLen, 1)
c.Assert(parts[0].Size, Equals, int64(0))
c.Assert(parts[0].ETag, Equals, `"d41d8cd98f00b204e9800998ecf8427e"`)
err = multi.Complete(parts)
c.Assert(err, IsNil)
}

View File

@ -0,0 +1,79 @@
package s3_test
import (
"github.com/mitchellh/goamz/aws"
"github.com/mitchellh/goamz/s3"
"github.com/mitchellh/goamz/s3/s3test"
. "github.com/motain/gocheck"
)
type LocalServer struct {
auth aws.Auth
region aws.Region
srv *s3test.Server
config *s3test.Config
}
func (s *LocalServer) SetUp(c *C) {
srv, err := s3test.NewServer(s.config)
c.Assert(err, IsNil)
c.Assert(srv, NotNil)
s.srv = srv
s.region = aws.Region{
Name: "faux-region-1",
S3Endpoint: srv.URL(),
S3LocationConstraint: true, // s3test server requires a LocationConstraint
}
}
// LocalServerSuite defines tests that will run
// against the local s3test server. It includes
// selected tests from ClientTests;
// when the s3test functionality is sufficient, it should
// include all of them, and ClientTests can be simply embedded.
type LocalServerSuite struct {
srv LocalServer
clientTests ClientTests
}
var (
// run tests twice, once in us-east-1 mode, once not.
_ = Suite(&LocalServerSuite{})
_ = Suite(&LocalServerSuite{
srv: LocalServer{
config: &s3test.Config{
Send409Conflict: true,
},
},
})
)
func (s *LocalServerSuite) SetUpSuite(c *C) {
s.srv.SetUp(c)
s.clientTests.s3 = s3.New(s.srv.auth, s.srv.region)
// TODO Sadly the fake server ignores auth completely right now. :-(
s.clientTests.authIsBroken = true
s.clientTests.Cleanup()
}
func (s *LocalServerSuite) TearDownTest(c *C) {
s.clientTests.Cleanup()
}
func (s *LocalServerSuite) TestBasicFunctionality(c *C) {
s.clientTests.TestBasicFunctionality(c)
}
func (s *LocalServerSuite) TestGetNotFound(c *C) {
s.clientTests.TestGetNotFound(c)
}
func (s *LocalServerSuite) TestBucketList(c *C) {
s.clientTests.TestBucketList(c)
}
func (s *LocalServerSuite) TestDoublePutBucket(c *C) {
s.clientTests.TestDoublePutBucket(c)
}

View File

@ -0,0 +1,666 @@
package s3test
import (
"bytes"
"crypto/md5"
"encoding/hex"
"encoding/xml"
"fmt"
"github.com/mitchellh/goamz/s3"
"io"
"io/ioutil"
"log"
"net"
"net/http"
"net/url"
"regexp"
"sort"
"strconv"
"strings"
"sync"
"time"
)
const debug = false
type s3Error struct {
statusCode int
XMLName struct{} `xml:"Error"`
Code string
Message string
BucketName string
RequestId string
HostId string
}
type action struct {
srv *Server
w http.ResponseWriter
req *http.Request
reqId string
}
// Config controls the internal behaviour of the Server. A nil config is
// valid and uses the default value for every option. Once passed
// to NewServer, the configuration must not be modified.
type Config struct {
// Send409Conflict controls how the Server will respond to calls to PUT on a
// previously existing bucket. The default is false, and corresponds to the
// us-east-1 s3 endpoint. Setting this value to true emulates the behaviour of
// all other regions.
// http://docs.amazonwebservices.com/AmazonS3/latest/API/ErrorResponses.html
Send409Conflict bool
}
func (c *Config) send409Conflict() bool {
if c != nil {
return c.Send409Conflict
}
return false
}
// Server is a fake S3 server for testing purposes.
// All of the data for the server is kept in memory.
type Server struct {
url string
reqId int
listener net.Listener
mu sync.Mutex
buckets map[string]*bucket
config *Config
}
type bucket struct {
name string
acl s3.ACL
ctime time.Time
objects map[string]*object
}
type object struct {
name string
mtime time.Time
meta http.Header // metadata to return with requests.
checksum []byte // also held as Content-MD5 in meta.
data []byte
}
// A resource encapsulates the subject of an HTTP request.
// The resource referred to may or may not exist
// when the request is made.
type resource interface {
put(a *action) interface{}
get(a *action) interface{}
post(a *action) interface{}
delete(a *action) interface{}
}
func NewServer(config *Config) (*Server, error) {
l, err := net.Listen("tcp", "localhost:0")
if err != nil {
return nil, fmt.Errorf("cannot listen on localhost: %v", err)
}
srv := &Server{
listener: l,
url: "http://" + l.Addr().String(),
buckets: make(map[string]*bucket),
config: config,
}
go http.Serve(l, http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
srv.serveHTTP(w, req)
}))
return srv, nil
}
// Quit closes down the server.
func (srv *Server) Quit() {
srv.listener.Close()
}
// URL returns a URL for the server.
func (srv *Server) URL() string {
return srv.url
}
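// A typical test setup sketch (the region name is arbitrary; the server
// requires a LocationConstraint, so the region must set it):
//
//	srv, err := NewServer(nil)
//	if err != nil {
//		return err
//	}
//	defer srv.Quit()
//	region := aws.Region{
//		Name:                 "faux-region-1",
//		S3Endpoint:           srv.URL(),
//		S3LocationConstraint: true,
//	}
//	client := s3.New(aws.Auth{}, region)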
func fatalf(code int, codeStr string, errf string, a ...interface{}) {
panic(&s3Error{
statusCode: code,
Code: codeStr,
Message: fmt.Sprintf(errf, a...),
})
}
// serveHTTP serves the S3 protocol.
func (srv *Server) serveHTTP(w http.ResponseWriter, req *http.Request) {
// ignore error from ParseForm as it's usually spurious.
req.ParseForm()
srv.mu.Lock()
defer srv.mu.Unlock()
if debug {
log.Printf("s3test %q %q", req.Method, req.URL)
}
a := &action{
srv: srv,
w: w,
req: req,
reqId: fmt.Sprintf("%09X", srv.reqId),
}
srv.reqId++
var r resource
defer func() {
switch err := recover().(type) {
case *s3Error:
switch r := r.(type) {
case objectResource:
err.BucketName = r.bucket.name
case bucketResource:
err.BucketName = r.name
}
err.RequestId = a.reqId
// TODO HostId
w.Header().Set("Content-Type", `xml version="1.0" encoding="UTF-8"`)
w.WriteHeader(err.statusCode)
xmlMarshal(w, err)
case nil:
default:
panic(err)
}
}()
r = srv.resourceForURL(req.URL)
var resp interface{}
switch req.Method {
case "PUT":
resp = r.put(a)
case "GET", "HEAD":
resp = r.get(a)
case "DELETE":
resp = r.delete(a)
case "POST":
resp = r.post(a)
default:
fatalf(400, "MethodNotAllowed", "unknown http request method %q", req.Method)
}
if resp != nil && req.Method != "HEAD" {
xmlMarshal(w, resp)
}
}
// xmlMarshal is the same as xml.Marshal except that
// it panics on error. The marshalling should not fail,
// but we want to know if it does.
func xmlMarshal(w io.Writer, x interface{}) {
if err := xml.NewEncoder(w).Encode(x); err != nil {
panic(fmt.Errorf("error marshalling %#v: %v", x, err))
}
}
// In a fully implemented test server, each of these would have
// its own resource type.
var unimplementedBucketResourceNames = map[string]bool{
"acl": true,
"lifecycle": true,
"policy": true,
"location": true,
"logging": true,
"notification": true,
"versions": true,
"requestPayment": true,
"versioning": true,
"website": true,
"uploads": true,
}
var unimplementedObjectResourceNames = map[string]bool{
"uploadId": true,
"acl": true,
"torrent": true,
"uploads": true,
}
var pathRegexp = regexp.MustCompile("/(([^/]+)(/(.*))?)?")
// resourceForURL returns a resource object for the given URL.
func (srv *Server) resourceForURL(u *url.URL) (r resource) {
if u.Path == "/" {
return serviceResource{
buckets: srv.buckets,
}
}
m := pathRegexp.FindStringSubmatch(u.Path)
if m == nil {
fatalf(404, "InvalidURI", "Couldn't parse the specified URI")
}
bucketName := m[2]
objectName := m[4]
if bucketName == "" {
return nullResource{} // root
}
b := bucketResource{
name: bucketName,
bucket: srv.buckets[bucketName],
}
q := u.Query()
if objectName == "" {
for name := range q {
if unimplementedBucketResourceNames[name] {
return nullResource{}
}
}
return b
}
if b.bucket == nil {
fatalf(404, "NoSuchBucket", "The specified bucket does not exist")
}
objr := objectResource{
name: objectName,
version: q.Get("versionId"),
bucket: b.bucket,
}
for name := range q {
if unimplementedObjectResourceNames[name] {
return nullResource{}
}
}
if obj := objr.bucket.objects[objr.name]; obj != nil {
objr.object = obj
}
return objr
}
// nullResource has error stubs for all resource methods.
type nullResource struct{}
func notAllowed() interface{} {
fatalf(400, "MethodNotAllowed", "The specified method is not allowed against this resource")
return nil
}
func (nullResource) put(a *action) interface{} { return notAllowed() }
func (nullResource) get(a *action) interface{} { return notAllowed() }
func (nullResource) post(a *action) interface{} { return notAllowed() }
func (nullResource) delete(a *action) interface{} { return notAllowed() }
const timeFormat = "2006-01-02T15:04:05.000Z07:00"
type serviceResource struct {
buckets map[string]*bucket
}
func (serviceResource) put(a *action) interface{} { return notAllowed() }
func (serviceResource) post(a *action) interface{} { return notAllowed() }
func (serviceResource) delete(a *action) interface{} { return notAllowed() }
// GET on an s3 service lists the buckets.
// http://docs.aws.amazon.com/AmazonS3/latest/API/RESTServiceGET.html
func (r serviceResource) get(a *action) interface{} {
type respBucket struct {
Name string
}
type response struct {
Buckets []respBucket `xml:">Bucket"`
}
resp := response{}
for _, bucketPtr := range r.buckets {
bkt := respBucket{
Name: bucketPtr.name,
}
resp.Buckets = append(resp.Buckets, bkt)
}
return &resp
}
type bucketResource struct {
name string
bucket *bucket // non-nil if the bucket already exists.
}
// GET on a bucket lists the objects in the bucket.
// http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketGET.html
func (r bucketResource) get(a *action) interface{} {
if r.bucket == nil {
fatalf(404, "NoSuchBucket", "The specified bucket does not exist")
}
delimiter := a.req.Form.Get("delimiter")
marker := a.req.Form.Get("marker")
maxKeys := -1
if s := a.req.Form.Get("max-keys"); s != "" {
i, err := strconv.Atoi(s)
if err != nil || i < 0 {
fatalf(400, "invalid value for max-keys: %q", s)
}
maxKeys = i
}
prefix := a.req.Form.Get("prefix")
a.w.Header().Set("Content-Type", "application/xml")
if a.req.Method == "HEAD" {
return nil
}
var objs orderedObjects
// first get all matching objects and arrange them in alphabetical order.
for name, obj := range r.bucket.objects {
if strings.HasPrefix(name, prefix) {
objs = append(objs, obj)
}
}
sort.Sort(objs)
if maxKeys <= 0 {
maxKeys = 1000
}
resp := &s3.ListResp{
Name: r.bucket.name,
Prefix: prefix,
Delimiter: delimiter,
Marker: marker,
MaxKeys: maxKeys,
}
var prefixes []string
for _, obj := range objs {
if !strings.HasPrefix(obj.name, prefix) {
continue
}
name := obj.name
isPrefix := false
if delimiter != "" {
if i := strings.Index(obj.name[len(prefix):], delimiter); i >= 0 {
name = obj.name[:len(prefix)+i+len(delimiter)]
if prefixes != nil && prefixes[len(prefixes)-1] == name {
continue
}
isPrefix = true
}
}
if name <= marker {
continue
}
if len(resp.Contents)+len(prefixes) >= maxKeys {
resp.IsTruncated = true
break
}
if isPrefix {
prefixes = append(prefixes, name)
} else {
// Contents contains only keys not found in CommonPrefixes
resp.Contents = append(resp.Contents, obj.s3Key())
}
}
resp.CommonPrefixes = prefixes
return resp
}
// orderedObjects holds a slice of objects that can be sorted
// by name.
type orderedObjects []*object
func (s orderedObjects) Len() int {
return len(s)
}
func (s orderedObjects) Swap(i, j int) {
s[i], s[j] = s[j], s[i]
}
func (s orderedObjects) Less(i, j int) bool {
return s[i].name < s[j].name
}
func (obj *object) s3Key() s3.Key {
return s3.Key{
Key: obj.name,
LastModified: obj.mtime.Format(timeFormat),
Size: int64(len(obj.data)),
ETag: fmt.Sprintf(`"%x"`, obj.checksum),
// TODO StorageClass
// TODO Owner
}
}
// DELETE on a bucket deletes the bucket if it's not empty.
func (r bucketResource) delete(a *action) interface{} {
b := r.bucket
if b == nil {
fatalf(404, "NoSuchBucket", "The specified bucket does not exist")
}
if len(b.objects) > 0 {
fatalf(400, "BucketNotEmpty", "The bucket you tried to delete is not empty")
}
delete(a.srv.buckets, b.name)
return nil
}
// PUT on a bucket creates the bucket.
// http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUT.html
func (r bucketResource) put(a *action) interface{} {
var created bool
if r.bucket == nil {
if !validBucketName(r.name) {
fatalf(400, "InvalidBucketName", "The specified bucket is not valid")
}
if loc := locationConstraint(a); loc == "" {
fatalf(400, "InvalidRequets", "The unspecified location constraint is incompatible for the region specific endpoint this request was sent to.")
}
// TODO validate acl
r.bucket = &bucket{
name: r.name,
// TODO default acl
objects: make(map[string]*object),
}
a.srv.buckets[r.name] = r.bucket
created = true
}
if !created && a.srv.config.send409Conflict() {
fatalf(409, "BucketAlreadyOwnedByYou", "Your previous request to create the named bucket succeeded and you already own it.")
}
r.bucket.acl = s3.ACL(a.req.Header.Get("x-amz-acl"))
return nil
}
func (bucketResource) post(a *action) interface{} {
fatalf(400, "Method", "bucket POST method not available")
return nil
}
// validBucketName returns whether name is a valid bucket name.
// Here are the rules, from:
// http://docs.amazonwebservices.com/AmazonS3/2006-03-01/dev/BucketRestrictions.html
//
// Can contain lowercase letters, numbers, periods (.), underscores (_),
// and dashes (-). You can use uppercase letters for buckets only in the
// US Standard region.
//
// Must start with a number or letter
//
// Must be between 3 and 255 characters long
//
// There's one extra rule (must not be formatted as an IP address,
// e.g. 192.168.5.4), but the real S3 server does not seem to check
// that rule, so we will not check it either.
//
func validBucketName(name string) bool {
if len(name) < 3 || len(name) > 255 {
return false
}
r := name[0]
if !(r >= '0' && r <= '9' || r >= 'a' && r <= 'z') {
return false
}
for _, r := range name {
switch {
case r >= '0' && r <= '9':
case r >= 'a' && r <= 'z':
case r == '_' || r == '-':
case r == '.':
default:
return false
}
}
return true
}
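For illustration, a few sample inputs and the results the validator above gives (a sketch that would have to live in the same package, since validBucketName is unexported):

```go
func exampleValidBucketNames() {
	fmt.Println(validBucketName("my-bucket.backup")) // true
	fmt.Println(validBucketName("My-Bucket"))        // false: uppercase is rejected here
	fmt.Println(validBucketName("-bucket"))          // false: must start with a letter or digit
	fmt.Println(validBucketName("ab"))               // false: fewer than 3 characters
}
```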
var responseParams = map[string]bool{
"content-type": true,
"content-language": true,
"expires": true,
"cache-control": true,
"content-disposition": true,
"content-encoding": true,
}
type objectResource struct {
name string
version string
bucket *bucket // always non-nil.
object *object // may be nil.
}
// GET on an object gets the contents of the object.
// http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectGET.html
func (objr objectResource) get(a *action) interface{} {
obj := objr.object
if obj == nil {
fatalf(404, "NoSuchKey", "The specified key does not exist.")
}
h := a.w.Header()
// add metadata
for name, d := range obj.meta {
h[name] = d
}
// override header values in response to request parameters.
for name, vals := range a.req.Form {
if strings.HasPrefix(name, "response-") {
name = name[len("response-"):]
if !responseParams[name] {
continue
}
h.Set(name, vals[0])
}
}
if r := a.req.Header.Get("Range"); r != "" {
fatalf(400, "NotImplemented", "range unimplemented")
}
// TODO Last-Modified-Since
// TODO If-Modified-Since
// TODO If-Unmodified-Since
// TODO If-Match
// TODO If-None-Match
// TODO Connection: close ??
// TODO x-amz-request-id
h.Set("Content-Length", fmt.Sprint(len(obj.data)))
h.Set("ETag", hex.EncodeToString(obj.checksum))
h.Set("Last-Modified", obj.mtime.Format(time.RFC1123))
if a.req.Method == "HEAD" {
return nil
}
// TODO avoid holding the lock when writing data.
_, err := a.w.Write(obj.data)
if err != nil {
// we can't do much except just log the fact.
log.Printf("error writing data: %v", err)
}
return nil
}
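The response-* form values consulted above let a caller override selected response headers per request; only the keys whitelisted in responseParams are honoured. A small sketch of building such a request URL (bucket and key are hypothetical):

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	// "content-type" is in responseParams, so the server copies this
	// value into the Content-Type header of the GET response.
	q := url.Values{"response-content-type": {"text/plain"}}
	fmt.Println("/mybucket/report.csv?" + q.Encode())
	// Output: /mybucket/report.csv?response-content-type=text%2Fplain
}
```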
var metaHeaders = map[string]bool{
"Content-MD5": true,
"x-amz-acl": true,
"Content-Type": true,
"Content-Encoding": true,
"Content-Disposition": true,
}
// PUT on an object creates the object.
func (objr objectResource) put(a *action) interface{} {
// TODO Cache-Control header
// TODO Expires header
// TODO x-amz-server-side-encryption
// TODO x-amz-storage-class
// TODO is this correct, or should we erase all previous metadata?
obj := objr.object
if obj == nil {
obj = &object{
name: objr.name,
meta: make(http.Header),
}
}
var expectHash []byte
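// NB: real S3 takes Content-MD5 as base64 (RFC 1864); this test
// server accepts a hex encoding instead.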
if c := a.req.Header.Get("Content-MD5"); c != "" {
var err error
expectHash, err = hex.DecodeString(c)
if err != nil || len(expectHash) != md5.Size {
fatalf(400, "InvalidDigest", "The Content-MD5 you specified was invalid")
}
}
sum := md5.New()
// TODO avoid holding lock while reading data.
data, err := ioutil.ReadAll(io.TeeReader(a.req.Body, sum))
if err != nil {
fatalf(400, "TODO", "read error")
}
gotHash := sum.Sum(nil)
if expectHash != nil && bytes.Compare(gotHash, expectHash) != 0 {
fatalf(400, "BadDigest", "The Content-MD5 you specified did not match what we received")
}
if a.req.ContentLength >= 0 && int64(len(data)) != a.req.ContentLength {
fatalf(400, "IncompleteBody", "You did not provide the number of bytes specified by the Content-Length HTTP header")
}
// PUT request has been successful - save data and metadata
for key, values := range a.req.Header {
key = http.CanonicalHeaderKey(key)
if metaHeaders[key] || strings.HasPrefix(key, "X-Amz-Meta-") {
obj.meta[key] = values
}
}
obj.data = data
obj.checksum = gotHash
obj.mtime = time.Now()
objr.bucket.objects[objr.name] = obj
return nil
}
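A sketch of a PUT that passes the digest check above — note the hex-encoded digest, matching this server's hex.DecodeString (the endpoint URL is hypothetical; real S3 would want base64 here):

```go
package main

import (
	"bytes"
	"crypto/md5"
	"encoding/hex"
	"fmt"
	"net/http"
)

func main() {
	body := []byte("hello world")
	sum := md5.Sum(body)
	req, err := http.NewRequest("PUT", "http://localhost:9000/mybucket/hello.txt", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	req.Header.Set("Content-MD5", hex.EncodeToString(sum[:])) // hex, as this server expects
	req.Header.Set("Content-Type", "text/plain")
	fmt.Println(req.Header.Get("Content-MD5"))
}
```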
func (objr objectResource) delete(a *action) interface{} {
delete(objr.bucket.objects, objr.name)
return nil
}
func (objr objectResource) post(a *action) interface{} {
fatalf(400, "MethodNotAllowed", "The specified method is not allowed against this resource")
return nil
}
type CreateBucketConfiguration struct {
LocationConstraint string
}
// locationConstraint parses the <CreateBucketConfiguration /> request body (if present).
// If there is no body, an empty string will be returned.
func locationConstraint(a *action) string {
var body bytes.Buffer
if _, err := io.Copy(&body, a.req.Body); err != nil {
fatalf(400, "InvalidRequest", err.Error())
}
if body.Len() == 0 {
return ""
}
var loc CreateBucketConfiguration
if err := xml.NewDecoder(&body).Decode(&loc); err != nil {
fatalf(400, "InvalidRequest", err.Error())
}
return loc.LocationConstraint
}
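The body decoded here is the standard CreateBucketConfiguration document. A minimal, self-contained sketch of the same decode step:

```go
package main

import (
	"encoding/xml"
	"fmt"
)

type CreateBucketConfiguration struct {
	LocationConstraint string
}

func main() {
	body := `<CreateBucketConfiguration><LocationConstraint>EU</LocationConstraint></CreateBucketConfiguration>`
	var loc CreateBucketConfiguration
	if err := xml.Unmarshal([]byte(body), &loc); err != nil {
		panic(err)
	}
	fmt.Println(loc.LocationConstraint) // EU
}
```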


@ -0,0 +1,126 @@
package s3
import (
"crypto/hmac"
"crypto/sha1"
"encoding/base64"
"log"
"sort"
"strings"
"github.com/mitchellh/goamz/aws"
)
var b64 = base64.StdEncoding
// ----------------------------------------------------------------------------
// S3 signing (http://goo.gl/G1LrK)
var s3ParamsToSign = map[string]bool{
"acl": true,
"delete": true,
"location": true,
"logging": true,
"notification": true,
"partNumber": true,
"policy": true,
"requestPayment": true,
"torrent": true,
"uploadId": true,
"uploads": true,
"versionId": true,
"versioning": true,
"versions": true,
"response-content-type": true,
"response-content-language": true,
"response-expires": true,
"response-cache-control": true,
"response-content-disposition": true,
"response-content-encoding": true,
}
func sign(auth aws.Auth, method, canonicalPath string, params, headers map[string][]string) {
var md5, ctype, date, xamz string
var xamzDate bool
var sarray []string
// add security token
if auth.Token != "" {
headers["x-amz-security-token"] = []string{auth.Token}
}
if auth.SecretKey == "" {
// no auth secret; skip signing, e.g. for public read-only buckets.
return
}
for k, v := range headers {
k = strings.ToLower(k)
switch k {
case "content-md5":
md5 = v[0]
case "content-type":
ctype = v[0]
case "date":
if !xamzDate {
date = v[0]
}
default:
if strings.HasPrefix(k, "x-amz-") {
vall := strings.Join(v, ",")
sarray = append(sarray, k+":"+vall)
if k == "x-amz-date" {
xamzDate = true
date = ""
}
}
}
}
if len(sarray) > 0 {
sort.StringSlice(sarray).Sort()
xamz = strings.Join(sarray, "\n") + "\n"
}
expires := false
if v, ok := params["Expires"]; ok {
// Query string request authentication alternative.
expires = true
date = v[0]
params["AWSAccessKeyId"] = []string{auth.AccessKey}
}
sarray = sarray[0:0]
for k, v := range params {
if s3ParamsToSign[k] {
for _, vi := range v {
if vi == "" {
sarray = append(sarray, k)
} else {
// "When signing you do not encode these values."
sarray = append(sarray, k+"="+vi)
}
}
}
}
if len(sarray) > 0 {
sort.StringSlice(sarray).Sort()
canonicalPath = canonicalPath + "?" + strings.Join(sarray, "&")
}
payload := method + "\n" + md5 + "\n" + ctype + "\n" + date + "\n" + xamz + canonicalPath
hash := hmac.New(sha1.New, []byte(auth.SecretKey))
hash.Write([]byte(payload))
signature := make([]byte, b64.EncodedLen(hash.Size()))
b64.Encode(signature, hash.Sum(nil))
if expires {
params["Signature"] = []string{string(signature)}
} else {
headers["Authorization"] = []string{"AWS " + auth.AccessKey + ":" + string(signature)}
}
if debug {
log.Printf("Signature payload: %q", payload)
log.Printf("Signature: %q", signature)
}
}
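For reference, the string-to-sign that this function assembles for the documented object-GET case (exercised by TestSignExampleObjectGet below) can be reproduced standalone; the secret is the example credential from the AWS signing docs:

```go
package main

import (
	"crypto/hmac"
	"crypto/sha1"
	"encoding/base64"
	"fmt"
)

func main() {
	// method \n md5 \n content-type \n date \n [x-amz headers] canonical path
	payload := "GET\n\n\nTue, 27 Mar 2007 19:36:42 +0000\n/johnsmith/photos/puppy.jpg"
	secret := "uV3F3YluFJax1cknvbcGwgjvx4QpvB+leU8dUj2o"
	h := hmac.New(sha1.New, []byte(secret))
	h.Write([]byte(payload))
	fmt.Println(base64.StdEncoding.EncodeToString(h.Sum(nil)))
	// Output: xXjDGYUmKxnwqr5KXNPGldn5LbA=
}
```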


@ -0,0 +1,194 @@
package s3_test
import (
"github.com/mitchellh/goamz/aws"
"github.com/mitchellh/goamz/s3"
. "github.com/motain/gocheck"
)
// S3 ReST authentication docs: http://goo.gl/G1LrK
var testAuth = aws.Auth{"0PN5J17HBGZHT7JJ3X82", "uV3F3YluFJax1cknvbcGwgjvx4QpvB+leU8dUj2o", ""}
var emptyAuth = aws.Auth{"", "", ""}
func (s *S) TestSignExampleObjectGet(c *C) {
method := "GET"
path := "/johnsmith/photos/puppy.jpg"
headers := map[string][]string{
"Host": {"johnsmith.s3.amazonaws.com"},
"Date": {"Tue, 27 Mar 2007 19:36:42 +0000"},
}
s3.Sign(testAuth, method, path, nil, headers)
expected := "AWS 0PN5J17HBGZHT7JJ3X82:xXjDGYUmKxnwqr5KXNPGldn5LbA="
c.Assert(headers["Authorization"], DeepEquals, []string{expected})
}
func (s *S) TestSignExampleObjectGetNoAuth(c *C) {
method := "GET"
path := "/johnsmith/photos/puppy.jpg"
headers := map[string][]string{
"Host": {"johnsmith.s3.amazonaws.com"},
"Date": {"Tue, 27 Mar 2007 19:36:42 +0000"},
}
s3.Sign(emptyAuth, method, path, nil, headers)
c.Assert(headers["Authorization"], IsNil)
}
func (s *S) TestSignExampleObjectPut(c *C) {
method := "PUT"
path := "/johnsmith/photos/puppy.jpg"
headers := map[string][]string{
"Host": {"johnsmith.s3.amazonaws.com"},
"Date": {"Tue, 27 Mar 2007 21:15:45 +0000"},
"Content-Type": {"image/jpeg"},
"Content-Length": {"94328"},
}
s3.Sign(testAuth, method, path, nil, headers)
expected := "AWS 0PN5J17HBGZHT7JJ3X82:hcicpDDvL9SsO6AkvxqmIWkmOuQ="
c.Assert(headers["Authorization"], DeepEquals, []string{expected})
}
func (s *S) TestSignExampleList(c *C) {
method := "GET"
path := "/johnsmith/"
params := map[string][]string{
"prefix": {"photos"},
"max-keys": {"50"},
"marker": {"puppy"},
}
headers := map[string][]string{
"Host": {"johnsmith.s3.amazonaws.com"},
"Date": {"Tue, 27 Mar 2007 19:42:41 +0000"},
"User-Agent": {"Mozilla/5.0"},
}
s3.Sign(testAuth, method, path, params, headers)
expected := "AWS 0PN5J17HBGZHT7JJ3X82:jsRt/rhG+Vtp88HrYL706QhE4w4="
c.Assert(headers["Authorization"], DeepEquals, []string{expected})
}
func (s *S) TestSignExampleListNoAuth(c *C) {
method := "GET"
path := "/johnsmith/"
params := map[string][]string{
"prefix": {"photos"},
"max-keys": {"50"},
"marker": {"puppy"},
}
headers := map[string][]string{
"Host": {"johnsmith.s3.amazonaws.com"},
"Date": {"Tue, 27 Mar 2007 19:42:41 +0000"},
"User-Agent": {"Mozilla/5.0"},
}
s3.Sign(emptyAuth, method, path, params, headers)
c.Assert(headers["Authorization"], IsNil)
}
func (s *S) TestSignExampleFetch(c *C) {
method := "GET"
path := "/johnsmith/"
params := map[string][]string{
"acl": {""},
}
headers := map[string][]string{
"Host": {"johnsmith.s3.amazonaws.com"},
"Date": {"Tue, 27 Mar 2007 19:44:46 +0000"},
}
s3.Sign(testAuth, method, path, params, headers)
expected := "AWS 0PN5J17HBGZHT7JJ3X82:thdUi9VAkzhkniLj96JIrOPGi0g="
c.Assert(headers["Authorization"], DeepEquals, []string{expected})
}
func (s *S) TestSignExampleFetchNoAuth(c *C) {
method := "GET"
path := "/johnsmith/"
params := map[string][]string{
"acl": {""},
}
headers := map[string][]string{
"Host": {"johnsmith.s3.amazonaws.com"},
"Date": {"Tue, 27 Mar 2007 19:44:46 +0000"},
}
s3.Sign(emptyAuth, method, path, params, headers)
c.Assert(headers["Authorization"], IsNil)
}
func (s *S) TestSignExampleDelete(c *C) {
method := "DELETE"
path := "/johnsmith/photos/puppy.jpg"
params := map[string][]string{}
headers := map[string][]string{
"Host": {"s3.amazonaws.com"},
"Date": {"Tue, 27 Mar 2007 21:20:27 +0000"},
"User-Agent": {"dotnet"},
"x-amz-date": {"Tue, 27 Mar 2007 21:20:26 +0000"},
}
s3.Sign(testAuth, method, path, params, headers)
expected := "AWS 0PN5J17HBGZHT7JJ3X82:k3nL7gH3+PadhTEVn5Ip83xlYzk="
c.Assert(headers["Authorization"], DeepEquals, []string{expected})
}
func (s *S) TestSignExampleUpload(c *C) {
method := "PUT"
path := "/static.johnsmith.net/db-backup.dat.gz"
params := map[string][]string{}
headers := map[string][]string{
"Host": {"static.johnsmith.net:8080"},
"Date": {"Tue, 27 Mar 2007 21:06:08 +0000"},
"User-Agent": {"curl/7.15.5"},
"x-amz-acl": {"public-read"},
"content-type": {"application/x-download"},
"Content-MD5": {"4gJE4saaMU4BqNR0kLY+lw=="},
"X-Amz-Meta-ReviewedBy": {"joe@johnsmith.net,jane@johnsmith.net"},
"X-Amz-Meta-FileChecksum": {"0x02661779"},
"X-Amz-Meta-ChecksumAlgorithm": {"crc32"},
"Content-Disposition": {"attachment; filename=database.dat"},
"Content-Encoding": {"gzip"},
"Content-Length": {"5913339"},
}
s3.Sign(testAuth, method, path, params, headers)
expected := "AWS 0PN5J17HBGZHT7JJ3X82:C0FlOtU8Ylb9KDTpZqYkZPX91iI="
c.Assert(headers["Authorization"], DeepEquals, []string{expected})
}
func (s *S) TestSignExampleListAllMyBuckets(c *C) {
method := "GET"
path := "/"
headers := map[string][]string{
"Host": {"s3.amazonaws.com"},
"Date": {"Wed, 28 Mar 2007 01:29:59 +0000"},
}
s3.Sign(testAuth, method, path, nil, headers)
expected := "AWS 0PN5J17HBGZHT7JJ3X82:Db+gepJSUbZKwpx1FR0DLtEYoZA="
c.Assert(headers["Authorization"], DeepEquals, []string{expected})
}
func (s *S) TestSignExampleUnicodeKeys(c *C) {
method := "GET"
path := "/dictionary/fran%C3%A7ais/pr%c3%a9f%c3%a8re"
headers := map[string][]string{
"Host": {"s3.amazonaws.com"},
"Date": {"Wed, 28 Mar 2007 01:49:49 +0000"},
}
s3.Sign(testAuth, method, path, nil, headers)
expected := "AWS 0PN5J17HBGZHT7JJ3X82:dxhSBHoI6eVSPcXJqEghlUzZMnY="
c.Assert(headers["Authorization"], DeepEquals, []string{expected})
}
// Not included in AWS documentation
func (s *S) TestSignWithIAMToken(c *C) {
method := "GET"
path := "/"
headers := map[string][]string{
"Host": {"s3.amazonaws.com"},
"Date": {"Wed, 28 Mar 2007 01:29:59 +0000"},
}
authWithToken := testAuth
authWithToken.Token = "totallysecret"
s3.Sign(authWithToken, method, path, nil, headers)
expected := "AWS 0PN5J17HBGZHT7JJ3X82:SJ0yQO7NpHyXJ7zkxY+/fGQ6aUw="
c.Assert(headers["Authorization"], DeepEquals, []string{expected})
c.Assert(headers["x-amz-security-token"], DeepEquals, []string{authWithToken.Token})
}


@ -0,0 +1,14 @@
Copyright (c) 2013 Vaughan Newton
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.


@ -0,0 +1,70 @@
go-ini
======
INI parsing library for Go (golang).
View the API documentation [here](http://godoc.org/github.com/vaughan0/go-ini).
Usage
-----
Parse an INI file:
```go
import "github.com/vaughan0/go-ini"
file, err := ini.LoadFile("myfile.ini")
```
Get data from the parsed file:
```go
name, ok := file.Get("person", "name")
if !ok {
panic("'name' variable missing from 'person' section")
}
```
Iterate through values in a section:
```go
for key, value := range file["mysection"] {
fmt.Printf("%s => %s\n", key, value)
}
```
Iterate through sections in a file:
```go
for name, section := range file {
fmt.Printf("Section name: %s\n", name)
}
```
File Format
-----------
INI files are parsed by go-ini line-by-line. Each line may be one of the following:
* A section definition: [section-name]
* A property: key = value
* A comment: #blahblah _or_ ;blahblah
* Blank. The line will be ignored.
Properties defined before any section headers are placed in the default section, which has
the empty string as its key.
Example:
```ini
# I am a comment
; So am I!
[apples]
colour = red or green
shape = applish
[oranges]
shape = square
colour = blue
```

Godeps/_workspace/src/github.com/vaughan0/go-ini/ini.go generated vendored Normal file

@ -0,0 +1,123 @@
// Package ini provides functions for parsing INI configuration files.
package ini
import (
"bufio"
"fmt"
"io"
"os"
"regexp"
"strings"
)
var (
sectionRegex = regexp.MustCompile(`^\[(.*)\]$`)
assignRegex = regexp.MustCompile(`^([^=]+)=(.*)$`)
)
// ErrSyntax is returned when there is a syntax error in an INI file.
type ErrSyntax struct {
Line int
Source string // The contents of the erroneous line, without leading or trailing whitespace
}
func (e ErrSyntax) Error() string {
return fmt.Sprintf("invalid INI syntax on line %d: %s", e.Line, e.Source)
}
// A File represents a parsed INI file.
type File map[string]Section
// A Section represents a single section of an INI file.
type Section map[string]string
// Section returns the named Section, creating it if one does not already exist for the given name.
func (f File) Section(name string) Section {
section := f[name]
if section == nil {
section = make(Section)
f[name] = section
}
return section
}
// Get looks up a value for a key in a section and returns that value, along with a boolean result similar to a map lookup.
func (f File) Get(section, key string) (value string, ok bool) {
if s := f[section]; s != nil {
value, ok = s[key]
}
return
}
// Load reads INI data from a reader and stores the data in the File.
func (f File) Load(in io.Reader) (err error) {
bufin, ok := in.(*bufio.Reader)
if !ok {
bufin = bufio.NewReader(in)
}
return parseFile(bufin, f)
}
// LoadFile reads INI data from the named file and stores the data in the File.
func (f File) LoadFile(file string) (err error) {
in, err := os.Open(file)
if err != nil {
return
}
defer in.Close()
return f.Load(in)
}
func parseFile(in *bufio.Reader, file File) (err error) {
section := ""
lineNum := 0
for done := false; !done; {
var line string
if line, err = in.ReadString('\n'); err != nil {
if err == io.EOF {
done = true
} else {
return
}
}
lineNum++
line = strings.TrimSpace(line)
if len(line) == 0 {
// Skip blank lines
continue
}
if line[0] == ';' || line[0] == '#' {
// Skip comments
continue
}
if groups := assignRegex.FindStringSubmatch(line); groups != nil {
key, val := groups[1], groups[2]
key, val = strings.TrimSpace(key), strings.TrimSpace(val)
file.Section(section)[key] = val
} else if groups := sectionRegex.FindStringSubmatch(line); groups != nil {
name := strings.TrimSpace(groups[1])
section = name
// Create the section if it does not exist
file.Section(section)
} else {
return ErrSyntax{lineNum, line}
}
}
return nil
}
// Load parses INI data from a reader and returns it as a File.
func Load(in io.Reader) (File, error) {
file := make(File)
err := file.Load(in)
return file, err
}
// LoadFile parses the named INI file on disk and returns it as a File.
func LoadFile(filename string) (File, error) {
file := make(File)
err := file.LoadFile(filename)
return file, err
}
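A short usage sketch tying Load, Get and Section together (input inlined so it runs standalone):

```go
package main

import (
	"fmt"
	"strings"

	"github.com/vaughan0/go-ini"
)

func main() {
	file, err := ini.Load(strings.NewReader("[apples]\ncolour = red\n"))
	if err != nil {
		panic(err)
	}
	if colour, ok := file.Get("apples", "colour"); ok {
		fmt.Println(colour) // red
	}
	// Section creates the section on demand, so writes are safe too.
	file.Section("oranges")["shape"] = "round"
}
```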


@ -0,0 +1,43 @@
package ini
import (
"reflect"
"syscall"
"testing"
)
func TestLoadFile(t *testing.T) {
originalOpenFiles := numFilesOpen(t)
file, err := LoadFile("test.ini")
if err != nil {
t.Fatal(err)
}
if originalOpenFiles != numFilesOpen(t) {
t.Error("test.ini not closed")
}
if !reflect.DeepEqual(file, File{"default": {"stuff": "things"}}) {
t.Error("file not read correctly")
}
}
func numFilesOpen(t *testing.T) (num uint64) {
var rlimit syscall.Rlimit
err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &rlimit)
if err != nil {
t.Fatal(err)
}
maxFds := int(rlimit.Cur)
var stat syscall.Stat_t
for i := 0; i < maxFds; i++ {
if syscall.Fstat(i, &stat) == nil {
num++
} else {
return
}
}
return
}


@ -0,0 +1,89 @@
package ini
import (
"reflect"
"strings"
"testing"
)
func TestLoad(t *testing.T) {
src := `
# Comments are ignored
herp = derp
[foo]
hello=world
whitespace should = not matter
; sneaky semicolon-style comment
multiple = equals = signs
[bar]
this = that`
file, err := Load(strings.NewReader(src))
if err != nil {
t.Fatal(err)
}
check := func(section, key, expect string) {
if value, _ := file.Get(section, key); value != expect {
t.Errorf("Get(%q, %q): expected %q, got %q", section, key, expect, value)
}
}
check("", "herp", "derp")
check("foo", "hello", "world")
check("foo", "whitespace should", "not matter")
check("foo", "multiple", "equals = signs")
check("bar", "this", "that")
}
func TestSyntaxError(t *testing.T) {
src := `
# Line 2
[foo]
bar = baz
# Here's an error on line 6:
wut?
herp = derp`
_, err := Load(strings.NewReader(src))
t.Logf("%T: %v", err, err)
if err == nil {
t.Fatal("expected an error, got nil")
}
syntaxErr, ok := err.(ErrSyntax)
if !ok {
t.Fatal("expected an error of type ErrSyntax")
}
if syntaxErr.Line != 6 {
t.Fatal("incorrect line number")
}
if syntaxErr.Source != "wut?" {
t.Fatal("incorrect source")
}
}
func TestDefinedSectionBehaviour(t *testing.T) {
check := func(src string, expect File) {
file, err := Load(strings.NewReader(src))
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(file, expect) {
t.Errorf("expected %v, got %v", expect, file)
}
}
// No sections for an empty file
check("", File{})
// Default section only if there are actually values for it
check("foo=bar", File{"": {"foo": "bar"}})
// User-defined sections should always be present, even if empty
check("[a]\n[b]\nfoo=bar", File{
"a": {},
"b": {"foo": "bar"},
})
check("foo=bar\n[a]\nthis=that", File{
"": {"foo": "bar"},
"a": {"this": "that"},
})
}


@ -0,0 +1,2 @@
[default]
stuff = things