
Merge pull request #1025 from restic/fix-1013

s3: Switch back to high-level API for upload
Alexander Neumann 2017-06-12 19:58:12 +02:00
commit bd7d5a429f
14 changed files with 56 additions and 181 deletions
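The switch replaces the low-level minio Core upload, which needs the payload size up front (hence the getRemainingSize helper and its tests, which account for most of the 181 deleted lines), with the high-level client call that takes just a reader and a content type. A rough sketch of the two call shapes, using the signatures that appear in the diff below (the minio-go release vendored at the time; current releases differ). The bucket and object names, the *os.File argument and the error handling are illustrative, not restic's actual code:

package sketch

import (
	"log"
	"os"

	minio "github.com/minio/minio-go"
)

// upload sketches the change: the old low-level call is kept as a comment,
// the new high-level call is the one this PR switches to.
func upload(client *minio.Client, rd *os.File) error {
	// Before (removed below): the Core API wants an explicit size, so the
	// backend first had to work out how many bytes were left in the reader.
	//   core := minio.Core{Client: client}
	//   info, err := core.PutObject("bucket", "object", size, rd, nil, nil, nil)

	// After (added below): the high-level API takes the reader directly and
	// handles the sizing and multipart details itself.
	n, err := client.PutObject("bucket", "object", rd, "application/octet-stream")
	if err != nil {
		return err
	}
	log.Printf("uploaded %d bytes", n)
	return nil
}

The connections default dropping from 20 to 5 in the config files below is the companion change; the option:"connections" struct tag suggests it remains tunable per run via restic's extended options (-o).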

@@ -9,6 +9,7 @@ import (
"restic"
"restic/debug"
"restic/options"
"runtime"
"github.com/spf13/cobra"
@@ -57,6 +58,8 @@ func init() {
func main() {
debug.Log("main %#v", os.Args)
debug.Log("restic %s, compiled with %v on %v/%v",
version, runtime.Version(), runtime.GOOS, runtime.GOARCH)
err := cmdRoot.Execute()
switch {

@@ -161,6 +161,7 @@ func (arch *Archiver) saveChunk(ctx context.Context, chunk chunker.Chunk, p *res
err := arch.Save(ctx, restic.DataBlob, chunk.Data, id)
// TODO handle error
if err != nil {
debug.Log("Save(%v) failed: %v", id.Str(), err)
panic(err)
}

@@ -123,7 +123,7 @@ var parseTests = []struct {
Endpoint: "eu-central-1",
Bucket: "bucketname",
Prefix: "restic",
Connections: 20,
Connections: 5,
},
},
},
@@ -134,7 +134,7 @@ var parseTests = []struct {
Endpoint: "hostname.foo",
Bucket: "bucketname",
Prefix: "restic",
Connections: 20,
Connections: 5,
},
},
},
@@ -145,7 +145,7 @@ var parseTests = []struct {
Endpoint: "hostname.foo",
Bucket: "bucketname",
Prefix: "prefix/directory",
Connections: 20,
Connections: 5,
},
},
},
@@ -156,7 +156,7 @@ var parseTests = []struct {
Endpoint: "eu-central-1",
Bucket: "repo",
Prefix: "restic",
Connections: 20,
Connections: 5,
},
},
},
@@ -167,7 +167,7 @@ var parseTests = []struct {
Endpoint: "eu-central-1",
Bucket: "repo",
Prefix: "prefix/directory",
Connections: 20,
Connections: 5,
},
},
},
@@ -178,7 +178,7 @@ var parseTests = []struct {
Endpoint: "hostname.foo",
Bucket: "repo",
Prefix: "restic",
Connections: 20,
Connections: 5,
},
},
},
@@ -189,7 +189,7 @@ var parseTests = []struct {
Endpoint: "hostname.foo",
Bucket: "repo",
Prefix: "prefix/directory",
Connections: 20,
Connections: 5,
},
},
},
@@ -201,7 +201,7 @@ var parseTests = []struct {
Bucket: "repo",
Prefix: "restic",
UseHTTP: true,
Connections: 20,
Connections: 5,
},
},
},
@@ -211,7 +211,7 @@ var parseTests = []struct {
Config: swift.Config{
Container: "container17",
Prefix: "",
Connections: 20,
Connections: 5,
},
},
},
@@ -221,7 +221,7 @@ var parseTests = []struct {
Config: swift.Config{
Container: "container17",
Prefix: "prefix97",
Connections: 20,
Connections: 5,
},
},
},
@@ -230,7 +230,7 @@ var parseTests = []struct {
Location{Scheme: "rest",
Config: rest.Config{
URL: parseURL("http://hostname.foo:1234/"),
Connections: 20,
Connections: 5,
},
},
},

@@ -11,7 +11,7 @@ import (
// Config contains all configuration necessary to connect to a REST server.
type Config struct {
URL *url.URL
Connections uint `option:"connections" help:"set a limit for the number of concurrent connections (default: 20)"`
Connections uint `option:"connections" help:"set a limit for the number of concurrent connections (default: 5)"`
}
func init() {
@@ -21,7 +21,7 @@ func init() {
// NewConfig returns a new Config with the default values filled in.
func NewConfig() Config {
return Config{
Connections: 20,
Connections: 5,
}
}

@@ -21,7 +21,7 @@ var configTests = []struct {
}{
{"rest:http://localhost:1234", Config{
URL: parseURL("http://localhost:1234"),
Connections: 20,
Connections: 5,
}},
}

@@ -19,13 +19,13 @@ type Config struct {
Prefix string
Layout string `option:"layout" help:"use this backend layout (default: auto-detect)"`
Connections uint `option:"connections" help:"set a limit for the number of concurrent connections (default: 20)"`
Connections uint `option:"connections" help:"set a limit for the number of concurrent connections (default: 5)"`
}
// NewConfig returns a new Config with the default values filled in.
func NewConfig() Config {
return Config{
Connections: 20,
Connections: 5,
}
}

@@ -10,89 +10,89 @@ var configTests = []struct {
Endpoint: "eu-central-1",
Bucket: "bucketname",
Prefix: "restic",
Connections: 20,
Connections: 5,
}},
{"s3://eu-central-1/bucketname/", Config{
Endpoint: "eu-central-1",
Bucket: "bucketname",
Prefix: "restic",
Connections: 20,
Connections: 5,
}},
{"s3://eu-central-1/bucketname/prefix/directory", Config{
Endpoint: "eu-central-1",
Bucket: "bucketname",
Prefix: "prefix/directory",
Connections: 20,
Connections: 5,
}},
{"s3://eu-central-1/bucketname/prefix/directory/", Config{
Endpoint: "eu-central-1",
Bucket: "bucketname",
Prefix: "prefix/directory",
Connections: 20,
Connections: 5,
}},
{"s3:eu-central-1/foobar", Config{
Endpoint: "eu-central-1",
Bucket: "foobar",
Prefix: "restic",
Connections: 20,
Connections: 5,
}},
{"s3:eu-central-1/foobar/", Config{
Endpoint: "eu-central-1",
Bucket: "foobar",
Prefix: "restic",
Connections: 20,
Connections: 5,
}},
{"s3:eu-central-1/foobar/prefix/directory", Config{
Endpoint: "eu-central-1",
Bucket: "foobar",
Prefix: "prefix/directory",
Connections: 20,
Connections: 5,
}},
{"s3:eu-central-1/foobar/prefix/directory/", Config{
Endpoint: "eu-central-1",
Bucket: "foobar",
Prefix: "prefix/directory",
Connections: 20,
Connections: 5,
}},
{"s3:https://hostname:9999/foobar", Config{
Endpoint: "hostname:9999",
Bucket: "foobar",
Prefix: "restic",
Connections: 20,
Connections: 5,
}},
{"s3:https://hostname:9999/foobar/", Config{
Endpoint: "hostname:9999",
Bucket: "foobar",
Prefix: "restic",
Connections: 20,
Connections: 5,
}},
{"s3:http://hostname:9999/foobar", Config{
Endpoint: "hostname:9999",
Bucket: "foobar",
Prefix: "restic",
UseHTTP: true,
Connections: 20,
Connections: 5,
}},
{"s3:http://hostname:9999/foobar/", Config{
Endpoint: "hostname:9999",
Bucket: "foobar",
Prefix: "restic",
UseHTTP: true,
Connections: 20,
Connections: 5,
}},
{"s3:http://hostname:9999/bucket/prefix/directory", Config{
Endpoint: "hostname:9999",
Bucket: "bucket",
Prefix: "prefix/directory",
UseHTTP: true,
Connections: 20,
Connections: 5,
}},
{"s3:http://hostname:9999/bucket/prefix/directory/", Config{
Endpoint: "hostname:9999",
Bucket: "bucket",
Prefix: "prefix/directory",
UseHTTP: true,
Connections: 20,
Connections: 5,
}},
}

@@ -157,63 +157,15 @@ func (be *Backend) Path() string {
return be.prefix
}
// getRemainingSize returns number of bytes remaining. If it is not possible to
// determine the size, panic() is called.
func getRemainingSize(rd io.Reader) (size int64, err error) {
type Sizer interface {
Size() int64
}
type Lenner interface {
Len() int
}
if r, ok := rd.(Lenner); ok {
size = int64(r.Len())
} else if r, ok := rd.(Sizer); ok {
size = r.Size()
} else if f, ok := rd.(*os.File); ok {
fi, err := f.Stat()
if err != nil {
return 0, err
}
pos, err := f.Seek(0, io.SeekCurrent)
if err != nil {
return 0, err
}
size = fi.Size() - pos
} else {
panic(fmt.Sprintf("Save() got passed a reader without a method to determine the data size, type is %T", rd))
}
return size, nil
}
// preventCloser wraps an io.Reader to run a function instead of the original Close() function.
type preventCloser struct {
io.Reader
f func()
}
func (wr preventCloser) Close() error {
wr.f()
return nil
}
// Save stores data in the backend at the handle.
func (be *Backend) Save(ctx context.Context, h restic.Handle, rd io.Reader) (err error) {
debug.Log("Save %v", h)
if err := h.Valid(); err != nil {
return err
}
objName := be.Filename(h)
size, err := getRemainingSize(rd)
if err != nil {
return err
}
debug.Log("Save %v at %v", h, objName)
// Check key does not already exist
_, err = be.client.StatObject(be.bucketname, objName)
@@ -223,22 +175,11 @@ func (be *Backend) Save(ctx context.Context, h restic.Handle, rd io.Reader) (err
}
be.sem.GetToken()
// wrap the reader so that net/http client cannot close the reader, return
// the token instead.
rd = preventCloser{
Reader: rd,
f: func() {
debug.Log("Close()")
},
}
debug.Log("PutObject(%v, %v)", be.bucketname, objName)
coreClient := minio.Core{Client: be.client}
info, err := coreClient.PutObject(be.bucketname, objName, size, rd, nil, nil, nil)
n, err := be.client.PutObject(be.bucketname, objName, rd, "application/octet-stream")
be.sem.ReleaseToken()
debug.Log("%v -> %v bytes, err %#v", objName, info.Size, err)
debug.Log("%v -> %v bytes, err %#v: %v", objName, n, err, err)
return errors.Wrap(err, "client.PutObject")
}
@@ -274,14 +215,14 @@ func (be *Backend) Load(ctx context.Context, h restic.Handle, length int, offset
objName := be.Filename(h)
be.sem.GetToken()
byteRange := fmt.Sprintf("bytes=%d-", offset)
if length > 0 {
byteRange = fmt.Sprintf("bytes=%d-%d", offset, offset+int64(length)-1)
}
headers := minio.NewGetReqHeaders()
headers.Add("Range", byteRange)
be.sem.GetToken()
debug.Log("Load(%v) send range %v", h, byteRange)
coreClient := minio.Core{Client: be.client}
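The GetToken/ReleaseToken calls in the Save and Load hunks above are what the lowered Connections default (now 5) feeds into: each operation acquires a token before talking to S3 and releases it afterwards, so at most Connections requests are in flight. A minimal sketch of that pattern, assuming a simple channel-based semaphore (restic's actual semaphore type is not shown in this diff):

package sketch

import "io"

// semaphore is an assumed channel-based stand-in for restic's backend semaphore.
type semaphore chan struct{}

func newSemaphore(n uint) semaphore { return make(semaphore, n) }

func (s semaphore) GetToken()     { s <- struct{}{} }
func (s semaphore) ReleaseToken() { <-s }

// save bounds concurrent uploads: with newSemaphore(5) at most five put
// calls run at the same time, mirroring the new Connections default.
func save(sem semaphore, put func(io.Reader) error, rd io.Reader) error {
	sem.GetToken()
	defer sem.ReleaseToken()
	return put(rd)
}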

@@ -1,71 +0,0 @@
package s3
import (
"bytes"
"io"
"io/ioutil"
"os"
"restic/test"
"testing"
)
func writeFile(t testing.TB, data []byte, offset int64) *os.File {
tempfile, err := ioutil.TempFile("", "restic-test-")
if err != nil {
t.Fatal(err)
}
if _, err = tempfile.Write(data); err != nil {
t.Fatal(err)
}
if _, err = tempfile.Seek(offset, io.SeekStart); err != nil {
t.Fatal(err)
}
return tempfile
}
func TestGetRemainingSize(t *testing.T) {
length := 18 * 1123
partialRead := 1005
data := test.Random(23, length)
partReader := bytes.NewReader(data)
buf := make([]byte, partialRead)
_, _ = io.ReadFull(partReader, buf)
partFileReader := writeFile(t, data, int64(partialRead))
defer func() {
if err := partFileReader.Close(); err != nil {
t.Fatal(err)
}
if err := os.Remove(partFileReader.Name()); err != nil {
t.Fatal(err)
}
}()
var tests = []struct {
io.Reader
size int64
}{
{bytes.NewReader([]byte("foobar test")), 11},
{partReader, int64(length - partialRead)},
{partFileReader, int64(length - partialRead)},
}
for _, test := range tests {
t.Run("", func(t *testing.T) {
size, err := getRemainingSize(test.Reader)
if err != nil {
t.Fatal(err)
}
if size != test.size {
t.Fatalf("invalid size returned, want %v, got %v", test.size, size)
}
})
}
}

@@ -53,13 +53,12 @@ func runMinio(ctx context.Context, t testing.TB, dir, key, secret string) func()
time.Sleep(200 * time.Millisecond)
c, err := net.Dial("tcp", "localhost:9000")
if err != nil {
continue
}
success = true
if err := c.Close(); err != nil {
t.Fatal(err)
if err == nil {
success = true
if err := c.Close(); err != nil {
t.Fatal(err)
}
break
}
}
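The restructured loop above is the usual wait-until-the-test-server-listens idiom: sleep briefly, try to dial, and only on a successful connection close it and stop retrying. Written as a standalone helper for illustration (the address, retry count and delay are parameters here; the hunk hard-codes localhost:9000 and 200ms):

package sketch

import (
	"net"
	"time"
)

// waitForTCP retries a TCP dial until it succeeds or the attempts run out,
// mirroring the loop runMinio uses to wait for the test server to come up.
func waitForTCP(addr string, attempts int, delay time.Duration) bool {
	for i := 0; i < attempts; i++ {
		time.Sleep(delay)
		c, err := net.Dial("tcp", addr)
		if err != nil {
			continue
		}
		_ = c.Close()
		return true
	}
	return false
}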

@@ -26,7 +26,7 @@ type Config struct {
Prefix string
DefaultContainerPolicy string
Connections uint `option:"connections" help:"set a limit for the number of concurrent connections (default: 20)"`
Connections uint `option:"connections" help:"set a limit for the number of concurrent connections (default: 5)"`
}
func init() {
@@ -36,7 +36,7 @@ func init() {
// NewConfig returns a new config with the default values filled in.
func NewConfig() Config {
return Config{
Connections: 20,
Connections: 5,
}
}

@@ -11,21 +11,21 @@ var configTests = []struct {
Config{
Container: "cnt1",
Prefix: "",
Connections: 20,
Connections: 5,
},
},
{
"swift:cnt2:/prefix",
Config{Container: "cnt2",
Prefix: "prefix",
Connections: 20,
Connections: 5,
},
},
{
"swift:cnt3:/prefix/longer",
Config{Container: "cnt3",
Prefix: "prefix/longer",
Connections: 20,
Connections: 5,
},
},
}

@@ -52,7 +52,9 @@ func (rd *eofDetectReader) Close() error {
func (tr eofDetectRoundTripper) RoundTrip(req *http.Request) (res *http.Response, err error) {
res, err = tr.RoundTripper.RoundTrip(req)
res.Body = &eofDetectReader{rd: res.Body}
if res != nil && res.Body != nil {
res.Body = &eofDetectReader{rd: res.Body}
}
return res, err
}
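The guard added here matters because an http.RoundTripper may return a nil *http.Response together with an error; wrapping res.Body unconditionally can then panic. A small self-contained version of the same pattern (the NopCloser stands in for the restic-specific eofDetectReader wrapping):

package sketch

import (
	"io"
	"net/http"
)

// wrapBodyTransport wraps the response body of every request, but only when a
// response (and body) actually exists, as in the hunk above.
type wrapBodyTransport struct {
	rt http.RoundTripper
}

func (t wrapBodyTransport) RoundTrip(req *http.Request) (*http.Response, error) {
	res, err := t.rt.RoundTrip(req)
	// On transport errors res may be nil; touching res.Body would panic then.
	if res != nil && res.Body != nil {
		res.Body = io.NopCloser(res.Body) // stand-in for &eofDetectReader{rd: res.Body}
	}
	return res, err
}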

@@ -105,7 +105,7 @@ func (r *packerManager) insertPacker(p *Packer) {
// savePacker stores p in the backend.
func (r *Repository) savePacker(p *Packer) error {
debug.Log("save packer with %d blobs\n", p.Packer.Count())
debug.Log("save packer with %d blobs (%d bytes)\n", p.Packer.Count(), p.Packer.Size())
_, err := p.Packer.Finalize()
if err != nil {
return err