
Fix s3 backend, add more tests

Alexander Neumann 2016-01-02 14:38:45 +01:00
parent 0237b0d972
commit 5722ccfcda
3 changed files with 118 additions and 40 deletions


@@ -5,9 +5,12 @@ import (
"fmt"
"io"
"io/ioutil"
"math/rand"
"sort"
"testing"
crand "crypto/rand"
"github.com/restic/restic/backend"
. "github.com/restic/restic/test"
)
@@ -37,6 +40,70 @@ func testBackendConfig(b backend.Backend, t *testing.T) {
}
}
func testGetReader(b backend.Backend, t testing.TB) {
length := rand.Intn(1<<23) + 2000
data := make([]byte, length)
_, err := io.ReadFull(crand.Reader, data)
OK(t, err)
blob, err := b.Create()
OK(t, err)
id := backend.Hash(data)
_, err = blob.Write([]byte(data))
OK(t, err)
OK(t, blob.Finalize(backend.Data, id.String()))
for i := 0; i < 500; i++ {
l := rand.Intn(length + 2000)
o := rand.Intn(length + 2000)
d := data
if o < len(d) {
d = d[o:]
} else {
o = len(d)
d = d[:0]
}
if l > 0 && l < len(d) {
d = d[:l]
}
rd, err := b.GetReader(backend.Data, id.String(), uint(o), uint(l))
OK(t, err)
buf, err := ioutil.ReadAll(rd)
OK(t, err)
if !bytes.Equal(buf, d) {
t.Fatalf("data not equal")
}
}
OK(t, b.Remove(backend.Data, id.String()))
}
func store(t testing.TB, b backend.Backend, tpe backend.Type, data []byte) {
id := backend.Hash(data)
blob, err := b.Create()
OK(t, err)
_, err = blob.Write([]byte(data))
OK(t, err)
OK(t, blob.Finalize(tpe, id.String()))
}
func read(t testing.TB, rd io.Reader, expectedData []byte) {
buf, err := ioutil.ReadAll(rd)
OK(t, err)
if expectedData != nil {
Equals(t, expectedData, buf)
}
}
func testBackend(b backend.Backend, t *testing.T) {
testBackendConfig(b, t)
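
The new testGetReader helper above stores a random blob and then reads 500 random (offset, length) slices back through GetReader. The expected bytes it compares against come down to the following slicing rule, shown here as a standalone sketch (expectedSlice is an illustrative name, not part of the commit):

// expectedSlice mirrors how testGetReader derives the bytes that
// GetReader(tpe, id, uint(o), uint(l)) should return for a stored blob.
func expectedSlice(data []byte, o, l int) []byte {
	d := data
	if o < len(d) {
		d = d[o:] // skip o bytes from the start
	} else {
		d = d[:0] // offset at or past the end yields no data
	}
	if l > 0 && l < len(d) {
		d = d[:l] // a non-zero length caps the result; l == 0 means "to the end"
	}
	return d
}
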
@@ -70,41 +137,34 @@ func testBackend(b backend.Backend, t *testing.T) {
// add files
for _, test := range TestStrings {
// store string in backend
blob, err := b.Create()
OK(t, err)
store(t, b, tpe, []byte(test.data))
_, err = blob.Write([]byte(test.data))
OK(t, err)
OK(t, blob.Finalize(tpe, test.id))
// try to get it out again
// test Get()
rd, err := b.Get(tpe, test.id)
OK(t, err)
Assert(t, rd != nil, "Get() returned nil")
// try to read it out again
reader, err := b.GetReader(tpe, test.id, 0, uint(len(test.data)))
read(t, rd, []byte(test.data))
OK(t, rd.Close())
// test GetReader()
rd, err = b.GetReader(tpe, test.id, 0, uint(len(test.data)))
OK(t, err)
Assert(t, reader != nil, "GetReader() returned nil")
bytes := make([]byte, len(test.data))
reader.Read(bytes)
Assert(t, test.data == string(bytes), "Read() returned different content")
Assert(t, rd != nil, "GetReader() returned nil")
read(t, rd, []byte(test.data))
OK(t, rd.Close())
// try to read it out with an offset and a length
readerOffLen, err := b.GetReader(tpe, test.id, 1, uint(len(test.data)-2))
start := 1
end := len(test.data) - 2
length := end - start
rd, err = b.GetReader(tpe, test.id, uint(start), uint(length))
OK(t, err)
Assert(t, readerOffLen != nil, "GetReader() returned nil")
bytesOffLen := make([]byte, len(test.data)-2)
readerOffLen.Read(bytesOffLen)
Assert(t, test.data[1:len(test.data)-1] == string(bytesOffLen), "Read() with offset and length returned different content")
Assert(t, rd != nil, "GetReader() returned nil")
buf, err := ioutil.ReadAll(rd)
OK(t, err)
Equals(t, test.data, string(buf))
// compare content
Equals(t, test.data, string(buf))
read(t, rd, []byte(test.data[start:end]))
OK(t, rd.Close())
}
// test adding the first file again
@@ -161,7 +221,6 @@ func testBackend(b backend.Backend, t *testing.T) {
found, err := b.Test(tpe, id.String())
OK(t, err)
Assert(t, found, fmt.Sprintf("id %q was not found before removal", id))
OK(t, b.Remove(tpe, id.String()))
@@ -170,6 +229,7 @@ func testBackend(b backend.Backend, t *testing.T) {
Assert(t, !found, fmt.Sprintf("id %q not found after removal", id))
}
}
}
testGetReader(b, t)
}


@@ -3,6 +3,7 @@ package s3
import (
"bytes"
"errors"
"fmt"
"io"
"strings"
@@ -116,14 +117,22 @@ func (bb *s3Blob) Finalize(t backend.Type, name string) error {
return errors.New("key already exists")
}
expectedBytes := bb.buf.Len()
<-bb.b.connChan
_, err = bb.b.client.PutObject(bb.b.bucketname, path, bb.buf, int64(bb.buf.Len()), "binary/octet-stream")
n, err := bb.b.client.PutObject(bb.b.bucketname, path, bb.buf, int64(bb.buf.Len()), "binary/octet-stream")
bb.b.connChan <- struct{}{}
bb.buf.Reset()
debug.Log("s3.Finalize", "finalized %v -> err %v", path, err)
debug.Log("s3.Finalize", "finalized %v -> n %v, err %v", path, n, err)
if err != nil {
return err
}
return err
if n != int64(expectedBytes) {
return errors.New("could not store all bytes")
}
return nil
}
// Create creates a new Blob. The data is available only after Finalize()
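
Finalize now compares PutObject's returned byte count against the buffered length, so a short upload surfaces as an error instead of leaving a truncated object in the bucket. The call sequence it sits behind is the usual Create/Write/Finalize pattern, the same one the new store test helper uses; a minimal sketch (storeBlob is an illustrative name, not part of the commit):

// storeBlob mirrors the store helper added to the backend tests:
// create a Blob, write the data, then Finalize under the content hash.
// With this commit, Finalize also fails when PutObject reports a short write.
func storeBlob(be backend.Backend, data []byte) error {
	blob, err := be.Create()
	if err != nil {
		return err
	}
	if _, err := blob.Write(data); err != nil {
		return err
	}
	id := backend.Hash(data)
	return blob.Finalize(backend.Data, id.String())
}
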
@@ -160,24 +169,26 @@ func (be *S3Backend) GetReader(t backend.Type, name string, offset, length uint)
l, o := int64(length), int64(offset)
if l == 0 {
l = stat.Size - o
l = stat.Size
}
if l > stat.Size-o {
if o > stat.Size {
return nil, fmt.Errorf("offset beyond end of file (%v > %v)", o, stat.Size)
}
if o+l > stat.Size {
l = stat.Size - o
}
debug.Log("s3.GetReader", "%v %v, o %v l %v", t, name, o, l)
buf := make([]byte, l)
n, err := rd.ReadAt(buf, o)
debug.Log("s3.GetReader", " -> n %v err %v", n, err)
if err == io.EOF && int64(n) == l {
debug.Log("s3.GetReader", " ignoring EOF error")
err = nil
var r io.Reader
r = &ContinuousReader{R: rd, Offset: o}
if length > 0 {
r = io.LimitReader(r, int64(length))
}
return backend.ReadCloser(bytes.NewReader(buf[:n])), err
return backend.ReadCloser(r), nil
}
// Test returns true if a blob of the given type and name exists in the backend.
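
GetReader no longer reads the whole range into a buffer up front; it validates the offset against the object size and returns a ContinuousReader, wrapped in an io.LimitReader when a length is given. The ContinuousReader type itself is not part of this diff; one plausible shape, assuming it adapts the io.ReaderAt returned by the client into a sequential reader starting at Offset, would be:

// Sketch only: the actual ContinuousReader lives elsewhere in the package
// and may differ. The idea is to turn an io.ReaderAt into an io.Reader
// that starts reading at Offset and advances on every Read.
type ContinuousReader struct {
	R      io.ReaderAt
	Offset int64
}

func (c *ContinuousReader) Read(p []byte) (int, error) {
	n, err := c.R.ReadAt(p, c.Offset)
	c.Offset += int64(n)
	return n, err
}
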

backend/s3/s3_test.go Normal file

@@ -0,0 +1,7 @@
package s3
import "testing"
func TestGetReader(t *testing.T) {
}
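
The new backend/s3/s3_test.go only adds an empty TestGetReader stub in this commit. A hypothetical filled-in version, assuming a newTestBackend helper (not part of this diff) that returns an *S3Backend configured against a test bucket, might look like:

package s3

import (
	"io/ioutil"
	"testing"

	"github.com/restic/restic/backend"
)

// Hypothetical expansion of the stub above; newTestBackend is assumed,
// not defined anywhere in this commit.
func TestGetReader(t *testing.T) {
	be := newTestBackend(t)

	data := []byte("foobar")
	id := backend.Hash(data)

	blob, err := be.Create()
	if err != nil {
		t.Fatal(err)
	}
	if _, err := blob.Write(data); err != nil {
		t.Fatal(err)
	}
	if err := blob.Finalize(backend.Data, id.String()); err != nil {
		t.Fatal(err)
	}

	// offset 1, length 3 of "foobar" should yield "oob"
	rd, err := be.GetReader(backend.Data, id.String(), 1, 3)
	if err != nil {
		t.Fatal(err)
	}
	buf, err := ioutil.ReadAll(rd)
	if err != nil {
		t.Fatal(err)
	}
	if string(buf) != "oob" {
		t.Fatalf("GetReader returned %q, want %q", buf, "oob")
	}
}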