diff --git a/Dockerfile b/Dockerfile index 03d8c9628..5e6f91638 100644 --- a/Dockerfile +++ b/Dockerfile @@ -41,12 +41,15 @@ ENV PATH $PATH:$GOPATH/bin RUN mkdir -p $GOPATH/src/github.com/restic/restic -# install tools +# pre-install tools, this speeds up running the tests itself +RUN go get github.com/tools/godep RUN go get golang.org/x/tools/cmd/cover RUN go get github.com/mattn/goveralls RUN go get github.com/mitchellh/gox RUN go get github.com/pierrre/gotestcover -RUN GO15VENDOREXPERIMENT=1 go get github.com/minio/minio +RUN mkdir $HOME/bin \ + && wget -q -O $HOME/bin/minio https://dl.minio.io/server/minio/release/linux-${GOARCH}/minio \ + && chmod +x $HOME/bin/minio # set TRAVIS_BUILD_DIR for integration script ENV TRAVIS_BUILD_DIR $GOPATH/src/github.com/restic/restic diff --git a/backend/backend_test.go b/backend/backend_test.go deleted file mode 100644 index 66b7b3ea3..000000000 --- a/backend/backend_test.go +++ /dev/null @@ -1,283 +0,0 @@ -package backend_test - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - "math/rand" - "sort" - "testing" - - crand "crypto/rand" - - "github.com/restic/restic/backend" - . 
"github.com/restic/restic/test" -) - -func testBackendConfig(b backend.Backend, t *testing.T) { - // create config and read it back - _, err := b.Get(backend.Config, "") - Assert(t, err != nil, "did not get expected error for non-existing config") - - blob, err := b.Create() - OK(t, err) - - _, err = blob.Write([]byte("Config")) - OK(t, err) - OK(t, blob.Finalize(backend.Config, "")) - - // try accessing the config with different names, should all return the - // same config - for _, name := range []string{"", "foo", "bar", "0000000000000000000000000000000000000000000000000000000000000000"} { - rd, err := b.Get(backend.Config, name) - Assert(t, err == nil, "unable to read config") - - buf, err := ioutil.ReadAll(rd) - OK(t, err) - OK(t, rd.Close()) - Assert(t, string(buf) == "Config", "wrong data returned for config") - } -} - -func testGetReader(b backend.Backend, t testing.TB) { - length := rand.Intn(1<<24) + 2000 - - data := make([]byte, length) - _, err := io.ReadFull(crand.Reader, data) - OK(t, err) - - blob, err := b.Create() - OK(t, err) - - id := backend.Hash(data) - - _, err = blob.Write([]byte(data)) - OK(t, err) - OK(t, blob.Finalize(backend.Data, id.String())) - - for i := 0; i < 500; i++ { - l := rand.Intn(length + 2000) - o := rand.Intn(length + 2000) - - d := data - if o < len(d) { - d = d[o:] - } else { - o = len(d) - d = d[:0] - } - - if l > 0 && l < len(d) { - d = d[:l] - } - - rd, err := b.GetReader(backend.Data, id.String(), uint(o), uint(l)) - OK(t, err) - buf, err := ioutil.ReadAll(rd) - OK(t, err) - - if !bytes.Equal(buf, d) { - t.Fatalf("data not equal") - } - } - - OK(t, b.Remove(backend.Data, id.String())) -} - -func testWrite(b backend.Backend, t testing.TB) { - length := rand.Intn(1<<23) + 2000 - - data := make([]byte, length) - _, err := io.ReadFull(crand.Reader, data) - OK(t, err) - id := backend.Hash(data) - - for i := 0; i < 10; i++ { - blob, err := b.Create() - OK(t, err) - - o := 0 - for o < len(data) { - l := rand.Intn(len(data) - 
o) - if len(data)-o < 20 { - l = len(data) - o - } - - n, err := blob.Write(data[o : o+l]) - OK(t, err) - if n != l { - t.Fatalf("wrong number of bytes written, want %v, got %v", l, n) - } - - o += l - } - - name := fmt.Sprintf("%s-%d", id, i) - OK(t, blob.Finalize(backend.Data, name)) - - rd, err := b.Get(backend.Data, name) - OK(t, err) - - buf, err := ioutil.ReadAll(rd) - OK(t, err) - - if len(buf) != len(data) { - t.Fatalf("number of bytes does not match, want %v, got %v", len(data), len(buf)) - } - - if !bytes.Equal(buf, data) { - t.Fatalf("data not equal") - } - } -} - -func store(t testing.TB, b backend.Backend, tpe backend.Type, data []byte) { - id := backend.Hash(data) - - blob, err := b.Create() - OK(t, err) - - _, err = blob.Write([]byte(data)) - OK(t, err) - OK(t, blob.Finalize(tpe, id.String())) -} - -func read(t testing.TB, rd io.Reader, expectedData []byte) { - buf, err := ioutil.ReadAll(rd) - OK(t, err) - if expectedData != nil { - Equals(t, expectedData, buf) - } -} - -func testBackend(b backend.Backend, t *testing.T) { - testBackendConfig(b, t) - - for _, tpe := range []backend.Type{ - backend.Data, backend.Key, backend.Lock, - backend.Snapshot, backend.Index, - } { - // detect non-existing files - for _, test := range TestStrings { - id, err := backend.ParseID(test.id) - OK(t, err) - - // test if blob is already in repository - ret, err := b.Test(tpe, id.String()) - OK(t, err) - Assert(t, !ret, "blob was found to exist before creating") - - // try to open not existing blob - _, err = b.Get(tpe, id.String()) - Assert(t, err != nil, "blob data could be extracted before creation") - - // try to read not existing blob - _, err = b.GetReader(tpe, id.String(), 0, 1) - Assert(t, err != nil, "blob reader could be obtained before creation") - - // try to get string out, should fail - ret, err = b.Test(tpe, id.String()) - OK(t, err) - Assert(t, !ret, "id %q was found (but should not have)", test.id) - } - - // add files - for _, test := range TestStrings { 
- store(t, b, tpe, []byte(test.data)) - - // test Get() - rd, err := b.Get(tpe, test.id) - OK(t, err) - Assert(t, rd != nil, "Get() returned nil") - - read(t, rd, []byte(test.data)) - OK(t, rd.Close()) - - // test GetReader() - rd, err = b.GetReader(tpe, test.id, 0, uint(len(test.data))) - OK(t, err) - Assert(t, rd != nil, "GetReader() returned nil") - - read(t, rd, []byte(test.data)) - OK(t, rd.Close()) - - // try to read it out with an offset and a length - start := 1 - end := len(test.data) - 2 - length := end - start - rd, err = b.GetReader(tpe, test.id, uint(start), uint(length)) - OK(t, err) - Assert(t, rd != nil, "GetReader() returned nil") - - read(t, rd, []byte(test.data[start:end])) - OK(t, rd.Close()) - } - - // test adding the first file again - test := TestStrings[0] - - // create blob - blob, err := b.Create() - OK(t, err) - - _, err = blob.Write([]byte(test.data)) - OK(t, err) - err = blob.Finalize(tpe, test.id) - Assert(t, err != nil, "expected error, got %v", err) - - // remove and recreate - err = b.Remove(tpe, test.id) - OK(t, err) - - // test that the blob is gone - ok, err := b.Test(tpe, test.id) - OK(t, err) - Assert(t, ok == false, "removed blob still present") - - // create blob - blob, err = b.Create() - OK(t, err) - - _, err = io.Copy(blob, bytes.NewReader([]byte(test.data))) - OK(t, err) - OK(t, blob.Finalize(tpe, test.id)) - - // list items - IDs := backend.IDs{} - - for _, test := range TestStrings { - id, err := backend.ParseID(test.id) - OK(t, err) - IDs = append(IDs, id) - } - - sort.Sort(IDs) - - i := 0 - for s := range b.List(tpe, nil) { - Equals(t, IDs[i].String(), s) - i++ - } - - // remove content if requested - if TestCleanup { - for _, test := range TestStrings { - id, err := backend.ParseID(test.id) - OK(t, err) - - found, err := b.Test(tpe, id.String()) - OK(t, err) - - OK(t, b.Remove(tpe, id.String())) - - found, err = b.Test(tpe, id.String()) - OK(t, err) - Assert(t, !found, fmt.Sprintf("id %q not found after removal", 
id)) - } - } - } - - testGetReader(b, t) - testWrite(b, t) -} diff --git a/backend/doc.go b/backend/doc.go index e85cef355..f82c3d671 100644 --- a/backend/doc.go +++ b/backend/doc.go @@ -1,2 +1,5 @@ // Package backend provides local and remote storage for restic repositories. +// All backends need to implement the Backend interface. There is a +// MockBackend, which can be used for mocking in tests, and a MemBackend, which +// stores all data in a hash internally. package backend diff --git a/backend/generic.go b/backend/generic.go index 4c736a6b2..c528f8998 100644 --- a/backend/generic.go +++ b/backend/generic.go @@ -1,30 +1,14 @@ package backend -import ( - "crypto/sha256" - "errors" - "io" -) +import "errors" -const ( - MinPrefixLength = 8 -) +// ErrNoIDPrefixFound is returned by Find() when no ID for the given prefix +// could be found. +var ErrNoIDPrefixFound = errors.New("no ID found") -var ( - ErrNoIDPrefixFound = errors.New("no ID found") - ErrMultipleIDMatches = errors.New("multiple IDs with prefix found") -) - -var ( - hashData = sha256.Sum256 -) - -const hashSize = sha256.Size - -// Hash returns the ID for data. -func Hash(data []byte) ID { - return hashData(data) -} +// ErrMultipleIDMatches is returned by Find() when multiple IDs with the given +// prefix are found. +var ErrMultipleIDMatches = errors.New("multiple IDs with prefix found") // Find loads the list of all blobs of type t and searches for names which // start with prefix. If none is found, nil and ErrNoIDPrefixFound is returned. @@ -53,6 +37,8 @@ func Find(be Lister, t Type, prefix string) (string, error) { return "", ErrNoIDPrefixFound } +const minPrefixLength = 8 + // PrefixLength returns the number of bytes required so that all prefixes of // all names of type t are unique. 
func PrefixLength(be Lister, t Type) (int, error) { @@ -67,7 +53,7 @@ func PrefixLength(be Lister, t Type) (int, error) { // select prefixes of length l, test if the last one is the same as the current one outer: - for l := MinPrefixLength; l < IDSize; l++ { + for l := minPrefixLength; l < IDSize; l++ { var last string for _, name := range list { @@ -82,39 +68,3 @@ outer: return IDSize, nil } - -// wrap around io.LimitedReader that implements io.ReadCloser -type blobReader struct { - cl io.Closer - rd io.Reader - closed bool -} - -func (l *blobReader) Read(p []byte) (int, error) { - n, err := l.rd.Read(p) - if err == io.EOF { - l.Close() - } - - return n, err -} - -func (l *blobReader) Close() error { - if l == nil { - return nil - } - - if !l.closed { - err := l.cl.Close() - l.closed = true - return err - } - - return nil -} - -// LimitReadCloser returns a new reader wraps r in an io.LimitReader, but also -// implements the Close() method. -func LimitReadCloser(r io.ReadCloser, n int64) *blobReader { - return &blobReader{cl: r, rd: io.LimitReader(r, n)} -} diff --git a/backend/generic_test.go b/backend/generic_test.go index cd401516c..ca5b78982 100644 --- a/backend/generic_test.go +++ b/backend/generic_test.go @@ -7,15 +7,6 @@ import ( . 
"github.com/restic/restic/test" ) -func str2id(s string) backend.ID { - id, err := backend.ParseID(s) - if err != nil { - panic(err) - } - - return id -} - type mockBackend struct { list func(backend.Type, <-chan struct{}) <-chan string } @@ -25,14 +16,14 @@ func (m mockBackend) List(t backend.Type, done <-chan struct{}) <-chan string { } var samples = backend.IDs{ - str2id("20bdc1402a6fc9b633aaffffffffffffffffffffffffffffffffffffffffffff"), - str2id("20bdc1402a6fc9b633ccd578c4a92d0f4ef1a457fa2e16c596bc73fb409d6cc0"), - str2id("20bdc1402a6fc9b633ffffffffffffffffffffffffffffffffffffffffffffff"), - str2id("20ff988befa5fc40350f00d531a767606efefe242c837aaccb80673f286be53d"), - str2id("326cb59dfe802304f96ee9b5b9af93bdee73a30f53981e5ec579aedb6f1d0f07"), - str2id("86b60b9594d1d429c4aa98fa9562082cabf53b98c7dc083abe5dae31074dd15a"), - str2id("96c8dbe225079e624b5ce509f5bd817d1453cd0a85d30d536d01b64a8669aeae"), - str2id("fa31d65b87affcd167b119e9d3d2a27b8236ca4836cb077ed3e96fcbe209b792"), + ParseID("20bdc1402a6fc9b633aaffffffffffffffffffffffffffffffffffffffffffff"), + ParseID("20bdc1402a6fc9b633ccd578c4a92d0f4ef1a457fa2e16c596bc73fb409d6cc0"), + ParseID("20bdc1402a6fc9b633ffffffffffffffffffffffffffffffffffffffffffffff"), + ParseID("20ff988befa5fc40350f00d531a767606efefe242c837aaccb80673f286be53d"), + ParseID("326cb59dfe802304f96ee9b5b9af93bdee73a30f53981e5ec579aedb6f1d0f07"), + ParseID("86b60b9594d1d429c4aa98fa9562082cabf53b98c7dc083abe5dae31074dd15a"), + ParseID("96c8dbe225079e624b5ce509f5bd817d1453cd0a85d30d536d01b64a8669aeae"), + ParseID("fa31d65b87affcd167b119e9d3d2a27b8236ca4836cb077ed3e96fcbe209b792"), } func TestPrefixLength(t *testing.T) { diff --git a/backend/handle.go b/backend/handle.go new file mode 100644 index 000000000..6bdf6af23 --- /dev/null +++ b/backend/handle.go @@ -0,0 +1,48 @@ +package backend + +import ( + "errors" + "fmt" +) + +// Handle is used to store and access data in a backend. 
+type Handle struct { + Type Type + Name string +} + +func (h Handle) String() string { + name := h.Name + if len(name) > 10 { + name = name[:10] + } + return fmt.Sprintf("<%s/%s>", h.Type, name) +} + +// Valid returns an error if h is not valid. +func (h Handle) Valid() error { + if h.Type == "" { + return errors.New("type is empty") + } + + switch h.Type { + case Data: + case Key: + case Lock: + case Snapshot: + case Index: + case Config: + default: + return fmt.Errorf("invalid Type %q", h.Type) + } + + if h.Type == Config { + return nil + } + + if h.Name == "" { + return errors.New("invalid Name") + } + + return nil +} diff --git a/backend/handle_test.go b/backend/handle_test.go new file mode 100644 index 000000000..a477c0aec --- /dev/null +++ b/backend/handle_test.go @@ -0,0 +1,28 @@ +package backend + +import "testing" + +var handleTests = []struct { + h Handle + valid bool +}{ + {Handle{Name: "foo"}, false}, + {Handle{Type: "foobar"}, false}, + {Handle{Type: Config, Name: ""}, true}, + {Handle{Type: Data, Name: ""}, false}, + {Handle{Type: "", Name: "x"}, false}, + {Handle{Type: Lock, Name: "010203040506"}, true}, +} + +func TestHandleValid(t *testing.T) { + for i, test := range handleTests { + err := test.h.Valid() + if err != nil && test.valid { + t.Errorf("test %v failed: error returned for valid handle: %v", i, err) + } + + if !test.valid && err == nil { + t.Errorf("test %v failed: expected error for invalid handle not found", i) + } + } +} diff --git a/backend/id.go b/backend/id.go index 966cd7a4e..115792707 100644 --- a/backend/id.go +++ b/backend/id.go @@ -2,13 +2,19 @@ package backend import ( "bytes" + "crypto/sha256" "encoding/hex" "encoding/json" "errors" ) +// Hash returns the ID for data. +func Hash(data []byte) ID { + return sha256.Sum256(data) +} + // IDSize contains the size of an ID, in bytes. -const IDSize = hashSize +const IDSize = sha256.Size // ID references content within a repository. 
type ID [IDSize]byte @@ -80,10 +86,12 @@ func (id ID) Compare(other ID) int { return bytes.Compare(other[:], id[:]) } +// MarshalJSON returns the JSON encoding of id. func (id ID) MarshalJSON() ([]byte, error) { return json.Marshal(id.String()) } +// UnmarshalJSON parses the JSON-encoded data and stores the result in id. func (id *ID) UnmarshalJSON(b []byte) error { var s string err := json.Unmarshal(b, &s) @@ -98,7 +106,3 @@ func (id *ID) UnmarshalJSON(b []byte) error { return nil } - -func IDFromData(d []byte) ID { - return hashData(d) -} diff --git a/backend/ids_test.go b/backend/ids_test.go index 02647eba1..eac56d30c 100644 --- a/backend/ids_test.go +++ b/backend/ids_test.go @@ -5,6 +5,7 @@ import ( "testing" "github.com/restic/restic/backend" + . "github.com/restic/restic/test" ) var uniqTests = []struct { @@ -12,37 +13,37 @@ var uniqTests = []struct { }{ { backend.IDs{ - str2id("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), - str2id("1285b30394f3b74693cc29a758d9624996ae643157776fce8154aabd2f01515f"), - str2id("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), + ParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), + ParseID("1285b30394f3b74693cc29a758d9624996ae643157776fce8154aabd2f01515f"), + ParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), }, backend.IDs{ - str2id("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), - str2id("1285b30394f3b74693cc29a758d9624996ae643157776fce8154aabd2f01515f"), + ParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), + ParseID("1285b30394f3b74693cc29a758d9624996ae643157776fce8154aabd2f01515f"), }, }, { backend.IDs{ - str2id("1285b30394f3b74693cc29a758d9624996ae643157776fce8154aabd2f01515f"), - str2id("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), - str2id("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), + 
ParseID("1285b30394f3b74693cc29a758d9624996ae643157776fce8154aabd2f01515f"), + ParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), + ParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), }, backend.IDs{ - str2id("1285b30394f3b74693cc29a758d9624996ae643157776fce8154aabd2f01515f"), - str2id("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), + ParseID("1285b30394f3b74693cc29a758d9624996ae643157776fce8154aabd2f01515f"), + ParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), }, }, { backend.IDs{ - str2id("1285b30394f3b74693cc29a758d9624996ae643157776fce8154aabd2f01515f"), - str2id("f658198b405d7e80db5ace1980d125c8da62f636b586c46bf81dfb856a49d0c8"), - str2id("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), - str2id("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), + ParseID("1285b30394f3b74693cc29a758d9624996ae643157776fce8154aabd2f01515f"), + ParseID("f658198b405d7e80db5ace1980d125c8da62f636b586c46bf81dfb856a49d0c8"), + ParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), + ParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), }, backend.IDs{ - str2id("1285b30394f3b74693cc29a758d9624996ae643157776fce8154aabd2f01515f"), - str2id("f658198b405d7e80db5ace1980d125c8da62f636b586c46bf81dfb856a49d0c8"), - str2id("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), + ParseID("1285b30394f3b74693cc29a758d9624996ae643157776fce8154aabd2f01515f"), + ParseID("f658198b405d7e80db5ace1980d125c8da62f636b586c46bf81dfb856a49d0c8"), + ParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), }, }, } diff --git a/backend/idset_test.go b/backend/idset_test.go index 7084c8abf..6659c2229 100644 --- a/backend/idset_test.go +++ b/backend/idset_test.go @@ -4,22 +4,23 @@ import ( "testing" "github.com/restic/restic/backend" + . 
"github.com/restic/restic/test" ) var idsetTests = []struct { id backend.ID seen bool }{ - {str2id("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), false}, - {str2id("1285b30394f3b74693cc29a758d9624996ae643157776fce8154aabd2f01515f"), false}, - {str2id("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), true}, - {str2id("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), true}, - {str2id("1285b30394f3b74693cc29a758d9624996ae643157776fce8154aabd2f01515f"), true}, - {str2id("f658198b405d7e80db5ace1980d125c8da62f636b586c46bf81dfb856a49d0c8"), false}, - {str2id("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), true}, - {str2id("1285b30394f3b74693cc29a758d9624996ae643157776fce8154aabd2f01515f"), true}, - {str2id("f658198b405d7e80db5ace1980d125c8da62f636b586c46bf81dfb856a49d0c8"), true}, - {str2id("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), true}, + {ParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), false}, + {ParseID("1285b30394f3b74693cc29a758d9624996ae643157776fce8154aabd2f01515f"), false}, + {ParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), true}, + {ParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), true}, + {ParseID("1285b30394f3b74693cc29a758d9624996ae643157776fce8154aabd2f01515f"), true}, + {ParseID("f658198b405d7e80db5ace1980d125c8da62f636b586c46bf81dfb856a49d0c8"), false}, + {ParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), true}, + {ParseID("1285b30394f3b74693cc29a758d9624996ae643157776fce8154aabd2f01515f"), true}, + {ParseID("f658198b405d7e80db5ace1980d125c8da62f636b586c46bf81dfb856a49d0c8"), true}, + {ParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), true}, } func TestIDSet(t *testing.T) { diff --git a/backend/interface.go b/backend/interface.go index 63a3a95f8..fb0927c6e 100644 --- a/backend/interface.go +++ 
b/backend/interface.go @@ -1,10 +1,9 @@ package backend -import "io" - // Type is the type of a Blob. type Type string +// These are the different data types a backend can store. const ( Data Type = "data" Key = "key" @@ -14,23 +13,12 @@ const ( Config = "config" ) -// A Backend manages data stored somewhere. +// Backend is used to store and access data. type Backend interface { - // Location returns a string that specifies the location of the repository, - // like a URL. + // Location returns a string that describes the type and location of the + // repository. Location() string - // Create creates a new Blob. The data is available only after Finalize() - // has been called on the returned Blob. - Create() (Blob, error) - - // Get returns an io.ReadCloser for the Blob with the given name of type t. - Get(t Type, name string) (io.ReadCloser, error) - - // GetReader returns an io.ReadCloser for the Blob with the given name of - // type t at offset and length. - GetReader(t Type, name string, offset, length uint) (io.ReadCloser, error) - // Test a boolean value whether a Blob with the name and type exists. Test(t Type, name string) (bool, error) @@ -41,26 +29,33 @@ type Backend interface { Close() error Lister + + // Load returns the data stored in the backend for h at the given offset + // and saves it in p. Load has the same semantics as io.ReaderAt. + Load(h Handle, p []byte, off int64) (int, error) + + // Save stores the data in the backend under the given handle. + Save(h Handle, p []byte) error + + // Stat returns information about the blob identified by h. + Stat(h Handle) (BlobInfo, error) } +// Lister implements listing data items stored in a backend. type Lister interface { - // List returns a channel that yields all names of blobs of type t in - // lexicographic order. A goroutine is started for this. If the channel - // done is closed, sending stops. + // List returns a channel that yields all names of blobs of type t in an + // arbitrary order. 
A goroutine is started for this. If the channel done is + // closed, sending stops. List(t Type, done <-chan struct{}) <-chan string } +// Deleter are backends that allow to self-delete all content stored in them. type Deleter interface { // Delete the complete repository. Delete() error } -type Blob interface { - io.Writer - - // Finalize moves the data blob to the final location for type and name. - Finalize(t Type, name string) error - - // Size returns the number of bytes written to the backend so far. - Size() uint +// BlobInfo is returned by Stat() and contains information about a stored blob. +type BlobInfo struct { + Size int64 } diff --git a/backend/local/backend_test.go b/backend/local/backend_test.go new file mode 100644 index 000000000..b2d5e7b0f --- /dev/null +++ b/backend/local/backend_test.go @@ -0,0 +1,87 @@ +// DO NOT EDIT, AUTOMATICALLY GENERATED +package local_test + +import ( + "testing" + + "github.com/restic/restic/backend/test" +) + +var SkipMessage string + +func TestLocalBackendCreate(t *testing.T) { + if SkipMessage != "" { + t.Skip(SkipMessage) + } + test.TestCreate(t) +} + +func TestLocalBackendOpen(t *testing.T) { + if SkipMessage != "" { + t.Skip(SkipMessage) + } + test.TestOpen(t) +} + +func TestLocalBackendCreateWithConfig(t *testing.T) { + if SkipMessage != "" { + t.Skip(SkipMessage) + } + test.TestCreateWithConfig(t) +} + +func TestLocalBackendLocation(t *testing.T) { + if SkipMessage != "" { + t.Skip(SkipMessage) + } + test.TestLocation(t) +} + +func TestLocalBackendConfig(t *testing.T) { + if SkipMessage != "" { + t.Skip(SkipMessage) + } + test.TestConfig(t) +} + +func TestLocalBackendLoad(t *testing.T) { + if SkipMessage != "" { + t.Skip(SkipMessage) + } + test.TestLoad(t) +} + +func TestLocalBackendSave(t *testing.T) { + if SkipMessage != "" { + t.Skip(SkipMessage) + } + test.TestSave(t) +} + +func TestLocalBackendSaveFilenames(t *testing.T) { + if SkipMessage != "" { + t.Skip(SkipMessage) + } + test.TestSaveFilenames(t) +} + 
+func TestLocalBackendBackend(t *testing.T) { + if SkipMessage != "" { + t.Skip(SkipMessage) + } + test.TestBackend(t) +} + +func TestLocalBackendDelete(t *testing.T) { + if SkipMessage != "" { + t.Skip(SkipMessage) + } + test.TestDelete(t) +} + +func TestLocalBackendCleanup(t *testing.T) { + if SkipMessage != "" { + t.Skip(SkipMessage) + } + test.TestCleanup(t) +} diff --git a/backend/local/local.go b/backend/local/local.go index d8291a8bd..521ffde79 100644 --- a/backend/local/local.go +++ b/backend/local/local.go @@ -7,23 +7,18 @@ import ( "io/ioutil" "os" "path/filepath" - "sort" - "sync" "github.com/restic/restic/backend" + "github.com/restic/restic/debug" ) -var ErrWrongData = errors.New("wrong data returned by backend, checksum does not match") - +// Local is a backend in a local directory. type Local struct { - p string - mu sync.Mutex - open map[string][]*os.File // Contains open files. Guarded by 'mu'. + p string } -// Open opens the local backend as specified by config. -func Open(dir string) (*Local, error) { - items := []string{ +func paths(dir string) []string { + return []string{ dir, filepath.Join(dir, backend.Paths.Data), filepath.Join(dir, backend.Paths.Snapshots), @@ -32,30 +27,23 @@ func Open(dir string) (*Local, error) { filepath.Join(dir, backend.Paths.Keys), filepath.Join(dir, backend.Paths.Temp), } +} +// Open opens the local backend as specified by config. +func Open(dir string) (*Local, error) { // test if all necessary dirs are there - for _, d := range items { + for _, d := range paths(dir) { if _, err := os.Stat(d); err != nil { return nil, fmt.Errorf("%s does not exist", d) } } - return &Local{p: dir, open: make(map[string][]*os.File)}, nil + return &Local{p: dir}, nil } // Create creates all the necessary files and directories for a new local // backend at dir. Afterwards a new config blob should be created. 
func Create(dir string) (*Local, error) { - dirs := []string{ - dir, - filepath.Join(dir, backend.Paths.Data), - filepath.Join(dir, backend.Paths.Snapshots), - filepath.Join(dir, backend.Paths.Index), - filepath.Join(dir, backend.Paths.Locks), - filepath.Join(dir, backend.Paths.Keys), - filepath.Join(dir, backend.Paths.Temp), - } - // test if config file already exists _, err := os.Lstat(filepath.Join(dir, backend.Paths.Config)) if err == nil { @@ -63,7 +51,7 @@ func Create(dir string) (*Local, error) { } // create paths for data, refs and temp - for _, d := range dirs { + for _, d := range paths(dir) { err := os.MkdirAll(d, backend.Modes.Dir) if err != nil { return nil, err @@ -79,93 +67,6 @@ func (b *Local) Location() string { return b.p } -// Return temp directory in correct directory for this backend. -func (b *Local) tempFile() (*os.File, error) { - return ioutil.TempFile(filepath.Join(b.p, backend.Paths.Temp), "temp-") -} - -type localBlob struct { - f *os.File - size uint - final bool - basedir string -} - -func (lb *localBlob) Write(p []byte) (int, error) { - if lb.final { - return 0, errors.New("blob already closed") - } - - n, err := lb.f.Write(p) - lb.size += uint(n) - return n, err -} - -func (lb *localBlob) Size() uint { - return lb.size -} - -func (lb *localBlob) Finalize(t backend.Type, name string) error { - if lb.final { - return errors.New("Already finalized") - } - - lb.final = true - - err := lb.f.Close() - if err != nil { - return fmt.Errorf("local: file.Close: %v", err) - } - - f := filename(lb.basedir, t, name) - - // create directories if necessary, ignore errors - if t == backend.Data { - os.MkdirAll(filepath.Dir(f), backend.Modes.Dir) - } - - // test if new path already exists - if _, err := os.Stat(f); err == nil { - return fmt.Errorf("Close(): file %v already exists", f) - } - - if err := os.Rename(lb.f.Name(), f); err != nil { - return err - } - - // set mode to read-only - fi, err := os.Stat(f) - if err != nil { - return err - } - - 
return setNewFileMode(f, fi) -} - -// Create creates a new Blob. The data is available only after Finalize() -// has been called on the returned Blob. -func (b *Local) Create() (backend.Blob, error) { - // TODO: make sure that tempfile is removed upon error - - // create tempfile in backend - file, err := b.tempFile() - if err != nil { - return nil, err - } - - blob := localBlob{ - f: file, - basedir: b.p, - } - - b.mu.Lock() - open, _ := b.open["blobs"] - b.open["blobs"] = append(open, file) - b.mu.Unlock() - - return &blob, nil -} - // Construct path for given Type and name. func filename(base string, t backend.Type, name string) string { if t == backend.Config { @@ -196,45 +97,116 @@ func dirname(base string, t backend.Type, name string) string { return filepath.Join(base, n) } -// Get returns a reader that yields the content stored under the given -// name. The reader should be closed after draining it. -func (b *Local) Get(t backend.Type, name string) (io.ReadCloser, error) { - file, err := os.Open(filename(b.p, t, name)) - if err != nil { - return nil, err - } - b.mu.Lock() - open, _ := b.open[filename(b.p, t, name)] - b.open[filename(b.p, t, name)] = append(open, file) - b.mu.Unlock() - return file, nil -} - -// GetReader returns an io.ReadCloser for the Blob with the given name of -// type t at offset and length. If length is 0, the reader reads until EOF. -func (b *Local) GetReader(t backend.Type, name string, offset, length uint) (io.ReadCloser, error) { - f, err := os.Open(filename(b.p, t, name)) - if err != nil { - return nil, err +// Load returns the data stored in the backend for h at the given offset +// and saves it in p. Load has the same semantics as io.ReaderAt. 
+func (b *Local) Load(h backend.Handle, p []byte, off int64) (n int, err error) { + if err := h.Valid(); err != nil { + return 0, err } - b.mu.Lock() - open, _ := b.open[filename(b.p, t, name)] - b.open[filename(b.p, t, name)] = append(open, f) - b.mu.Unlock() + f, err := os.Open(filename(b.p, h.Type, h.Name)) + if err != nil { + return 0, err + } - if offset > 0 { - _, err = f.Seek(int64(offset), 0) + defer func() { + e := f.Close() + if err == nil && e != nil { + err = e + } + }() + + if off > 0 { + _, err = f.Seek(off, 0) if err != nil { - return nil, err + return 0, err } } - if length == 0 { - return f, nil + return io.ReadFull(f, p) +} + +// writeToTempfile saves p into a tempfile in tempdir. +func writeToTempfile(tempdir string, p []byte) (filename string, err error) { + tmpfile, err := ioutil.TempFile(tempdir, "temp-") + if err != nil { + return "", err } - return backend.LimitReadCloser(f, int64(length)), nil + n, err := tmpfile.Write(p) + if err != nil { + return "", err + } + + if n != len(p) { + return "", errors.New("not all bytes writen") + } + + if err = tmpfile.Sync(); err != nil { + return "", err + } + + err = tmpfile.Close() + if err != nil { + return "", err + } + + return tmpfile.Name(), nil +} + +// Save stores data in the backend at the handle. 
+func (b *Local) Save(h backend.Handle, p []byte) (err error) { + if err := h.Valid(); err != nil { + return err + } + + tmpfile, err := writeToTempfile(filepath.Join(b.p, backend.Paths.Temp), p) + debug.Log("local.Save", "saved %v (%d bytes) to %v", h, len(p), tmpfile) + + filename := filename(b.p, h.Type, h.Name) + + // test if new path already exists + if _, err := os.Stat(filename); err == nil { + return fmt.Errorf("Rename(): file %v already exists", filename) + } + + // create directories if necessary, ignore errors + if h.Type == backend.Data { + err = os.MkdirAll(filepath.Dir(filename), backend.Modes.Dir) + if err != nil { + return err + } + } + + err = os.Rename(tmpfile, filename) + debug.Log("local.Save", "save %v: rename %v -> %v: %v", + h, filepath.Base(tmpfile), filepath.Base(filename), err) + + if err != nil { + return err + } + + // set mode to read-only + fi, err := os.Stat(filename) + if err != nil { + return err + } + + return setNewFileMode(filename, fi) +} + +// Stat returns information about a blob. +func (b *Local) Stat(h backend.Handle) (backend.BlobInfo, error) { + if err := h.Valid(); err != nil { + return backend.BlobInfo{}, err + } + + fi, err := os.Stat(filename(b.p, h.Type, h.Name)) + if err != nil { + return backend.BlobInfo{}, err + } + + return backend.BlobInfo{Size: fi.Size()}, nil } // Test returns true if a blob of the given type and name exists in the backend. @@ -252,15 +224,7 @@ func (b *Local) Test(t backend.Type, name string) (bool, error) { // Remove removes the blob with the given name and type. func (b *Local) Remove(t backend.Type, name string) error { - // close all open files we may have. fn := filename(b.p, t, name) - b.mu.Lock() - open, _ := b.open[fn] - for _, file := range open { - file.Close() - } - b.open[fn] = nil - b.mu.Unlock() // reset read-only flag err := os.Chmod(fn, 0666) @@ -275,7 +239,6 @@ func (b *Local) Remove(t backend.Type, name string) error { // goroutine is started for this. 
If the channel done is closed, sending // stops. func (b *Local) List(t backend.Type, done <-chan struct{}) <-chan string { - // TODO: use os.Open() and d.Readdirnames() instead of Glob() var pattern string if t == backend.Data { pattern = filepath.Join(dirname(b.p, t, ""), "*", "*") @@ -294,8 +257,6 @@ func (b *Local) List(t backend.Type, done <-chan struct{}) <-chan string { matches[i] = filepath.Base(matches[i]) } - sort.Strings(matches) - go func() { defer close(ch) for _, m := range matches { @@ -316,21 +277,12 @@ func (b *Local) List(t backend.Type, done <-chan struct{}) <-chan string { // Delete removes the repository and all files. func (b *Local) Delete() error { - b.Close() return os.RemoveAll(b.p) } // Close closes all open files. -// They may have been closed already, -// so we ignore all errors. func (b *Local) Close() error { - b.mu.Lock() - for _, open := range b.open { - for _, file := range open { - file.Close() - } - } - b.open = make(map[string][]*os.File) - b.mu.Unlock() + // this does not need to do anything, all open files are closed within the + // same function. 
return nil } diff --git a/backend/local/local_test.go b/backend/local/local_test.go new file mode 100644 index 000000000..3335cbfa8 --- /dev/null +++ b/backend/local/local_test.go @@ -0,0 +1,59 @@ +package local_test + +import ( + "fmt" + "io/ioutil" + "os" + + "github.com/restic/restic/backend" + "github.com/restic/restic/backend/local" + "github.com/restic/restic/backend/test" +) + +var tempBackendDir string + +//go:generate go run ../test/generate_backend_tests.go + +func createTempdir() error { + if tempBackendDir != "" { + return nil + } + + tempdir, err := ioutil.TempDir("", "restic-local-test-") + if err != nil { + return err + } + + fmt.Printf("created new test backend at %v\n", tempdir) + tempBackendDir = tempdir + return nil +} + +func init() { + test.CreateFn = func() (backend.Backend, error) { + err := createTempdir() + if err != nil { + return nil, err + } + return local.Create(tempBackendDir) + } + + test.OpenFn = func() (backend.Backend, error) { + err := createTempdir() + if err != nil { + return nil, err + } + return local.Open(tempBackendDir) + } + + test.CleanupFn = func() error { + if tempBackendDir == "" { + return nil + } + + fmt.Printf("removing test backend at %v\n", tempBackendDir) + err := os.RemoveAll(tempBackendDir) + tempBackendDir = "" + return err + } +} diff --git a/backend/local_test.go b/backend/local_test.go deleted file mode 100644 index 462c4c3d6..000000000 --- a/backend/local_test.go +++ /dev/null @@ -1,59 +0,0 @@ -package backend_test - -import ( - "fmt" - "io/ioutil" - "testing" - - "github.com/restic/restic/backend" - "github.com/restic/restic/backend/local" - . 
"github.com/restic/restic/test" -) - -func setupLocalBackend(t *testing.T) *local.Local { - tempdir, err := ioutil.TempDir("", "restic-test-") - OK(t, err) - - b, err := local.Create(tempdir) - OK(t, err) - - t.Logf("created local backend at %s", tempdir) - - return b -} - -func teardownLocalBackend(t *testing.T, b *local.Local) { - if !TestCleanup { - t.Logf("leaving local backend at %s\n", b.Location()) - return - } - - OK(t, b.Delete()) -} - -func TestLocalBackend(t *testing.T) { - // test for non-existing backend - b, err := local.Open("/invalid-restic-test") - Assert(t, err != nil, "opening invalid repository at /invalid-restic-test should have failed, but err is nil") - Assert(t, b == nil, fmt.Sprintf("opening invalid repository at /invalid-restic-test should have failed, but b is not nil: %v", b)) - - s := setupLocalBackend(t) - defer teardownLocalBackend(t, s) - - testBackend(s, t) -} - -func TestLocalBackendCreationFailures(t *testing.T) { - b := setupLocalBackend(t) - defer teardownLocalBackend(t, b) - - // create a fake config file - blob, err := b.Create() - OK(t, err) - fmt.Fprintf(blob, "config\n") - OK(t, blob.Finalize(backend.Config, "")) - - // test failure to create a new repository at the same location - b2, err := local.Create(b.Location()) - Assert(t, err != nil && b2 == nil, fmt.Sprintf("creating a repository at %s for the second time should have failed", b.Location())) -} diff --git a/backend/mem/backend_test.go b/backend/mem/backend_test.go new file mode 100644 index 000000000..31f86e4fc --- /dev/null +++ b/backend/mem/backend_test.go @@ -0,0 +1,87 @@ +// DO NOT EDIT, AUTOMATICALLY GENERATED +package mem_test + +import ( + "testing" + + "github.com/restic/restic/backend/test" +) + +var SkipMessage string + +func TestMemBackendCreate(t *testing.T) { + if SkipMessage != "" { + t.Skip(SkipMessage) + } + test.TestCreate(t) +} + +func TestMemBackendOpen(t *testing.T) { + if SkipMessage != "" { + t.Skip(SkipMessage) + } + test.TestOpen(t) +} + 
+func TestMemBackendCreateWithConfig(t *testing.T) { + if SkipMessage != "" { + t.Skip(SkipMessage) + } + test.TestCreateWithConfig(t) +} + +func TestMemBackendLocation(t *testing.T) { + if SkipMessage != "" { + t.Skip(SkipMessage) + } + test.TestLocation(t) +} + +func TestMemBackendConfig(t *testing.T) { + if SkipMessage != "" { + t.Skip(SkipMessage) + } + test.TestConfig(t) +} + +func TestMemBackendLoad(t *testing.T) { + if SkipMessage != "" { + t.Skip(SkipMessage) + } + test.TestLoad(t) +} + +func TestMemBackendSave(t *testing.T) { + if SkipMessage != "" { + t.Skip(SkipMessage) + } + test.TestSave(t) +} + +func TestMemBackendSaveFilenames(t *testing.T) { + if SkipMessage != "" { + t.Skip(SkipMessage) + } + test.TestSaveFilenames(t) +} + +func TestMemBackendBackend(t *testing.T) { + if SkipMessage != "" { + t.Skip(SkipMessage) + } + test.TestBackend(t) +} + +func TestMemBackendDelete(t *testing.T) { + if SkipMessage != "" { + t.Skip(SkipMessage) + } + test.TestDelete(t) +} + +func TestMemBackendCleanup(t *testing.T) { + if SkipMessage != "" { + t.Skip(SkipMessage) + } + test.TestCleanup(t) +} diff --git a/backend/mem/mem_backend.go b/backend/mem/mem_backend.go new file mode 100644 index 000000000..adac5b332 --- /dev/null +++ b/backend/mem/mem_backend.go @@ -0,0 +1,223 @@ +package mem + +import ( + "errors" + "io" + "sync" + + "github.com/restic/restic/backend" + "github.com/restic/restic/debug" +) + +type entry struct { + Type backend.Type + Name string +} + +type memMap map[entry][]byte + +// MemoryBackend is a mock backend that uses a map for storing all data in +// memory. This should only be used for tests. +type MemoryBackend struct { + data memMap + m sync.Mutex + + backend.MockBackend +} + +// New returns a new backend that saves all data in a map in memory. 
+func New() *MemoryBackend { + be := &MemoryBackend{ + data: make(memMap), + } + + be.MockBackend.TestFn = func(t backend.Type, name string) (bool, error) { + return memTest(be, t, name) + } + + be.MockBackend.LoadFn = func(h backend.Handle, p []byte, off int64) (int, error) { + return memLoad(be, h, p, off) + } + + be.MockBackend.SaveFn = func(h backend.Handle, p []byte) error { + return memSave(be, h, p) + } + + be.MockBackend.StatFn = func(h backend.Handle) (backend.BlobInfo, error) { + return memStat(be, h) + } + + be.MockBackend.RemoveFn = func(t backend.Type, name string) error { + return memRemove(be, t, name) + } + + be.MockBackend.ListFn = func(t backend.Type, done <-chan struct{}) <-chan string { + return memList(be, t, done) + } + + be.MockBackend.DeleteFn = func() error { + be.m.Lock() + defer be.m.Unlock() + + be.data = make(memMap) + return nil + } + + be.MockBackend.LocationFn = func() string { + return "Memory Backend" + } + + debug.Log("MemoryBackend.New", "created new memory backend") + + return be +} + +func (be *MemoryBackend) insert(t backend.Type, name string, data []byte) error { + be.m.Lock() + defer be.m.Unlock() + + if _, ok := be.data[entry{t, name}]; ok { + return errors.New("already present") + } + + be.data[entry{t, name}] = data + return nil +} + +func memTest(be *MemoryBackend, t backend.Type, name string) (bool, error) { + be.m.Lock() + defer be.m.Unlock() + + debug.Log("MemoryBackend.Test", "test %v %v", t, name) + + if _, ok := be.data[entry{t, name}]; ok { + return true, nil + } + + return false, nil +} + +func memLoad(be *MemoryBackend, h backend.Handle, p []byte, off int64) (int, error) { + if err := h.Valid(); err != nil { + return 0, err + } + + be.m.Lock() + defer be.m.Unlock() + + if h.Type == backend.Config { + h.Name = "" + } + + debug.Log("MemoryBackend.Load", "get %v offset %v len %v", h, off, len(p)) + + if _, ok := be.data[entry{h.Type, h.Name}]; !ok { + return 0, errors.New("no such data") + } + + buf := 
be.data[entry{h.Type, h.Name}] + if off > int64(len(buf)) { + return 0, errors.New("offset beyond end of file") + } + + buf = buf[off:] + + n := copy(p, buf) + + if len(p) > len(buf) { + return n, io.ErrUnexpectedEOF + } + + return n, nil +} + +func memSave(be *MemoryBackend, h backend.Handle, p []byte) error { + if err := h.Valid(); err != nil { + return err + } + + be.m.Lock() + defer be.m.Unlock() + + if h.Type == backend.Config { + h.Name = "" + } + + if _, ok := be.data[entry{h.Type, h.Name}]; ok { + return errors.New("file already exists") + } + + debug.Log("MemoryBackend.Save", "save %v bytes at %v", len(p), h) + buf := make([]byte, len(p)) + copy(buf, p) + be.data[entry{h.Type, h.Name}] = buf + + return nil +} + +func memStat(be *MemoryBackend, h backend.Handle) (backend.BlobInfo, error) { + be.m.Lock() + defer be.m.Unlock() + + if err := h.Valid(); err != nil { + return backend.BlobInfo{}, err + } + + if h.Type == backend.Config { + h.Name = "" + } + + debug.Log("MemoryBackend.Stat", "stat %v", h) + + e, ok := be.data[entry{h.Type, h.Name}] + if !ok { + return backend.BlobInfo{}, errors.New("no such data") + } + + return backend.BlobInfo{Size: int64(len(e))}, nil +} + +func memRemove(be *MemoryBackend, t backend.Type, name string) error { + be.m.Lock() + defer be.m.Unlock() + + debug.Log("MemoryBackend.Remove", "get %v %v", t, name) + + if _, ok := be.data[entry{t, name}]; !ok { + return errors.New("no such data") + } + + delete(be.data, entry{t, name}) + + return nil +} + +func memList(be *MemoryBackend, t backend.Type, done <-chan struct{}) <-chan string { + be.m.Lock() + defer be.m.Unlock() + + ch := make(chan string) + + var ids []string + for entry := range be.data { + if entry.Type != t { + continue + } + ids = append(ids, entry.Name) + } + + debug.Log("MemoryBackend.List", "list %v: %v", t, ids) + + go func() { + defer close(ch) + for _, id := range ids { + select { + case ch <- id: + case <-done: + return + } + } + }() + + return ch +} diff --git 
a/backend/mem/mem_backend_test.go b/backend/mem/mem_backend_test.go new file mode 100644 index 000000000..8c3745aa2 --- /dev/null +++ b/backend/mem/mem_backend_test.go @@ -0,0 +1,38 @@ +package mem_test + +import ( + "errors" + + "github.com/restic/restic/backend" + "github.com/restic/restic/backend/mem" + "github.com/restic/restic/backend/test" +) + +var be backend.Backend + +//go:generate go run ../test/generate_backend_tests.go + +func init() { + test.CreateFn = func() (backend.Backend, error) { + if be != nil { + return nil, errors.New("temporary memory backend dir already exists") + } + + be = mem.New() + + return be, nil + } + + test.OpenFn = func() (backend.Backend, error) { + if be == nil { + return nil, errors.New("repository not initialized") + } + + return be, nil + } + + test.CleanupFn = func() error { + be = nil + return nil + } +} diff --git a/backend/mem_backend.go b/backend/mem_backend.go deleted file mode 100644 index e757566e6..000000000 --- a/backend/mem_backend.go +++ /dev/null @@ -1,240 +0,0 @@ -package backend - -import ( - "bytes" - "errors" - "io" - "sort" - "sync" - - "github.com/restic/restic/debug" -) - -type entry struct { - Type Type - Name string -} - -type memMap map[entry][]byte - -// MemoryBackend is a mock backend that uses a map for storing all data in -// memory. This should only be used for tests. -type MemoryBackend struct { - data memMap - m sync.Mutex - - MockBackend -} - -// NewMemoryBackend returns a new backend that saves all data in a map in -// memory. 
-func NewMemoryBackend() *MemoryBackend { - be := &MemoryBackend{ - data: make(memMap), - } - - be.MockBackend.TestFn = func(t Type, name string) (bool, error) { - return memTest(be, t, name) - } - - be.MockBackend.CreateFn = func() (Blob, error) { - return memCreate(be) - } - - be.MockBackend.GetFn = func(t Type, name string) (io.ReadCloser, error) { - return memGet(be, t, name) - } - - be.MockBackend.GetReaderFn = func(t Type, name string, offset, length uint) (io.ReadCloser, error) { - return memGetReader(be, t, name, offset, length) - } - - be.MockBackend.RemoveFn = func(t Type, name string) error { - return memRemove(be, t, name) - } - - be.MockBackend.ListFn = func(t Type, done <-chan struct{}) <-chan string { - return memList(be, t, done) - } - - be.MockBackend.DeleteFn = func() error { - be.m.Lock() - defer be.m.Unlock() - - be.data = make(memMap) - return nil - } - - debug.Log("MemoryBackend.New", "created new memory backend") - - return be -} - -func (be *MemoryBackend) insert(t Type, name string, data []byte) error { - be.m.Lock() - defer be.m.Unlock() - - if _, ok := be.data[entry{t, name}]; ok { - return errors.New("already present") - } - - be.data[entry{t, name}] = data - return nil -} - -func memTest(be *MemoryBackend, t Type, name string) (bool, error) { - be.m.Lock() - defer be.m.Unlock() - - debug.Log("MemoryBackend.Test", "test %v %v", t, name) - - if _, ok := be.data[entry{t, name}]; ok { - return true, nil - } - - return false, nil -} - -// tempMemEntry temporarily holds data written to the memory backend before it -// is finalized. 
-type tempMemEntry struct { - be *MemoryBackend - data bytes.Buffer -} - -func (e *tempMemEntry) Write(p []byte) (int, error) { - return e.data.Write(p) -} - -func (e *tempMemEntry) Size() uint { - return uint(len(e.data.Bytes())) -} - -func (e *tempMemEntry) Finalize(t Type, name string) error { - if t == Config { - name = "" - } - - debug.Log("MemoryBackend", "save blob %p (%d bytes) as %v %v", e, len(e.data.Bytes()), t, name) - return e.be.insert(t, name, e.data.Bytes()) -} - -func memCreate(be *MemoryBackend) (Blob, error) { - blob := &tempMemEntry{be: be} - debug.Log("MemoryBackend.Create", "create new blob %p", blob) - return blob, nil -} - -// ReadCloser wraps a reader and adds a noop Close method if rd does not implement io.Closer. -func ReadCloser(rd io.Reader) io.ReadCloser { - return readCloser{rd} -} - -// readCloser wraps a reader and adds a noop Close method if rd does not implement io.Closer. -type readCloser struct { - io.Reader -} - -func (rd readCloser) Close() error { - if r, ok := rd.Reader.(io.Closer); ok { - return r.Close() - } - - return nil -} - -func memGet(be *MemoryBackend, t Type, name string) (io.ReadCloser, error) { - be.m.Lock() - defer be.m.Unlock() - - if t == Config { - name = "" - } - - debug.Log("MemoryBackend.Get", "get %v %v", t, name) - - if _, ok := be.data[entry{t, name}]; !ok { - return nil, errors.New("no such data") - } - - return readCloser{bytes.NewReader(be.data[entry{t, name}])}, nil -} - -func memGetReader(be *MemoryBackend, t Type, name string, offset, length uint) (io.ReadCloser, error) { - be.m.Lock() - defer be.m.Unlock() - - if t == Config { - name = "" - } - - debug.Log("MemoryBackend.GetReader", "get %v %v offset %v len %v", t, name, offset, length) - - if _, ok := be.data[entry{t, name}]; !ok { - return nil, errors.New("no such data") - } - - buf := be.data[entry{t, name}] - if offset > uint(len(buf)) { - return nil, errors.New("offset beyond end of file") - } - - buf = buf[offset:] - - if length > 0 { - if 
length > uint(len(buf)) { - length = uint(len(buf)) - } - - buf = buf[:length] - } - - return readCloser{bytes.NewReader(buf)}, nil -} - -func memRemove(be *MemoryBackend, t Type, name string) error { - be.m.Lock() - defer be.m.Unlock() - - debug.Log("MemoryBackend.Remove", "get %v %v", t, name) - - if _, ok := be.data[entry{t, name}]; !ok { - return errors.New("no such data") - } - - delete(be.data, entry{t, name}) - - return nil -} - -func memList(be *MemoryBackend, t Type, done <-chan struct{}) <-chan string { - be.m.Lock() - defer be.m.Unlock() - - ch := make(chan string) - - var ids []string - for entry := range be.data { - if entry.Type != t { - continue - } - ids = append(ids, entry.Name) - } - - sort.Strings(ids) - - debug.Log("MemoryBackend.List", "list %v: %v", t, ids) - - go func() { - defer close(ch) - for _, id := range ids { - select { - case ch <- id: - case <-done: - return - } - } - }() - - return ch -} diff --git a/backend/mem_backend_test.go b/backend/mem_backend_test.go deleted file mode 100644 index c5b43415c..000000000 --- a/backend/mem_backend_test.go +++ /dev/null @@ -1,12 +0,0 @@ -package backend_test - -import ( - "testing" - - "github.com/restic/restic/backend" -) - -func TestMemoryBackend(t *testing.T) { - be := backend.NewMemoryBackend() - testBackend(be, t) -} diff --git a/backend/mock_backend.go b/backend/mock_backend.go index 92d5521c7..921af4d6c 100644 --- a/backend/mock_backend.go +++ b/backend/mock_backend.go @@ -1,24 +1,22 @@ package backend -import ( - "errors" - "io" -) +import "errors" // MockBackend implements a backend whose functions can be specified. This // should only be used for tests. 
type MockBackend struct { - CloseFn func() error - CreateFn func() (Blob, error) - GetFn func(Type, string) (io.ReadCloser, error) - GetReaderFn func(Type, string, uint, uint) (io.ReadCloser, error) - ListFn func(Type, <-chan struct{}) <-chan string - RemoveFn func(Type, string) error - TestFn func(Type, string) (bool, error) - DeleteFn func() error - LocationFn func() string + CloseFn func() error + LoadFn func(h Handle, p []byte, off int64) (int, error) + SaveFn func(h Handle, p []byte) error + StatFn func(h Handle) (BlobInfo, error) + ListFn func(Type, <-chan struct{}) <-chan string + RemoveFn func(Type, string) error + TestFn func(Type, string) (bool, error) + DeleteFn func() error + LocationFn func() string } +// Close the backend. func (m *MockBackend) Close() error { if m.CloseFn == nil { return nil @@ -27,6 +25,7 @@ func (m *MockBackend) Close() error { return m.CloseFn() } +// Location returns a location string. func (m *MockBackend) Location() string { if m.LocationFn == nil { return "" @@ -35,30 +34,34 @@ func (m *MockBackend) Location() string { return m.LocationFn() } -func (m *MockBackend) Create() (Blob, error) { - if m.CreateFn == nil { - return nil, errors.New("not implemented") +// Load loads data from the backend. +func (m *MockBackend) Load(h Handle, p []byte, off int64) (int, error) { + if m.LoadFn == nil { + return 0, errors.New("not implemented") } - return m.CreateFn() + return m.LoadFn(h, p, off) } -func (m *MockBackend) Get(t Type, name string) (io.ReadCloser, error) { - if m.GetFn == nil { - return nil, errors.New("not implemented") +// Save data in the backend. +func (m *MockBackend) Save(h Handle, p []byte) error { + if m.SaveFn == nil { + return errors.New("not implemented") } - return m.GetFn(t, name) + return m.SaveFn(h, p) } -func (m *MockBackend) GetReader(t Type, name string, offset, len uint) (io.ReadCloser, error) { - if m.GetReaderFn == nil { - return nil, errors.New("not implemented") +// Stat an object in the backend. 
+func (m *MockBackend) Stat(h Handle) (BlobInfo, error) { + if m.StatFn == nil { + return BlobInfo{}, errors.New("not implemented") } - return m.GetReaderFn(t, name, offset, len) + return m.StatFn(h) } +// List items of type t. func (m *MockBackend) List(t Type, done <-chan struct{}) <-chan string { if m.ListFn == nil { ch := make(chan string) @@ -69,6 +72,7 @@ func (m *MockBackend) List(t Type, done <-chan struct{}) <-chan string { return m.ListFn(t, done) } +// Remove data from the backend. func (m *MockBackend) Remove(t Type, name string) error { if m.RemoveFn == nil { return errors.New("not implemented") @@ -77,6 +81,7 @@ func (m *MockBackend) Remove(t Type, name string) error { return m.RemoveFn(t, name) } +// Test for the existence of a specific item. func (m *MockBackend) Test(t Type, name string) (bool, error) { if m.TestFn == nil { return false, errors.New("not implemented") @@ -85,6 +90,7 @@ func (m *MockBackend) Test(t Type, name string) (bool, error) { return m.TestFn(t, name) } +// Delete all data. func (m *MockBackend) Delete() error { if m.DeleteFn == nil { return errors.New("not implemented") diff --git a/backend/paths.go b/backend/paths.go index 8e29e6950..940e9fcb9 100644 --- a/backend/paths.go +++ b/backend/paths.go @@ -2,7 +2,7 @@ package backend import "os" -// Default paths for file-based backends (e.g. local) +// Paths contains the default paths for file-based backends (e.g. local). var Paths = struct { Data string Snapshots string @@ -21,5 +21,6 @@ var Paths = struct { "config", } -// Default modes for file-based backends +// Modes holds the default modes for directories and files for file-based +// backends. 
var Modes = struct{ Dir, File os.FileMode }{0700, 0600} diff --git a/backend/readcloser.go b/backend/readcloser.go new file mode 100644 index 000000000..6467e0dfc --- /dev/null +++ b/backend/readcloser.go @@ -0,0 +1,21 @@ +package backend + +import "io" + +// ReadCloser wraps a reader and adds a noop Close method if rd does not implement io.Closer. +func ReadCloser(rd io.Reader) io.ReadCloser { + return readCloser{rd} +} + +// readCloser wraps a reader and adds a noop Close method if rd does not implement io.Closer. +type readCloser struct { + io.Reader +} + +func (rd readCloser) Close() error { + if r, ok := rd.Reader.(io.Closer); ok { + return r.Close() + } + + return nil +} diff --git a/backend/reader.go b/backend/reader.go deleted file mode 100644 index eabe9527b..000000000 --- a/backend/reader.go +++ /dev/null @@ -1,73 +0,0 @@ -package backend - -import ( - "hash" - "io" -) - -type HashAppendReader struct { - r io.Reader - h hash.Hash - sum []byte - closed bool -} - -func NewHashAppendReader(r io.Reader, h hash.Hash) *HashAppendReader { - return &HashAppendReader{ - h: h, - r: io.TeeReader(r, h), - sum: make([]byte, 0, h.Size()), - } -} - -func (h *HashAppendReader) Read(p []byte) (n int, err error) { - if !h.closed { - n, err = h.r.Read(p) - - if err == io.EOF { - h.closed = true - h.sum = h.h.Sum(h.sum) - } else if err != nil { - return - } - } - - if h.closed { - // output hash - r := len(p) - n - - if r > 0 { - c := copy(p[n:], h.sum) - h.sum = h.sum[c:] - - n += c - err = nil - } - - if len(h.sum) == 0 { - err = io.EOF - } - } - - return -} - -type HashingReader struct { - r io.Reader - h hash.Hash -} - -func NewHashingReader(r io.Reader, h hash.Hash) *HashingReader { - return &HashingReader{ - h: h, - r: io.TeeReader(r, h), - } -} - -func (h *HashingReader) Read(p []byte) (int, error) { - return h.r.Read(p) -} - -func (h *HashingReader) Sum(d []byte) []byte { - return h.h.Sum(d) -} diff --git a/backend/reader_test.go b/backend/reader_test.go deleted file 
mode 100644 index b4a23eaea..000000000 --- a/backend/reader_test.go +++ /dev/null @@ -1,81 +0,0 @@ -package backend_test - -import ( - "bytes" - "crypto/rand" - "crypto/sha256" - "io" - "io/ioutil" - "testing" - - "github.com/restic/restic/backend" - . "github.com/restic/restic/test" -) - -func TestHashAppendReader(t *testing.T) { - tests := []int{5, 23, 2<<18 + 23, 1 << 20} - - for _, size := range tests { - data := make([]byte, size) - _, err := io.ReadFull(rand.Reader, data) - if err != nil { - t.Fatalf("ReadFull: %v", err) - } - - expectedHash := sha256.Sum256(data) - - rd := backend.NewHashAppendReader(bytes.NewReader(data), sha256.New()) - - target := bytes.NewBuffer(nil) - n, err := io.Copy(target, rd) - OK(t, err) - - Assert(t, n == int64(size)+int64(len(expectedHash)), - "HashAppendReader: invalid number of bytes read: got %d, expected %d", - n, size+len(expectedHash)) - - r := target.Bytes() - resultingHash := r[len(r)-len(expectedHash):] - Assert(t, bytes.Equal(expectedHash[:], resultingHash), - "HashAppendReader: hashes do not match: expected %02x, got %02x", - expectedHash, resultingHash) - - // try to read again, must return io.EOF - n2, err := rd.Read(make([]byte, 100)) - Assert(t, n2 == 0, "HashAppendReader returned %d additional bytes", n) - Assert(t, err == io.EOF, "HashAppendReader returned %v instead of EOF", err) - } -} - -func TestHashingReader(t *testing.T) { - tests := []int{5, 23, 2<<18 + 23, 1 << 20} - - for _, size := range tests { - data := make([]byte, size) - _, err := io.ReadFull(rand.Reader, data) - if err != nil { - t.Fatalf("ReadFull: %v", err) - } - - expectedHash := sha256.Sum256(data) - - rd := backend.NewHashingReader(bytes.NewReader(data), sha256.New()) - - n, err := io.Copy(ioutil.Discard, rd) - OK(t, err) - - Assert(t, n == int64(size), - "HashAppendReader: invalid number of bytes read: got %d, expected %d", - n, size) - - resultingHash := rd.Sum(nil) - Assert(t, bytes.Equal(expectedHash[:], resultingHash), - 
"HashAppendReader: hashes do not match: expected %02x, got %02x", - expectedHash, resultingHash) - - // try to read again, must return io.EOF - n2, err := rd.Read(make([]byte, 100)) - Assert(t, n2 == 0, "HashAppendReader returned %d additional bytes", n) - Assert(t, err == io.EOF, "HashAppendReader returned %v instead of EOF", err) - } -} diff --git a/backend/s3/backend_test.go b/backend/s3/backend_test.go new file mode 100644 index 000000000..8db0a31f9 --- /dev/null +++ b/backend/s3/backend_test.go @@ -0,0 +1,87 @@ +// DO NOT EDIT, AUTOMATICALLY GENERATED +package s3_test + +import ( + "testing" + + "github.com/restic/restic/backend/test" +) + +var SkipMessage string + +func TestS3BackendCreate(t *testing.T) { + if SkipMessage != "" { + t.Skip(SkipMessage) + } + test.TestCreate(t) +} + +func TestS3BackendOpen(t *testing.T) { + if SkipMessage != "" { + t.Skip(SkipMessage) + } + test.TestOpen(t) +} + +func TestS3BackendCreateWithConfig(t *testing.T) { + if SkipMessage != "" { + t.Skip(SkipMessage) + } + test.TestCreateWithConfig(t) +} + +func TestS3BackendLocation(t *testing.T) { + if SkipMessage != "" { + t.Skip(SkipMessage) + } + test.TestLocation(t) +} + +func TestS3BackendConfig(t *testing.T) { + if SkipMessage != "" { + t.Skip(SkipMessage) + } + test.TestConfig(t) +} + +func TestS3BackendLoad(t *testing.T) { + if SkipMessage != "" { + t.Skip(SkipMessage) + } + test.TestLoad(t) +} + +func TestS3BackendSave(t *testing.T) { + if SkipMessage != "" { + t.Skip(SkipMessage) + } + test.TestSave(t) +} + +func TestS3BackendSaveFilenames(t *testing.T) { + if SkipMessage != "" { + t.Skip(SkipMessage) + } + test.TestSaveFilenames(t) +} + +func TestS3BackendBackend(t *testing.T) { + if SkipMessage != "" { + t.Skip(SkipMessage) + } + test.TestBackend(t) +} + +func TestS3BackendDelete(t *testing.T) { + if SkipMessage != "" { + t.Skip(SkipMessage) + } + test.TestDelete(t) +} + +func TestS3BackendCleanup(t *testing.T) { + if SkipMessage != "" { + t.Skip(SkipMessage) + } + 
test.TestCleanup(t) +} diff --git a/backend/s3/cont_reader.go b/backend/s3/cont_reader.go deleted file mode 100644 index 77bd1dca1..000000000 --- a/backend/s3/cont_reader.go +++ /dev/null @@ -1,16 +0,0 @@ -package s3 - -import "io" - -// ContinuousReader implements an io.Reader on top of an io.ReaderAt, advancing -// an offset. -type ContinuousReader struct { - R io.ReaderAt - Offset int64 -} - -func (c *ContinuousReader) Read(p []byte) (int, error) { - n, err := c.R.ReadAt(p, c.Offset) - c.Offset += int64(n) - return n, err -} diff --git a/backend/s3/s3.go b/backend/s3/s3.go index 87b41e72e..ccb6cd42c 100644 --- a/backend/s3/s3.go +++ b/backend/s3/s3.go @@ -12,7 +12,6 @@ import ( "github.com/restic/restic/debug" ) -const maxKeysInList = 1000 const connLimit = 10 const backendPrefix = "restic" @@ -23,7 +22,8 @@ func s3path(t backend.Type, name string) string { return backendPrefix + "/" + string(t) + "/" + name } -type S3Backend struct { +// s3 is a backend which stores the data on an S3 endpoint. +type s3 struct { client minio.CloudStorageClient connChan chan struct{} bucketname string @@ -39,7 +39,7 @@ func Open(cfg Config) (backend.Backend, error) { return nil, err } - be := &S3Backend{client: client, bucketname: cfg.Bucket} + be := &s3{client: client, bucketname: cfg.Bucket} be.createConnections() if err := client.BucketExists(cfg.Bucket); err != nil { @@ -56,7 +56,7 @@ func Open(cfg Config) (backend.Backend, error) { return be, nil } -func (be *S3Backend) createConnections() { +func (be *s3) createConnections() { be.connChan = make(chan struct{}, connLimit) for i := 0; i < connLimit; i++ { be.connChan <- struct{}{} @@ -64,127 +64,86 @@ func (be *S3Backend) createConnections() { } // Location returns this backend's location (the bucket name). 
-func (be *S3Backend) Location() string { +func (be *s3) Location() string { return be.bucketname } -type s3Blob struct { - b *S3Backend - buf *bytes.Buffer - final bool -} - -func (bb *s3Blob) Write(p []byte) (int, error) { - if bb.final { - return 0, errors.New("blob already closed") - } - - n, err := bb.buf.Write(p) - return n, err -} - -func (bb *s3Blob) Read(p []byte) (int, error) { - return bb.buf.Read(p) -} - -func (bb *s3Blob) Close() error { - bb.final = true - bb.buf.Reset() - return nil -} - -func (bb *s3Blob) Size() uint { - return uint(bb.buf.Len()) -} - -func (bb *s3Blob) Finalize(t backend.Type, name string) error { - debug.Log("s3.blob.Finalize()", "bucket %v, finalize %v, %d bytes", bb.b.bucketname, name, bb.buf.Len()) - if bb.final { - return errors.New("Already finalized") - } - - bb.final = true - - path := s3path(t, name) - - // Check key does not already exist - _, err := bb.b.client.StatObject(bb.b.bucketname, path) - if err == nil { - debug.Log("s3.blob.Finalize()", "%v already exists", name) - return errors.New("key already exists") - } - - expectedBytes := bb.buf.Len() - - <-bb.b.connChan - debug.Log("s3.Finalize", "PutObject(%v, %v, %v, %v)", - bb.b.bucketname, path, int64(bb.buf.Len()), "binary/octet-stream") - n, err := bb.b.client.PutObject(bb.b.bucketname, path, bb.buf, "binary/octet-stream") - debug.Log("s3.Finalize", "finalized %v -> n %v, err %#v", path, n, err) - bb.b.connChan <- struct{}{} - - if err != nil { - return err - } - - if n != int64(expectedBytes) { - return errors.New("could not store all bytes") - } - - return nil -} - -// Create creates a new Blob. The data is available only after Finalize() -// has been called on the returned Blob. -func (be *S3Backend) Create() (backend.Blob, error) { - blob := s3Blob{ - b: be, - buf: &bytes.Buffer{}, - } - - return &blob, nil -} - -// Get returns a reader that yields the content stored under the given -// name. The reader should be closed after draining it. 
-func (be *S3Backend) Get(t backend.Type, name string) (io.ReadCloser, error) { - path := s3path(t, name) - rc, err := be.client.GetObject(be.bucketname, path) - debug.Log("s3.Get", "%v %v -> err %v", t, name, err) - if err != nil { - return nil, err - } - - return rc, nil -} - -// GetReader returns an io.ReadCloser for the Blob with the given name of -// type t at offset and length. If length is 0, the reader reads until EOF. -func (be *S3Backend) GetReader(t backend.Type, name string, offset, length uint) (io.ReadCloser, error) { - debug.Log("s3.GetReader", "%v %v, offset %v len %v", t, name, offset, length) - path := s3path(t, name) +// Load returns the data stored in the backend for h at the given offset +// and saves it in p. Load has the same semantics as io.ReaderAt. +func (be s3) Load(h backend.Handle, p []byte, off int64) (int, error) { + debug.Log("s3.Load", "%v, offset %v, len %v", h, off, len(p)) + path := s3path(h.Type, h.Name) obj, err := be.client.GetObject(be.bucketname, path) if err != nil { debug.Log("s3.GetReader", " err %v", err) - return nil, err + return 0, err } - if offset > 0 { - _, err = obj.Seek(int64(offset), 0) + if off > 0 { + _, err = obj.Seek(off, 0) if err != nil { - return nil, err + return 0, err } } - if length == 0 { - return obj, nil + <-be.connChan + defer func() { + be.connChan <- struct{}{} + }() + return io.ReadFull(obj, p) +} + +// Save stores data in the backend at the handle. 
+func (be s3) Save(h backend.Handle, p []byte) (err error) { + if err := h.Valid(); err != nil { + return err } - return backend.LimitReadCloser(obj, int64(length)), nil + debug.Log("s3.Save", "%v bytes at %d", len(p), h) + + path := s3path(h.Type, h.Name) + + // Check key does not already exist + _, err = be.client.StatObject(be.bucketname, path) + if err == nil { + debug.Log("s3.blob.Finalize()", "%v already exists", h) + return errors.New("key already exists") + } + + <-be.connChan + defer func() { + be.connChan <- struct{}{} + }() + + debug.Log("s3.Save", "PutObject(%v, %v, %v, %v)", + be.bucketname, path, int64(len(p)), "binary/octet-stream") + n, err := be.client.PutObject(be.bucketname, path, bytes.NewReader(p), "binary/octet-stream") + debug.Log("s3.Save", "%v -> %v bytes, err %#v", path, n, err) + + return err +} + +// Stat returns information about a blob. +func (be s3) Stat(h backend.Handle) (backend.BlobInfo, error) { + debug.Log("s3.Stat", "%v") + path := s3path(h.Type, h.Name) + obj, err := be.client.GetObject(be.bucketname, path) + if err != nil { + debug.Log("s3.Stat", "GetObject() err %v", err) + return backend.BlobInfo{}, err + } + + fi, err := obj.Stat() + if err != nil { + debug.Log("s3.Stat", "Stat() err %v", err) + return backend.BlobInfo{}, err + } + + return backend.BlobInfo{Size: fi.Size}, nil } // Test returns true if a blob of the given type and name exists in the backend. -func (be *S3Backend) Test(t backend.Type, name string) (bool, error) { +func (be *s3) Test(t backend.Type, name string) (bool, error) { found := false path := s3path(t, name) _, err := be.client.StatObject(be.bucketname, path) @@ -197,7 +156,7 @@ func (be *S3Backend) Test(t backend.Type, name string) (bool, error) { } // Remove removes the blob with the given name and type. 
-func (be *S3Backend) Remove(t backend.Type, name string) error { +func (be *s3) Remove(t backend.Type, name string) error { path := s3path(t, name) err := be.client.RemoveObject(be.bucketname, path) debug.Log("s3.Remove", "%v %v -> err %v", t, name, err) @@ -207,7 +166,7 @@ func (be *S3Backend) Remove(t backend.Type, name string) error { // List returns a channel that yields all names of blobs of type t. A // goroutine is started for this. If the channel done is closed, sending // stops. -func (be *S3Backend) List(t backend.Type, done <-chan struct{}) <-chan string { +func (be *s3) List(t backend.Type, done <-chan struct{}) <-chan string { debug.Log("s3.List", "listing %v", t) ch := make(chan string) @@ -235,7 +194,7 @@ func (be *S3Backend) List(t backend.Type, done <-chan struct{}) <-chan string { } // Remove keys for a specified backend type. -func (be *S3Backend) removeKeys(t backend.Type) error { +func (be *s3) removeKeys(t backend.Type) error { done := make(chan struct{}) defer close(done) for key := range be.List(backend.Data, done) { @@ -249,7 +208,7 @@ func (be *S3Backend) removeKeys(t backend.Type) error { } // Delete removes all restic keys in the bucket. It will not remove the bucket itself. -func (be *S3Backend) Delete() error { +func (be *s3) Delete() error { alltypes := []backend.Type{ backend.Data, backend.Key, @@ -268,4 +227,4 @@ func (be *S3Backend) Delete() error { } // Close does nothing -func (be *S3Backend) Close() error { return nil } +func (be *s3) Close() error { return nil } diff --git a/backend/s3/s3_test.go b/backend/s3/s3_test.go index 289748485..050e4300a 100644 --- a/backend/s3/s3_test.go +++ b/backend/s3/s3_test.go @@ -1,7 +1,72 @@ -package s3 +package s3_test -import "testing" +import ( + "errors" + "fmt" + "net/url" + "os" -func TestGetReader(t *testing.T) { + "github.com/restic/restic/backend" + "github.com/restic/restic/backend/s3" + "github.com/restic/restic/backend/test" + . 
"github.com/restic/restic/test" +) +//go:generate go run ../test/generate_backend_tests.go + +func init() { + if TestS3Server == "" { + SkipMessage = "s3 test server not available" + return + } + + url, err := url.Parse(TestS3Server) + if err != nil { + fmt.Fprintf(os.Stderr, "invalid url: %v\n", err) + return + } + + cfg := s3.Config{ + Endpoint: url.Host, + Bucket: "restictestbucket", + KeyID: os.Getenv("AWS_ACCESS_KEY_ID"), + Secret: os.Getenv("AWS_SECRET_ACCESS_KEY"), + } + + if url.Scheme == "http" { + cfg.UseHTTP = true + } + + test.CreateFn = func() (backend.Backend, error) { + be, err := s3.Open(cfg) + if err != nil { + return nil, err + } + + exists, err := be.Test(backend.Config, "") + if err != nil { + return nil, err + } + + if exists { + return nil, errors.New("config already exists") + } + + return be, nil + } + + test.OpenFn = func() (backend.Backend, error) { + return s3.Open(cfg) + } + + // test.CleanupFn = func() error { + // if tempBackendDir == "" { + // return nil + // } + + // fmt.Printf("removing test backend at %v\n", tempBackendDir) + // err := os.RemoveAll(tempBackendDir) + // tempBackendDir = "" + // return err + // } } diff --git a/backend/s3_test.go b/backend/s3_test.go deleted file mode 100644 index 6d79f9cd8..000000000 --- a/backend/s3_test.go +++ /dev/null @@ -1,42 +0,0 @@ -package backend_test - -import ( - "net/url" - "os" - "testing" - - "github.com/restic/restic/backend/s3" - . 
"github.com/restic/restic/test" -) - -type deleter interface { - Delete() error -} - -func TestS3Backend(t *testing.T) { - if TestS3Server == "" { - t.Skip("s3 test server not available") - } - - url, err := url.Parse(TestS3Server) - OK(t, err) - - cfg := s3.Config{ - Endpoint: url.Host, - Bucket: "restictestbucket", - KeyID: os.Getenv("AWS_ACCESS_KEY_ID"), - Secret: os.Getenv("AWS_SECRET_ACCESS_KEY"), - } - - if url.Scheme == "http" { - cfg.UseHTTP = true - } - - be, err := s3.Open(cfg) - OK(t, err) - - testBackend(be, t) - - del := be.(deleter) - OK(t, del.Delete()) -} diff --git a/backend/sftp/backend_test.go b/backend/sftp/backend_test.go new file mode 100644 index 000000000..afab17e60 --- /dev/null +++ b/backend/sftp/backend_test.go @@ -0,0 +1,87 @@ +// DO NOT EDIT, AUTOMATICALLY GENERATED +package sftp_test + +import ( + "testing" + + "github.com/restic/restic/backend/test" +) + +var SkipMessage string + +func TestSftpBackendCreate(t *testing.T) { + if SkipMessage != "" { + t.Skip(SkipMessage) + } + test.TestCreate(t) +} + +func TestSftpBackendOpen(t *testing.T) { + if SkipMessage != "" { + t.Skip(SkipMessage) + } + test.TestOpen(t) +} + +func TestSftpBackendCreateWithConfig(t *testing.T) { + if SkipMessage != "" { + t.Skip(SkipMessage) + } + test.TestCreateWithConfig(t) +} + +func TestSftpBackendLocation(t *testing.T) { + if SkipMessage != "" { + t.Skip(SkipMessage) + } + test.TestLocation(t) +} + +func TestSftpBackendConfig(t *testing.T) { + if SkipMessage != "" { + t.Skip(SkipMessage) + } + test.TestConfig(t) +} + +func TestSftpBackendLoad(t *testing.T) { + if SkipMessage != "" { + t.Skip(SkipMessage) + } + test.TestLoad(t) +} + +func TestSftpBackendSave(t *testing.T) { + if SkipMessage != "" { + t.Skip(SkipMessage) + } + test.TestSave(t) +} + +func TestSftpBackendSaveFilenames(t *testing.T) { + if SkipMessage != "" { + t.Skip(SkipMessage) + } + test.TestSaveFilenames(t) +} + +func TestSftpBackendBackend(t *testing.T) { + if SkipMessage != "" { + 
t.Skip(SkipMessage) + } + test.TestBackend(t) +} + +func TestSftpBackendDelete(t *testing.T) { + if SkipMessage != "" { + t.Skip(SkipMessage) + } + test.TestDelete(t) +} + +func TestSftpBackendCleanup(t *testing.T) { + if SkipMessage != "" { + t.Skip(SkipMessage) + } + test.TestCleanup(t) +} diff --git a/backend/sftp/sftp.go b/backend/sftp/sftp.go index f52c85c53..fb10c3854 100644 --- a/backend/sftp/sftp.go +++ b/backend/sftp/sftp.go @@ -9,18 +9,19 @@ import ( "os" "os/exec" "path/filepath" - "sort" "strings" "github.com/juju/errors" "github.com/pkg/sftp" "github.com/restic/restic/backend" + "github.com/restic/restic/debug" ) const ( tempfileRandomSuffixLength = 10 ) +// SFTP is a backend in a directory accessed via SFTP. type SFTP struct { c *sftp.Client p string @@ -63,6 +64,18 @@ func startClient(program string, args ...string) (*SFTP, error) { return &SFTP{c: client, cmd: cmd}, nil } +func paths(dir string) []string { + return []string{ + dir, + Join(dir, backend.Paths.Data), + Join(dir, backend.Paths.Snapshots), + Join(dir, backend.Paths.Index), + Join(dir, backend.Paths.Locks), + Join(dir, backend.Paths.Keys), + Join(dir, backend.Paths.Temp), + } +} + // Open opens an sftp backend. When the command is started via // exec.Command, it is expected to speak sftp on stdin/stdout. The backend // is expected at the given path. 
@@ -73,16 +86,7 @@ func Open(dir string, program string, args ...string) (*SFTP, error) { } // test if all necessary dirs and files are there - items := []string{ - dir, - Join(dir, backend.Paths.Data), - Join(dir, backend.Paths.Snapshots), - Join(dir, backend.Paths.Index), - Join(dir, backend.Paths.Locks), - Join(dir, backend.Paths.Keys), - Join(dir, backend.Paths.Temp), - } - for _, d := range items { + for _, d := range paths(dir) { if _, err := sftp.c.Lstat(d); err != nil { return nil, fmt.Errorf("%s does not exist", d) } @@ -117,16 +121,6 @@ func Create(dir string, program string, args ...string) (*SFTP, error) { return nil, err } - dirs := []string{ - dir, - Join(dir, backend.Paths.Data), - Join(dir, backend.Paths.Snapshots), - Join(dir, backend.Paths.Index), - Join(dir, backend.Paths.Locks), - Join(dir, backend.Paths.Keys), - Join(dir, backend.Paths.Temp), - } - // test if config file already exists _, err = sftp.c.Lstat(Join(dir, backend.Paths.Config)) if err == nil { @@ -134,7 +128,7 @@ func Create(dir string, program string, args ...string) (*SFTP, error) { } // create paths for data, refs and temp blobs - for _, d := range dirs { + for _, d := range paths(dir) { err = sftp.mkdirAll(d, backend.Modes.Dir) if err != nil { return nil, err @@ -252,64 +246,7 @@ func (r *SFTP) renameFile(oldname string, t backend.Type, name string) error { return r.c.Chmod(filename, fi.Mode()&os.FileMode(^uint32(0222))) } -type sftpBlob struct { - f *sftp.File - tempname string - size uint - closed bool - backend *SFTP -} - -func (sb *sftpBlob) Finalize(t backend.Type, name string) error { - if sb.closed { - return errors.New("Close() called on closed file") - } - sb.closed = true - - err := sb.f.Close() - if err != nil { - return fmt.Errorf("sftp: file.Close: %v", err) - } - - // rename file - err = sb.backend.renameFile(sb.tempname, t, name) - if err != nil { - return fmt.Errorf("sftp: renameFile: %v", err) - } - - return nil -} - -func (sb *sftpBlob) Write(p []byte) (int, 
error) { - n, err := sb.f.Write(p) - sb.size += uint(n) - return n, err -} - -func (sb *sftpBlob) Size() uint { - return sb.size -} - -// Create creates a new Blob. The data is available only after Finalize() -// has been called on the returned Blob. -func (r *SFTP) Create() (backend.Blob, error) { - // TODO: make sure that tempfile is removed upon error - - // create tempfile in backend - filename, file, err := r.tempFile() - if err != nil { - return nil, errors.Annotate(err, "create tempfile") - } - - blob := sftpBlob{ - f: file, - tempname: filename, - backend: r, - } - - return &blob, nil -} - +// Join joins the given paths and cleans them afterwards. func Join(parts ...string) string { return filepath.Clean(strings.Join(parts, "/")) } @@ -344,38 +281,80 @@ func (r *SFTP) dirname(t backend.Type, name string) string { return Join(r.p, n) } -// Get returns a reader that yields the content stored under the given -// name. The reader should be closed after draining it. -func (r *SFTP) Get(t backend.Type, name string) (io.ReadCloser, error) { - // try to open file - file, err := r.c.Open(r.filename(t, name)) - if err != nil { - return nil, err +// Load returns the data stored in the backend for h at the given offset +// and saves it in p. Load has the same semantics as io.ReaderAt. +func (r *SFTP) Load(h backend.Handle, p []byte, off int64) (n int, err error) { + if err := h.Valid(); err != nil { + return 0, err } - return file, nil -} - -// GetReader returns an io.ReadCloser for the Blob with the given name of -// type t at offset and length. If length is 0, the reader reads until EOF. 
-func (r *SFTP) GetReader(t backend.Type, name string, offset, length uint) (io.ReadCloser, error) { - f, err := r.c.Open(r.filename(t, name)) + f, err := r.c.Open(r.filename(h.Type, h.Name)) if err != nil { - return nil, err + return 0, err } - if offset > 0 { - _, err = f.Seek(int64(offset), 0) + defer func() { + e := f.Close() + if err == nil && e != nil { + err = e + } + }() + + if off > 0 { + _, err = f.Seek(off, 0) if err != nil { - return nil, err + return 0, err } } - if length == 0 { - return f, nil + return io.ReadFull(f, p) +} + +// Save stores data in the backend at the handle. +func (r *SFTP) Save(h backend.Handle, p []byte) (err error) { + if err := h.Valid(); err != nil { + return err } - return backend.LimitReadCloser(f, int64(length)), nil + filename, tmpfile, err := r.tempFile() + debug.Log("sftp.Save", "save %v (%d bytes) to %v", h, len(p), filename) + + n, err := tmpfile.Write(p) + if err != nil { + return err + } + + if n != len(p) { + return errors.New("not all bytes writen") + } + + err = tmpfile.Close() + if err != nil { + return err + } + + err = r.renameFile(filename, h.Type, h.Name) + debug.Log("sftp.Save", "save %v: rename %v: %v", + h, filepath.Base(filename), err) + if err != nil { + return fmt.Errorf("sftp: renameFile: %v", err) + } + + return nil +} + +// Stat returns information about a blob. +func (r *SFTP) Stat(h backend.Handle) (backend.BlobInfo, error) { + if err := h.Valid(); err != nil { + return backend.BlobInfo{}, err + } + + fi, err := r.c.Lstat(r.filename(h.Type, h.Name)) + if err != nil { + return backend.BlobInfo{}, err + } + + return backend.BlobInfo{Size: fi.Size()}, nil } // Test returns true if a blob of the given type and name exists in the backend. 
@@ -420,8 +399,6 @@ func (r *SFTP) List(t backend.Type, done <-chan struct{}) <-chan string { dirs = append(dirs, d.Name()) } - sort.Strings(dirs) - // read files for _, dir := range dirs { entries, err := r.c.ReadDir(Join(basedir, dir)) @@ -434,8 +411,6 @@ func (r *SFTP) List(t backend.Type, done <-chan struct{}) <-chan string { items = append(items, entry.Name()) } - sort.Strings(items) - for _, file := range items { select { case ch <- file: @@ -455,8 +430,6 @@ func (r *SFTP) List(t backend.Type, done <-chan struct{}) <-chan string { items = append(items, entry.Name()) } - sort.Strings(items) - for _, file := range items { select { case ch <- file: @@ -472,16 +445,17 @@ func (r *SFTP) List(t backend.Type, done <-chan struct{}) <-chan string { } // Close closes the sftp connection and terminates the underlying command. -func (s *SFTP) Close() error { - if s == nil { +func (r *SFTP) Close() error { + if r == nil { return nil } - s.c.Close() + err := r.c.Close() + debug.Log("sftp.Close", "Close returned error %v", err) - if err := s.cmd.Process.Kill(); err != nil { + if err := r.cmd.Process.Kill(); err != nil { return err } - return s.cmd.Wait() + return r.cmd.Wait() } diff --git a/backend/sftp/sftp_backend_test.go b/backend/sftp/sftp_backend_test.go new file mode 100644 index 000000000..bfb8e4e75 --- /dev/null +++ b/backend/sftp/sftp_backend_test.go @@ -0,0 +1,80 @@ +package sftp_test + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strings" + + "github.com/restic/restic/backend" + "github.com/restic/restic/backend/sftp" + "github.com/restic/restic/backend/test" + + . 
"github.com/restic/restic/test" +) + +var tempBackendDir string + +//go:generate go run ../test/generate_backend_tests.go + +func createTempdir() error { + if tempBackendDir != "" { + return nil + } + + tempdir, err := ioutil.TempDir("", "restic-local-test-") + if err != nil { + return err + } + + fmt.Printf("created new test backend at %v\n", tempdir) + tempBackendDir = tempdir + return nil +} + +func init() { + sftpserver := "" + + for _, dir := range strings.Split(TestSFTPPath, ":") { + testpath := filepath.Join(dir, "sftp-server") + _, err := os.Stat(testpath) + if !os.IsNotExist(err) { + sftpserver = testpath + break + } + } + + if sftpserver == "" { + SkipMessage = "sftp server binary not found, skipping tests" + return + } + + test.CreateFn = func() (backend.Backend, error) { + err := createTempdir() + if err != nil { + return nil, err + } + + return sftp.Create(tempBackendDir, sftpserver) + } + + test.OpenFn = func() (backend.Backend, error) { + err := createTempdir() + if err != nil { + return nil, err + } + return sftp.Open(tempBackendDir, sftpserver) + } + + test.CleanupFn = func() error { + if tempBackendDir == "" { + return nil + } + + fmt.Printf("removing test backend at %v\n", tempBackendDir) + err := os.RemoveAll(tempBackendDir) + tempBackendDir = "" + return err + } +} diff --git a/backend/sftp_test.go b/backend/sftp_test.go deleted file mode 100644 index b678e8ea9..000000000 --- a/backend/sftp_test.go +++ /dev/null @@ -1,65 +0,0 @@ -package backend_test - -import ( - "io/ioutil" - "os" - "path/filepath" - "strings" - "testing" - - "github.com/restic/restic/backend/sftp" - . 
"github.com/restic/restic/test" -) - -func setupSFTPBackend(t *testing.T) *sftp.SFTP { - sftpserver := "" - - for _, dir := range strings.Split(TestSFTPPath, ":") { - testpath := filepath.Join(dir, "sftp-server") - fd, err := os.Open(testpath) - fd.Close() - if !os.IsNotExist(err) { - sftpserver = testpath - break - } - } - - if sftpserver == "" { - return nil - } - - tempdir, err := ioutil.TempDir("", "restic-test-") - OK(t, err) - - b, err := sftp.Create(tempdir, sftpserver) - OK(t, err) - - t.Logf("created sftp backend locally at %s", tempdir) - - return b -} - -func teardownSFTPBackend(t *testing.T, b *sftp.SFTP) { - if !TestCleanup { - t.Logf("leaving backend at %s\n", b.Location()) - return - } - - err := os.RemoveAll(b.Location()) - OK(t, err) -} - -func TestSFTPBackend(t *testing.T) { - if !RunIntegrationTest { - t.Skip("integration tests disabled") - } - - s := setupSFTPBackend(t) - if s == nil { - t.Skip("unable to find sftp-server binary") - return - } - defer teardownSFTPBackend(t, s) - - testBackend(s, t) -} diff --git a/backend/test/backend_test.go b/backend/test/backend_test.go new file mode 100644 index 000000000..c1bee84c7 --- /dev/null +++ b/backend/test/backend_test.go @@ -0,0 +1,87 @@ +// DO NOT EDIT, AUTOMATICALLY GENERATED +package test_test + +import ( + "testing" + + "github.com/restic/restic/backend/test" +) + +var SkipMessage string + +func TestTestBackendCreate(t *testing.T) { + if SkipMessage != "" { + t.Skip(SkipMessage) + } + test.TestCreate(t) +} + +func TestTestBackendOpen(t *testing.T) { + if SkipMessage != "" { + t.Skip(SkipMessage) + } + test.TestOpen(t) +} + +func TestTestBackendCreateWithConfig(t *testing.T) { + if SkipMessage != "" { + t.Skip(SkipMessage) + } + test.TestCreateWithConfig(t) +} + +func TestTestBackendLocation(t *testing.T) { + if SkipMessage != "" { + t.Skip(SkipMessage) + } + test.TestLocation(t) +} + +func TestTestBackendConfig(t *testing.T) { + if SkipMessage != "" { + t.Skip(SkipMessage) + } + 
test.TestConfig(t) +} + +func TestTestBackendLoad(t *testing.T) { + if SkipMessage != "" { + t.Skip(SkipMessage) + } + test.TestLoad(t) +} + +func TestTestBackendSave(t *testing.T) { + if SkipMessage != "" { + t.Skip(SkipMessage) + } + test.TestSave(t) +} + +func TestTestBackendSaveFilenames(t *testing.T) { + if SkipMessage != "" { + t.Skip(SkipMessage) + } + test.TestSaveFilenames(t) +} + +func TestTestBackendBackend(t *testing.T) { + if SkipMessage != "" { + t.Skip(SkipMessage) + } + test.TestBackend(t) +} + +func TestTestBackendDelete(t *testing.T) { + if SkipMessage != "" { + t.Skip(SkipMessage) + } + test.TestDelete(t) +} + +func TestTestBackendCleanup(t *testing.T) { + if SkipMessage != "" { + t.Skip(SkipMessage) + } + test.TestCleanup(t) +} diff --git a/backend/test/generate_backend_tests.go b/backend/test/generate_backend_tests.go new file mode 100644 index 000000000..0631f72c8 --- /dev/null +++ b/backend/test/generate_backend_tests.go @@ -0,0 +1,140 @@ +// +build ignore + +package main + +import ( + "bufio" + "flag" + "fmt" + "io" + "log" + "os" + "os/exec" + "path/filepath" + "regexp" + "text/template" + "unicode" + "unicode/utf8" +) + +var data struct { + Package string + PackagePrefix string + Funcs []string +} + +var testTemplate = ` +// DO NOT EDIT, AUTOMATICALLY GENERATED +package {{ .Package }} + +import ( + "testing" + + "github.com/restic/restic/backend/test" +) + +var SkipMessage string + +{{ $prefix := .PackagePrefix }} +{{ range $f := .Funcs }} +func Test{{ $prefix }}{{ $f }}(t *testing.T){ + if SkipMessage != "" { t.Skip(SkipMessage) } + test.Test{{ $f }}(t) +} + +{{ end }} +` + +var testFile = flag.String("testfile", "../test/tests.go", "file to search test functions in") +var outputFile = flag.String("output", "backend_test.go", "output file to write generated code to") +var packageName = flag.String("package", "", "the package name to use") +var prefix = flag.String("prefix", "", "test function prefix") +var quiet = flag.Bool("quiet", 
false, "be quiet") + +func errx(err error) { + if err == nil { + return + } + + fmt.Fprintf(os.Stderr, "error: %v\n", err) + os.Exit(1) +} + +var funcRegex = regexp.MustCompile(`^func\s+Test(.+)\s*\(`) + +func findTestFunctions() (funcs []string) { + f, err := os.Open(*testFile) + errx(err) + + sc := bufio.NewScanner(f) + for sc.Scan() { + match := funcRegex.FindStringSubmatch(sc.Text()) + if len(match) > 0 { + funcs = append(funcs, match[1]) + } + } + + if err := sc.Err(); err != nil { + log.Fatalf("Error scanning file: %v", err) + } + + errx(f.Close()) + return funcs +} + +func generateOutput(wr io.Writer, data interface{}) { + t := template.Must(template.New("backendtest").Parse(testTemplate)) + + cmd := exec.Command("gofmt") + cmd.Stdout = wr + in, err := cmd.StdinPipe() + errx(err) + errx(cmd.Start()) + errx(t.Execute(in, data)) + errx(in.Close()) + errx(cmd.Wait()) +} + +func packageTestFunctionPrefix(pkg string) string { + if pkg == "" { + return "" + } + + r, n := utf8.DecodeRuneInString(pkg) + return string(unicode.ToUpper(r)) + pkg[n:] +} + +func init() { + flag.Parse() +} + +func main() { + dir, err := os.Getwd() + if err != nil { + fmt.Fprintf(os.Stderr, "Getwd() %v\n", err) + os.Exit(1) + } + + pkg := *packageName + if pkg == "" { + pkg = filepath.Base(dir) + } + + f, err := os.Create(*outputFile) + errx(err) + + data.Package = pkg + "_test" + if *prefix != "" { + data.PackagePrefix = *prefix + } else { + data.PackagePrefix = packageTestFunctionPrefix(pkg) + "Backend" + } + data.Funcs = findTestFunctions() + generateOutput(f, data) + + errx(f.Close()) + + if !*quiet { + fmt.Printf("wrote backend tests for package %v to %v\n", data.Package, *outputFile) + } +} diff --git a/backend/test/tests.go b/backend/test/tests.go new file mode 100644 index 000000000..e71412381 --- /dev/null +++ b/backend/test/tests.go @@ -0,0 +1,514 @@ +package test + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "math/rand" + "reflect" + "sort" + "testing" + + 
"github.com/restic/restic/backend" + . "github.com/restic/restic/test" +) + +// CreateFn is a function that creates a temporary repository for the tests. +var CreateFn func() (backend.Backend, error) + +// OpenFn is a function that opens a previously created temporary repository. +var OpenFn func() (backend.Backend, error) + +// CleanupFn removes temporary files and directories created during the tests. +var CleanupFn func() error + +var but backend.Backend // backendUnderTest +var butInitialized bool + +func open(t testing.TB) backend.Backend { + if OpenFn == nil { + t.Fatal("OpenFn not set") + } + + if CreateFn == nil { + t.Fatalf("CreateFn not set") + } + + if !butInitialized { + be, err := CreateFn() + if err != nil { + t.Fatalf("Create returned unexpected error: %v", err) + } + + but = be + butInitialized = true + } + + if but == nil { + var err error + but, err = OpenFn() + if err != nil { + t.Fatalf("Open returned unexpected error: %v", err) + } + } + + return but +} + +func close(t testing.TB) { + if but == nil { + t.Fatalf("trying to close non-existing backend") + } + + err := but.Close() + if err != nil { + t.Fatalf("Close returned unexpected error: %v", err) + } + + but = nil +} + +// TestCreate creates a backend. +func TestCreate(t testing.TB) { + if CreateFn == nil { + t.Fatalf("CreateFn not set!") + } + + be, err := CreateFn() + if err != nil { + fmt.Printf("foo\n") + t.Fatalf("Create returned error: %v", err) + } + + butInitialized = true + + err = be.Close() + if err != nil { + t.Fatalf("Close returned error: %v", err) + } +} + +// TestOpen opens a previously created backend. +func TestOpen(t testing.TB) { + if OpenFn == nil { + t.Fatalf("OpenFn not set!") + } + + be, err := OpenFn() + if err != nil { + t.Fatalf("Open returned error: %v", err) + } + + err = be.Close() + if err != nil { + t.Fatalf("Close returned error: %v", err) + } +} + +// TestCreateWithConfig tests that creating a backend in a location which already +// has a config file fails. 
+func TestCreateWithConfig(t testing.TB) { + if CreateFn == nil { + t.Fatalf("CreateFn not set") + } + + b := open(t) + defer close(t) + + // save a config + store(t, b, backend.Config, []byte("test config")) + + // now create the backend again, this must fail + _, err := CreateFn() + if err == nil { + t.Fatalf("expected error not found for creating a backend with an existing config file") + } + + // remove config + err = b.Remove(backend.Config, "") + if err != nil { + t.Fatalf("unexpected error removing config: %v", err) + } +} + +// TestLocation tests that a location string is returned. +func TestLocation(t testing.TB) { + b := open(t) + defer close(t) + + l := b.Location() + if l == "" { + t.Fatalf("invalid location string %q", l) + } +} + +// TestConfig saves and loads a config from the backend. +func TestConfig(t testing.TB) { + b := open(t) + defer close(t) + + var testString = "Config" + + // create config and read it back + _, err := backend.LoadAll(b, backend.Handle{Type: backend.Config}, nil) + if err == nil { + t.Fatalf("did not get expected error for non-existing config") + } + + err = b.Save(backend.Handle{Type: backend.Config}, []byte(testString)) + if err != nil { + t.Fatalf("Save() error: %v", err) + } + + // try accessing the config with different names, should all return the + // same config + for _, name := range []string{"", "foo", "bar", "0000000000000000000000000000000000000000000000000000000000000000"} { + buf, err := backend.LoadAll(b, backend.Handle{Type: backend.Config}, nil) + if err != nil { + t.Fatalf("unable to read config with name %q: %v", name, err) + } + + if string(buf) != testString { + t.Fatalf("wrong data returned, want %q, got %q", testString, string(buf)) + } + } +} + +// TestLoad tests the backend's Load function. 
+func TestLoad(t testing.TB) { + b := open(t) + defer close(t) + + _, err := b.Load(backend.Handle{}, nil, 0) + if err == nil { + t.Fatalf("Load() did not return an error for invalid handle") + } + + _, err = b.Load(backend.Handle{Type: backend.Data, Name: "foobar"}, nil, 0) + if err == nil { + t.Fatalf("Load() did not return an error for non-existing blob") + } + + length := rand.Intn(1<<24) + 2000 + + data := Random(23, length) + id := backend.Hash(data) + + handle := backend.Handle{Type: backend.Data, Name: id.String()} + err = b.Save(handle, data) + if err != nil { + t.Fatalf("Save() error: %v", err) + } + + for i := 0; i < 50; i++ { + l := rand.Intn(length + 2000) + o := rand.Intn(length + 2000) + + d := data + if o < len(d) { + d = d[o:] + } else { + o = len(d) + d = d[:0] + } + + if l > 0 && l < len(d) { + d = d[:l] + } + + buf := make([]byte, l) + n, err := b.Load(handle, buf, int64(o)) + + // if we requested data beyond the end of the file, ignore + // ErrUnexpectedEOF error + if l > len(d) && err == io.ErrUnexpectedEOF { + err = nil + buf = buf[:len(d)] + } + + if err != nil { + t.Errorf("Load(%d, %d): unexpected error: %v", len(buf), int64(o), err) + continue + } + + if n != len(buf) { + t.Errorf("Load(%d, %d): wrong length returned, want %d, got %d", + len(buf), int64(o), len(buf), n) + continue + } + + buf = buf[:n] + if !bytes.Equal(buf, d) { + t.Errorf("Load(%d, %d) returned wrong bytes", len(buf), int64(o)) + continue + } + } + + OK(t, b.Remove(backend.Data, id.String())) +} + +// TestSave tests saving data in the backend. 
+func TestSave(t testing.TB) { + b := open(t) + defer close(t) + var id backend.ID + + for i := 0; i < 10; i++ { + length := rand.Intn(1<<23) + 200000 + data := Random(23, length) + // use the first 32 byte as the ID + copy(id[:], data) + + h := backend.Handle{ + Type: backend.Data, + Name: fmt.Sprintf("%s-%d", id, i), + } + err := b.Save(h, data) + OK(t, err) + + buf, err := backend.LoadAll(b, h, nil) + OK(t, err) + if len(buf) != len(data) { + t.Fatalf("number of bytes does not match, want %v, got %v", len(data), len(buf)) + } + + if !bytes.Equal(buf, data) { + t.Fatalf("data not equal") + } + + fi, err := b.Stat(h) + OK(t, err) + + if fi.Size != int64(len(data)) { + t.Fatalf("Stat() returned different size, want %q, got %d", len(data), fi.Size) + } + + err = b.Remove(h.Type, h.Name) + if err != nil { + t.Fatalf("error removing item: %v", err) + } + } +} + +var filenameTests = []struct { + name string + data string +}{ + {"1dfc6bc0f06cb255889e9ea7860a5753e8eb9665c9a96627971171b444e3113e", "x"}, + {"foobar", "foobar"}, + { + "1dfc6bc0f06cb255889e9ea7860a5753e8eb9665c9a96627971171b444e3113e4bf8f2d9144cc5420a80f04a4880ad6155fc58903a4fb6457c476c43541dcaa6-5", + "foobar content of data blob", + }, +} + +// TestSaveFilenames tests saving data with various file names in the backend. 
+func TestSaveFilenames(t testing.TB) { + b := open(t) + defer close(t) + + for i, test := range filenameTests { + h := backend.Handle{Name: test.name, Type: backend.Data} + err := b.Save(h, []byte(test.data)) + if err != nil { + t.Errorf("test %d failed: Save() returned %v", i, err) + continue + } + + buf, err := backend.LoadAll(b, h, nil) + if err != nil { + t.Errorf("test %d failed: Load() returned %v", i, err) + continue + } + + if !bytes.Equal(buf, []byte(test.data)) { + t.Errorf("test %d: returned wrong bytes", i) + } + + err = b.Remove(h.Type, h.Name) + if err != nil { + t.Errorf("test %d failed: Remove() returned %v", i, err) + continue + } + } +} + +var testStrings = []struct { + id string + data string +}{ + {"c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2", "foobar"}, + {"248d6a61d20638b8e5c026930c3e6039a33ce45964ff2167f6ecedd419db06c1", "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq"}, + {"cc5d46bdb4991c6eae3eb739c9c8a7a46fe9654fab79c47b4fe48383b5b25e1c", "foo/bar"}, + {"4e54d2c721cbdb730f01b10b62dec622962b36966ec685880effa63d71c808f2", "foo/../../baz"}, +} + +func store(t testing.TB, b backend.Backend, tpe backend.Type, data []byte) { + id := backend.Hash(data) + err := b.Save(backend.Handle{Name: id.String(), Type: tpe}, data) + OK(t, err) +} + +func read(t testing.TB, rd io.Reader, expectedData []byte) { + buf, err := ioutil.ReadAll(rd) + OK(t, err) + if expectedData != nil { + Equals(t, expectedData, buf) + } +} + +// TestBackend tests all functions of the backend. 
+func TestBackend(t testing.TB) { + b := open(t) + defer close(t) + + for _, tpe := range []backend.Type{ + backend.Data, backend.Key, backend.Lock, + backend.Snapshot, backend.Index, + } { + // detect non-existing files + for _, test := range testStrings { + id, err := backend.ParseID(test.id) + OK(t, err) + + // test if blob is already in repository + ret, err := b.Test(tpe, id.String()) + OK(t, err) + Assert(t, !ret, "blob was found to exist before creating") + + // try to stat a not existing blob + h := backend.Handle{Type: tpe, Name: id.String()} + _, err = b.Stat(h) + Assert(t, err != nil, "blob data could be extracted before creation") + + // try to read not existing blob + _, err = b.Load(h, nil, 0) + Assert(t, err != nil, "blob reader could be obtained before creation") + + // try to get string out, should fail + ret, err = b.Test(tpe, id.String()) + OK(t, err) + Assert(t, !ret, "id %q was found (but should not have)", test.id) + } + + // add files + for _, test := range testStrings { + store(t, b, tpe, []byte(test.data)) + + // test Load() + h := backend.Handle{Type: tpe, Name: test.id} + buf, err := backend.LoadAll(b, h, nil) + OK(t, err) + Equals(t, test.data, string(buf)) + + // try to read it out with an offset and a length + start := 1 + end := len(test.data) - 2 + length := end - start + + buf2 := make([]byte, length) + n, err := b.Load(h, buf2, int64(start)) + OK(t, err) + Equals(t, length, n) + Equals(t, test.data[start:end], string(buf2)) + } + + // test adding the first file again + test := testStrings[0] + + // create blob + err := b.Save(backend.Handle{Type: tpe, Name: test.id}, []byte(test.data)) + Assert(t, err != nil, "expected error, got %v", err) + + // remove and recreate + err = b.Remove(tpe, test.id) + OK(t, err) + + // test that the blob is gone + ok, err := b.Test(tpe, test.id) + OK(t, err) + Assert(t, ok == false, "removed blob still present") + + // create blob + err = b.Save(backend.Handle{Type: tpe, Name: test.id}, 
[]byte(test.data)) + OK(t, err) + + // list items + IDs := backend.IDs{} + + for _, test := range testStrings { + id, err := backend.ParseID(test.id) + OK(t, err) + IDs = append(IDs, id) + } + + list := backend.IDs{} + + for s := range b.List(tpe, nil) { + list = append(list, ParseID(s)) + } + + if len(IDs) != len(list) { + t.Fatalf("wrong number of IDs returned: want %d, got %d", len(IDs), len(list)) + } + + sort.Sort(IDs) + sort.Sort(list) + + if !reflect.DeepEqual(IDs, list) { + t.Fatalf("lists aren't equal, want:\n %v\n got:\n%v\n", IDs, list) + } + + // remove content if requested + if TestCleanupTempDirs { + for _, test := range testStrings { + id, err := backend.ParseID(test.id) + OK(t, err) + + found, err := b.Test(tpe, id.String()) + OK(t, err) + + OK(t, b.Remove(tpe, id.String())) + + found, err = b.Test(tpe, id.String()) + OK(t, err) + Assert(t, !found, fmt.Sprintf("id %q not found after removal", id)) + } + } + } +} + +// TestDelete tests the Delete function. +func TestDelete(t testing.TB) { + b := open(t) + defer close(t) + + be, ok := b.(backend.Deleter) + if !ok { + return + } + + err := be.Delete() + if err != nil { + t.Fatalf("error deleting backend: %v", err) + } +} + +// TestCleanup runs the cleanup function after all tests are run. 
+func TestCleanup(t testing.TB) { + if CleanupFn == nil { + t.Log("CleanupFn function not set") + return + } + + if !TestCleanupTempDirs { + t.Logf("not cleaning up backend") + return + } + + err := CleanupFn() + if err != nil { + t.Fatalf("Cleanup returned error: %v", err) + } +} diff --git a/backend/test/tests_test.go b/backend/test/tests_test.go new file mode 100644 index 000000000..22e769745 --- /dev/null +++ b/backend/test/tests_test.go @@ -0,0 +1,38 @@ +package test_test + +import ( + "errors" + + "github.com/restic/restic/backend" + "github.com/restic/restic/backend/mem" + "github.com/restic/restic/backend/test" +) + +var be backend.Backend + +//go:generate go run ../test/generate_backend_tests.go + +func init() { + test.CreateFn = func() (backend.Backend, error) { + if be != nil { + return nil, errors.New("temporary memory backend dir already exists") + } + + be = mem.New() + + return be, nil + } + + test.OpenFn = func() (backend.Backend, error) { + if be == nil { + return nil, errors.New("repository not initialized") + } + + return be, nil + } + + test.CleanupFn = func() error { + be = nil + return nil + } +} diff --git a/backend/utils.go b/backend/utils.go new file mode 100644 index 000000000..c40d35e12 --- /dev/null +++ b/backend/utils.go @@ -0,0 +1,18 @@ +package backend + +// LoadAll reads all data stored in the backend for the handle. The buffer buf +// is resized to accommodate all data in the blob. 
+func LoadAll(be Backend, h Handle, buf []byte) ([]byte, error) { + fi, err := be.Stat(h) + if err != nil { + return nil, err + } + + if fi.Size > int64(len(buf)) { + buf = make([]byte, int(fi.Size)) + } + + n, err := be.Load(h, buf, 0) + buf = buf[:n] + return buf, err +} diff --git a/backend/utils_test.go b/backend/utils_test.go new file mode 100644 index 000000000..98a0106ef --- /dev/null +++ b/backend/utils_test.go @@ -0,0 +1,39 @@ +package backend_test + +import ( + "bytes" + "math/rand" + "testing" + + "github.com/restic/restic/backend" + "github.com/restic/restic/backend/mem" + . "github.com/restic/restic/test" +) + +const KiB = 1 << 10 +const MiB = 1 << 20 + +func TestLoadAll(t *testing.T) { + b := mem.New() + + for i := 0; i < 20; i++ { + data := Random(23+i, rand.Intn(MiB)+500*KiB) + + id := backend.Hash(data) + err := b.Save(backend.Handle{Name: id.String(), Type: backend.Data}, data) + OK(t, err) + + buf, err := backend.LoadAll(b, backend.Handle{Type: backend.Data, Name: id.String()}, nil) + OK(t, err) + + if len(buf) != len(data) { + t.Errorf("length of returned buffer does not match, want %d, got %d", len(data), len(buf)) + continue + } + + if !bytes.Equal(buf, data) { + t.Errorf("wrong data returned") + continue + } + } +} diff --git a/backend/writer.go b/backend/writer.go deleted file mode 100644 index 5764b872c..000000000 --- a/backend/writer.go +++ /dev/null @@ -1,38 +0,0 @@ -package backend - -import ( - "hash" - "io" -) - -// HashingWriter wraps an io.Writer to hashes all data that is written to it. -type HashingWriter struct { - w io.Writer - h hash.Hash - size int -} - -// NewHashAppendWriter wraps the writer w and feeds all data written to the hash h. -func NewHashingWriter(w io.Writer, h hash.Hash) *HashingWriter { - return &HashingWriter{ - h: h, - w: io.MultiWriter(w, h), - } -} - -// Write wraps the write method of the underlying writer and also hashes all data. 
-func (h *HashingWriter) Write(p []byte) (int, error) { - n, err := h.w.Write(p) - h.size += n - return n, err -} - -// Sum returns the hash of all data written so far. -func (h *HashingWriter) Sum(d []byte) []byte { - return h.h.Sum(d) -} - -// Size returns the number of bytes written to the underlying writer. -func (h *HashingWriter) Size() int { - return h.size -} diff --git a/backend/writer_test.go b/backend/writer_test.go deleted file mode 100644 index 9fda2c06f..000000000 --- a/backend/writer_test.go +++ /dev/null @@ -1,45 +0,0 @@ -package backend_test - -import ( - "bytes" - "crypto/rand" - "crypto/sha256" - "io" - "io/ioutil" - "testing" - - "github.com/restic/restic/backend" - . "github.com/restic/restic/test" -) - -func TestHashingWriter(t *testing.T) { - tests := []int{5, 23, 2<<18 + 23, 1 << 20} - - for _, size := range tests { - data := make([]byte, size) - _, err := io.ReadFull(rand.Reader, data) - if err != nil { - t.Fatalf("ReadFull: %v", err) - } - - expectedHash := sha256.Sum256(data) - - wr := backend.NewHashingWriter(ioutil.Discard, sha256.New()) - - n, err := io.Copy(wr, bytes.NewReader(data)) - OK(t, err) - - Assert(t, n == int64(size), - "HashAppendWriter: invalid number of bytes written: got %d, expected %d", - n, size) - - Assert(t, wr.Size() == size, - "HashAppendWriter: invalid number of bytes returned: got %d, expected %d", - wr.Size, size) - - resultingHash := wr.Sum(nil) - Assert(t, bytes.Equal(expectedHash[:], resultingHash), - "HashAppendWriter: hashes do not match: expected %02x, got %02x", - expectedHash, resultingHash) - } -} diff --git a/checker/checker.go b/checker/checker.go index ecd2ed626..97fe8df58 100644 --- a/checker/checker.go +++ b/checker/checker.go @@ -4,7 +4,6 @@ import ( "bytes" "errors" "fmt" - "io/ioutil" "sync" "github.com/restic/restic" @@ -647,17 +646,8 @@ func (c *Checker) CountPacks() uint64 { // checkPack reads a pack and checks the integrity of all blobs. 
func checkPack(r *repository.Repository, id backend.ID) error { debug.Log("Checker.checkPack", "checking pack %v", id.Str()) - rd, err := r.Backend().Get(backend.Data, id.String()) - if err != nil { - return err - } - - buf, err := ioutil.ReadAll(rd) - if err != nil { - return err - } - - err = rd.Close() + h := backend.Handle{Type: backend.Data, Name: id.String()} + buf, err := backend.LoadAll(r.Backend(), h, nil) if err != nil { return err } diff --git a/checker/checker_test.go b/checker/checker_test.go index 99e89a22a..10b37e219 100644 --- a/checker/checker_test.go +++ b/checker/checker_test.go @@ -1,7 +1,7 @@ package checker_test import ( - "io" + "fmt" "math/rand" "path/filepath" "sort" @@ -9,6 +9,7 @@ import ( "github.com/restic/restic" "github.com/restic/restic/backend" + "github.com/restic/restic/backend/mem" "github.com/restic/restic/checker" "github.com/restic/restic/repository" . "github.com/restic/restic/test" @@ -212,37 +213,22 @@ func TestDuplicatePacksInIndex(t *testing.T) { // errorBackend randomly modifies data after reading. type errorBackend struct { backend.Backend + ProduceErrors bool } -func (b errorBackend) Get(t backend.Type, name string) (io.ReadCloser, error) { - rd, err := b.Backend.Get(t, name) - if err != nil { - return rd, err +func (b errorBackend) Load(h backend.Handle, p []byte, off int64) (int, error) { + fmt.Printf("load %v\n", h) + n, err := b.Backend.Load(h, p, off) + + if b.ProduceErrors { + induceError(p) } - - if t != backend.Data { - return rd, err - } - - return backend.ReadCloser(faultReader{rd}), nil -} - -func (b errorBackend) GetReader(t backend.Type, name string, offset, length uint) (io.ReadCloser, error) { - rd, err := b.Backend.GetReader(t, name, offset, length) - if err != nil { - return rd, err - } - - if t != backend.Data { - return rd, err - } - - return backend.ReadCloser(faultReader{rd}), nil + return n, err } // induceError flips a bit in the slice. 
func induceError(data []byte) { - if rand.Float32() < 0.8 { + if rand.Float32() < 0.2 { return } @@ -250,22 +236,8 @@ func induceError(data []byte) { data[pos] ^= 1 } -// faultReader wraps a reader and randomly modifies data on read. -type faultReader struct { - rd io.Reader -} - -func (f faultReader) Read(p []byte) (int, error) { - n, err := f.rd.Read(p) - if n > 0 { - induceError(p) - } - - return n, err -} - func TestCheckerModifiedData(t *testing.T) { - be := backend.NewMemoryBackend() + be := mem.New() repo := repository.New(be) OK(t, repo.Init(TestPassword)) @@ -275,7 +247,8 @@ func TestCheckerModifiedData(t *testing.T) { OK(t, err) t.Logf("archived as %v", id.Str()) - checkRepo := repository.New(errorBackend{be}) + beError := &errorBackend{Backend: be} + checkRepo := repository.New(beError) OK(t, checkRepo.SearchKey(TestPassword)) chkr := checker.New(checkRepo) @@ -289,6 +262,7 @@ func TestCheckerModifiedData(t *testing.T) { t.Errorf("expected no hints, got %v: %v", len(hints), hints) } + beError.ProduceErrors = true errFound := false for _, err := range checkPacks(chkr) { t.Logf("pack error: %v", err) diff --git a/cmd/restic/cmd_cat.go b/cmd/restic/cmd_cat.go index c8d7bffd8..1a7423d18 100644 --- a/cmd/restic/cmd_cat.go +++ b/cmd/restic/cmd_cat.go @@ -4,7 +4,6 @@ import ( "encoding/json" "errors" "fmt" - "io" "os" "github.com/restic/restic" @@ -101,20 +100,19 @@ func (cmd CmdCat) Execute(args []string) error { return nil case "key": - rd, err := repo.Backend().Get(backend.Key, id.String()) + h := backend.Handle{Type: backend.Key, Name: id.String()} + buf, err := backend.LoadAll(repo.Backend(), h, nil) if err != nil { return err } - dec := json.NewDecoder(rd) - - var key repository.Key - err = dec.Decode(&key) + key := &repository.Key{} + err = json.Unmarshal(buf, key) if err != nil { return err } - buf, err := json.MarshalIndent(&key, "", " ") + buf, err = json.MarshalIndent(&key, "", " ") if err != nil { return err } @@ -153,12 +151,13 @@ func (cmd CmdCat) 
Execute(args []string) error { switch tpe { case "pack": - rd, err := repo.Backend().Get(backend.Data, id.String()) + h := backend.Handle{Type: backend.Data, Name: id.String()} + buf, err := backend.LoadAll(repo.Backend(), h, nil) if err != nil { return err } - _, err = io.Copy(os.Stdout, rd) + _, err = os.Stdout.Write(buf) return err case "blob": diff --git a/cmd/restic/cmd_rebuild_index.go b/cmd/restic/cmd_rebuild_index.go index 7a550354b..ee73992a6 100644 --- a/cmd/restic/cmd_rebuild_index.go +++ b/cmd/restic/cmd_rebuild_index.go @@ -2,8 +2,6 @@ package main import ( "bytes" - "io" - "io/ioutil" "github.com/restic/restic/backend" "github.com/restic/restic/debug" @@ -126,6 +124,7 @@ func (cmd CmdRebuildIndex) RebuildIndex() error { cmd.global.Printf("checking for additional packs\n") newPacks := 0 + var buf []byte for packID := range cmd.repo.List(backend.Data, done) { if packsDone.Has(packID) { continue @@ -134,27 +133,12 @@ func (cmd CmdRebuildIndex) RebuildIndex() error { debug.Log("RebuildIndex.RebuildIndex", "pack %v not indexed", packID.Str()) newPacks++ - rd, err := cmd.repo.Backend().GetReader(backend.Data, packID.String(), 0, 0) - if err != nil { - debug.Log("RebuildIndex.RebuildIndex", "GetReader returned error: %v", err) - return err - } + var err error - var readSeeker io.ReadSeeker - if r, ok := rd.(io.ReadSeeker); ok { - debug.Log("RebuildIndex.RebuildIndex", "reader is seekable") - readSeeker = r - } else { - debug.Log("RebuildIndex.RebuildIndex", "reader is not seekable, loading contents to ram") - buf, err := ioutil.ReadAll(rd) - if err != nil { - return err - } + h := backend.Handle{Type: backend.Data, Name: packID.String()} + buf, err = backend.LoadAll(cmd.repo.Backend(), h, buf) - readSeeker = bytes.NewReader(buf) - } - - up, err := pack.NewUnpacker(cmd.repo.Key(), readSeeker) + up, err := pack.NewUnpacker(cmd.repo.Key(), bytes.NewReader(buf)) if err != nil { debug.Log("RebuildIndex.RebuildIndex", "error while unpacking pack %v", packID.Str()) 
return err @@ -171,9 +155,6 @@ func (cmd CmdRebuildIndex) RebuildIndex() error { }) } - err = rd.Close() - debug.Log("RebuildIndex.RebuildIndex", "error closing reader for pack %v: %v", packID.Str(), err) - if repository.IndexFull(combinedIndex) { combinedIndex, err = cmd.storeIndex(combinedIndex) if err != nil { diff --git a/cmd/restic/integration_fuse_test.go b/cmd/restic/integration_fuse_test.go index 807b335fc..1e696706b 100644 --- a/cmd/restic/integration_fuse_test.go +++ b/cmd/restic/integration_fuse_test.go @@ -54,9 +54,13 @@ func waitForMount(dir string) error { } func cmdMount(t testing.TB, global GlobalOptions, dir string, ready, done chan struct{}) { + defer func() { + ready <- struct{}{} + }() + cmd := &CmdMount{global: &global, ready: ready, done: done} OK(t, cmd.Execute([]string{dir})) - if TestCleanup { + if TestCleanupTempDirs { RemoveAll(t, dir) } } @@ -104,7 +108,7 @@ func TestMount(t *testing.T) { // We remove the mountpoint now to check that cmdMount creates it RemoveAll(t, mountpoint) - ready := make(chan struct{}, 1) + ready := make(chan struct{}, 2) done := make(chan struct{}) go cmdMount(t, global, mountpoint, ready, done) <-ready diff --git a/cmd/restic/integration_helpers_test.go b/cmd/restic/integration_helpers_test.go index 734d974c1..b3bada889 100644 --- a/cmd/restic/integration_helpers_test.go +++ b/cmd/restic/integration_helpers_test.go @@ -178,7 +178,7 @@ func configureRestic(t testing.TB, cache, repo string) GlobalOptions { } func cleanupTempdir(t testing.TB, tempdir string) { - if !TestCleanup { + if !TestCleanupTempDirs { t.Logf("leaving temporary directory %v used for test", tempdir) return } @@ -209,7 +209,7 @@ func withTestEnvironment(t testing.TB, f func(*testEnvironment, GlobalOptions)) f(&env, configureRestic(t, env.cache, env.repo)) - if !TestCleanup { + if !TestCleanupTempDirs { t.Logf("leaving temporary directory %v used for test", tempdir) return } diff --git a/crypto/crypto_test.go b/crypto/crypto_test.go index 
a821e105f..311eb1835 100644 --- a/crypto/crypto_test.go +++ b/crypto/crypto_test.go @@ -23,10 +23,7 @@ func TestEncryptDecrypt(t *testing.T) { } for _, size := range tests { - data := make([]byte, size) - _, err := io.ReadFull(RandomReader(42, size), data) - OK(t, err) - + data := Random(42, size) buf := make([]byte, size+crypto.Extension) ciphertext, err := crypto.Encrypt(k, buf, data) @@ -140,7 +137,7 @@ func BenchmarkEncryptWriter(b *testing.B) { b.SetBytes(int64(size)) for i := 0; i < b.N; i++ { - rd := RandomReader(23, size) + rd := RandomLimitReader(23, size) wr := crypto.EncryptTo(k, ioutil.Discard) n, err := io.Copy(wr, rd) OK(b, err) @@ -200,7 +197,7 @@ func BenchmarkEncryptDecryptReader(b *testing.B) { buf := bytes.NewBuffer(nil) for i := 0; i < b.N; i++ { - rd := RandomReader(23, size) + rd := RandomLimitReader(23, size) buf.Reset() wr := crypto.EncryptTo(k, buf) _, err := io.Copy(wr, rd) @@ -245,14 +242,12 @@ func TestEncryptStreamWriter(t *testing.T) { } for _, size := range tests { - data := make([]byte, size) - _, err := io.ReadFull(RandomReader(42, size), data) - OK(t, err) + data := Random(42, size) ciphertext := bytes.NewBuffer(nil) wr := crypto.EncryptTo(k, ciphertext) - _, err = io.Copy(wr, bytes.NewReader(data)) + _, err := io.Copy(wr, bytes.NewReader(data)) OK(t, err) OK(t, wr.Close()) @@ -279,10 +274,8 @@ func TestDecryptStreamReader(t *testing.T) { } for _, size := range tests { - data := make([]byte, size) - _, err := io.ReadFull(RandomReader(42, size), data) - OK(t, err) - + data := Random(42, size) + var err error ciphertext := make([]byte, size+crypto.Extension) // encrypt with default function @@ -313,14 +306,12 @@ func TestEncryptWriter(t *testing.T) { } for _, size := range tests { - data := make([]byte, size) - _, err := io.ReadFull(RandomReader(42, size), data) - OK(t, err) + data := Random(42, size) buf := bytes.NewBuffer(nil) wr := crypto.EncryptTo(k, buf) - _, err = io.Copy(wr, bytes.NewReader(data)) + _, err := io.Copy(wr, 
bytes.NewReader(data)) OK(t, err) OK(t, wr.Close()) diff --git a/node_test.go b/node_test.go index ffb347e45..57bd6f550 100644 --- a/node_test.go +++ b/node_test.go @@ -145,7 +145,7 @@ func TestNodeRestoreAt(t *testing.T) { OK(t, err) defer func() { - if TestCleanup { + if TestCleanupTempDirs { RemoveAll(t, tempdir) } else { t.Logf("leaving tempdir at %v", tempdir) diff --git a/pack/pack.go b/pack/pack.go index 697adb8aa..8a96a942d 100644 --- a/pack/pack.go +++ b/pack/pack.go @@ -1,7 +1,7 @@ package pack import ( - "crypto/sha256" + "bytes" "encoding/binary" "errors" "fmt" @@ -12,8 +12,10 @@ import ( "github.com/restic/restic/crypto" ) +// BlobType specifies what a blob stored in a pack is. type BlobType uint8 +// These are the blob types that can be stored in a pack. const ( Data BlobType = 0 Tree = 1 @@ -30,6 +32,7 @@ func (t BlobType) String() string { return fmt.Sprintf("", t) } +// MarshalJSON encodes the BlobType into JSON. func (t BlobType) MarshalJSON() ([]byte, error) { switch t { case Data: @@ -41,6 +44,7 @@ func (t BlobType) MarshalJSON() ([]byte, error) { return nil, errors.New("unknown blob type") } +// UnmarshalJSON decodes the BlobType from JSON. func (t *BlobType) UnmarshalJSON(buf []byte) error { switch string(buf) { case `"data"`: @@ -79,16 +83,15 @@ type Packer struct { bytes uint k *crypto.Key - wr io.Writer - hw *backend.HashingWriter + buf *bytes.Buffer m sync.Mutex } // NewPacker returns a new Packer that can be used to pack blobs // together. -func NewPacker(k *crypto.Key, w io.Writer) *Packer { - return &Packer{k: k, wr: w, hw: backend.NewHashingWriter(w, sha256.New())} +func NewPacker(k *crypto.Key, buf []byte) *Packer { + return &Packer{k: k, buf: bytes.NewBuffer(buf)} } // Add saves the data read from rd as a new blob to the packer. 
Returned is the @@ -99,7 +102,7 @@ func (p *Packer) Add(t BlobType, id backend.ID, rd io.Reader) (int64, error) { c := Blob{Type: t, ID: id} - n, err := io.Copy(p.hw, rd) + n, err := io.Copy(p.buf, rd) c.Length = uint(n) c.Offset = p.bytes p.bytes += uint(n) @@ -118,45 +121,47 @@ type headerEntry struct { } // Finalize writes the header for all added blobs and finalizes the pack. -// Returned are the complete number of bytes written, including the header. -// After Finalize() has finished, the ID of this pack can be obtained by -// calling ID(). -func (p *Packer) Finalize() (bytesWritten uint, err error) { +// Returned are all bytes written, including the header. +func (p *Packer) Finalize() ([]byte, error) { p.m.Lock() defer p.m.Unlock() - bytesWritten = p.bytes + bytesWritten := p.bytes - // create writer to encrypt header - wr := crypto.EncryptTo(p.k, p.hw) - - bytesHeader, err := p.writeHeader(wr) + hdrBuf := bytes.NewBuffer(nil) + bytesHeader, err := p.writeHeader(hdrBuf) if err != nil { - wr.Close() - return bytesWritten + bytesHeader, err + return nil, err } - bytesWritten += bytesHeader - - // finalize encrypted header - err = wr.Close() + encryptedHeader, err := crypto.Encrypt(p.k, nil, hdrBuf.Bytes()) if err != nil { - return bytesWritten, err + return nil, err } - // account for crypto overhead - bytesWritten += crypto.Extension + // append the header + n, err := p.buf.Write(encryptedHeader) + if err != nil { + return nil, err + } + + hdrBytes := bytesHeader + crypto.Extension + if uint(n) != hdrBytes { + return nil, errors.New("wrong number of bytes written") + } + + bytesWritten += hdrBytes // write length - err = binary.Write(p.hw, binary.LittleEndian, uint32(uint(len(p.blobs))*entrySize+crypto.Extension)) + err = binary.Write(p.buf, binary.LittleEndian, uint32(uint(len(p.blobs))*entrySize+crypto.Extension)) if err != nil { - return bytesWritten, err + return nil, err } bytesWritten += uint(binary.Size(uint32(0))) p.bytes = uint(bytesWritten) - return 
bytesWritten, nil + return p.buf.Bytes(), nil } // writeHeader constructs and writes the header to wr. @@ -179,18 +184,6 @@ func (p *Packer) writeHeader(wr io.Writer) (bytesWritten uint, err error) { return } -// ID returns the ID of all data written so far. -func (p *Packer) ID() backend.ID { - p.m.Lock() - defer p.m.Unlock() - - hash := p.hw.Sum(nil) - id := backend.ID{} - copy(id[:], hash) - - return id -} - // Size returns the number of bytes written so far. func (p *Packer) Size() uint { p.m.Lock() @@ -215,11 +208,6 @@ func (p *Packer) Blobs() []Blob { return p.blobs } -// Writer returns the underlying writer. -func (p *Packer) Writer() io.Writer { - return p.wr -} - func (p *Packer) String() string { return fmt.Sprintf("", len(p.blobs), p.bytes) } diff --git a/pack/pack_test.go b/pack/pack_test.go index 28ef4c22c..0d5c1f155 100644 --- a/pack/pack_test.go +++ b/pack/pack_test.go @@ -34,23 +34,19 @@ func TestCreatePack(t *testing.T) { bufs = append(bufs, Buf{data: b, id: h}) } - file := bytes.NewBuffer(nil) - // create random keys k := crypto.NewRandomKey() // pack blobs - p := pack.NewPacker(k, file) + p := pack.NewPacker(k, nil) for _, b := range bufs { p.Add(pack.Tree, b.id, bytes.NewReader(b.data)) } - // write file - n, err := p.Finalize() + packData, err := p.Finalize() OK(t, err) written := 0 - // data for _, l := range lengths { written += l } @@ -62,11 +58,11 @@ func TestCreatePack(t *testing.T) { written += crypto.Extension // check length - Equals(t, uint(written), n) + Equals(t, written, len(packData)) Equals(t, uint(written), p.Size()) // read and parse it again - rd := bytes.NewReader(file.Bytes()) + rd := bytes.NewReader(packData) np, err := pack.NewUnpacker(k, rd) OK(t, err) Equals(t, len(np.Entries), len(bufs)) diff --git a/repository/index.go b/repository/index.go index 7bb4a273e..87a53c8ac 100644 --- a/repository/index.go +++ b/repository/index.go @@ -1,6 +1,7 @@ package repository import ( + "bytes" "encoding/json" "errors" "fmt" @@ -564,13 
+565,12 @@ func LoadIndexWithDecoder(repo *Repository, id string, fn func(io.Reader) (*Inde return nil, err } - rd, err := repo.GetDecryptReader(backend.Index, idxID.String()) + buf, err := repo.LoadAndDecrypt(backend.Index, idxID) if err != nil { return nil, err } - defer closeOrErr(rd, &err) - idx, err = fn(rd) + idx, err = fn(bytes.NewReader(buf)) if err != nil { debug.Log("LoadIndexWithDecoder", "error while decoding index %v: %v", id, err) return nil, err @@ -594,33 +594,14 @@ func ConvertIndex(repo *Repository, id backend.ID) (backend.ID, error) { return id, err } - blob, err := repo.CreateEncryptedBlob(backend.Index) - if err != nil { - return id, err - } - + buf := bytes.NewBuffer(nil) idx.supersedes = backend.IDs{id} - err = idx.Encode(blob) + err = idx.Encode(buf) if err != nil { debug.Log("ConvertIndex", "oldIdx.Encode() returned error: %v", err) return id, err } - err = blob.Close() - if err != nil { - debug.Log("ConvertIndex", "blob.Close() returned error: %v", err) - return id, err - } - - newID := blob.ID() - debug.Log("ConvertIndex", "index %v converted to new format as %v", id.Str(), newID.Str()) - - err = repo.be.Remove(backend.Index, id.String()) - if err != nil { - debug.Log("ConvertIndex", "backend.Remove(%v) returned error: %v", id.Str(), err) - return id, err - } - - return newID, nil + return repo.SaveUnpacked(backend.Index, buf.Bytes()) } diff --git a/repository/key.go b/repository/key.go index 22ed2ca2e..c44b43710 100644 --- a/repository/key.go +++ b/repository/key.go @@ -2,8 +2,6 @@ package repository import ( "crypto/rand" - "crypto/sha256" - "encoding/hex" "encoding/json" "errors" "fmt" @@ -119,17 +117,14 @@ func SearchKey(s *Repository, password string) (*Key, error) { // LoadKey loads a key from the backend. 
func LoadKey(s *Repository, name string) (k *Key, err error) { - // extract data from repo - rd, err := s.be.Get(backend.Key, name) + h := backend.Handle{Type: backend.Key, Name: name} + data, err := backend.LoadAll(s.be, h, nil) if err != nil { return nil, err } - defer closeOrErr(rd, &err) - // restore json - dec := json.NewDecoder(rd) - k = new(Key) - err = dec.Decode(k) + k = &Key{} + err = json.Unmarshal(data, k) if err != nil { return nil, err } @@ -194,26 +189,17 @@ func AddKey(s *Repository, password string, template *crypto.Key) (*Key, error) } // store in repository and return - blob, err := s.be.Create() + h := backend.Handle{ + Type: backend.Key, + Name: backend.Hash(buf).String(), + } + + err = s.be.Save(h, buf) if err != nil { return nil, err } - plainhw := backend.NewHashingWriter(blob, sha256.New()) - - _, err = plainhw.Write(buf) - if err != nil { - return nil, err - } - - name := hex.EncodeToString(plainhw.Sum(nil)) - - err = blob.Finalize(backend.Key, name) - if err != nil { - return nil, err - } - - newkey.name = name + newkey.name = h.Name return newkey, nil } @@ -225,6 +211,7 @@ func (k *Key) String() string { return fmt.Sprintf("", k.Username, k.Hostname, k.Created) } +// Name returns an identifier for the key. func (k Key) Name() string { return k.name } diff --git a/repository/packer_manager.go b/repository/packer_manager.go index 99b74cea4..42ffe96cb 100644 --- a/repository/packer_manager.go +++ b/repository/packer_manager.go @@ -42,12 +42,8 @@ func (r *packerManager) findPacker(size uint) (*pack.Packer, error) { } // no suitable packer found, return new - blob, err := r.be.Create() - if err != nil { - return nil, err - } - debug.Log("Repo.findPacker", "create new pack %p for %d bytes", blob, size) - return pack.NewPacker(r.key, blob), nil + debug.Log("Repo.findPacker", "create new pack for %d bytes", size) + return pack.NewPacker(r.key, nil), nil } // insertPacker appends p to s.packs. 
@@ -62,28 +58,29 @@ func (r *packerManager) insertPacker(p *pack.Packer) { // savePacker stores p in the backend. func (r *Repository) savePacker(p *pack.Packer) error { debug.Log("Repo.savePacker", "save packer with %d blobs\n", p.Count()) - _, err := p.Finalize() + data, err := p.Finalize() if err != nil { return err } - // move file to the final location - sid := p.ID() - err = p.Writer().(backend.Blob).Finalize(backend.Data, sid.String()) + id := backend.Hash(data) + h := backend.Handle{Type: backend.Data, Name: id.String()} + + err = r.be.Save(h, data) if err != nil { - debug.Log("Repo.savePacker", "blob Finalize() error: %v", err) + debug.Log("Repo.savePacker", "Save(%v) error: %v", h, err) return err } - debug.Log("Repo.savePacker", "saved as %v", sid.Str()) + debug.Log("Repo.savePacker", "saved as %v", h) // update blobs in the index for _, b := range p.Blobs() { - debug.Log("Repo.savePacker", " updating blob %v to pack %v", b.ID.Str(), sid.Str()) + debug.Log("Repo.savePacker", " updating blob %v to pack %v", b.ID.Str(), id.Str()) r.idx.Current().Store(PackedBlob{ Type: b.Type, ID: b.ID, - PackID: sid, + PackID: id, Offset: b.Offset, Length: uint(b.Length), }) diff --git a/repository/repository.go b/repository/repository.go index bc5e380ac..88f289338 100644 --- a/repository/repository.go +++ b/repository/repository.go @@ -2,7 +2,6 @@ package repository import ( "bytes" - "crypto/sha256" "encoding/json" "errors" "fmt" @@ -56,24 +55,14 @@ func (r *Repository) PrefixLength(t backend.Type) (int, error) { func (r *Repository) LoadAndDecrypt(t backend.Type, id backend.ID) ([]byte, error) { debug.Log("Repo.Load", "load %v with id %v", t, id.Str()) - rd, err := r.be.Get(t, id.String()) + h := backend.Handle{Type: t, Name: id.String()} + buf, err := backend.LoadAll(r.be, h, nil) if err != nil { debug.Log("Repo.Load", "error loading %v: %v", id.Str(), err) return nil, err } - buf, err := ioutil.ReadAll(rd) - if err != nil { - return nil, err - } - - err = rd.Close() 
- if err != nil { - return nil, err - } - - // check hash - if !backend.Hash(buf).Equal(id) { + if t != backend.Config && !backend.Hash(buf).Equal(id) { return nil, errors.New("invalid data returned") } @@ -100,7 +89,9 @@ func (r *Repository) LoadBlob(t pack.BlobType, id backend.ID, plaintextBuf []byt plaintextBufSize := uint(cap(plaintextBuf)) if blob.PlaintextLength() > plaintextBufSize { - return nil, fmt.Errorf("buf is too small, need %d more bytes", blob.PlaintextLength()-plaintextBufSize) + debug.Log("Repo.LoadBlob", "need to expand buffer: want %d bytes, got %d", + blob.PlaintextLength(), plaintextBufSize) + plaintextBuf = make([]byte, blob.PlaintextLength()) } if blob.Type != t { @@ -111,22 +102,18 @@ func (r *Repository) LoadBlob(t pack.BlobType, id backend.ID, plaintextBuf []byt debug.Log("Repo.LoadBlob", "id %v found: %v", id.Str(), blob) // load blob from pack - rd, err := r.be.GetReader(backend.Data, blob.PackID.String(), blob.Offset, blob.Length) + h := backend.Handle{Type: backend.Data, Name: blob.PackID.String()} + ciphertextBuf := make([]byte, blob.Length) + n, err := r.be.Load(h, ciphertextBuf, int64(blob.Offset)) if err != nil { debug.Log("Repo.LoadBlob", "error loading blob %v: %v", blob, err) return nil, err } - // make buffer that is large enough for the complete blob - ciphertextBuf := make([]byte, blob.Length) - _, err = io.ReadFull(rd, ciphertextBuf) - if err != nil { - return nil, err - } - - err = rd.Close() - if err != nil { - return nil, err + if uint(n) != blob.Length { + debug.Log("Repo.LoadBlob", "error loading blob %v: wrong length returned, want %d, got %d", + blob.Length, uint(n)) + return nil, errors.New("wrong length returned") } // decrypt @@ -156,61 +143,23 @@ func closeOrErr(cl io.Closer, err *error) { // LoadJSONUnpacked decrypts the data and afterwards calls json.Unmarshal on // the item. 
func (r *Repository) LoadJSONUnpacked(t backend.Type, id backend.ID, item interface{}) (err error) { - // load blob from backend - rd, err := r.be.Get(t, id.String()) - if err != nil { - return err - } - defer closeOrErr(rd, &err) - - // decrypt - decryptRd, err := crypto.DecryptFrom(r.key, rd) - defer closeOrErr(decryptRd, &err) + buf, err := r.LoadAndDecrypt(t, id) if err != nil { return err } - // decode - decoder := json.NewDecoder(decryptRd) - err = decoder.Decode(item) - if err != nil { - return err - } - - return nil + return json.Unmarshal(buf, item) } // LoadJSONPack calls LoadBlob() to load a blob from the backend, decrypt the // data and afterwards call json.Unmarshal on the item. func (r *Repository) LoadJSONPack(t pack.BlobType, id backend.ID, item interface{}) (err error) { - // lookup pack - blob, err := r.idx.Lookup(id) + buf, err := r.LoadBlob(t, id, nil) if err != nil { return err } - // load blob from pack - rd, err := r.be.GetReader(backend.Data, blob.PackID.String(), blob.Offset, blob.Length) - if err != nil { - return err - } - defer closeOrErr(rd, &err) - - // decrypt - decryptRd, err := crypto.DecryptFrom(r.key, rd) - defer closeOrErr(decryptRd, &err) - if err != nil { - return err - } - - // decode - decoder := json.NewDecoder(decryptRd) - err = decoder.Decode(item) - if err != nil { - return err - } - - return nil + return json.Unmarshal(buf, item) } // LookupBlobSize returns the size of blob id. @@ -315,44 +264,35 @@ func (r *Repository) SaveJSON(t pack.BlobType, item interface{}) (backend.ID, er // SaveJSONUnpacked serialises item as JSON and encrypts and saves it in the // backend as type t, without a pack. It returns the storage hash. 
func (r *Repository) SaveJSONUnpacked(t backend.Type, item interface{}) (backend.ID, error) { - // create file - blob, err := r.be.Create() - if err != nil { - return backend.ID{}, err - } - debug.Log("Repo.SaveJSONUnpacked", "create new blob %v", t) - - // hash - hw := backend.NewHashingWriter(blob, sha256.New()) - - // encrypt blob - ewr := crypto.EncryptTo(r.key, hw) - - enc := json.NewEncoder(ewr) - err = enc.Encode(item) + debug.Log("Repo.SaveJSONUnpacked", "save new blob %v", t) + plaintext, err := json.Marshal(item) if err != nil { return backend.ID{}, fmt.Errorf("json.Encode: %v", err) } - err = ewr.Close() + return r.SaveUnpacked(t, plaintext) +} + +// SaveUnpacked encrypts data and stores it in the backend. Returned is the +// storage hash. +func (r *Repository) SaveUnpacked(t backend.Type, p []byte) (id backend.ID, err error) { + ciphertext := make([]byte, len(p)+crypto.Extension) + ciphertext, err = r.Encrypt(ciphertext, p) if err != nil { return backend.ID{}, err } - // finalize blob in the backend - hash := hw.Sum(nil) - sid := backend.ID{} - copy(sid[:], hash) + id = backend.Hash(ciphertext) + h := backend.Handle{Type: t, Name: id.String()} - err = blob.Finalize(t, sid.String()) + err = r.be.Save(h, ciphertext) if err != nil { - debug.Log("Repo.SaveJSONUnpacked", "error saving blob %v as %v: %v", t, sid, err) + debug.Log("Repo.SaveJSONUnpacked", "error saving blob %v: %v", h, err) return backend.ID{}, err } - debug.Log("Repo.SaveJSONUnpacked", "new blob %v saved as %v", t, sid) - - return sid, nil + debug.Log("Repo.SaveJSONUnpacked", "blob %v saved", h) + return id, nil } // Flush saves all remaining packs. @@ -388,80 +328,16 @@ func (r *Repository) SetIndex(i *MasterIndex) { r.idx = i } -// BlobWriter encrypts and saves the data written to it in a backend. After -// Close() was called, ID() returns the backend.ID. 
-type BlobWriter struct { - id backend.ID - blob backend.Blob - hw *backend.HashingWriter - ewr io.WriteCloser - t backend.Type - closed bool -} - -// CreateEncryptedBlob returns a BlobWriter that encrypts and saves the data -// written to it in the backend. After Close() was called, ID() returns the -// backend.ID. -func (r *Repository) CreateEncryptedBlob(t backend.Type) (*BlobWriter, error) { - blob, err := r.be.Create() - if err != nil { - return nil, err - } - - // hash - hw := backend.NewHashingWriter(blob, sha256.New()) - - // encrypt blob - ewr := crypto.EncryptTo(r.key, hw) - - return &BlobWriter{t: t, blob: blob, hw: hw, ewr: ewr}, nil -} - -func (bw *BlobWriter) Write(buf []byte) (int, error) { - return bw.ewr.Write(buf) -} - -// Close finalizes the blob in the backend, afterwards ID() can be used to retrieve the ID. -func (bw *BlobWriter) Close() error { - if bw.closed { - return errors.New("BlobWriter already closed") - } - bw.closed = true - - err := bw.ewr.Close() - if err != nil { - return err - } - - copy(bw.id[:], bw.hw.Sum(nil)) - return bw.blob.Finalize(bw.t, bw.id.String()) -} - -// ID returns the Id the blob has been written to after Close() was called. -func (bw *BlobWriter) ID() backend.ID { - return bw.id -} - -// SaveIndex saves an index to repo's backend. +// SaveIndex saves an index in the repository. func SaveIndex(repo *Repository, index *Index) (backend.ID, error) { - blob, err := repo.CreateEncryptedBlob(backend.Index) + buf := bytes.NewBuffer(nil) + + err := index.Finalize(buf) if err != nil { return backend.ID{}, err } - err = index.Finalize(blob) - if err != nil { - return backend.ID{}, err - } - - err = blob.Close() - if err != nil { - return backend.ID{}, err - } - - sid := blob.ID() - err = index.SetID(sid) - return sid, err + return repo.SaveUnpacked(backend.Index, buf.Bytes()) } // saveIndex saves all indexes in the backend. 
@@ -545,17 +421,6 @@ func LoadIndex(repo *Repository, id string) (*Index, error) { return nil, err } -// GetDecryptReader opens the file id stored in the backend and returns a -// reader that yields the decrypted content. The reader must be closed. -func (r *Repository) GetDecryptReader(t backend.Type, id string) (io.ReadCloser, error) { - rd, err := r.be.Get(t, id) - if err != nil { - return nil, err - } - - return newDecryptReadCloser(r.key, rd) -} - // SearchKey finds a key with the supplied password, afterwards the config is // read and parsed. func (r *Repository) SearchKey(password string) error { diff --git a/run_integration_tests.go b/run_integration_tests.go index 5ea7957bc..a771e274a 100644 --- a/run_integration_tests.go +++ b/run_integration_tests.go @@ -6,7 +6,9 @@ import ( "bytes" "flag" "fmt" + "io" "io/ioutil" + "net/http" "os" "os/exec" "path/filepath" @@ -17,6 +19,7 @@ import ( ) var runCrossCompile = flag.Bool("cross-compile", true, "run cross compilation tests") +var minioServer = flag.String("minio", "", "path to the minio server binary") func init() { flag.Parse() @@ -30,10 +33,57 @@ type CIEnvironment interface { type TravisEnvironment struct { goxArch []string goxOS []string + minio string } -var envVendorExperiment = map[string]string{ - "GO15VENDOREXPERIMENT": "1", +func (env *TravisEnvironment) getMinio() { + if *minioServer != "" { + msg("using minio server at %q\n", *minioServer) + env.minio = *minioServer + return + } + + tempfile, err := ioutil.TempFile("", "minio-server-") + if err != nil { + fmt.Fprintf(os.Stderr, "create tempfile failed: %v\n", err) + os.Exit(10) + } + + url := fmt.Sprintf("https://dl.minio.io/server/minio/release/%s-%s/minio", + runtime.GOOS, runtime.GOARCH) + msg("downloading %v\n", url) + res, err := http.Get(url) + if err != nil { + msg("downloading minio failed: %v\n", err) + return + } + + _, err = io.Copy(tempfile, res.Body) + if err != nil { + msg("downloading minio failed: %v\n", err) + return + } + + err = 
res.Body.Close() + if err != nil { + msg("saving minio failed: %v\n", err) + return + } + + err = tempfile.Close() + if err != nil { + msg("closing tempfile failed: %v\n", err) + return + } + + err = os.Chmod(tempfile.Name(), 0755) + if err != nil { + msg("making minio server executable failed: %v\n", err) + return + } + + msg("downloaded minio server to %v\n", tempfile.Name()) + env.minio = tempfile.Name() } func (env *TravisEnvironment) Prepare() { @@ -42,7 +92,7 @@ func (env *TravisEnvironment) Prepare() { run("go", "get", "golang.org/x/tools/cmd/cover") run("go", "get", "github.com/mattn/goveralls") run("go", "get", "github.com/pierrre/gotestcover") - runWithEnv(envVendorExperiment, "go", "get", "github.com/minio/minio") + env.getMinio() if runtime.GOOS == "darwin" { // install the libraries necessary for fuse @@ -125,8 +175,8 @@ func (env *TravisEnvironment) RunTests() { err error ) - if goVersionAtLeast151() { - srv, err = NewMinioServer() + if env.minio != "" { + srv, err = NewMinioServer(env.minio) if err != nil { fmt.Fprintf(os.Stderr, "error running minio server: %v", err) os.Exit(8) @@ -273,7 +323,7 @@ var minioEnv = map[string]string{ // NewMinioServer prepares and runs a minio server for the s3 backend tests in // a temporary directory. 
-func NewMinioServer() (*MinioServer, error) { +func NewMinioServer(minio string) (*MinioServer, error) { msg("running minio server\n") cfgdir, err := ioutil.TempDir("", "minio-config-") if err != nil { @@ -302,7 +352,7 @@ func NewMinioServer() (*MinioServer, error) { out := bytes.NewBuffer(nil) - cmd := exec.Command("minio", + cmd := exec.Command(minio, "--config-folder", cfgdir, "--address", "127.0.0.1:9000", "server", dir) diff --git a/test/backend.go b/test/backend.go index 8dea18578..4da1f24f9 100644 --- a/test/backend.go +++ b/test/backend.go @@ -15,7 +15,7 @@ import ( var ( TestPassword = getStringVar("RESTIC_TEST_PASSWORD", "geheim") - TestCleanup = getBoolVar("RESTIC_TEST_CLEANUP", true) + TestCleanupTempDirs = getBoolVar("RESTIC_TEST_CLEANUP", true) TestTempDir = getStringVar("RESTIC_TEST_TMPDIR", "") RunIntegrationTest = getBoolVar("RESTIC_TEST_INTEGRATION", true) RunFuseTest = getBoolVar("RESTIC_TEST_FUSE", true) @@ -70,7 +70,7 @@ func SetupRepo() *repository.Repository { } func TeardownRepo(repo *repository.Repository) { - if !TestCleanup { + if !TestCleanupTempDirs { l := repo.Backend().(*local.Local) fmt.Printf("leaving local backend at %s\n", l.Location()) return diff --git a/test/helpers.go b/test/helpers.go index bfab04cc9..636dddd8f 100644 --- a/test/helpers.go +++ b/test/helpers.go @@ -75,14 +75,33 @@ func ParseID(s string) backend.ID { // Random returns size bytes of pseudo-random data derived from the seed. 
 func Random(seed, count int) []byte {
-	buf := make([]byte, count)
+	p := make([]byte, count)
 	rnd := mrand.New(mrand.NewSource(int64(seed)))
-	for i := 0; i < count; i++ {
-		buf[i] = byte(rnd.Uint32())
+
+	for i := 0; i < len(p); i += 8 {
+		val := rnd.Int63()
+		var data = []byte{
+			byte((val >> 0) & 0xff),
+			byte((val >> 8) & 0xff),
+			byte((val >> 16) & 0xff),
+			byte((val >> 24) & 0xff),
+			byte((val >> 32) & 0xff),
+			byte((val >> 40) & 0xff),
+			byte((val >> 48) & 0xff),
+			byte((val >> 56) & 0xff),
+		}
+
+		for j := range data {
+			cur := i + j
+			if cur >= len(p) {
+				break
+			}
+			p[cur] = data[j]
+		}
 	}
 
-	return buf
+	return p
 }
 
 type rndReader struct {
@@ -90,18 +109,41 @@ type rndReader struct {
 }
 
 func (r *rndReader) Read(p []byte) (int, error) {
-	for i := range p {
-		p[i] = byte(r.src.Uint32())
+	for i := 0; i < len(p); i += 8 {
+		val := r.src.Int63()
+		var data = []byte{
+			byte((val >> 0) & 0xff),
+			byte((val >> 8) & 0xff),
+			byte((val >> 16) & 0xff),
+			byte((val >> 24) & 0xff),
+			byte((val >> 32) & 0xff),
+			byte((val >> 40) & 0xff),
+			byte((val >> 48) & 0xff),
+			byte((val >> 56) & 0xff),
+		}
+
+		for j := range data {
+			cur := i + j
+			if cur >= len(p) {
+				break
+			}
+			p[cur] = data[j]
+		}
 	}
 
 	return len(p), nil
 }
 
-// RandomReader returns a reader that returns size bytes of pseudo-random data
+// RandomReader returns a reader that returns deterministic pseudo-random data
 // derived from the seed.
-func RandomReader(seed, size int) io.Reader {
-	r := &rndReader{src: mrand.New(mrand.NewSource(int64(seed)))}
-	return io.LimitReader(r, int64(size))
+func RandomReader(seed int) io.Reader {
+	return &rndReader{src: mrand.New(mrand.NewSource(int64(seed)))}
+}
+
+// RandomLimitReader returns a reader that returns size bytes of deterministic
+// pseudo-random data derived from the seed.
+func RandomLimitReader(seed, size int) io.Reader {
+	return io.LimitReader(RandomReader(seed), int64(size))
 }
 
 // GenRandom returns a []byte filled with up to 1000 random bytes.
@@ -158,7 +200,7 @@ func WithTestEnvironment(t testing.TB, repoFixture string, f func(repodir string f(filepath.Join(tempdir, "repo")) - if !TestCleanup { + if !TestCleanupTempDirs { t.Logf("leaving temporary directory %v used for test", tempdir) return } diff --git a/tree_test.go b/tree_test.go index 725e80b14..ad8f7f5fa 100644 --- a/tree_test.go +++ b/tree_test.go @@ -49,7 +49,7 @@ func createTempDir(t *testing.T) string { func TestTree(t *testing.T) { dir := createTempDir(t) defer func() { - if TestCleanup { + if TestCleanupTempDirs { RemoveAll(t, dir) } }()