backend: Relax requirement for new files

Before, all backend implementations were required to return an error if
the file that is to be written already exists in the backend. For most
backends, that meant making an extra request (e.g. via HTTP) and
returning an error when the file already exists.

This check is not reliable: the file could still be created between the
HTTP request testing for it and the moment writing starts. In addition,
apart from the `config` file in the repo, all other files have
pseudo-random names with a very low probability of a collision. And even
if a file name is written again, the way the restic repo is structured
means that the same content is placed there again, which is not a
problem, just not very efficient.
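
For context, a minimal sketch of how these pseudo-random names arise
(simplified; it ignores encryption and the repository layout): restic
derives a repo file's name from the SHA-256 hash of its contents, so a
repeated name can only carry identical bytes.

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

// fileName sketches restic's naming scheme: the lowercase hex SHA-256
// of the file's contents. Writing the same name again therefore means
// writing the same bytes (barring a hash collision).
func fileName(contents []byte) string {
	id := sha256.Sum256(contents)
	return hex.EncodeToString(id[:])
}

func main() {
	blob := []byte("example pack file contents")
	fmt.Println(fileName(blob)) // identical input yields the identical name
}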

So, this commit relaxes the requirement to return an error when the file
already exists in the backend, which reduces the number of API requests
and thereby the latency for remote backends.
Alexander Neumann 2018-02-17 22:39:18 +01:00
parent 2fb4d44a4d
commit b5062959c8
6 changed files with 1 addition and 48 deletions
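
To make the race and the saved round trip concrete, here is a minimal
sketch with hypothetical types (not restic's actual backend interface;
the real changes follow below). The old pattern checks for the file
before uploading, which costs an extra request and can still lose the
race; the relaxed pattern simply uploads.

package sketch

import (
	"context"
	"errors"
	"io"
)

// remoteStore is a hypothetical stand-in for a remote backend.
type remoteStore interface {
	Exists(ctx context.Context, name string) (bool, error)
	Put(ctx context.Context, name string, rd io.Reader) error
}

// saveChecked shows the old, stricter pattern: one extra request per
// save, and still racy, because the file may be created between the
// Exists call and the Put call.
func saveChecked(ctx context.Context, s remoteStore, name string, rd io.Reader) error {
	found, err := s.Exists(ctx, name)
	if err != nil {
		return err
	}
	if found {
		return errors.New("key already exists")
	}
	return s.Put(ctx, name, rd) // another client may have won the race by now
}

// save shows the relaxed pattern: just upload. With content-addressed
// names, an overwrite rewrites identical data.
func save(ctx context.Context, s remoteStore, name string, rd io.Reader) error {
	return s.Put(ctx, name, rd)
}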

@@ -135,16 +135,6 @@ func (be *Backend) Save(ctx context.Context, h restic.Handle, rd io.Reader) (err
 	debug.Log("Save %v at %v", h, objName)
-	// Check key does not already exist
-	found, err := be.container.GetBlobReference(objName).Exists()
-	if err != nil {
-		return errors.Wrap(err, "GetBlobReference().Exists()")
-	}
-	if found {
-		debug.Log("%v already exists", h)
-		return errors.New("key already exists")
-	}
 	be.sem.GetToken()
 	// wrap the reader so that net/http client cannot close the reader, return

@@ -196,12 +196,6 @@ func (be *b2Backend) Save(ctx context.Context, h restic.Handle, rd io.Reader) er
 	debug.Log("Save %v, name %v", h, name)
 	obj := be.bucket.Object(name)
-	_, err := obj.Attrs(ctx)
-	if err == nil {
-		debug.Log(" %v already exists", h)
-		return errors.New("key already exists")
-	}
 	w := obj.NewWriter(ctx)
 	n, err := io.Copy(w, rd)
 	debug.Log(" saved %d bytes, err %v", n, err)

@@ -218,13 +218,6 @@ func (be *Backend) Save(ctx context.Context, h restic.Handle, rd io.Reader) (err
 	be.sem.GetToken()
-	// Check key does not already exist
-	if _, err := be.service.Objects.Get(be.bucketName, objName).Do(); err == nil {
-		debug.Log("%v already exists", h)
-		be.sem.ReleaseToken()
-		return errors.New("key already exists")
-	}
 	debug.Log("InsertObject(%v, %v)", be.bucketName, objName)
 	// Set chunk size to zero to disable resumable uploads.

@@ -235,13 +235,6 @@ func (be *Backend) Save(ctx context.Context, h restic.Handle, rd io.Reader) (err
 	be.sem.GetToken()
 	defer be.sem.ReleaseToken()
-	// Check key does not already exist
-	_, err = be.client.StatObject(be.cfg.Bucket, objName, minio.StatObjectOptions{})
-	if err == nil {
-		debug.Log("%v already exists", h)
-		return errors.New("key already exists")
-	}
 	var size int64 = -1
 	type lenner interface {

@@ -165,19 +165,6 @@ func (be *beSwift) Save(ctx context.Context, h restic.Handle, rd io.Reader) (err
 	be.sem.GetToken()
 	defer be.sem.ReleaseToken()
-	// Check key does not already exist
-	switch _, _, err = be.conn.Object(be.container, objName); err {
-	case nil:
-		debug.Log("%v already exists", h)
-		return errors.New("key already exists")
-	case swift.ObjectNotFound:
-		// Ok, that's what we want
-	default:
-		return errors.Wrap(err, "conn.Object")
-	}
 	encoding := "binary/octet-stream"
 	debug.Log("PutObject(%v, %v, %v)", be.container, objName, encoding)

@@ -793,14 +793,10 @@ func (s *Suite) TestBackend(t *testing.T) {
 		// test adding the first file again
 		ts := testStrings[0]
-		// create blob
 		h := restic.Handle{Type: tpe, Name: ts.id}
-		err := b.Save(context.TODO(), h, strings.NewReader(ts.data))
-		test.Assert(t, err != nil, "backend has allowed overwrite of existing blob: expected error for %v, got %v", h, err)
 		// remove and recreate
-		err = s.delayedRemove(t, b, h)
+		err := s.delayedRemove(t, b, h)
 		test.OK(t, err)
 		// test that the blob is gone