
make Lookup() return all blobs

+ simplify syntax
Alexander Weiss authored 2020-06-14 13:26:10 +02:00; committed by Michael Eischer
parent 020cab8e08
commit 9d1fb94c6c
13 changed files with 134 additions and 72 deletions
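In short, Lookup loses its separate found flag: call sites treat an empty result slice as "not found" (or use Has for a pure existence check), and the low-level Index.Lookup gains an append target so matches from several indexes can be merged. A minimal caller-side sketch of the before/after; hasDataBlob is a hypothetical helper, not part of this commit:

    // Hypothetical helper illustrating the new calling convention.
    func hasDataBlob(repo restic.Repository, id restic.ID) bool {
        // before: blobs, found := repo.Index().Lookup(id, restic.DataBlob); return found
        blobs := repo.Index().Lookup(id, restic.DataBlob)
        return len(blobs) > 0
    }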

View File

@@ -165,8 +165,7 @@ func runCat(gopts GlobalOptions, args []string) error {
 	case "blob":
 		for _, t := range []restic.BlobType{restic.DataBlob, restic.TreeBlob} {
-			_, found := repo.Index().Lookup(id, t)
-			if !found {
+			if !repo.Index().Has(id, t) {
 				continue
 			}

View File

@@ -465,8 +465,8 @@ func (f *Finder) findObjectPack(ctx context.Context, id string, t restic.BlobTyp
 		return
 	}

-	blobs, found := idx.Lookup(rid, t)
-	if !found {
+	blobs := idx.Lookup(rid, t)
+	if len(blobs) == 0 {
 		Printf("Object %s not found in the index\n", rid.Str())
 		return
 	}

View File

@@ -40,7 +40,7 @@ func TestBlobSaver(t *testing.T) {
 	tmb, ctx := tomb.WithContext(ctx)
 	saver := &saveFail{
-		idx: repository.NewIndex(),
+		idx: repository.NewMasterIndex(),
 	}

 	b := NewBlobSaver(ctx, tmb, saver, uint(runtime.NumCPU()))

@@ -86,7 +86,7 @@ func TestBlobSaverError(t *testing.T) {
 	tmb, ctx := tomb.WithContext(ctx)
 	saver := &saveFail{
-		idx: repository.NewIndex(),
+		idx: repository.NewMasterIndex(),
 		failAt: int32(test.failAt),
 	}

View File

@@ -174,8 +174,9 @@ func (idx *Index) toPackedBlob(e *indexEntry, typ restic.BlobType) restic.Packed
 	}
 }

-// Lookup queries the index for the blob ID and returns a restic.PackedBlob.
-func (idx *Index) Lookup(id restic.ID, tpe restic.BlobType) (blobs []restic.PackedBlob, found bool) {
+// Lookup queries the index for the blob ID and returns all entries including
+// duplicates. Adds found entries to blobs and returns the result.
+func (idx *Index) Lookup(id restic.ID, tpe restic.BlobType, blobs []restic.PackedBlob) []restic.PackedBlob {
 	idx.m.Lock()
 	defer idx.m.Unlock()

@@ -183,7 +184,7 @@ func (idx *Index) Lookup(id restic.ID, tpe restic.BlobType) (blobs []restic.Pack
 		blobs = append(blobs, idx.toPackedBlob(e, tpe))
 	})

-	return blobs, len(blobs) > 0
+	return blobs
 }

 // ListPack returns a list of blobs contained in a pack.
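The new trailing blobs parameter is an append target: pass nil for a fresh result, or pass an existing slice to collect matches from several indexes without extra allocations, which is exactly how MasterIndex.Lookup uses it below. A minimal sketch of that accumulation pattern, where idxA and idxB are hypothetical *Index values (not part of this commit):

    // Collect every stored copy of a tree blob known to two indexes.
    var blobs []restic.PackedBlob
    blobs = idxA.Lookup(id, restic.TreeBlob, blobs) // appends idxA's entries
    blobs = idxB.Lookup(id, restic.TreeBlob, blobs) // appends idxB's entries, duplicates included
    if len(blobs) == 0 {
        // the blob is unknown to both indexes
    }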

View File

@@ -66,9 +66,7 @@ func TestIndexSerialize(t *testing.T) {
 	rtest.OK(t, err)

 	for _, testBlob := range tests {
-		list, found := idx.Lookup(testBlob.id, testBlob.tpe)
-		rtest.Assert(t, found, "Expected to find blob id %v", testBlob.id.Str())
-
+		list := idx.Lookup(testBlob.id, testBlob.tpe, nil)
 		if len(list) != 1 {
 			t.Errorf("expected one result for blob %v, got %v: %v", testBlob.id.Str(), len(list), list)
 		}

@@ -79,9 +77,7 @@ func TestIndexSerialize(t *testing.T) {
 		rtest.Equals(t, testBlob.offset, result.Offset)
 		rtest.Equals(t, testBlob.length, result.Length)

-		list2, found := idx2.Lookup(testBlob.id, testBlob.tpe)
-		rtest.Assert(t, found, "Expected to find blob id %v", testBlob.id)
-
+		list2 := idx2.Lookup(testBlob.id, testBlob.tpe, nil)
 		if len(list2) != 1 {
 			t.Errorf("expected one result for blob %v, got %v: %v", testBlob.id.Str(), len(list2), list2)
 		}

@@ -148,9 +144,7 @@ func TestIndexSerialize(t *testing.T) {
 	// all new blobs must be in the index
 	for _, testBlob := range newtests {
-		list, found := idx3.Lookup(testBlob.id, testBlob.tpe)
-		rtest.Assert(t, found, "Expected to find blob id %v", testBlob.id.Str())
-
+		list := idx3.Lookup(testBlob.id, testBlob.tpe, nil)
 		if len(list) != 1 {
 			t.Errorf("expected one result for blob %v, got %v: %v", testBlob.id.Str(), len(list), list)
 		}

@@ -295,9 +289,7 @@ func TestIndexUnserialize(t *testing.T) {
 	rtest.OK(t, err)

 	for _, test := range exampleTests {
-		list, found := idx.Lookup(test.id, test.tpe)
-		rtest.Assert(t, found, "Expected to find blob id %v", test.id.Str())
-
+		list := idx.Lookup(test.id, test.tpe, nil)
 		if len(list) != 1 {
 			t.Errorf("expected one result for blob %v, got %v: %v", test.id.Str(), len(list), list)
 		}

@@ -368,9 +360,7 @@ func TestIndexUnserializeOld(t *testing.T) {
 	rtest.OK(t, err)

 	for _, test := range exampleTests {
-		list, found := idx.Lookup(test.id, test.tpe)
-		rtest.Assert(t, found, "Expected to find blob id %v", test.id.Str())
-
+		list := idx.Lookup(test.id, test.tpe, nil)
 		if len(list) != 1 {
 			t.Errorf("expected one result for blob %v, got %v: %v", test.id.Str(), len(list), list)
 		}

View File

@@ -26,19 +26,16 @@ func NewMasterIndex() *MasterIndex {
 	return &MasterIndex{idx: idx, pendingBlobs: restic.NewBlobSet()}
 }

-// Lookup queries all known Indexes for the ID and returns the first match.
-func (mi *MasterIndex) Lookup(id restic.ID, tpe restic.BlobType) (blobs []restic.PackedBlob, found bool) {
+// Lookup queries all known Indexes for the ID and returns all matches.
+func (mi *MasterIndex) Lookup(id restic.ID, tpe restic.BlobType) (blobs []restic.PackedBlob) {
 	mi.idxMutex.RLock()
 	defer mi.idxMutex.RUnlock()

 	for _, idx := range mi.idx {
-		blobs, found = idx.Lookup(id, tpe)
-		if found {
-			return
-		}
+		blobs = idx.Lookup(id, tpe, blobs)
 	}

-	return nil, false
+	return blobs
 }

 // LookupSize queries all known Indexes for the ID and returns the first match.
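With the append-style Index.Lookup above, MasterIndex.Lookup no longer stops at the first index that knows the blob; every stored copy across all indexes ends up in the result. Callers that only need one readable copy can keep using the first entry, as forEachBlob in the restorer does further down. A hedged caller-side sketch (mIdx, id and the error handling are illustrative only):

    blobs := mIdx.Lookup(id, restic.DataBlob)
    if len(blobs) == 0 {
        return errors.Errorf("blob %v not found in any index", id)
    }
    pb := blobs[0] // any stored copy will do for reading
    _ = pb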

View File

@@ -10,16 +10,17 @@ import (
 	rtest "github.com/restic/restic/internal/test"
 )

-func TestMasterIndexLookup(t *testing.T) {
+func TestMasterIndex(t *testing.T) {
 	idInIdx1 := restic.NewRandomID()
 	idInIdx2 := restic.NewRandomID()
+	idInIdx12 := restic.NewRandomID()

 	blob1 := restic.PackedBlob{
 		PackID: restic.NewRandomID(),
 		Blob: restic.Blob{
 			Type:   restic.DataBlob,
 			ID:     idInIdx1,
-			Length: 10,
+			Length: uint(restic.CiphertextLength(10)),
 			Offset: 0,
 		},
 	}
@@ -29,32 +30,103 @@ func TestMasterIndexLookup(t *testing.T) {
 		Blob: restic.Blob{
 			Type:   restic.DataBlob,
 			ID:     idInIdx2,
-			Length: 100,
+			Length: uint(restic.CiphertextLength(100)),
 			Offset: 10,
 		},
 	}

+	blob12a := restic.PackedBlob{
+		PackID: restic.NewRandomID(),
+		Blob: restic.Blob{
+			Type:   restic.TreeBlob,
+			ID:     idInIdx12,
+			Length: uint(restic.CiphertextLength(123)),
+			Offset: 110,
+		},
+	}
+
+	blob12b := restic.PackedBlob{
+		PackID: restic.NewRandomID(),
+		Blob: restic.Blob{
+			Type:   restic.TreeBlob,
+			ID:     idInIdx12,
+			Length: uint(restic.CiphertextLength(123)),
+			Offset: 50,
+		},
+	}
+
 	idx1 := repository.NewIndex()
 	idx1.Store(blob1)
+	idx1.Store(blob12a)

 	idx2 := repository.NewIndex()
 	idx2.Store(blob2)
+	idx2.Store(blob12b)

 	mIdx := repository.NewMasterIndex()
 	mIdx.Insert(idx1)
 	mIdx.Insert(idx2)

-	blobs, found := mIdx.Lookup(idInIdx1, restic.DataBlob)
-	rtest.Assert(t, found, "Expected to find blob id %v from index 1", idInIdx1)
+	// test idInIdx1
+	found := mIdx.Has(idInIdx1, restic.DataBlob)
+	rtest.Equals(t, true, found)
+
+	blobs := mIdx.Lookup(idInIdx1, restic.DataBlob)
 	rtest.Equals(t, []restic.PackedBlob{blob1}, blobs)

-	blobs, found = mIdx.Lookup(idInIdx2, restic.DataBlob)
-	rtest.Assert(t, found, "Expected to find blob id %v from index 2", idInIdx2)
+	size, found := mIdx.LookupSize(idInIdx1, restic.DataBlob)
+	rtest.Equals(t, true, found)
+	rtest.Equals(t, uint(10), size)
+
+	// test idInIdx2
+	found = mIdx.Has(idInIdx2, restic.DataBlob)
+	rtest.Equals(t, true, found)
+
+	blobs = mIdx.Lookup(idInIdx2, restic.DataBlob)
 	rtest.Equals(t, []restic.PackedBlob{blob2}, blobs)

-	blobs, found = mIdx.Lookup(restic.NewRandomID(), restic.DataBlob)
-	rtest.Assert(t, !found, "Expected to not find a blob when fetching with a random id")
+	size, found = mIdx.LookupSize(idInIdx2, restic.DataBlob)
+	rtest.Equals(t, true, found)
+	rtest.Equals(t, uint(100), size)
+
+	// test idInIdx12
+	found = mIdx.Has(idInIdx12, restic.TreeBlob)
+	rtest.Equals(t, true, found)
+
+	blobs = mIdx.Lookup(idInIdx12, restic.TreeBlob)
+	rtest.Equals(t, 2, len(blobs))
+
+	// test Lookup result for blob12a
+	found = false
+	if blobs[0] == blob12a || blobs[1] == blob12a {
+		found = true
+	}
+	rtest.Assert(t, found, "blob12a not found in result")
+
+	// test Lookup result for blob12b
+	found = false
+	if blobs[0] == blob12b || blobs[1] == blob12b {
+		found = true
+	}
+	rtest.Assert(t, found, "blob12b not found in result")
+
+	size, found = mIdx.LookupSize(idInIdx12, restic.TreeBlob)
+	rtest.Equals(t, true, found)
+	rtest.Equals(t, uint(123), size)
+
+	// test not in index
+	found = mIdx.Has(restic.NewRandomID(), restic.TreeBlob)
+	rtest.Assert(t, !found, "Expected no blobs when fetching with a random id")
+
+	blobs = mIdx.Lookup(restic.NewRandomID(), restic.DataBlob)
 	rtest.Assert(t, blobs == nil, "Expected no blobs when fetching with a random id")
+
+	size, found = mIdx.LookupSize(restic.NewRandomID(), restic.DataBlob)
+	rtest.Assert(t, !found, "Expected no blobs when fetching with a random id")
+
+	// Test Count
+	num := mIdx.Count(restic.DataBlob)
+	rtest.Equals(t, uint(2), num)
+
+	num = mIdx.Count(restic.TreeBlob)
+	rtest.Equals(t, uint(2), num)
 }
func TestMasterMergeFinalIndexes(t *testing.T) { func TestMasterMergeFinalIndexes(t *testing.T) {
@@ -96,16 +168,13 @@ func TestMasterMergeFinalIndexes(t *testing.T) {
 	mIdx.MergeFinalIndexes()

-	blobs, found := mIdx.Lookup(idInIdx1, restic.DataBlob)
-	rtest.Assert(t, found, "Expected to find blob id %v from index 1", idInIdx1)
+	blobs := mIdx.Lookup(idInIdx1, restic.DataBlob)
 	rtest.Equals(t, []restic.PackedBlob{blob1}, blobs)

-	blobs, found = mIdx.Lookup(idInIdx2, restic.DataBlob)
-	rtest.Assert(t, found, "Expected to find blob id %v from index 2", idInIdx2)
+	blobs = mIdx.Lookup(idInIdx2, restic.DataBlob)
 	rtest.Equals(t, []restic.PackedBlob{blob2}, blobs)

-	blobs, found = mIdx.Lookup(restic.NewRandomID(), restic.DataBlob)
-	rtest.Assert(t, !found, "Expected to not find a blob when fetching with a random id")
+	blobs = mIdx.Lookup(restic.NewRandomID(), restic.DataBlob)
 	rtest.Assert(t, blobs == nil, "Expected no blobs when fetching with a random id")
 }
@@ -196,6 +265,16 @@ func BenchmarkMasterIndexLookupParallel(b *testing.B) {
 			}
 		})
 	})
+	}
+}
+
+func BenchmarkMasterIndexLookupBlobSize(b *testing.B) {
+	rng := rand.New(rand.NewSource(0))
+	mIdx, lookupID := createRandomMasterIndex(rand.New(rng), 5, 200000)
+
+	b.ResetTimer()
+
+	for i := 0; i < b.N; i++ {
+		mIdx.LookupSize(lookupID, restic.DataBlob)
 	}
 }
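The added benchmark measures only MasterIndex.LookupSize against a large randomly generated index. It can be run in isolation with the standard Go tooling, e.g. go test -run '^$' -bench MasterIndexLookupBlobSize -benchmem in the package directory (internal/repository in the usual restic layout; the path is assumed, it is not shown in this diff).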

View File

@@ -110,8 +110,8 @@ func findPacksForBlobs(t *testing.T, repo restic.Repository, blobs restic.BlobSe
 	idx := repo.Index()
 	for h := range blobs {
-		list, found := idx.Lookup(h.ID, h.Type)
-		if !found {
+		list := idx.Lookup(h.ID, h.Type)
+		if len(list) == 0 {
 			t.Fatal("Failed to find blob", h.ID.Str(), "with type", h.Type)
 		}

@@ -215,8 +215,8 @@ func TestRepack(t *testing.T) {
 	idx := repo.Index()

 	for h := range keepBlobs {
-		list, found := idx.Lookup(h.ID, h.Type)
-		if !found {
+		list := idx.Lookup(h.ID, h.Type)
+		if len(list) == 0 {
 			t.Errorf("unable to find blob %v in repo", h.ID.Str())
 			continue
 		}

@@ -234,7 +234,7 @@ func TestRepack(t *testing.T) {
 	}

 	for h := range removeBlobs {
-		if _, found := idx.Lookup(h.ID, h.Type); found {
+		if _, found := repo.LookupBlobSize(h.ID, h.Type); found {
 			t.Errorf("blob %v still contained in the repo", h)
 		}
 	}

View File

@@ -146,8 +146,8 @@ func (r *Repository) LoadBlob(ctx context.Context, t restic.BlobType, id restic.
 	debug.Log("load %v with id %v (buf len %v, cap %d)", t, id, len(buf), cap(buf))

 	// lookup packs
-	blobs, found := r.idx.Lookup(id, t)
-	if !found {
+	blobs := r.idx.Lookup(id, t)
+	if len(blobs) == 0 {
 		debug.Log("id %v not found in index", id)
 		return nil, errors.Errorf("id %v not found in repository", id)
 	}

View File

@@ -60,7 +60,7 @@ type Lister interface {
 // Index keeps track of the blobs are stored within files.
 type Index interface {
 	Has(ID, BlobType) bool
-	Lookup(ID, BlobType) ([]PackedBlob, bool)
+	Lookup(ID, BlobType) []PackedBlob
 	Count(BlobType) uint

 	// Each returns a channel that yields all blobs known to the index. When
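Any type implementing this interface needs the same signature change; the TestRepo stub in the restorer tests below is the in-tree example. For illustration only, a hypothetical map-backed stub covering just the two methods shown here (fakeIndex is not part of the commit):

    type fakeIndex struct {
        blobs map[restic.BlobHandle][]restic.PackedBlob
    }

    func (f *fakeIndex) Lookup(id restic.ID, t restic.BlobType) []restic.PackedBlob {
        return f.blobs[restic.BlobHandle{ID: id, Type: t}] // nil slice when unknown
    }

    func (f *fakeIndex) Has(id restic.ID, t restic.BlobType) bool {
        return len(f.Lookup(id, t)) > 0
    }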

View File

@@ -51,7 +51,7 @@ type packInfo struct {
 // fileRestorer restores set of files
 type fileRestorer struct {
 	key *crypto.Key
-	idx func(restic.ID, restic.BlobType) ([]restic.PackedBlob, bool)
+	idx func(restic.ID, restic.BlobType) []restic.PackedBlob
 	packLoader func(ctx context.Context, h restic.Handle, length int, offset int64, fn func(rd io.Reader) error) error

 	filesWriter *filesWriter

@@ -63,7 +63,7 @@ type fileRestorer struct {
 func newFileRestorer(dst string,
 	packLoader func(ctx context.Context, h restic.Handle, length int, offset int64, fn func(rd io.Reader) error) error,
 	key *crypto.Key,
-	idx func(restic.ID, restic.BlobType) ([]restic.PackedBlob, bool)) *fileRestorer {
+	idx func(restic.ID, restic.BlobType) []restic.PackedBlob) *fileRestorer {

 	return &fileRestorer{
 		key: key,

@@ -88,8 +88,8 @@ func (r *fileRestorer) forEachBlob(blobIDs []restic.ID, fn func(packID restic.ID
 	}

 	for _, blobID := range blobIDs {
-		packs, found := r.idx(blobID, restic.DataBlob)
-		if !found {
+		packs := r.idx(blobID, restic.DataBlob)
+		if len(packs) == 0 {
 			return errors.Errorf("Unknown blob %s", blobID.String())
 		}
 		fn(packs[0].PackID, packs[0].Blob)

@@ -208,13 +208,11 @@ func (r *fileRestorer) downloadPack(ctx context.Context, pack *packInfo) {
 			})
 		} else if packsMap, ok := file.blobs.(map[restic.ID][]fileBlobInfo); ok {
 			for _, blob := range packsMap[pack.id] {
-				idxPacks, found := r.idx(blob.id, restic.DataBlob)
-				if found {
-					for _, idxPack := range idxPacks {
-						if idxPack.PackID.Equal(pack.id) {
-							addBlob(idxPack.Blob, blob.offset)
-							break
-						}
+				idxPacks := r.idx(blob.id, restic.DataBlob)
+				for _, idxPack := range idxPacks {
+					if idxPack.PackID.Equal(pack.id) {
+						addBlob(idxPack.Blob, blob.offset)
+						break
 					}
 				}
 			}
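Dropping the `if found` guard in downloadPack is safe because ranging over a nil or empty slice executes zero iterations in Go, so an index miss simply adds no blobs. A tiny illustrative sketch (not part of the commit):

    var idxPacks []restic.PackedBlob // nil when r.idx finds nothing
    for _, idxPack := range idxPacks {
        _ = idxPack // never reached for a nil/empty slice
    }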

View File

@@ -39,9 +39,9 @@ type TestRepo struct {
 	loader func(ctx context.Context, h restic.Handle, length int, offset int64, fn func(rd io.Reader) error) error
 }

-func (i *TestRepo) Lookup(blobID restic.ID, _ restic.BlobType) ([]restic.PackedBlob, bool) {
-	packs, found := i.blobs[blobID]
-	return packs, found
+func (i *TestRepo) Lookup(blobID restic.ID, _ restic.BlobType) []restic.PackedBlob {
+	packs := i.blobs[blobID]
+	return packs
 }

 func (i *TestRepo) packName(pack *packInfo) string {

View File

@@ -5,7 +5,6 @@ import (
 	"os"
 	"path/filepath"

-	"github.com/restic/restic/internal/crypto"
 	"github.com/restic/restic/internal/errors"

 	"github.com/restic/restic/internal/debug"

@@ -313,8 +312,7 @@ func (res *Restorer) VerifyFiles(ctx context.Context, dst string) (int, error) {
 			offset := int64(0)
 			for _, blobID := range node.Content {
-				blobs, _ := res.repo.Index().Lookup(blobID, restic.DataBlob)
-				length := blobs[0].Length - uint(crypto.Extension)
+				length, _ := res.repo.LookupBlobSize(blobID, restic.DataBlob)
 				buf := make([]byte, length) // TODO do I want to reuse the buffer somehow?
 				_, err = file.ReadAt(buf, offset)
 				if err != nil {
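The VerifyFiles change works because LookupBlobSize already reports the plaintext size, which the old code computed by hand from the stored (encrypted) length; the master index test above relies on the same relation via restic.CiphertextLength. A hedged sketch of that relation, assuming CiphertextLength(n) adds exactly the per-blob encryption overhead crypto.Extension:

    stored := uint(restic.CiphertextLength(100)) // length recorded in the index
    plain := stored - uint(crypto.Extension)     // what LookupSize / LookupBlobSize report: 100
    _ = plain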