2
2
mirror of https://github.com/octoleo/restic.git synced 2024-12-24 20:00:21 +00:00

Merge pull request #336 from restic/refactor-index

Refactor Index.Lookup() to return struct PackedBlob
This commit is contained in:
Alexander Neumann 2015-11-02 17:35:09 +01:00
commit ba8e6035b0
7 changed files with 96 additions and 73 deletions

View File

@@ -162,16 +162,16 @@ func (cmd CmdCat) Execute(args []string) error {
return err return err
case "blob": case "blob":
_, blobType, _, length, err := repo.Index().Lookup(id) blob, err := repo.Index().Lookup(id)
if err != nil { if err != nil {
return err return err
} }
if blobType != pack.Data { if blob.Type != pack.Data {
return errors.New("wrong type for blob") return errors.New("wrong type for blob")
} }
buf := make([]byte, length) buf := make([]byte, blob.Length)
data, err := repo.LoadBlob(pack.Data, id, buf) data, err := repo.LoadBlob(pack.Data, id, buf)
if err != nil { if err != nil {
return err return err

View File

@@ -212,14 +212,14 @@ func (node Node) createFileAt(path string, repo *repository.Repository) error {
var buf []byte var buf []byte
for _, id := range node.Content { for _, id := range node.Content {
_, _, _, length, err := repo.Index().Lookup(id) blob, err := repo.Index().Lookup(id)
if err != nil { if err != nil {
return err return err
} }
buf = buf[:cap(buf)] buf = buf[:cap(buf)]
if uint(len(buf)) < length { if uint(len(buf)) < blob.Length {
buf = make([]byte, length) buf = make([]byte, blob.Length)
} }
buf, err := repo.LoadBlob(pack.Data, id, buf) buf, err := repo.LoadBlob(pack.Data, id, buf)

View File

@@ -15,7 +15,6 @@ import (
func BenchmarkNodeFillUser(t *testing.B) { func BenchmarkNodeFillUser(t *testing.B) {
tempfile, err := ioutil.TempFile("", "restic-test-temp-") tempfile, err := ioutil.TempFile("", "restic-test-temp-")
defer tempfile.Close()
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@@ -32,11 +31,13 @@ func BenchmarkNodeFillUser(t *testing.B) {
for i := 0; i < t.N; i++ { for i := 0; i < t.N; i++ {
restic.NodeFromFileInfo(path, fi) restic.NodeFromFileInfo(path, fi)
} }
OK(t, tempfile.Close())
RemoveAll(t, tempfile.Name())
} }
func BenchmarkNodeFromFileInfo(t *testing.B) { func BenchmarkNodeFromFileInfo(t *testing.B) {
tempfile, err := ioutil.TempFile("", "restic-test-temp-") tempfile, err := ioutil.TempFile("", "restic-test-temp-")
defer tempfile.Close()
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@@ -56,6 +57,9 @@ func BenchmarkNodeFromFileInfo(t *testing.B) {
t.Fatal(err) t.Fatal(err)
} }
} }
OK(t, tempfile.Close())
RemoveAll(t, tempfile.Name())
} }
func parseTime(s string) time.Time { func parseTime(s string) time.Time {

View File

@@ -109,24 +109,32 @@ func (idx *Index) Store(t pack.BlobType, id backend.ID, pack backend.ID, offset,
idx.store(t, id, pack, offset, length) idx.store(t, id, pack, offset, length)
} }
// Lookup returns the pack for the id. // Lookup queries the index for the blob ID and returns a PackedBlob.
func (idx *Index) Lookup(id backend.ID) (packID backend.ID, tpe pack.BlobType, offset, length uint, err error) { func (idx *Index) Lookup(id backend.ID) (pb PackedBlob, err error) {
idx.m.Lock() idx.m.Lock()
defer idx.m.Unlock() defer idx.m.Unlock()
if p, ok := idx.pack[id]; ok { if p, ok := idx.pack[id]; ok {
debug.Log("Index.Lookup", "id %v found in pack %v at %d, length %d", debug.Log("Index.Lookup", "id %v found in pack %v at %d, length %d",
id.Str(), p.packID.Str(), p.offset, p.length) id.Str(), p.packID.Str(), p.offset, p.length)
return p.packID, p.tpe, p.offset, p.length, nil
pb := PackedBlob{
Type: p.tpe,
Length: p.length,
ID: id,
Offset: p.offset,
PackID: p.packID,
}
return pb, nil
} }
debug.Log("Index.Lookup", "id %v not found", id.Str()) debug.Log("Index.Lookup", "id %v not found", id.Str())
return backend.ID{}, pack.Data, 0, 0, fmt.Errorf("id %v not found in index", id) return PackedBlob{}, fmt.Errorf("id %v not found in index", id)
} }
// Has returns true iff the id is listed in the index. // Has returns true iff the id is listed in the index.
func (idx *Index) Has(id backend.ID) bool { func (idx *Index) Has(id backend.ID) bool {
_, _, _, _, err := idx.Lookup(id) _, err := idx.Lookup(id)
if err == nil { if err == nil {
return true return true
} }
@@ -137,11 +145,11 @@ func (idx *Index) Has(id backend.ID) bool {
// LookupSize returns the length of the cleartext content behind the // LookupSize returns the length of the cleartext content behind the
// given id // given id
func (idx *Index) LookupSize(id backend.ID) (cleartextLength uint, err error) { func (idx *Index) LookupSize(id backend.ID) (cleartextLength uint, err error) {
_, _, _, encryptedLength, err := idx.Lookup(id) blob, err := idx.Lookup(id)
if err != nil { if err != nil {
return 0, err return 0, err
} }
return encryptedLength - crypto.Extension, nil return blob.PlaintextLength(), nil
} }
// Merge loads all items from other into idx. // Merge loads all items from other into idx.
@@ -181,10 +189,23 @@ func (idx *Index) AddToSupersedes(ids ...backend.ID) error {
// PackedBlob is a blob already saved within a pack. // PackedBlob is a blob already saved within a pack.
type PackedBlob struct { type PackedBlob struct {
pack.Blob Type pack.BlobType
Length uint
ID backend.ID
Offset uint
PackID backend.ID PackID backend.ID
} }
func (pb PackedBlob) String() string {
return fmt.Sprintf("<PackedBlob %v type %v in pack %v: len %v, offset %v",
pb.ID.Str(), pb.Type, pb.PackID.Str(), pb.Length, pb.Offset)
}
// PlaintextLength returns the number of bytes the blob's plaintext occupies.
func (pb PackedBlob) PlaintextLength() uint {
return pb.Length - crypto.Extension
}
// Each returns a channel that yields all blobs known to the index. If done is // Each returns a channel that yields all blobs known to the index. If done is
// closed, the background goroutine terminates. This blocks any modification of // closed, the background goroutine terminates. This blocks any modification of
// the index. // the index.
@@ -204,12 +225,10 @@ func (idx *Index) Each(done chan struct{}) <-chan PackedBlob {
case <-done: case <-done:
return return
case ch <- PackedBlob{ case ch <- PackedBlob{
Blob: pack.Blob{
ID: id, ID: id,
Offset: blob.offset, Offset: blob.offset,
Type: blob.tpe, Type: blob.tpe,
Length: blob.length, Length: blob.length,
},
PackID: blob.packID, PackID: blob.packID,
}: }:
} }

View File

@@ -69,21 +69,21 @@ func TestIndexSerialize(t *testing.T) {
OK(t, err) OK(t, err)
for _, testBlob := range tests { for _, testBlob := range tests {
packID, tpe, offset, length, err := idx.Lookup(testBlob.id) result, err := idx.Lookup(testBlob.id)
OK(t, err) OK(t, err)
Equals(t, testBlob.pack, packID) Equals(t, testBlob.pack, result.PackID)
Equals(t, testBlob.tpe, tpe) Equals(t, testBlob.tpe, result.Type)
Equals(t, testBlob.offset, offset) Equals(t, testBlob.offset, result.Offset)
Equals(t, testBlob.length, length) Equals(t, testBlob.length, result.Length)
packID, tpe, offset, length, err = idx2.Lookup(testBlob.id) result2, err := idx2.Lookup(testBlob.id)
OK(t, err) OK(t, err)
Equals(t, testBlob.pack, packID) Equals(t, testBlob.pack, result2.PackID)
Equals(t, testBlob.tpe, tpe) Equals(t, testBlob.tpe, result2.Type)
Equals(t, testBlob.offset, offset) Equals(t, testBlob.offset, result2.Offset)
Equals(t, testBlob.length, length) Equals(t, testBlob.length, result2.Length)
} }
// add more blobs to idx // add more blobs to idx
@@ -126,13 +126,13 @@ func TestIndexSerialize(t *testing.T) {
// all new blobs must be in the index // all new blobs must be in the index
for _, testBlob := range newtests { for _, testBlob := range newtests {
packID, tpe, offset, length, err := idx3.Lookup(testBlob.id) blob, err := idx3.Lookup(testBlob.id)
OK(t, err) OK(t, err)
Equals(t, testBlob.pack, packID) Equals(t, testBlob.pack, blob.PackID)
Equals(t, testBlob.tpe, tpe) Equals(t, testBlob.tpe, blob.Type)
Equals(t, testBlob.offset, offset) Equals(t, testBlob.offset, blob.Offset)
Equals(t, testBlob.length, length) Equals(t, testBlob.length, blob.Length)
} }
} }
@@ -247,13 +247,13 @@ func TestIndexUnserialize(t *testing.T) {
OK(t, err) OK(t, err)
for _, test := range exampleTests { for _, test := range exampleTests {
packID, tpe, offset, length, err := idx.Lookup(test.id) blob, err := idx.Lookup(test.id)
OK(t, err) OK(t, err)
Equals(t, test.packID, packID) Equals(t, test.packID, blob.PackID)
Equals(t, test.tpe, tpe) Equals(t, test.tpe, blob.Type)
Equals(t, test.offset, offset) Equals(t, test.offset, blob.Offset)
Equals(t, test.length, length) Equals(t, test.length, blob.Length)
} }
Equals(t, oldIdx, idx.Supersedes()) Equals(t, oldIdx, idx.Supersedes())
@@ -264,13 +264,13 @@ func TestIndexUnserializeOld(t *testing.T) {
OK(t, err) OK(t, err)
for _, test := range exampleTests { for _, test := range exampleTests {
packID, tpe, offset, length, err := idx.Lookup(test.id) blob, err := idx.Lookup(test.id)
OK(t, err) OK(t, err)
Equals(t, test.packID, packID) Equals(t, test.packID, blob.PackID)
Equals(t, test.tpe, tpe) Equals(t, test.tpe, blob.Type)
Equals(t, test.offset, offset) Equals(t, test.offset, blob.Offset)
Equals(t, test.length, length) Equals(t, test.length, blob.Length)
} }
Equals(t, 0, len(idx.Supersedes())) Equals(t, 0, len(idx.Supersedes()))
@@ -310,17 +310,17 @@ func TestConvertIndex(t *testing.T) {
"Index %v count blobs %v: %v != %v", id.Str(), pack.Tree, idx.Count(pack.Tree), oldIndex.Count(pack.Tree)) "Index %v count blobs %v: %v != %v", id.Str(), pack.Tree, idx.Count(pack.Tree), oldIndex.Count(pack.Tree))
for packedBlob := range idx.Each(nil) { for packedBlob := range idx.Each(nil) {
packID, tpe, offset, length, err := oldIndex.Lookup(packedBlob.ID) blob, err := oldIndex.Lookup(packedBlob.ID)
OK(t, err) OK(t, err)
Assert(t, packID == packedBlob.PackID, Assert(t, blob.PackID == packedBlob.PackID,
"Check blob %v: pack ID %v != %v", packedBlob.ID, packID, packedBlob.PackID) "Check blob %v: pack ID %v != %v", packedBlob.ID, blob.PackID, packedBlob.PackID)
Assert(t, tpe == packedBlob.Type, Assert(t, blob.Type == packedBlob.Type,
"Check blob %v: Type %v != %v", packedBlob.ID, tpe, packedBlob.Type) "Check blob %v: Type %v != %v", packedBlob.ID, blob.Type, packedBlob.Type)
Assert(t, offset == packedBlob.Offset, Assert(t, blob.Offset == packedBlob.Offset,
"Check blob %v: Type %v != %v", packedBlob.ID, offset, packedBlob.Offset) "Check blob %v: Type %v != %v", packedBlob.ID, blob.Offset, packedBlob.Offset)
Assert(t, length == packedBlob.Length, Assert(t, blob.Length == packedBlob.Length,
"Check blob %v: Type %v != %v", packedBlob.ID, length, packedBlob.Length) "Check blob %v: Type %v != %v", packedBlob.ID, blob.Length, packedBlob.Length)
} }
} }
}) })

View File

@@ -33,24 +33,23 @@ func NewMasterIndex() *MasterIndex {
} }
// Lookup queries all known Indexes for the ID and returns the first match. // Lookup queries all known Indexes for the ID and returns the first match.
func (mi *MasterIndex) Lookup(id backend.ID) (packID backend.ID, tpe pack.BlobType, offset, length uint, err error) { func (mi *MasterIndex) Lookup(id backend.ID) (blob PackedBlob, err error) {
mi.idxMutex.RLock() mi.idxMutex.RLock()
defer mi.idxMutex.RUnlock() defer mi.idxMutex.RUnlock()
debug.Log("MasterIndex.Lookup", "looking up id %v", id.Str()) debug.Log("MasterIndex.Lookup", "looking up id %v", id.Str())
for _, idx := range mi.idx { for _, idx := range mi.idx {
packID, tpe, offset, length, err = idx.Lookup(id) blob, err = idx.Lookup(id)
if err == nil { if err == nil {
debug.Log("MasterIndex.Lookup", debug.Log("MasterIndex.Lookup",
"found id %v in pack %v at offset %d with length %d", "found id %v: %v", blob)
id.Str(), packID.Str(), offset, length)
return return
} }
} }
debug.Log("MasterIndex.Lookup", "id %v not found in any index", id.Str()) debug.Log("MasterIndex.Lookup", "id %v not found in any index", id.Str())
return backend.ID{}, pack.Data, 0, 0, fmt.Errorf("id %v not found in any index", id) return PackedBlob{}, fmt.Errorf("id %v not found in any index", id)
} }
// LookupSize queries all known Indexes for the ID and returns the first match. // LookupSize queries all known Indexes for the ID and returns the first match.

View File

@@ -92,32 +92,33 @@ func (r *Repository) LoadAndDecrypt(t backend.Type, id backend.ID) ([]byte, erro
func (r *Repository) LoadBlob(t pack.BlobType, id backend.ID, plaintextBuf []byte) ([]byte, error) { func (r *Repository) LoadBlob(t pack.BlobType, id backend.ID, plaintextBuf []byte) ([]byte, error) {
debug.Log("Repo.LoadBlob", "load %v with id %v", t, id.Str()) debug.Log("Repo.LoadBlob", "load %v with id %v", t, id.Str())
// lookup pack // lookup pack
packID, tpe, offset, length, err := r.idx.Lookup(id) blob, err := r.idx.Lookup(id)
if err != nil { if err != nil {
debug.Log("Repo.LoadBlob", "id %v not found in index: %v", id.Str(), err) debug.Log("Repo.LoadBlob", "id %v not found in index: %v", id.Str(), err)
return nil, err return nil, err
} }
if length > uint(cap(plaintextBuf))+crypto.Extension { plaintextBufSize := uint(cap(plaintextBuf))
return nil, fmt.Errorf("buf is too small, need %d more bytes", length-uint(cap(plaintextBuf))-crypto.Extension) if blob.PlaintextLength() > plaintextBufSize {
return nil, fmt.Errorf("buf is too small, need %d more bytes", blob.PlaintextLength()-plaintextBufSize)
} }
if tpe != t { if blob.Type != t {
debug.Log("Repo.LoadBlob", "wrong type returned for %v: wanted %v, got %v", id.Str(), t, tpe) debug.Log("Repo.LoadBlob", "wrong type returned for %v: wanted %v, got %v", id.Str(), t, blob.Type)
return nil, fmt.Errorf("blob has wrong type %v (wanted: %v)", tpe, t) return nil, fmt.Errorf("blob has wrong type %v (wanted: %v)", blob.Type, t)
} }
debug.Log("Repo.LoadBlob", "id %v found in pack %v at offset %v (length %d)", id.Str(), packID.Str(), offset, length) debug.Log("Repo.LoadBlob", "id %v found: %v", id.Str(), blob)
// load blob from pack // load blob from pack
rd, err := r.be.GetReader(backend.Data, packID.String(), offset, length) rd, err := r.be.GetReader(backend.Data, blob.PackID.String(), blob.Offset, blob.Length)
if err != nil { if err != nil {
debug.Log("Repo.LoadBlob", "error loading pack %v for %v: %v", packID.Str(), id.Str(), err) debug.Log("Repo.LoadBlob", "error loading blob %v: %v", blob, err)
return nil, err return nil, err
} }
// make buffer that is large enough for the complete blob // make buffer that is large enough for the complete blob
ciphertextBuf := make([]byte, length) ciphertextBuf := make([]byte, blob.Length)
_, err = io.ReadFull(rd, ciphertextBuf) _, err = io.ReadFull(rd, ciphertextBuf)
if err != nil { if err != nil {
return nil, err return nil, err
@@ -173,13 +174,13 @@ func (r *Repository) LoadJSONUnpacked(t backend.Type, id backend.ID, item interf
// data and afterwards call json.Unmarshal on the item. // data and afterwards call json.Unmarshal on the item.
func (r *Repository) LoadJSONPack(t pack.BlobType, id backend.ID, item interface{}) error { func (r *Repository) LoadJSONPack(t pack.BlobType, id backend.ID, item interface{}) error {
// lookup pack // lookup pack
packID, _, offset, length, err := r.idx.Lookup(id) blob, err := r.idx.Lookup(id)
if err != nil { if err != nil {
return err return err
} }
// load blob from pack // load blob from pack
rd, err := r.be.GetReader(backend.Data, packID.String(), offset, length) rd, err := r.be.GetReader(backend.Data, blob.PackID.String(), blob.Offset, blob.Length)
if err != nil { if err != nil {
return err return err
} }