
Refactor Index.Lookup() to return struct PackedBlob

Alexander Neumann 2015-10-31 14:47:42 +01:00
parent fc0f5d8f72
commit fccde030d5
6 changed files with 90 additions and 71 deletions
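For orientation: at every call site the change collapses Lookup()'s five return values (packID, type, offset, length, error) into a single PackedBlob value. The following is a self-contained toy sketch of that shape, not restic's code; ID, BlobType, and the map-backed Index are simplified stand-ins for backend.ID, pack.BlobType, and the real index type.

package main

import "fmt"

// ID and BlobType are simplified stand-ins for backend.ID and pack.BlobType.
type ID string
type BlobType int

// PackedBlob mirrors the struct introduced by this commit.
type PackedBlob struct {
	Type   BlobType
	Length uint
	ID     ID
	Offset uint
	PackID ID
}

// Index is a minimal map-backed lookup table keyed by blob ID.
type Index struct {
	pack map[ID]PackedBlob
}

// Lookup returns one PackedBlob instead of five separate values.
func (idx *Index) Lookup(id ID) (PackedBlob, error) {
	if pb, ok := idx.pack[id]; ok {
		return pb, nil
	}
	return PackedBlob{}, fmt.Errorf("id %v not found in index", id)
}

func main() {
	idx := &Index{pack: map[ID]PackedBlob{
		"b1": {Type: 0, Length: 128, ID: "b1", Offset: 0, PackID: "p1"},
	}}

	// Old call sites: packID, tpe, offset, length, err := idx.Lookup(id)
	// New call sites receive everything in one value:
	blob, err := idx.Lookup("b1")
	if err != nil {
		panic(err)
	}
	fmt.Println(blob.PackID, blob.Offset, blob.Length)
}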

View File

@@ -162,16 +162,16 @@ func (cmd CmdCat) Execute(args []string) error {
return err
case "blob":
_, blobType, _, length, err := repo.Index().Lookup(id)
blob, err := repo.Index().Lookup(id)
if err != nil {
return err
}
if blobType != pack.Data {
if blob.Type != pack.Data {
return errors.New("wrong type for blob")
}
buf := make([]byte, length)
buf := make([]byte, blob.Length)
data, err := repo.LoadBlob(pack.Data, id, buf)
if err != nil {
return err

View File

@@ -212,14 +212,14 @@ func (node Node) createFileAt(path string, repo *repository.Repository) error {
var buf []byte
for _, id := range node.Content {
_, _, _, length, err := repo.Index().Lookup(id)
blob, err := repo.Index().Lookup(id)
if err != nil {
return err
}
buf = buf[:cap(buf)]
if uint(len(buf)) < length {
buf = make([]byte, length)
if uint(len(buf)) < blob.Length {
buf = make([]byte, blob.Length)
}
buf, err := repo.LoadBlob(pack.Data, id, buf)

View File

@@ -109,24 +109,32 @@ func (idx *Index) Store(t pack.BlobType, id backend.ID, pack backend.ID, offset,
idx.store(t, id, pack, offset, length)
}
// Lookup returns the pack for the id.
func (idx *Index) Lookup(id backend.ID) (packID backend.ID, tpe pack.BlobType, offset, length uint, err error) {
// Lookup queries the index for the blob ID and returns a PackedBlob.
func (idx *Index) Lookup(id backend.ID) (pb PackedBlob, err error) {
idx.m.Lock()
defer idx.m.Unlock()
if p, ok := idx.pack[id]; ok {
debug.Log("Index.Lookup", "id %v found in pack %v at %d, length %d",
id.Str(), p.packID.Str(), p.offset, p.length)
return p.packID, p.tpe, p.offset, p.length, nil
pb := PackedBlob{
Type: p.tpe,
Length: p.length,
ID: id,
Offset: p.offset,
PackID: p.packID,
}
return pb, nil
}
debug.Log("Index.Lookup", "id %v not found", id.Str())
return backend.ID{}, pack.Data, 0, 0, fmt.Errorf("id %v not found in index", id)
return PackedBlob{}, fmt.Errorf("id %v not found in index", id)
}
// Has returns true iff the id is listed in the index.
func (idx *Index) Has(id backend.ID) bool {
_, _, _, _, err := idx.Lookup(id)
_, err := idx.Lookup(id)
if err == nil {
return true
}
@@ -137,11 +145,11 @@ func (idx *Index) Has(id backend.ID) bool {
// LookupSize returns the length of the cleartext content behind the
// given id
func (idx *Index) LookupSize(id backend.ID) (cleartextLength uint, err error) {
_, _, _, encryptedLength, err := idx.Lookup(id)
blob, err := idx.Lookup(id)
if err != nil {
return 0, err
}
return encryptedLength - crypto.Extension, nil
return blob.PlaintextLength(), nil
}
// Merge loads all items from other into idx.
@@ -181,10 +189,23 @@ func (idx *Index) AddToSupersedes(ids ...backend.ID) error {
// PackedBlob is a blob already saved within a pack.
type PackedBlob struct {
pack.Blob
Type pack.BlobType
Length uint
ID backend.ID
Offset uint
PackID backend.ID
}
func (pb PackedBlob) String() string {
return fmt.Sprintf("<PackedBlob %v type %v in pack %v: len %v, offset %v>",
pb.ID.Str(), pb.Type, pb.PackID.Str(), pb.Length, pb.Offset)
}
// PlaintextLength returns the number of bytes the blob's plaintext occupies.
func (pb PackedBlob) PlaintextLength() uint {
return pb.Length - crypto.Extension
}
// Each returns a channel that yields all blobs known to the index. If done is
// closed, the background goroutine terminates. This blocks any modification of
// the index.
@@ -204,12 +225,10 @@ func (idx *Index) Each(done chan struct{}) <-chan PackedBlob {
case <-done:
return
case ch <- PackedBlob{
Blob: pack.Blob{
ID: id,
Offset: blob.offset,
Type: blob.tpe,
Length: blob.length,
},
ID: id,
Offset: blob.offset,
Type: blob.tpe,
Length: blob.length,
PackID: blob.packID,
}:
}

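With the new struct, LookupSize (above) no longer subtracts crypto.Extension inline; it simply returns blob.PlaintextLength(). A minimal self-contained sketch of that arithmetic follows; the overhead constant here is illustrative, the real value is crypto.Extension from restic's crypto package.

package main

import "fmt"

// extension stands in for crypto.Extension, the per-blob encryption overhead.
// The concrete value is illustrative, not taken from the diff.
const extension = 32

type packedBlob struct {
	length uint // length of the encrypted blob inside the pack file
}

// plaintextLength mirrors PackedBlob.PlaintextLength from the diff above.
func (pb packedBlob) plaintextLength() uint {
	return pb.length - extension
}

func main() {
	pb := packedBlob{length: 4096 + extension}
	// Old LookupSize: return encryptedLength - crypto.Extension, nil
	// New LookupSize: return blob.PlaintextLength(), nil
	fmt.Println(pb.plaintextLength()) // prints 4096
}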
View File

@@ -69,21 +69,21 @@ func TestIndexSerialize(t *testing.T) {
OK(t, err)
for _, testBlob := range tests {
packID, tpe, offset, length, err := idx.Lookup(testBlob.id)
result, err := idx.Lookup(testBlob.id)
OK(t, err)
Equals(t, testBlob.pack, packID)
Equals(t, testBlob.tpe, tpe)
Equals(t, testBlob.offset, offset)
Equals(t, testBlob.length, length)
Equals(t, testBlob.pack, result.PackID)
Equals(t, testBlob.tpe, result.Type)
Equals(t, testBlob.offset, result.Offset)
Equals(t, testBlob.length, result.Length)
packID, tpe, offset, length, err = idx2.Lookup(testBlob.id)
result2, err := idx2.Lookup(testBlob.id)
OK(t, err)
Equals(t, testBlob.pack, packID)
Equals(t, testBlob.tpe, tpe)
Equals(t, testBlob.offset, offset)
Equals(t, testBlob.length, length)
Equals(t, testBlob.pack, result2.PackID)
Equals(t, testBlob.tpe, result2.Type)
Equals(t, testBlob.offset, result2.Offset)
Equals(t, testBlob.length, result2.Length)
}
// add more blobs to idx
@@ -126,13 +126,13 @@ func TestIndexSerialize(t *testing.T) {
// all new blobs must be in the index
for _, testBlob := range newtests {
packID, tpe, offset, length, err := idx3.Lookup(testBlob.id)
blob, err := idx3.Lookup(testBlob.id)
OK(t, err)
Equals(t, testBlob.pack, packID)
Equals(t, testBlob.tpe, tpe)
Equals(t, testBlob.offset, offset)
Equals(t, testBlob.length, length)
Equals(t, testBlob.pack, blob.PackID)
Equals(t, testBlob.tpe, blob.Type)
Equals(t, testBlob.offset, blob.Offset)
Equals(t, testBlob.length, blob.Length)
}
}
@@ -247,13 +247,13 @@ func TestIndexUnserialize(t *testing.T) {
OK(t, err)
for _, test := range exampleTests {
packID, tpe, offset, length, err := idx.Lookup(test.id)
blob, err := idx.Lookup(test.id)
OK(t, err)
Equals(t, test.packID, packID)
Equals(t, test.tpe, tpe)
Equals(t, test.offset, offset)
Equals(t, test.length, length)
Equals(t, test.packID, blob.PackID)
Equals(t, test.tpe, blob.Type)
Equals(t, test.offset, blob.Offset)
Equals(t, test.length, blob.Length)
}
Equals(t, oldIdx, idx.Supersedes())
@@ -264,13 +264,13 @@ func TestIndexUnserializeOld(t *testing.T) {
OK(t, err)
for _, test := range exampleTests {
packID, tpe, offset, length, err := idx.Lookup(test.id)
blob, err := idx.Lookup(test.id)
OK(t, err)
Equals(t, test.packID, packID)
Equals(t, test.tpe, tpe)
Equals(t, test.offset, offset)
Equals(t, test.length, length)
Equals(t, test.packID, blob.PackID)
Equals(t, test.tpe, blob.Type)
Equals(t, test.offset, blob.Offset)
Equals(t, test.length, blob.Length)
}
Equals(t, 0, len(idx.Supersedes()))
@@ -310,17 +310,17 @@ func TestConvertIndex(t *testing.T) {
"Index %v count blobs %v: %v != %v", id.Str(), pack.Tree, idx.Count(pack.Tree), oldIndex.Count(pack.Tree))
for packedBlob := range idx.Each(nil) {
packID, tpe, offset, length, err := oldIndex.Lookup(packedBlob.ID)
blob, err := oldIndex.Lookup(packedBlob.ID)
OK(t, err)
Assert(t, packID == packedBlob.PackID,
"Check blob %v: pack ID %v != %v", packedBlob.ID, packID, packedBlob.PackID)
Assert(t, tpe == packedBlob.Type,
"Check blob %v: Type %v != %v", packedBlob.ID, tpe, packedBlob.Type)
Assert(t, offset == packedBlob.Offset,
"Check blob %v: Type %v != %v", packedBlob.ID, offset, packedBlob.Offset)
Assert(t, length == packedBlob.Length,
"Check blob %v: Type %v != %v", packedBlob.ID, length, packedBlob.Length)
Assert(t, blob.PackID == packedBlob.PackID,
"Check blob %v: pack ID %v != %v", packedBlob.ID, blob.PackID, packedBlob.PackID)
Assert(t, blob.Type == packedBlob.Type,
"Check blob %v: Type %v != %v", packedBlob.ID, blob.Type, packedBlob.Type)
Assert(t, blob.Offset == packedBlob.Offset,
"Check blob %v: Offset %v != %v", packedBlob.ID, blob.Offset, packedBlob.Offset)
Assert(t, blob.Length == packedBlob.Length,
"Check blob %v: Length %v != %v", packedBlob.ID, blob.Length, packedBlob.Length)
}
}
})

View File

@@ -33,24 +33,23 @@ func NewMasterIndex() *MasterIndex {
}
// Lookup queries all known Indexes for the ID and returns the first match.
func (mi *MasterIndex) Lookup(id backend.ID) (packID backend.ID, tpe pack.BlobType, offset, length uint, err error) {
func (mi *MasterIndex) Lookup(id backend.ID) (blob PackedBlob, err error) {
mi.idxMutex.RLock()
defer mi.idxMutex.RUnlock()
debug.Log("MasterIndex.Lookup", "looking up id %v", id.Str())
for _, idx := range mi.idx {
packID, tpe, offset, length, err = idx.Lookup(id)
blob, err = idx.Lookup(id)
if err == nil {
debug.Log("MasterIndex.Lookup",
"found id %v in pack %v at offset %d with length %d",
id.Str(), packID.Str(), offset, length)
"found id %v: %v", blob)
return
}
}
debug.Log("MasterIndex.Lookup", "id %v not found in any index", id.Str())
return backend.ID{}, pack.Data, 0, 0, fmt.Errorf("id %v not found in any index", id)
return PackedBlob{}, fmt.Errorf("id %v not found in any index", id)
}
// LookupSize queries all known Indexes for the ID and returns the first match.

View File

@@ -92,32 +92,33 @@ func (r *Repository) LoadAndDecrypt(t backend.Type, id backend.ID) ([]byte, erro
func (r *Repository) LoadBlob(t pack.BlobType, id backend.ID, plaintextBuf []byte) ([]byte, error) {
debug.Log("Repo.LoadBlob", "load %v with id %v", t, id.Str())
// lookup pack
packID, tpe, offset, length, err := r.idx.Lookup(id)
blob, err := r.idx.Lookup(id)
if err != nil {
debug.Log("Repo.LoadBlob", "id %v not found in index: %v", id.Str(), err)
return nil, err
}
if length > uint(cap(plaintextBuf))+crypto.Extension {
return nil, fmt.Errorf("buf is too small, need %d more bytes", length-uint(cap(plaintextBuf))-crypto.Extension)
plaintextBufSize := uint(cap(plaintextBuf))
if blob.PlaintextLength() > plaintextBufSize {
return nil, fmt.Errorf("buf is too small, need %d more bytes", blob.PlaintextLength()-plaintextBufSize)
}
if tpe != t {
debug.Log("Repo.LoadBlob", "wrong type returned for %v: wanted %v, got %v", id.Str(), t, tpe)
return nil, fmt.Errorf("blob has wrong type %v (wanted: %v)", tpe, t)
if blob.Type != t {
debug.Log("Repo.LoadBlob", "wrong type returned for %v: wanted %v, got %v", id.Str(), t, blob.Type)
return nil, fmt.Errorf("blob has wrong type %v (wanted: %v)", blob.Type, t)
}
debug.Log("Repo.LoadBlob", "id %v found in pack %v at offset %v (length %d)", id.Str(), packID.Str(), offset, length)
debug.Log("Repo.LoadBlob", "id %v found: %v", id.Str(), blob)
// load blob from pack
rd, err := r.be.GetReader(backend.Data, packID.String(), offset, length)
rd, err := r.be.GetReader(backend.Data, blob.PackID.String(), blob.Offset, blob.Length)
if err != nil {
debug.Log("Repo.LoadBlob", "error loading pack %v for %v: %v", packID.Str(), id.Str(), err)
debug.Log("Repo.LoadBlob", "error loading blob %v: %v", blob, err)
return nil, err
}
// make buffer that is large enough for the complete blob
ciphertextBuf := make([]byte, length)
ciphertextBuf := make([]byte, blob.Length)
_, err = io.ReadFull(rd, ciphertextBuf)
if err != nil {
return nil, err
@@ -173,13 +174,13 @@ func (r *Repository) LoadJSONUnpacked(t backend.Type, id backend.ID, item interf
// data and afterwards call json.Unmarshal on the item.
func (r *Repository) LoadJSONPack(t pack.BlobType, id backend.ID, item interface{}) error {
// lookup pack
packID, _, offset, length, err := r.idx.Lookup(id)
blob, err := r.idx.Lookup(id)
if err != nil {
return err
}
// load blob from pack
rd, err := r.be.GetReader(backend.Data, packID.String(), offset, length)
rd, err := r.be.GetReader(backend.Data, blob.PackID.String(), blob.Offset, blob.Length)
if err != nil {
return err
}
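The rewritten LoadBlob above checks the caller's buffer against blob.PlaintextLength() instead of the raw encrypted length. A small self-contained sketch of that capacity check, again with an illustrative overhead constant standing in for crypto.Extension:

package main

import "fmt"

// extension stands in for crypto.Extension; the concrete value is illustrative.
const extension = 32

// checkBuf mirrors the capacity check in the rewritten LoadBlob: the plaintext
// buffer must be able to hold the decrypted blob, i.e. the stored (encrypted)
// length minus the encryption overhead (PackedBlob.PlaintextLength()).
func checkBuf(storedLength uint, plaintextBuf []byte) error {
	plaintextLen := storedLength - extension
	if plaintextLen > uint(cap(plaintextBuf)) {
		return fmt.Errorf("buf is too small, need %d more bytes",
			plaintextLen-uint(cap(plaintextBuf)))
	}
	return nil
}

func main() {
	buf := make([]byte, 0, 1024)
	fmt.Println(checkBuf(1024+extension, buf)) // <nil>: capacity is sufficient
	fmt.Println(checkBuf(4096+extension, buf)) // buf is too small, need 3072 more bytes
}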