Mirror of https://github.com/octoleo/restic.git (synced 2024-11-22 12:55:18 +00:00)
Refactor Index.Store() to take a PackedBlob
commit 1fc0d78913
parent f3f84b1544
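
For orientation, the commit replaces Index.Store's five positional parameters with a single PackedBlob value. Below is a minimal, self-contained sketch of that shape; the stand-in types, the map-backed Index, and the main function are illustrative assumptions, while the PackedBlob field names (Type, ID, PackID, Offset, Length) are taken verbatim from the hunks that follow.

package main

import "fmt"

// Stand-ins for restic's backend.ID and pack.BlobType (assumed shapes,
// for illustration only).
type ID [32]byte
type BlobType uint8

// PackedBlob bundles the values that Index.Store previously took as
// separate parameters; field names mirror the diff below.
type PackedBlob struct {
	Type   BlobType
	ID     ID
	PackID ID
	Offset uint
	Length uint
}

// Index is a toy stand-in for repository.Index, keyed by blob ID.
type Index struct {
	pack map[ID]PackedBlob
}

// Store remembers the blob in the index. The pre-refactor signature was
// Store(t pack.BlobType, id backend.ID, pack backend.ID, offset, length uint).
func (idx *Index) Store(blob PackedBlob) {
	idx.pack[blob.ID] = blob
}

func main() {
	idx := &Index{pack: make(map[ID]PackedBlob)}
	idx.Store(PackedBlob{
		Type:   1,
		ID:     ID{0x01},
		PackID: ID{0x02},
		Offset: 0,
		Length: 23,
	})
	fmt.Println("stored blobs:", len(idx.pack))
}
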
@@ -90,7 +90,7 @@ func (cmd CmdRebuildIndex) RebuildIndex() error {
 			}

 			blobsDone[b] = struct{}{}
-			combinedIndex.Store(packedBlob.Type, packedBlob.ID, packedBlob.PackID, packedBlob.Offset, packedBlob.Length)
+			combinedIndex.Store(packedBlob)
 		}

 		combinedIndex.AddToSupersedes(indexID)
@@ -162,7 +162,13 @@ func (cmd CmdRebuildIndex) RebuildIndex() error {

 		for _, blob := range up.Entries {
 			debug.Log("RebuildIndex.RebuildIndex", "pack %v: blob %v", packID.Str(), blob)
-			combinedIndex.Store(blob.Type, blob.ID, packID, blob.Offset, blob.Length)
+			combinedIndex.Store(repository.PackedBlob{
+				Type:   blob.Type,
+				ID:     blob.ID,
+				PackID: packID,
+				Offset: blob.Offset,
+				Length: blob.Length,
+			})
 		}

 		err = rd.Close()
@@ -40,12 +40,12 @@ func NewIndex() *Index {
 	}
 }

-func (idx *Index) store(t pack.BlobType, id backend.ID, pack backend.ID, offset, length uint) {
-	idx.pack[id] = indexEntry{
-		tpe:    t,
-		packID: pack,
-		offset: offset,
-		length: length,
+func (idx *Index) store(blob PackedBlob) {
+	idx.pack[blob.ID] = indexEntry{
+		tpe:    blob.Type,
+		packID: blob.PackID,
+		offset: blob.Offset,
+		length: blob.Length,
 	}
 }

@@ -96,7 +96,7 @@ var IndexFull = func(idx *Index) bool {

 // Store remembers the id and pack in the index. An existing entry will be
 // silently overwritten.
-func (idx *Index) Store(t pack.BlobType, id backend.ID, pack backend.ID, offset, length uint) {
+func (idx *Index) Store(blob PackedBlob) {
 	idx.m.Lock()
 	defer idx.m.Unlock()

@@ -104,10 +104,9 @@ func (idx *Index) Store(t pack.BlobType, id backend.ID, pack backend.ID, offset,
 		panic("store new item in finalized index")
 	}

-	debug.Log("Index.Store", "pack %v contains id %v (%v), offset %v, length %v",
-		pack.Str(), id.Str(), t, offset, length)
+	debug.Log("Index.Store", "%v", blob)

-	idx.store(t, id, pack, offset, length)
+	idx.store(blob)
 }

 // Lookup queries the index for the blob ID and returns a PackedBlob.
@@ -489,7 +488,13 @@ func DecodeIndex(rd io.Reader) (idx *Index, err error) {
 	idx = NewIndex()
 	for _, pack := range idxJSON.Packs {
 		for _, blob := range pack.Blobs {
-			idx.store(blob.Type, blob.ID, pack.ID, blob.Offset, blob.Length)
+			idx.store(PackedBlob{
+				Type:   blob.Type,
+				ID:     blob.ID,
+				Offset: blob.Offset,
+				Length: blob.Length,
+				PackID: pack.ID,
+			})
 		}
 	}
 	idx.supersedes = idxJSON.Supersedes
@@ -514,7 +519,13 @@ func DecodeOldIndex(rd io.Reader) (idx *Index, err error) {
 	idx = NewIndex()
 	for _, pack := range list {
 		for _, blob := range pack.Blobs {
-			idx.store(blob.Type, blob.ID, pack.ID, blob.Offset, blob.Length)
+			idx.store(PackedBlob{
+				Type:   blob.Type,
+				ID:     blob.ID,
+				PackID: pack.ID,
+				Offset: blob.Offset,
+				Length: blob.Length,
+			})
 		}
 	}

@@ -41,7 +41,13 @@ func TestIndexSerialize(t *testing.T) {
 		for j := 0; j < 20; j++ {
 			id := randomID()
 			length := uint(i*100 + j)
-			idx.Store(pack.Data, id, packID, pos, length)
+			idx.Store(repository.PackedBlob{
+				Type:   pack.Data,
+				ID:     id,
+				PackID: packID,
+				Offset: pos,
+				Length: length,
+			})

 			tests = append(tests, testEntry{
 				id: id,
@@ -95,7 +101,13 @@ func TestIndexSerialize(t *testing.T) {
 		for j := 0; j < 10; j++ {
 			id := randomID()
 			length := uint(i*100 + j)
-			idx.Store(pack.Data, id, packID, pos, length)
+			idx.Store(repository.PackedBlob{
+				Type:   pack.Data,
+				ID:     id,
+				PackID: packID,
+				Offset: pos,
+				Length: length,
+			})

 			newtests = append(newtests, testEntry{
 				id: id,
@@ -154,7 +166,13 @@ func TestIndexSize(t *testing.T) {
 		for j := 0; j < blobs; j++ {
 			id := randomID()
 			length := uint(i*100 + j)
-			idx.Store(pack.Data, id, packID, pos, length)
+			idx.Store(repository.PackedBlob{
+				Type:   pack.Data,
+				ID:     id,
+				PackID: packID,
+				Offset: pos,
+				Length: length,
+			})

 			pos += length
 		}
@@ -361,7 +379,13 @@ func TestIndexPacks(t *testing.T) {

 	for i := 0; i < 20; i++ {
 		packID := randomID()
-		idx.Store(pack.Data, randomID(), packID, 0, 23)
+		idx.Store(repository.PackedBlob{
+			Type:   pack.Data,
+			ID:     randomID(),
+			PackID: packID,
+			Offset: 0,
+			Length: 23,
+		})

 		packs.Insert(packID)
 	}
@@ -270,7 +270,13 @@ func (r *Repository) savePacker(p *pack.Packer) error {
 	// update blobs in the index
 	for _, b := range p.Blobs() {
 		debug.Log("Repo.savePacker", " updating blob %v to pack %v", b.ID.Str(), sid.Str())
-		r.idx.Current().Store(b.Type, b.ID, sid, b.Offset, uint(b.Length))
+		r.idx.Current().Store(PackedBlob{
+			Type:   b.Type,
+			ID:     b.ID,
+			PackID: sid,
+			Offset: b.Offset,
+			Length: uint(b.Length),
+		})
 		r.idx.RemoveFromInFlight(b.ID)
 	}
