
repository: remove unused index.Store

Michael Eischer 2022-05-26 13:41:06 +02:00
parent 628ae799ca
commit fe5a8e137a
4 changed files with 30 additions and 48 deletions
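
The commit drops the per-blob Index.Store entry point and keeps the batch-oriented Index.StorePack, which records all blobs of a pack in a single call; every test and benchmark change below follows the same pattern of collecting a pack's blobs into a slice and registering them once per pack. The sketch below condenses that pattern. It only uses identifiers that appear in the diff (repository.NewIndex, restic.NewRandomID, restic.NewRandomBlobHandle, restic.Blob, StorePack); the package clause, import paths, and the fillIndex helper itself are illustrative assumptions based on the upstream restic module layout, not part of the commit.

package repository_test

import (
	"github.com/restic/restic/internal/repository"
	"github.com/restic/restic/internal/restic"
)

// fillIndex is a hypothetical helper mirroring the converted tests:
// blobs are collected per pack and registered with one StorePack call
// instead of one Store call per blob.
func fillIndex(packs, blobsPerPack int) *repository.Index {
	idx := repository.NewIndex()
	for i := 0; i < packs; i++ {
		packID := restic.NewRandomID()
		var blobs []restic.Blob
		pos := uint(0)
		for j := 0; j < blobsPerPack; j++ {
			length := uint(100)
			// collect the blob instead of storing it immediately
			blobs = append(blobs, restic.Blob{
				BlobHandle: restic.NewRandomBlobHandle(),
				Offset:     pos,
				Length:     length,
			})
			pos += length
		}
		// one call per pack replaces the former per-blob idx.Store(...)
		idx.StorePack(packID, blobs)
	}
	return idx
}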

View File

@@ -131,27 +131,6 @@ var IndexFull = func(idx *Index, compress bool) bool {
 }
-// Store remembers the id and pack in the index.
-func (idx *Index) Store(pb restic.PackedBlob) {
-	idx.m.Lock()
-	defer idx.m.Unlock()
-	if idx.final {
-		panic("store new item in finalized index")
-	}
-	debug.Log("%v", pb)
-	// get packIndex and save if new packID
-	packIndex, ok := idx.packIDToIndex[pb.PackID]
-	if !ok {
-		packIndex = idx.addToPacks(pb.PackID)
-		idx.packIDToIndex[pb.PackID] = packIndex
-	}
-	idx.store(packIndex, pb.Blob)
-}
 // StorePack remembers the ids of all blobs of a given pack
 // in the index
 func (idx *Index) StorePack(id restic.ID, blobs []restic.Blob) {

View File

@@ -19,6 +19,7 @@ func TestIndexSerialize(t *testing.T) {
 	// create 50 packs with 20 blobs each
 	for i := 0; i < 50; i++ {
 		packID := restic.NewRandomID()
+		var blobs []restic.Blob
 		pos := uint(0)
 		for j := 0; j < 20; j++ {
@@ -37,10 +38,11 @@ func TestIndexSerialize(t *testing.T) {
 				},
 				PackID: packID,
 			}
-			idx.Store(pb)
+			blobs = append(blobs, pb.Blob)
 			tests = append(tests, pb)
 			pos += length
 		}
+		idx.StorePack(packID, blobs)
 	}
 	wr := bytes.NewBuffer(nil)
@@ -83,6 +85,7 @@ func TestIndexSerialize(t *testing.T) {
 	newtests := []restic.PackedBlob{}
 	for i := 0; i < 10; i++ {
 		packID := restic.NewRandomID()
+		var blobs []restic.Blob
 		pos := uint(0)
 		for j := 0; j < 10; j++ {
@@ -95,10 +98,11 @@ func TestIndexSerialize(t *testing.T) {
 				},
 				PackID: packID,
 			}
-			idx.Store(pb)
+			blobs = append(blobs, pb.Blob)
 			newtests = append(newtests, pb)
 			pos += length
 		}
+		idx.StorePack(packID, blobs)
 	}
 	// finalize; serialize idx, unserialize to idx3
@@ -141,24 +145,23 @@ func TestIndexSize(t *testing.T) {
 	idx := repository.NewIndex()
 	packs := 200
-	blobs := 100
+	blobCount := 100
 	for i := 0; i < packs; i++ {
 		packID := restic.NewRandomID()
+		var blobs []restic.Blob
 		pos := uint(0)
-		for j := 0; j < blobs; j++ {
+		for j := 0; j < blobCount; j++ {
 			length := uint(i*100 + j)
-			idx.Store(restic.PackedBlob{
-				Blob: restic.Blob{
-					BlobHandle: restic.NewRandomBlobHandle(),
-					Offset:     pos,
-					Length:     length,
-				},
-				PackID: packID,
+			blobs = append(blobs, restic.Blob{
+				BlobHandle: restic.NewRandomBlobHandle(),
+				Offset:     pos,
+				Length:     length,
 			})
 			pos += length
 		}
+		idx.StorePack(packID, blobs)
 	}
 	wr := bytes.NewBuffer(nil)
@@ -166,7 +169,7 @@ func TestIndexSize(t *testing.T) {
 	err := idx.Encode(wr)
 	rtest.OK(t, err)
-	t.Logf("Index file size for %d blobs in %d packs is %d", blobs*packs, packs, wr.Len())
+	t.Logf("Index file size for %d blobs in %d packs is %d", blobCount*packs, packs, wr.Len())
 }
 // example index serialization from doc/Design.rst
@@ -419,13 +422,12 @@ func TestIndexPacks(t *testing.T) {
 	for i := 0; i < 20; i++ {
 		packID := restic.NewRandomID()
-		idx.Store(restic.PackedBlob{
-			Blob: restic.Blob{
+		idx.StorePack(packID, []restic.Blob{
+			{
 				BlobHandle: restic.NewRandomBlobHandle(),
 				Offset:     0,
 				Length:     23,
 			},
-			PackID: packID,
 		})
 		packs.Insert(packID)
@@ -529,6 +531,7 @@ func TestIndexHas(t *testing.T) {
 	// create 50 packs with 20 blobs each
 	for i := 0; i < 50; i++ {
 		packID := restic.NewRandomID()
+		var blobs []restic.Blob
 		pos := uint(0)
 		for j := 0; j < 20; j++ {
@@ -547,10 +550,11 @@ func TestIndexHas(t *testing.T) {
 				},
 				PackID: packID,
 			}
-			idx.Store(pb)
+			blobs = append(blobs, pb.Blob)
 			tests = append(tests, pb)
 			pos += length
 		}
+		idx.StorePack(packID, blobs)
 	}
 	for _, testBlob := range tests {

View File

@@ -57,12 +57,12 @@ func TestMasterIndex(t *testing.T) {
 	}
 	idx1 := repository.NewIndex()
-	idx1.Store(blob1)
-	idx1.Store(blob12a)
+	idx1.StorePack(blob1.PackID, []restic.Blob{blob1.Blob})
+	idx1.StorePack(blob12a.PackID, []restic.Blob{blob12a.Blob})
 	idx2 := repository.NewIndex()
-	idx2.Store(blob2)
-	idx2.Store(blob12b)
+	idx2.StorePack(blob2.PackID, []restic.Blob{blob2.Blob})
+	idx2.StorePack(blob12b.PackID, []restic.Blob{blob12b.Blob})
 	mIdx := repository.NewMasterIndex()
 	mIdx.Insert(idx1)
@@ -154,10 +154,10 @@ func TestMasterMergeFinalIndexes(t *testing.T) {
 	}
 	idx1 := repository.NewIndex()
-	idx1.Store(blob1)
+	idx1.StorePack(blob1.PackID, []restic.Blob{blob1.Blob})
 	idx2 := repository.NewIndex()
-	idx2.Store(blob2)
+	idx2.StorePack(blob2.PackID, []restic.Blob{blob2.Blob})
 	mIdx := repository.NewMasterIndex()
 	mIdx.Insert(idx1)
@@ -191,8 +191,8 @@ func TestMasterMergeFinalIndexes(t *testing.T) {
 	// merge another index containing identical blobs
 	idx3 := repository.NewIndex()
-	idx3.Store(blob1)
-	idx3.Store(blob2)
+	idx3.StorePack(blob1.PackID, []restic.Blob{blob1.Blob})
+	idx3.StorePack(blob2.PackID, []restic.Blob{blob2.Blob})
 	mIdx.Insert(idx3)
 	finalIndexes = mIdx.FinalizeNotFinalIndexes()

View File

@@ -362,13 +362,12 @@ func benchmarkLoadIndex(b *testing.B, version uint) {
 	idx := repository.NewIndex()
 	for i := 0; i < 5000; i++ {
-		idx.Store(restic.PackedBlob{
-			Blob: restic.Blob{
+		idx.StorePack(restic.NewRandomID(), []restic.Blob{
+			{
 				BlobHandle: restic.NewRandomBlobHandle(),
 				Length:     1234,
 				Offset:     1235,
 			},
-			PackID: restic.NewRandomID(),
 		})
 	}