package index

import (
	"bytes"
	"context"
	"fmt"
	"runtime"
	"sync"

	"github.com/restic/restic/internal/debug"
	"github.com/restic/restic/internal/restic"
	"github.com/restic/restic/internal/ui/progress"

	"golang.org/x/sync/errgroup"
)

// MasterIndex is a collection of indexes and IDs of chunks that are in the process of being saved.
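// The first entry of idx is kept finalized and serves as the merge target for
// MergeFinalIndexes; pendingBlobs records blobs announced via AddPending whose
// pack has not yet been stored via StorePack.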
type MasterIndex struct {
	idx          []*Index
	pendingBlobs restic.BlobSet
	idxMutex     sync.RWMutex
	compress     bool
}

// NewMasterIndex creates a new master index.
func NewMasterIndex() *MasterIndex {
	mi := &MasterIndex{pendingBlobs: restic.NewBlobSet()}
	mi.clear()
	return mi
}

func (mi *MasterIndex) clear() {
	// Always add an empty final index, such that MergeFinalIndexes can merge into this.
	mi.idx = []*Index{NewIndex()}
	mi.idx[0].Finalize()
}

// MarkCompressed marks the index as using compression; the flag is passed to
// IndexFull when deciding whether an in-memory index is considered full.
func (mi *MasterIndex) MarkCompressed() {
	mi.compress = true
}

// Lookup queries all known Indexes for the ID and returns all matches.
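//
// A typical query might look like this (illustrative sketch; blobID is assumed
// to be a restic.ID obtained elsewhere):
//
//	pbs := mi.Lookup(restic.BlobHandle{ID: blobID, Type: restic.DataBlob})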
func (mi *MasterIndex) Lookup(bh restic.BlobHandle) (pbs []restic.PackedBlob) {
	mi.idxMutex.RLock()
	defer mi.idxMutex.RUnlock()

	for _, idx := range mi.idx {
		pbs = idx.Lookup(bh, pbs)
	}

	return pbs
}

// LookupSize queries all known Indexes for the ID and returns the first match.
func (mi *MasterIndex) LookupSize(bh restic.BlobHandle) (uint, bool) {
	mi.idxMutex.RLock()
	defer mi.idxMutex.RUnlock()

	for _, idx := range mi.idx {
		if size, found := idx.LookupSize(bh); found {
			return size, found
		}
	}

	return 0, false
}

// AddPending adds a given blob to the list of pending blobs.
// Before doing so, it checks whether the blob is already known.
// It returns true if the blob was added and false if it was already known.
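// The check and the insert happen under a single write lock, so two concurrent
// callers can never both claim the same blob.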
func (mi *MasterIndex) AddPending(bh restic.BlobHandle) bool {
	mi.idxMutex.Lock()
	defer mi.idxMutex.Unlock()

	// Check if blob is pending or in index
	if mi.pendingBlobs.Has(bh) {
		return false
	}

	for _, idx := range mi.idx {
		if idx.Has(bh) {
			return false
		}
	}

	// really not known -> insert
	mi.pendingBlobs.Insert(bh)
	return true
}

// Has queries all known Indexes for the ID and returns the first match.
// Also returns true if the ID is pending.
func (mi *MasterIndex) Has(bh restic.BlobHandle) bool {
	mi.idxMutex.RLock()
	defer mi.idxMutex.RUnlock()

	// also return true if blob is pending
	if mi.pendingBlobs.Has(bh) {
		return true
	}

	for _, idx := range mi.idx {
		if idx.Has(bh) {
			return true
		}
	}

	return false
}

// IDs returns the IDs of all indexes contained in the index.
func (mi *MasterIndex) IDs() restic.IDSet {
	mi.idxMutex.RLock()
	defer mi.idxMutex.RUnlock()

	ids := restic.NewIDSet()
	for _, idx := range mi.idx {
		if !idx.Final() {
			continue
		}
		indexIDs, err := idx.IDs()
		if err != nil {
			debug.Log("not using index, ID() returned error %v", err)
			continue
		}
		for _, id := range indexIDs {
			ids.Insert(id)
		}
	}
	return ids
}

// Packs returns all packs that are covered by the index.
// If packBlacklist is given, those packs are only contained in the
// resulting IDSet if they are contained in a non-final (newly written) index.
func (mi *MasterIndex) Packs(packBlacklist restic.IDSet) restic.IDSet {
	mi.idxMutex.RLock()
	defer mi.idxMutex.RUnlock()

	packs := restic.NewIDSet()
	for _, idx := range mi.idx {
		idxPacks := idx.Packs()
		if idx.final && len(packBlacklist) > 0 {
			idxPacks = idxPacks.Sub(packBlacklist)
		}
		packs.Merge(idxPacks)
	}

	return packs
}

// Insert adds a new index to the MasterIndex.
func (mi *MasterIndex) Insert(idx *Index) {
	mi.idxMutex.Lock()
	defer mi.idxMutex.Unlock()

	mi.idx = append(mi.idx, idx)
}

// StorePack remembers the id and pack in the index.
func (mi *MasterIndex) StorePack(id restic.ID, blobs []restic.Blob) {
	mi.idxMutex.Lock()
	defer mi.idxMutex.Unlock()

	// delete blobs from pending
	for _, blob := range blobs {
		mi.pendingBlobs.Delete(restic.BlobHandle{Type: blob.Type, ID: blob.ID})
	}

	for _, idx := range mi.idx {
		if !idx.Final() {
			idx.StorePack(id, blobs)
			return
		}
	}
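
	// no writable (non-final) index exists, so start a new one for this pack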
	newIdx := NewIndex()
	newIdx.StorePack(id, blobs)
	mi.idx = append(mi.idx, newIdx)
}

// finalizeNotFinalIndexes finalizes all indexes that
// have not yet been saved and returns that list.
func (mi *MasterIndex) finalizeNotFinalIndexes() []*Index {
	mi.idxMutex.Lock()
	defer mi.idxMutex.Unlock()

	var list []*Index

	for _, idx := range mi.idx {
		if !idx.Final() {
			idx.Finalize()
			list = append(list, idx)
		}
	}

	debug.Log("return %d indexes", len(list))
	return list
}

// finalizeFullIndexes finalizes all indexes that are full and returns that list.
func (mi *MasterIndex) finalizeFullIndexes() []*Index {
	mi.idxMutex.Lock()
	defer mi.idxMutex.Unlock()

	var list []*Index

	debug.Log("checking %d indexes", len(mi.idx))
	for _, idx := range mi.idx {
		if idx.Final() {
			continue
		}

		if IndexFull(idx, mi.compress) {
			debug.Log("index %p is full", idx)
			idx.Finalize()
			list = append(list, idx)
		} else {
			debug.Log("index %p not full", idx)
		}
	}

	debug.Log("return %d indexes", len(list))
	return list
}

// Each runs fn on all blobs known to the index. When the context is cancelled,
// the index iteration returns immediately. This blocks any modification of the index.
func (mi *MasterIndex) Each(ctx context.Context, fn func(restic.PackedBlob)) error {
	mi.idxMutex.RLock()
	defer mi.idxMutex.RUnlock()

	for _, idx := range mi.idx {
		if err := idx.Each(ctx, fn); err != nil {
			return err
		}
	}
	return nil
}

// MergeFinalIndexes merges all final indexes together.
// After calling, there will be only one big final index in MasterIndex
// containing all final index contents.
// Indexes that are not final are left untouched.
// This merging can only be called after all index files are loaded - as
// removing of superseded index contents is only possible for unmerged indexes.
func (mi *MasterIndex) MergeFinalIndexes() error {
	mi.idxMutex.Lock()
	defer mi.idxMutex.Unlock()

	// The first index is always final and the one to merge into
	newIdx := mi.idx[:1]
	for i := 1; i < len(mi.idx); i++ {
		idx := mi.idx[i]
		// clear reference in masterindex as it may become stale
		mi.idx[i] = nil
		// do not merge indexes that have no id set
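		// (an index only receives its ID when it is saved to the repository,
		// see SaveIndex, so an index without an ID has not been written yet)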
		ids, _ := idx.IDs()
		if !idx.Final() || len(ids) == 0 {
			newIdx = append(newIdx, idx)
		} else {
			err := mi.idx[0].merge(idx)
			if err != nil {
				return fmt.Errorf("MergeFinalIndexes: %w", err)
			}
		}
	}
	mi.idx = newIdx

	return nil
}
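
// Load loads all index files from the repository into the MasterIndex and then
// merges the final indexes. The progress counter p and the callback cb may be nil.
// A minimal call might look like this (sketch; assumes repo satisfies
// restic.ListerLoaderUnpacked):
//
//	mi := NewMasterIndex()
//	if err := mi.Load(ctx, repo, nil, nil); err != nil {
//		return err
//	}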
func (mi *MasterIndex) Load(ctx context.Context, r restic.ListerLoaderUnpacked, p *progress.Counter, cb func(id restic.ID, idx *Index, oldFormat bool, err error) error) error {
	indexList, err := restic.MemorizeList(ctx, r, restic.IndexFile)
	if err != nil {
		return err
	}

	if p != nil {
		var numIndexFiles uint64
		err := indexList.List(ctx, restic.IndexFile, func(_ restic.ID, _ int64) error {
			numIndexFiles++
			return nil
		})
		if err != nil {
			return err
		}
		p.SetMax(numIndexFiles)
		defer p.Done()
	}

	err = ForAllIndexes(ctx, indexList, r, func(id restic.ID, idx *Index, oldFormat bool, err error) error {
		if p != nil {
			p.Add(1)
		}
		if cb != nil {
			err = cb(id, idx, oldFormat, err)
		}
		if err != nil {
			return err
		}
		// special case to allow check to ignore index loading errors
		if idx == nil {
			return nil
		}
		mi.Insert(idx)
		return nil
	})

	if err != nil {
		return err
	}

	return mi.MergeFinalIndexes()
}

// MasterIndexRewriteOpts bundles the optional progress-reporting hooks used by Rewrite.
type MasterIndexRewriteOpts struct {
	SaveProgress   *progress.Counter
	DeleteProgress func() *progress.Counter
	DeleteReport   func(id restic.ID, err error)
}

// Rewrite removes packs whose ID is in excludePacks from all known indexes.
// It also removes the rewritten index files and those listed in extraObsolete.
// If oldIndexes is not nil, then only the indexes in this set are processed.
// This is used by repair index to only rewrite and delete the old indexes.
//
// Must not be called concurrently to any other MasterIndex operation.
func (mi *MasterIndex) Rewrite(ctx context.Context, repo restic.Unpacked, excludePacks restic.IDSet, oldIndexes restic.IDSet, extraObsolete restic.IDs, opts MasterIndexRewriteOpts) error {
	for _, idx := range mi.idx {
		if !idx.Final() {
			panic("internal error - index must be saved before calling MasterIndex.Rewrite")
		}
	}

	var indexes restic.IDSet
	if oldIndexes != nil {
		// repair index adds new index entries for already existing pack files;
		// only remove the old (possibly broken) entries by only processing old indexes
		indexes = oldIndexes
	} else {
		indexes = mi.IDs()
	}

	p := opts.SaveProgress
	p.SetMax(uint64(len(indexes)))

	// reset state which is not necessary for Rewrite and just consumes a lot of memory;
	// the index state would be invalid after Rewrite completes anyway
	mi.clear()
	runtime.GC()

	// copy excludePacks to prevent unintended side effects
	excludePacks = excludePacks.Clone()
	debug.Log("start rebuilding index of %d indexes, excludePacks: %v", len(indexes), excludePacks)
	wg, wgCtx := errgroup.WithContext(ctx)

	idxCh := make(chan restic.ID)
	wg.Go(func() error {
		defer close(idxCh)
		for id := range indexes {
			select {
			case idxCh <- id:
			case <-wgCtx.Done():
				return wgCtx.Err()
			}
		}
		return nil
	})

	var rewriteWg sync.WaitGroup
	type rewriteTask struct {
		idx       *Index
		oldFormat bool
	}
	rewriteCh := make(chan rewriteTask)
	loader := func() error {
		defer rewriteWg.Done()
		for id := range idxCh {
			buf, err := repo.LoadUnpacked(wgCtx, restic.IndexFile, id)
			if err != nil {
				return fmt.Errorf("LoadUnpacked(%v): %w", id.Str(), err)
			}
			idx, oldFormat, err := DecodeIndex(buf, id)
			if err != nil {
				return err
			}

			select {
			case rewriteCh <- rewriteTask{idx, oldFormat}:
			case <-wgCtx.Done():
				return wgCtx.Err()
			}
		}
		return nil
	}
	// loading an index can take quite some time such that this can be both CPU- or IO-bound
	loaderCount := int(repo.Connections()) + runtime.GOMAXPROCS(0)
	// run the loader workers on idxCh
	for i := 0; i < loaderCount; i++ {
		rewriteWg.Add(1)
		wg.Go(loader)
	}
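
	// close rewriteCh once all loaders have finished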
	wg.Go(func() error {
		rewriteWg.Wait()
		close(rewriteCh)
		return nil
	})
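
	// obsolete collects the IDs of all index files that have been superseded
	// (plus extraObsolete); they are deleted after the new indexes are saved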
	obsolete := restic.NewIDSet(extraObsolete...)
	saveCh := make(chan *Index)

	wg.Go(func() error {
		defer close(saveCh)
		newIndex := NewIndex()
		for task := range rewriteCh {
			// always rewrite indexes that use the old format, that include a pack
			// which must be removed, or that are not full
			if !task.oldFormat && len(task.idx.Packs().Intersect(excludePacks)) == 0 && IndexFull(task.idx, mi.compress) {
				// make sure that each pack is only stored exactly once in the index
				excludePacks.Merge(task.idx.Packs())
				// index is already up to date
				p.Add(1)
				continue
			}

			ids, err := task.idx.IDs()
			if err != nil || len(ids) != 1 {
				panic("internal error, index has no ID")
			}
			obsolete.Merge(restic.NewIDSet(ids...))

			for pbs := range task.idx.EachByPack(wgCtx, excludePacks) {
				newIndex.StorePack(pbs.PackID, pbs.Blobs)
				if IndexFull(newIndex, mi.compress) {
					select {
					case saveCh <- newIndex:
					case <-wgCtx.Done():
						return wgCtx.Err()
					}
					newIndex = NewIndex()
				}
			}
			if wgCtx.Err() != nil {
				return wgCtx.Err()
			}
			// make sure that each pack is only stored exactly once in the index
			excludePacks.Merge(task.idx.Packs())
			p.Add(1)
		}

		select {
		case saveCh <- newIndex:
		case <-wgCtx.Done():
		}
		return nil
	})

	// a worker receives an index from saveCh, finalizes it and saves it
	worker := func() error {
		for idx := range saveCh {
			idx.Finalize()
			if _, err := SaveIndex(wgCtx, repo, idx); err != nil {
				return err
			}
		}
		return nil
	}

	// encoding an index can take quite some time such that this can be both CPU- or IO-bound
	workerCount := int(repo.Connections()) + runtime.GOMAXPROCS(0)
	// run the save workers on saveCh
	for i := 0; i < workerCount; i++ {
		wg.Go(worker)
	}
	err := wg.Wait()
	p.Done()
	if err != nil {
		return fmt.Errorf("failed to rewrite indexes: %w", err)
	}
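
	// now that the new indexes have been saved, delete the superseded index files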
	p = nil
	if opts.DeleteProgress != nil {
		p = opts.DeleteProgress()
	}
	defer p.Done()
	return restic.ParallelRemove(ctx, repo, obsolete, restic.IndexFile, func(id restic.ID, err error) error {
		if opts.DeleteReport != nil {
			opts.DeleteReport(id, err)
		}
		return err
	}, p)
}

// SaveFallback saves all known indexes to index files, leaving out any
// packs whose ID is contained in excludePacks from finalized indexes.
// It is only intended for use by prune with the UnsafeRecovery option.
//
// Must not be called concurrently to any other MasterIndex operation.
func (mi *MasterIndex) SaveFallback(ctx context.Context, repo restic.SaverRemoverUnpacked, excludePacks restic.IDSet, p *progress.Counter) error {
	p.SetMax(uint64(len(mi.Packs(excludePacks))))

	mi.idxMutex.Lock()
	defer mi.idxMutex.Unlock()

	debug.Log("start rebuilding index of %d indexes, excludePacks: %v", len(mi.idx), excludePacks)

	obsolete := restic.NewIDSet()
	wg, wgCtx := errgroup.WithContext(ctx)

	ch := make(chan *Index)
	wg.Go(func() error {
		defer close(ch)
		newIndex := NewIndex()
		for _, idx := range mi.idx {
			if idx.Final() {
				ids, err := idx.IDs()
				if err != nil {
					panic("internal error - finalized index without ID")
				}
				debug.Log("adding index ids %v to supersedes field", ids)
				obsolete.Merge(restic.NewIDSet(ids...))
			}

			for pbs := range idx.EachByPack(wgCtx, excludePacks) {
				newIndex.StorePack(pbs.PackID, pbs.Blobs)
				p.Add(1)
				if IndexFull(newIndex, mi.compress) {
					select {
					case ch <- newIndex:
					case <-wgCtx.Done():
						return wgCtx.Err()
					}
					newIndex = NewIndex()
				}
			}
			if wgCtx.Err() != nil {
				return wgCtx.Err()
			}
		}

		select {
		case ch <- newIndex:
		case <-wgCtx.Done():
		}
		return nil
	})

	// a worker receives an index from ch, and saves the index
	worker := func() error {
		for idx := range ch {
			idx.Finalize()
			if _, err := SaveIndex(wgCtx, repo, idx); err != nil {
				return err
			}
		}
		return nil
	}

	// keep concurrency bounded as we're on a fallback path
	workerCount := int(repo.Connections())
	// run workers on ch
	for i := 0; i < workerCount; i++ {
		wg.Go(worker)
	}
	err := wg.Wait()
	p.Done()
	// the index no longer matches the stored state
	mi.clear()

	return err
}

// SaveIndex saves an index in the repository.
func SaveIndex(ctx context.Context, repo restic.SaverUnpacked, index *Index) (restic.ID, error) {
	buf := bytes.NewBuffer(nil)

	err := index.Encode(buf)
	if err != nil {
		return restic.ID{}, err
	}

	id, err := repo.SaveUnpacked(ctx, restic.IndexFile, buf.Bytes())
	ierr := index.SetID(id)
	if ierr != nil {
		// logic bug
		panic(ierr)
	}
	return id, err
}

// saveIndex saves all indexes in the backend.
func (mi *MasterIndex) saveIndex(ctx context.Context, r restic.SaverUnpacked, indexes ...*Index) error {
	for i, idx := range indexes {
		debug.Log("Saving index %d", i)

		sid, err := SaveIndex(ctx, r, idx)
		if err != nil {
			return err
		}

		debug.Log("Saved index %d as %v", i, sid)
	}

	return mi.MergeFinalIndexes()
}

// SaveIndex saves all new indexes in the backend.
func (mi *MasterIndex) SaveIndex(ctx context.Context, r restic.SaverUnpacked) error {
	return mi.saveIndex(ctx, r, mi.finalizeNotFinalIndexes()...)
}

// SaveFullIndex saves all full indexes in the backend.
func (mi *MasterIndex) SaveFullIndex(ctx context.Context, r restic.SaverUnpacked) error {
	return mi.saveIndex(ctx, r, mi.finalizeFullIndexes()...)
}

// ListPacks returns the blobs of the specified pack files grouped by pack file.
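//
// Example (illustrative sketch; ctx and packs are assumed to exist):
//
//	for pbs := range mi.ListPacks(ctx, packs) {
//		fmt.Println(pbs.PackID, len(pbs.Blobs))
//	}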
func (mi *MasterIndex) ListPacks(ctx context.Context, packs restic.IDSet) <-chan restic.PackBlobs {
	out := make(chan restic.PackBlobs)
	go func() {
		defer close(out)
		// only resort a part of the index to keep the memory overhead bounded
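		// packs are processed in 16 batches, selected by the low four bits of the
		// first byte of the pack ID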
		for i := byte(0); i < 16; i++ {
			packBlob := make(map[restic.ID][]restic.Blob)
			for pack := range packs {
				if pack[0]&0xf == i {
					packBlob[pack] = nil
				}
			}
			if len(packBlob) == 0 {
				continue
			}
			err := mi.Each(ctx, func(pb restic.PackedBlob) {
				if packs.Has(pb.PackID) && pb.PackID[0]&0xf == i {
					packBlob[pb.PackID] = append(packBlob[pb.PackID], pb.Blob)
				}
			})
			if err != nil {
				return
			}

			// pass on packs
			for packID, pbs := range packBlob {
				// allow GC
				packBlob[packID] = nil
				select {
				case out <- restic.PackBlobs{PackID: packID, Blobs: pbs}:
				case <-ctx.Done():
					return
				}
			}
		}
	}()
	return out
}