2015-06-28 22:22:25 +00:00
|
|
|
package checker
|
|
|
|
|
|
|
|
import (
|
2015-07-11 14:00:49 +00:00
|
|
|
"errors"
|
|
|
|
"fmt"
|
2015-07-11 22:25:42 +00:00
|
|
|
"sync"
|
2015-06-28 22:22:25 +00:00
|
|
|
|
2015-07-11 14:00:49 +00:00
|
|
|
"github.com/restic/restic"
|
2015-06-28 22:22:25 +00:00
|
|
|
"github.com/restic/restic/backend"
|
|
|
|
"github.com/restic/restic/debug"
|
|
|
|
"github.com/restic/restic/repository"
|
|
|
|
)
|
|
|
|
|
|
|
|
// Checker runs various checks on a repository. It is advisable to create an
// exclusive Lock in the repository before running any checks.
//
// A Checker only tests for internal errors within the data structures of the
// repository (e.g. missing blobs), and needs a valid Repository to work on.
type Checker struct {
	// packs holds the IDs of all pack files referenced by the loaded indexes.
	packs map[backend.ID]struct{}
	// blobs holds the IDs of all blobs referenced by the loaded indexes.
	blobs map[backend.ID]struct{}

	// blobRefs counts how often each blob is referenced by trees; the
	// embedded mutex guards M because tree checking runs in parallel
	// workers. A count of zero means the blob was never referenced.
	blobRefs struct {
		sync.Mutex
		M map[backend.ID]uint
	}

	// indexes maps the ID of each index file to the loaded index.
	indexes map[backend.ID]*repository.Index
	// orphanedPacks collects packs found in the backend that no index
	// references; filled by Packs().
	orphanedPacks backend.IDs

	// masterIndex is the union of all loaded indexes.
	masterIndex *repository.Index

	// repo is the repository being checked.
	repo *repository.Repository
}
|
|
|
|
|
|
|
|
// New returns a new checker which runs on repo.
|
|
|
|
func New(repo *repository.Repository) *Checker {
|
2015-07-12 14:42:22 +00:00
|
|
|
c := &Checker{
|
2015-07-25 15:05:45 +00:00
|
|
|
packs: make(map[backend.ID]struct{}),
|
|
|
|
blobs: make(map[backend.ID]struct{}),
|
2015-06-28 22:22:25 +00:00
|
|
|
masterIndex: repository.NewIndex(),
|
2015-07-25 15:05:45 +00:00
|
|
|
indexes: make(map[backend.ID]*repository.Index),
|
2015-06-28 22:22:25 +00:00
|
|
|
repo: repo,
|
|
|
|
}
|
2015-07-12 14:42:22 +00:00
|
|
|
|
2015-07-25 15:05:45 +00:00
|
|
|
c.blobRefs.M = make(map[backend.ID]uint)
|
2015-07-12 14:42:22 +00:00
|
|
|
|
|
|
|
return c
|
2015-06-28 22:22:25 +00:00
|
|
|
}
|
|
|
|
|
2015-07-12 14:42:22 +00:00
|
|
|
// defaultParallelism is the number of concurrent workers used when
// listing, loading and testing files in the backend.
const defaultParallelism = 40
|
2015-06-28 22:22:25 +00:00
|
|
|
|
|
|
|
// LoadIndex loads all index files.
func (c *Checker) LoadIndex() error {
	debug.Log("LoadIndex", "Start")

	// indexRes couples a loaded index with the (string form of the) ID of
	// the index file it was loaded from.
	type indexRes struct {
		Index *repository.Index
		ID    string
	}

	indexCh := make(chan indexRes)

	// worker loads one index file and offers the result on indexCh. The
	// done parameter here is supplied by FilesInParallel (it shadows the
	// local done channel declared further down) and aborts the send when
	// the parallel run is cancelled.
	worker := func(id string, done <-chan struct{}) error {
		debug.Log("LoadIndex", "worker got index %v", id)
		idx, err := repository.LoadIndex(c.repo, id)
		if err != nil {
			return err
		}

		select {
		case indexCh <- indexRes{Index: idx, ID: id}:
		case <-done:
		}

		return nil
	}

	var perr error
	go func() {
		defer close(indexCh)
		debug.Log("LoadIndex", "start loading indexes in parallel")
		// perr is only written before indexCh is closed and only read
		// after the range over indexCh below finishes.
		perr = repository.FilesInParallel(c.repo.Backend(), backend.Index, defaultParallelism, worker)
		debug.Log("LoadIndex", "loading indexes finished, error: %v", perr)
	}()

	done := make(chan struct{})
	defer close(done)

	// Merge every loaded index into the master index and record all packs
	// and blobs it references.
	for res := range indexCh {
		debug.Log("LoadIndex", "process index %v", res.ID)
		id, err := backend.ParseID(res.ID)
		if err != nil {
			return err
		}

		c.indexes[id] = res.Index
		c.masterIndex.Merge(res.Index)

		debug.Log("LoadIndex", "process blobs")
		cnt := 0
		for blob := range res.Index.Each(done) {
			c.packs[blob.PackID] = struct{}{}
			c.blobs[blob.ID] = struct{}{}
			// Start every blob with a zero reference count; tree
			// traversal increments it later.
			c.blobRefs.M[blob.ID] = 0
			cnt++
		}

		debug.Log("LoadIndex", "%d blobs processed", cnt)
	}

	debug.Log("LoadIndex", "done, error %v", perr)

	c.repo.SetIndex(c.masterIndex)

	return perr
}
|
|
|
|
|
2015-07-11 14:00:49 +00:00
|
|
|
// PackError describes an error with a specific pack.
type PackError struct {
	ID       backend.ID // ID of the pack the error refers to
	Orphaned bool       // set if the pack is not referenced by any index
	Err      error      // the underlying error
}
|
|
|
|
|
|
|
|
func (e PackError) Error() string {
|
2015-07-12 15:09:48 +00:00
|
|
|
return "pack " + e.ID.String() + ": " + e.Err.Error()
|
2015-07-11 14:00:49 +00:00
|
|
|
}
|
|
|
|
|
2015-07-25 15:05:45 +00:00
|
|
|
func packIDTester(repo *repository.Repository, inChan <-chan backend.ID, errChan chan<- error, wg *sync.WaitGroup, done <-chan struct{}) {
|
2015-07-11 22:25:42 +00:00
|
|
|
debug.Log("Checker.testPackID", "worker start")
|
|
|
|
defer debug.Log("Checker.testPackID", "worker done")
|
2015-07-11 14:00:49 +00:00
|
|
|
|
2015-07-11 22:25:42 +00:00
|
|
|
defer wg.Done()
|
|
|
|
|
|
|
|
for id := range inChan {
|
2015-07-25 15:05:45 +00:00
|
|
|
ok, err := repo.Backend().Test(backend.Data, id.String())
|
2015-07-11 14:00:49 +00:00
|
|
|
if err != nil {
|
2015-07-25 15:05:45 +00:00
|
|
|
err = PackError{ID: id, Err: err}
|
2015-07-11 22:25:42 +00:00
|
|
|
} else {
|
|
|
|
if !ok {
|
2015-07-25 15:05:45 +00:00
|
|
|
err = PackError{ID: id, Err: errors.New("does not exist")}
|
2015-07-11 22:25:42 +00:00
|
|
|
}
|
2015-07-11 14:00:49 +00:00
|
|
|
}
|
|
|
|
|
2015-07-11 22:25:42 +00:00
|
|
|
if err != nil {
|
2015-07-25 15:05:45 +00:00
|
|
|
debug.Log("Checker.testPackID", "error checking for pack %s: %v", id.Str(), err)
|
2015-07-11 22:25:42 +00:00
|
|
|
select {
|
|
|
|
case <-done:
|
|
|
|
return
|
|
|
|
case errChan <- err:
|
|
|
|
}
|
|
|
|
|
2015-07-11 14:00:49 +00:00
|
|
|
continue
|
|
|
|
}
|
2015-07-11 22:25:42 +00:00
|
|
|
|
2015-07-25 15:05:45 +00:00
|
|
|
debug.Log("Checker.testPackID", "pack %s exists", id.Str())
|
2015-07-11 14:00:49 +00:00
|
|
|
}
|
2015-07-11 22:25:42 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Packs checks that all packs referenced in the index are still available and
|
2015-07-11 23:44:19 +00:00
|
|
|
// there are no packs that aren't in an index. errChan is closed after all
|
|
|
|
// packs have been checked.
|
|
|
|
func (c *Checker) Packs(errChan chan<- error, done <-chan struct{}) {
|
|
|
|
defer close(errChan)
|
|
|
|
|
2015-07-11 22:25:42 +00:00
|
|
|
debug.Log("Checker.Packs", "checking for %d packs", len(c.packs))
|
2015-07-25 15:05:45 +00:00
|
|
|
seenPacks := make(map[backend.ID]struct{})
|
2015-07-11 14:00:49 +00:00
|
|
|
|
2015-07-11 22:25:42 +00:00
|
|
|
var workerWG sync.WaitGroup
|
|
|
|
|
2015-07-25 15:05:45 +00:00
|
|
|
IDChan := make(chan backend.ID)
|
2015-07-11 22:25:42 +00:00
|
|
|
for i := 0; i < defaultParallelism; i++ {
|
|
|
|
workerWG.Add(1)
|
|
|
|
go packIDTester(c.repo, IDChan, errChan, &workerWG, done)
|
|
|
|
}
|
|
|
|
|
|
|
|
for id := range c.packs {
|
|
|
|
seenPacks[id] = struct{}{}
|
|
|
|
IDChan <- id
|
|
|
|
}
|
|
|
|
close(IDChan)
|
|
|
|
|
|
|
|
debug.Log("Checker.Packs", "waiting for %d workers to terminate", defaultParallelism)
|
|
|
|
workerWG.Wait()
|
|
|
|
debug.Log("Checker.Packs", "workers terminated")
|
|
|
|
|
2015-07-11 14:00:49 +00:00
|
|
|
for id := range c.repo.List(backend.Data, done) {
|
2015-07-12 14:42:22 +00:00
|
|
|
debug.Log("Checker.Packs", "check data blob %v", id.Str())
|
2015-07-25 15:05:45 +00:00
|
|
|
if _, ok := seenPacks[id]; !ok {
|
2015-07-12 15:09:48 +00:00
|
|
|
c.orphanedPacks = append(c.orphanedPacks, id)
|
2015-07-11 23:44:19 +00:00
|
|
|
select {
|
|
|
|
case <-done:
|
|
|
|
return
|
2015-07-12 15:09:48 +00:00
|
|
|
case errChan <- PackError{ID: id, Orphaned: true, Err: errors.New("not referenced in any index")}:
|
2015-07-11 23:44:19 +00:00
|
|
|
}
|
2015-07-11 14:00:49 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Error is an error that occurred while checking a repository.
type Error struct {
	TreeID *backend.ID // ID of the tree the error occurred in, nil if unknown
	BlobID *backend.ID // ID of the affected blob, nil if the error is not blob-specific
	Err    error       // the underlying error
}
|
|
|
|
|
|
|
|
func (e Error) Error() string {
|
|
|
|
if e.BlobID != nil && e.TreeID != nil {
|
|
|
|
msg := "tree " + e.TreeID.String()
|
|
|
|
msg += ", blob " + e.BlobID.String()
|
|
|
|
msg += ": " + e.Err.Error()
|
|
|
|
return msg
|
|
|
|
}
|
|
|
|
|
|
|
|
if e.TreeID != nil {
|
|
|
|
return "tree " + e.TreeID.String() + ": " + e.Err.Error()
|
|
|
|
}
|
|
|
|
|
|
|
|
return e.Err.Error()
|
|
|
|
}
|
|
|
|
|
|
|
|
func loadTreeFromSnapshot(repo *repository.Repository, id backend.ID) (backend.ID, error) {
|
|
|
|
sn, err := restic.LoadSnapshot(repo, id)
|
|
|
|
if err != nil {
|
|
|
|
debug.Log("Checker.loadTreeFromSnapshot", "error loading snapshot %v: %v", id.Str(), err)
|
2015-07-25 15:05:45 +00:00
|
|
|
return backend.ID{}, err
|
2015-07-11 14:00:49 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if sn.Tree == nil {
|
|
|
|
debug.Log("Checker.loadTreeFromSnapshot", "snapshot %v has no tree", id.Str())
|
2015-07-25 15:05:45 +00:00
|
|
|
return backend.ID{}, fmt.Errorf("snapshot %v has no tree", id)
|
2015-07-11 14:00:49 +00:00
|
|
|
}
|
|
|
|
|
2015-07-25 15:05:45 +00:00
|
|
|
return *sn.Tree, nil
|
2015-07-11 14:00:49 +00:00
|
|
|
}
|
|
|
|
|
2015-07-12 14:42:22 +00:00
|
|
|
// loadSnapshotTreeIDs loads all snapshots from backend and returns the tree IDs.
|
|
|
|
func loadSnapshotTreeIDs(repo *repository.Repository) (backend.IDs, []error) {
|
|
|
|
var trees struct {
|
|
|
|
IDs backend.IDs
|
|
|
|
sync.Mutex
|
|
|
|
}
|
|
|
|
|
|
|
|
var errs struct {
|
|
|
|
errs []error
|
|
|
|
sync.Mutex
|
|
|
|
}
|
2015-07-11 14:00:49 +00:00
|
|
|
|
2015-07-12 14:42:22 +00:00
|
|
|
snapshotWorker := func(strID string, done <-chan struct{}) error {
|
|
|
|
id, err := backend.ParseID(strID)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2015-07-11 14:00:49 +00:00
|
|
|
|
2015-07-12 14:42:22 +00:00
|
|
|
debug.Log("Checker.Snaphots", "load snapshot %v", id.Str())
|
2015-07-11 14:00:49 +00:00
|
|
|
|
2015-07-12 14:42:22 +00:00
|
|
|
treeID, err := loadTreeFromSnapshot(repo, id)
|
2015-07-11 14:00:49 +00:00
|
|
|
if err != nil {
|
2015-07-12 14:42:22 +00:00
|
|
|
errs.Lock()
|
|
|
|
errs.errs = append(errs.errs, err)
|
|
|
|
errs.Unlock()
|
|
|
|
return nil
|
2015-07-11 14:00:49 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
debug.Log("Checker.Snaphots", "snapshot %v has tree %v", id.Str(), treeID.Str())
|
2015-07-12 14:42:22 +00:00
|
|
|
trees.Lock()
|
|
|
|
trees.IDs = append(trees.IDs, treeID)
|
|
|
|
trees.Unlock()
|
|
|
|
|
|
|
|
return nil
|
2015-07-11 14:00:49 +00:00
|
|
|
}
|
|
|
|
|
2015-07-12 14:42:22 +00:00
|
|
|
err := repository.FilesInParallel(repo.Backend(), backend.Snapshot, defaultParallelism, snapshotWorker)
|
|
|
|
if err != nil {
|
|
|
|
errs.errs = append(errs.errs, err)
|
|
|
|
}
|
|
|
|
|
|
|
|
return trees.IDs, errs.errs
|
|
|
|
}
|
|
|
|
|
|
|
|
// TreeError is returned when loading a tree from the repository failed.
type TreeError struct {
	ID     backend.ID // ID of the tree the errors belong to
	Errors []error    // all errors found while checking the tree
}
|
|
|
|
|
|
|
|
func (e TreeError) Error() string {
|
|
|
|
return fmt.Sprintf("%v: %d errors", e.ID.String(), len(e.Errors))
|
|
|
|
}
|
|
|
|
|
|
|
|
// treeJob is the result of loading a single tree: the tree's ID, the load
// error (if any) and the loaded tree. The fields are embedded so a treeJob
// can be used directly as an ID, error or tree where convenient.
// NOTE(review): Tree is presumably nil when error is non-nil (set from the
// return values of restic.LoadTree in loadTreeWorker) — consumers should
// guard against a nil Tree.
type treeJob struct {
	backend.ID
	error
	*restic.Tree
}
|
|
|
|
|
|
|
|
// loadTreeWorker loads trees from repo and sends them to out.
// It terminates when in is closed or done is closed, and signals wg on exit.
func loadTreeWorker(repo *repository.Repository,
	in <-chan backend.ID, out chan<- treeJob,
	done <-chan struct{}, wg *sync.WaitGroup) {

	defer func() {
		debug.Log("checker.loadTreeWorker", "exiting")
		wg.Done()
	}()

	// The worker alternates between two states: receiving a tree ID and
	// delivering the loaded tree. Since a nil channel blocks forever in a
	// select, inCh/outCh are toggled between nil and the real channel to
	// enable exactly one of the two cases at a time.
	var (
		inCh  = in
		outCh = out
		job   treeJob
	)

	// Start in the "receive" state: nothing to send yet.
	outCh = nil
	for {
		select {
		case <-done:
			return

		case treeID, ok := <-inCh:
			if !ok {
				// Input exhausted; the worker is done.
				return
			}
			debug.Log("checker.loadTreeWorker", "load tree %v", treeID.Str())

			tree, err := restic.LoadTree(repo, treeID)
			debug.Log("checker.loadTreeWorker", "load tree %v (%v) returned err %v", tree, treeID.Str(), err)
			// Switch to the "send" state until the job is delivered.
			job = treeJob{ID: treeID, error: err, Tree: tree}
			outCh = out
			inCh = nil

		case outCh <- job:
			debug.Log("checker.loadTreeWorker", "sent tree %v", job.ID.Str())
			// Job delivered; switch back to the "receive" state.
			outCh = nil
			inCh = in
		}
	}
}
|
|
|
|
|
2015-07-12 14:42:22 +00:00
|
|
|
// checkTreeWorker checks the trees received and sends out errors to errChan.
// It terminates when in is closed or done is closed, and signals wg on exit.
func (c *Checker) checkTreeWorker(in <-chan treeJob, out chan<- TreeError, done <-chan struct{}, wg *sync.WaitGroup) {
	defer func() {
		debug.Log("checker.checkTreeWorker", "exiting")
		wg.Done()
	}()

	// As in loadTreeWorker, inCh/outCh are toggled between nil and the
	// real channel so the select alternates between receiving a job and
	// delivering the resulting TreeError (nil channels block forever).
	var (
		inCh      = in
		outCh     = out
		treeError TreeError
	)

	outCh = nil
	for {
		select {
		case <-done:
			return

		case job, ok := <-inCh:
			if !ok {
				return
			}

			// Bump the reference count for this tree under the lock and
			// detect whether another worker already processed it.
			id := job.ID
			alreadyChecked := false
			c.blobRefs.Lock()
			if c.blobRefs.M[id] > 0 {
				alreadyChecked = true
			}
			c.blobRefs.M[id]++
			debug.Log("checker.checkTreeWorker", "tree %v refcount %d", job.ID.Str(), c.blobRefs.M[id])
			c.blobRefs.Unlock()

			if alreadyChecked {
				// Already handled by another worker; stay in the
				// receive state.
				continue
			}

			debug.Log("checker.checkTreeWorker", "load tree %v", job.ID.Str())

			errs := c.checkTree(job.ID, job.Tree)
			if len(errs) > 0 {
				debug.Log("checker.checkTreeWorker", "checked tree %v: %v errors", job.ID.Str(), len(errs))
				// Switch to the send state until the error is
				// delivered; trees without errors produce no output.
				treeError = TreeError{ID: job.ID, Errors: errs}
				outCh = out
				inCh = nil
			}

		case outCh <- treeError:
			debug.Log("checker.checkTreeWorker", "tree %v: sent %d errors", treeError.ID, len(treeError.Errors))
			outCh = nil
			inCh = in
		}
	}
}
|
|
|
|
|
|
|
|
func filterTrees(backlog backend.IDs, loaderChan chan<- backend.ID, in <-chan treeJob, out chan<- treeJob, done <-chan struct{}) {
|
|
|
|
defer func() {
|
|
|
|
debug.Log("checker.filterTrees", "closing output channels")
|
|
|
|
close(loaderChan)
|
|
|
|
close(out)
|
|
|
|
}()
|
2015-07-11 14:00:49 +00:00
|
|
|
|
2015-07-12 14:42:22 +00:00
|
|
|
var (
|
|
|
|
inCh = in
|
|
|
|
outCh = out
|
|
|
|
loadCh = loaderChan
|
|
|
|
job treeJob
|
|
|
|
nextTreeID backend.ID
|
|
|
|
outstandingLoadTreeJobs = 0
|
|
|
|
)
|
2015-07-11 14:00:49 +00:00
|
|
|
|
2015-07-12 14:42:22 +00:00
|
|
|
outCh = nil
|
|
|
|
loadCh = nil
|
|
|
|
|
|
|
|
for {
|
|
|
|
if loadCh == nil && len(backlog) > 0 {
|
|
|
|
loadCh = loaderChan
|
|
|
|
nextTreeID, backlog = backlog[0], backlog[1:]
|
2015-07-11 14:00:49 +00:00
|
|
|
}
|
|
|
|
|
2015-07-12 14:42:22 +00:00
|
|
|
if loadCh == nil && outCh == nil && outstandingLoadTreeJobs == 0 {
|
|
|
|
debug.Log("checker.filterTrees", "backlog is empty, all channels nil, exiting")
|
|
|
|
return
|
2015-07-11 14:00:49 +00:00
|
|
|
}
|
|
|
|
|
2015-07-12 14:42:22 +00:00
|
|
|
select {
|
|
|
|
case <-done:
|
|
|
|
return
|
2015-07-11 14:00:49 +00:00
|
|
|
|
2015-07-12 14:42:22 +00:00
|
|
|
case loadCh <- nextTreeID:
|
|
|
|
outstandingLoadTreeJobs++
|
|
|
|
loadCh = nil
|
2015-07-11 14:00:49 +00:00
|
|
|
|
2015-07-12 14:42:22 +00:00
|
|
|
case j, ok := <-inCh:
|
|
|
|
if !ok {
|
|
|
|
debug.Log("checker.filterTrees", "input channel closed")
|
|
|
|
inCh = nil
|
|
|
|
in = nil
|
|
|
|
continue
|
2015-07-11 14:00:49 +00:00
|
|
|
}
|
2015-07-12 14:42:22 +00:00
|
|
|
|
|
|
|
outstandingLoadTreeJobs--
|
|
|
|
debug.Log("checker.filterTrees", "input job tree %v", j.ID.Str())
|
|
|
|
|
|
|
|
backlog = append(backlog, j.Tree.Subtrees()...)
|
|
|
|
|
|
|
|
job = j
|
|
|
|
outCh = out
|
|
|
|
inCh = nil
|
|
|
|
|
|
|
|
case outCh <- job:
|
|
|
|
outCh = nil
|
|
|
|
inCh = in
|
2015-07-11 14:00:49 +00:00
|
|
|
}
|
2015-07-12 14:42:22 +00:00
|
|
|
}
|
|
|
|
}
|
2015-07-11 14:00:49 +00:00
|
|
|
|
2015-07-12 14:42:22 +00:00
|
|
|
// Structure checks that for all snapshots all referenced data blobs and
|
|
|
|
// subtrees are available in the index. errChan is closed after all trees have
|
|
|
|
// been traversed.
|
|
|
|
func (c *Checker) Structure(errChan chan<- error, done <-chan struct{}) {
|
|
|
|
defer close(errChan)
|
|
|
|
|
|
|
|
trees, errs := loadSnapshotTreeIDs(c.repo)
|
|
|
|
debug.Log("checker.Structure", "need to check %d trees from snapshots, %d errs returned", len(trees), len(errs))
|
2015-07-11 14:00:49 +00:00
|
|
|
|
2015-07-12 14:42:22 +00:00
|
|
|
for _, err := range errs {
|
|
|
|
select {
|
|
|
|
case <-done:
|
|
|
|
return
|
|
|
|
case errChan <- err:
|
|
|
|
}
|
2015-07-11 14:00:49 +00:00
|
|
|
}
|
|
|
|
|
2015-07-12 14:42:22 +00:00
|
|
|
treeIDChan := make(chan backend.ID)
|
|
|
|
treeJobChan1 := make(chan treeJob)
|
|
|
|
treeJobChan2 := make(chan treeJob)
|
|
|
|
treeErrChan := make(chan TreeError)
|
2015-07-11 14:00:49 +00:00
|
|
|
|
2015-07-12 14:42:22 +00:00
|
|
|
var wg sync.WaitGroup
|
|
|
|
for i := 0; i < defaultParallelism; i++ {
|
|
|
|
wg.Add(2)
|
|
|
|
go loadTreeWorker(c.repo, treeIDChan, treeJobChan1, done, &wg)
|
|
|
|
go c.checkTreeWorker(treeJobChan2, treeErrChan, done, &wg)
|
2015-07-11 14:00:49 +00:00
|
|
|
}
|
|
|
|
|
2015-07-12 14:42:22 +00:00
|
|
|
filterTrees(trees, treeIDChan, treeJobChan1, treeJobChan2, done)
|
|
|
|
|
|
|
|
wg.Wait()
|
|
|
|
}
|
|
|
|
|
|
|
|
func (c *Checker) checkTree(id backend.ID, tree *restic.Tree) (errs []error) {
|
|
|
|
debug.Log("Checker.checkTree", "checking tree %v", id.Str())
|
|
|
|
|
|
|
|
// if _, ok := c.blobs[id2map(id)]; !ok {
|
|
|
|
// errs = append(errs, Error{TreeID: id, Err: errors.New("not found in index")})
|
|
|
|
// }
|
|
|
|
|
|
|
|
// blobs, subtrees, treeErrors := c.tree(id)
|
|
|
|
// if treeErrors != nil {
|
|
|
|
// debug.Log("Checker.trees", "error checking tree %v: %v", id.Str(), treeErrors)
|
|
|
|
// errs = append(errs, treeErrors...)
|
|
|
|
// continue
|
|
|
|
// }
|
|
|
|
|
|
|
|
// treeIDs = append(treeIDs, subtrees...)
|
|
|
|
|
|
|
|
// treesChecked[id2map(id)] = struct{}{}
|
|
|
|
|
|
|
|
var blobs []backend.ID
|
|
|
|
|
2015-07-11 14:00:49 +00:00
|
|
|
for i, node := range tree.Nodes {
|
|
|
|
switch node.Type {
|
|
|
|
case "file":
|
|
|
|
blobs = append(blobs, node.Content...)
|
|
|
|
case "dir":
|
|
|
|
if node.Subtree == nil {
|
2015-07-25 15:05:45 +00:00
|
|
|
errs = append(errs, Error{TreeID: &id, Err: fmt.Errorf("node %d is dir but has no subtree", i)})
|
2015-07-11 14:00:49 +00:00
|
|
|
continue
|
|
|
|
}
|
2015-07-12 14:42:22 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
for _, blobID := range blobs {
|
|
|
|
c.blobRefs.Lock()
|
2015-07-25 15:05:45 +00:00
|
|
|
c.blobRefs.M[blobID]++
|
|
|
|
debug.Log("Checker.checkTree", "blob %v refcount %d", blobID.Str(), c.blobRefs.M[blobID])
|
2015-07-12 14:42:22 +00:00
|
|
|
c.blobRefs.Unlock()
|
2015-07-11 14:00:49 +00:00
|
|
|
|
2015-07-25 15:05:45 +00:00
|
|
|
if _, ok := c.blobs[blobID]; !ok {
|
2015-07-12 14:42:22 +00:00
|
|
|
debug.Log("Checker.trees", "tree %v references blob %v which isn't contained in index", id.Str(), blobID.Str())
|
|
|
|
|
2015-07-25 15:05:45 +00:00
|
|
|
errs = append(errs, Error{TreeID: &id, BlobID: &blobID, Err: errors.New("not found in index")})
|
2015-07-11 14:00:49 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-07-12 14:42:22 +00:00
|
|
|
return errs
|
2015-07-11 14:00:49 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// UnusedBlobs returns all blobs that have never been referenced.
|
|
|
|
func (c *Checker) UnusedBlobs() (blobs backend.IDs) {
|
2015-07-12 14:42:22 +00:00
|
|
|
c.blobRefs.Lock()
|
|
|
|
defer c.blobRefs.Unlock()
|
|
|
|
|
2015-07-11 14:00:49 +00:00
|
|
|
debug.Log("Checker.UnusedBlobs", "checking %d blobs", len(c.blobs))
|
|
|
|
for id := range c.blobs {
|
2015-07-12 14:42:22 +00:00
|
|
|
if c.blobRefs.M[id] == 0 {
|
2015-07-25 15:05:45 +00:00
|
|
|
debug.Log("Checker.UnusedBlobs", "blob %v not not referenced", id.Str())
|
|
|
|
blobs = append(blobs, id)
|
2015-07-11 14:00:49 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return blobs
|
2015-06-28 22:22:25 +00:00
|
|
|
}
|
2015-07-12 15:09:48 +00:00
|
|
|
|
|
|
|
// OrphanedPacks returns a slice of unused packs (only available after Packs() was run).
func (c *Checker) OrphanedPacks() backend.IDs {
	// Populated by Packs() for every pack found in the backend that no
	// index references; empty until then.
	return c.orphanedPacks
}
|