package main

import (
	"fmt"
	"os"
	"restic"
	"restic/backend"
	"restic/debug"
	"restic/index"
	"restic/pack"
	"restic/repository"
	"time"

	"golang.org/x/crypto/ssh/terminal"
)

// CmdPrune implements the 'prune' command.
type CmdPrune struct {
	global *GlobalOptions
}

func init() {
	_, err := parser.AddCommand("prune",
		"removes content from a repository",
		`
The prune command removes redundant and unneeded data from the repository.
For removing snapshots, please see the 'forget' command, then afterwards run
'prune'.
`,
		&CmdPrune{global: &globalOpts})
	if err != nil {
		panic(err)
	}
}

// newProgressMax returns a progress that counts blobs.
func newProgressMax(show bool, max uint64, description string) *restic.Progress {
	if !show {
		return nil
	}

	p := restic.NewProgress(time.Second)

	p.OnUpdate = func(s restic.Stat, d time.Duration, ticker bool) {
		status := fmt.Sprintf("[%s] %s %d / %d %s",
			formatDuration(d),
			formatPercent(s.Blobs, max),
			s.Blobs, max, description)

		w, _, err := terminal.GetSize(int(os.Stdout.Fd()))
		if err == nil {
			if len(status) > w && w > 4 {
				// truncate the status line so that, together with the
				// "... " marker, it fits the terminal width
				status = status[:w-4] + "... "
			}
		}

		fmt.Printf("\x1b[2K%s\r", status)
	}

	p.OnDone = func(s restic.Stat, d time.Duration, ticker bool) {
		fmt.Printf("\n")
	}

	return p
}

// Execute runs the 'prune' command.
func (cmd CmdPrune) Execute(args []string) error {
	repo, err := cmd.global.OpenRepository()
	if err != nil {
		return err
	}
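
	// prune modifies the repository, so take an exclusive lock to keep other
	// processes out while data is being removed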
	lock, err := lockRepoExclusive(repo)
	defer unlockRepo(lock)
	if err != nil {
		return err
	}

	err = repo.LoadIndex()
	if err != nil {
		return err
	}

	done := make(chan struct{})
	defer close(done)

	var stats struct {
		blobs     int
		packs     int
		snapshots int
		bytes     int64
	}

	cmd.global.Verbosef("counting files in repo\n")
	for range repo.List(backend.Data, done) {
		stats.packs++
	}
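
	// scan all pack files in the repository and build a fresh index of their
	// contents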
	cmd.global.Verbosef("building new index for repo\n")

	bar := newProgressMax(cmd.global.ShowProgress(), uint64(stats.packs), "files")
	idx, err := index.New(repo, bar)
	if err != nil {
		return err
	}

	for _, pack := range idx.Packs {
		stats.bytes += pack.Size
	}
	cmd.global.Verbosef("repository contains %v packs (%v blobs) with %v bytes\n",
		len(idx.Packs), len(idx.Blobs), formatBytes(uint64(stats.bytes)))

	blobCount := make(map[pack.Handle]int)
	duplicateBlobs := 0
	duplicateBytes := 0

	// find duplicate blobs
	for _, p := range idx.Packs {
		for _, entry := range p.Entries {
			stats.blobs++
			h := pack.Handle{ID: entry.ID, Type: entry.Type}
			blobCount[h]++

			if blobCount[h] > 1 {
				duplicateBlobs++
				duplicateBytes += int(entry.Length)
			}
		}
	}

	cmd.global.Verbosef("processed %d blobs: %d duplicate blobs, %v duplicate data\n",
		stats.blobs, duplicateBlobs, formatBytes(uint64(duplicateBytes)))
	cmd.global.Verbosef("load all snapshots\n")

	// find referenced blobs
	snapshots, err := restic.LoadAllSnapshots(repo)
	if err != nil {
		return err
	}

	stats.snapshots = len(snapshots)

	cmd.global.Verbosef("find data that is still in use for %d snapshots\n", stats.snapshots)
	usedBlobs := pack.NewBlobSet()
	seenBlobs := pack.NewBlobSet()

	bar = newProgressMax(cmd.global.ShowProgress(), uint64(len(snapshots)), "snapshots")
	bar.Start()
	for _, sn := range snapshots {
		debug.Log("CmdPrune.Execute", "process snapshot %v", sn.ID().Str())

		err = restic.FindUsedBlobs(repo, *sn.Tree, usedBlobs, seenBlobs)
		if err != nil {
			return err
		}

		debug.Log("CmdPrune.Execute", "processed snapshot %v, %v used blobs in total", sn.ID().Str(), len(usedBlobs))
		bar.Report(restic.Stat{Blobs: 1})
	}
	bar.Done()

	cmd.global.Verbosef("found %d of %d data blobs still in use\n", len(usedBlobs), stats.blobs)

	// find packs that need a rewrite: a pack is rewritten if it contains blobs
	// that are no longer referenced by any snapshot, or blobs that are stored
	// more than once
	rewritePacks := backend.NewIDSet()
	for h, blob := range idx.Blobs {
		if !usedBlobs.Has(h) {
			rewritePacks.Merge(blob.Packs)
		}

		if blobCount[h] > 1 {
			rewritePacks.Merge(blob.Packs)
		}
	}

	cmd.global.Verbosef("will rewrite %d packs\n", len(rewritePacks))
	err = repository.Repack(repo, rewritePacks, usedBlobs)
	if err != nil {
		return err
	}

	cmd.global.Verbosef("creating new index\n")

	bar = newProgressMax(cmd.global.ShowProgress(), uint64(stats.packs), "files")
	idx, err = index.New(repo, bar)
	if err != nil {
		return err
	}
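
	// remove the old index files from the backend and remember their IDs so
	// the new index can list them as superseded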
	var supersedes backend.IDs
	for idxID := range repo.List(backend.Index, done) {
		err := repo.Backend().Remove(backend.Index, idxID.String())
		if err != nil {
			fmt.Fprintf(os.Stderr, "unable to remove index %v: %v\n", idxID.Str(), err)
		}

		supersedes = append(supersedes, idxID)
	}

	id, err := idx.Save(repo, supersedes)
	if err != nil {
		return err
	}
	cmd.global.Verbosef("saved new index as %v\n", id.Str())

	cmd.global.Verbosef("done\n")
	return nil
}