
Merge pull request #518 from restic/implement-prune

Implement prune
Alexander Neumann 2016-08-21 09:22:22 +02:00
commit 2be0aa9dbc
92 changed files with 6289 additions and 1018 deletions

View File

@ -380,6 +380,98 @@ Now you can easily initialize restic to use Minio server as backend with this co
Please note that knowledge of your password is required to access
the repository. Losing your password means that your data is irrecoverably lost.
# Removing old snapshots
All backup space is finite, so restic allows removing old snapshots. This can
be done either manually (by specifying a snapshot ID to remove) or by using a
policy that describes which snapshots to forget. For all remove operations, two
commands need to be called in sequence: `forget` to remove a snapshot and
`prune` to actually remove the data that was referenced by the snapshot from
the repository.
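For example, with a repository at `/tmp/backup` the complete sequence consists of just two commands, each of which is described in detail below:
$ restic -r /tmp/backup forget bdbd3439
$ restic -r /tmp/backup prune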
## Remove a single snapshot
The command `snapshots` can be used to list all snapshots in a repository like this:
$ restic -r /tmp/backup snapshots
enter password for repository:
ID Date Host Directory
----------------------------------------------------------------------
40dc1520 2015-05-08 21:38:30 kasimir /home/user/work
79766175 2015-05-08 21:40:19 kasimir /home/user/work
bdbd3439 2015-05-08 21:45:17 luigi /home/art
590c8fc8 2015-05-08 21:47:38 kazik /srv
9f0bc19e 2015-05-08 21:46:11 luigi /srv
In order to remove the snapshot of `/home/art`, use the `forget` command and
specify the snapshot ID on the command line:
$ restic -r /tmp/backup forget bdbd3439
enter password for repository:
removed snapshot bdbd3439
Afterwards this snapshot is removed:
$ restic -r /tmp/backup snapshots
enter password for repository:
ID Date Host Directory
----------------------------------------------------------------------
40dc1520 2015-05-08 21:38:30 kasimir /home/user/work
79766175 2015-05-08 21:40:19 kasimir /home/user/work
590c8fc8 2015-05-08 21:47:38 kazik /srv
9f0bc19e 2015-05-08 21:46:11 luigi /srv
But the data that was referenced by files in this snapshot is still stored in
the repository. To clean up unreferenced data, the `prune` command must be run:
$ restic -r /tmp/backup prune
enter password for repository:
counting files in repo
building new index for repo
[0:00] 100.00% 22 / 22 files
repository contains 22 packs (8512 blobs) with 100.092 MiB bytes
processed 8512 blobs: 0 duplicate blobs, 0B duplicate
load all snapshots
find data that is still in use for 1 snapshots
[0:00] 100.00% 1 / 1 snapshots
found 8433 of 8512 data blobs still in use
will rewrite 3 packs
creating new index
[0:00] 86.36% 19 / 22 files
saved new index as 544a5084
done
Afterwards the repository is smaller.
## Removing snapshots according to a policy
Removing snapshots manually is tedious and error-prone, so restic also allows
specifying which snapshots should be removed automatically according to a
policy. You can specify how many hourly, daily, weekly, monthly and yearly
snapshots to keep; all other snapshots are removed. The most important
command-line parameter here is `--dry-run`, which instructs restic not to
remove anything but only print which snapshots would be removed.
When `forget` is run with a policy, restic loads the list of all snapshots and
groups them by host name and list of directories. The policy is then
applied to each group of snapshots separately. This is a safety feature.
The `forget` command accepts the following parameters:
* `--keep-last n` never delete the `n` last (most recent) snapshots
* `--keep-hourly n` for the last `n` hours in which a snapshot was made, keep
only the last snapshot for each hour.
* `--keep-daily n` for the last `n` days which have one or more snapshots, only
keep the last one for that day.
* `--keep-weekly n` for the last `n` weeks which have one or more snapshots, only
keep the last one for that week.
* `--keep-monthly n` for the last `n` months which have one or more snapshots, only
keep the last one for that month.
* `--keep-yearly n` for the last `n` years which have one or more snapshots, only
keep the last one for that year.
Additionally, you can restrict the removal to snapshots which have a
particular hostname by passing the `--hostname` parameter.
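For instance, the following invocation (an illustrative policy, not a recommendation) would keep the last seven daily, five weekly and twelve monthly snapshots and, because of `--dry-run`, only print what would be removed:
$ restic -r /tmp/backup forget --keep-daily 7 --keep-weekly 5 --keep-monthly 12 --dry-run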
# Debugging restic
The program can be built with debug support like this:

View File

@ -161,13 +161,14 @@ func (cmd CmdCat) Execute(args []string) error {
return err
case "blob":
- blob, err := repo.Index().Lookup(id)
+ list, err := repo.Index().Lookup(id, pack.Data)
if err != nil {
return err
}
blob := list[0]
buf := make([]byte, blob.Length)
- data, err := repo.LoadBlob(blob.Type, id, buf)
+ data, err := repo.LoadBlob(id, pack.Data, buf)
if err != nil {
return err
}

View File

@ -48,7 +48,7 @@ func prettyPrintJSON(wr io.Writer, item interface{}) error {
return err
}
- func printSnapshots(repo *repository.Repository, wr io.Writer) error {
+ func debugPrintSnapshots(repo *repository.Repository, wr io.Writer) error {
done := make(chan struct{})
defer close(done)
@ -126,9 +126,9 @@ func printPacks(repo *repository.Repository, wr io.Writer) error {
name := job.Data.(string)
h := backend.Handle{Type: backend.Data, Name: name}
- rd := backend.NewReadSeeker(repo.Backend(), h)
- unpacker, err := pack.NewUnpacker(repo.Key(), rd)
+ ldr := pack.BackendLoader{Backend: repo.Backend(), Handle: h}
+ unpacker, err := pack.NewUnpacker(repo.Key(), ldr)
if err != nil {
return nil, err
}
@ -226,14 +226,14 @@ func (cmd CmdDump) Execute(args []string) error {
case "indexes": case "indexes":
return cmd.DumpIndexes() return cmd.DumpIndexes()
case "snapshots": case "snapshots":
return printSnapshots(repo, os.Stdout) return debugPrintSnapshots(repo, os.Stdout)
case "trees": case "trees":
return printTrees(repo, os.Stdout) return printTrees(repo, os.Stdout)
case "packs": case "packs":
return printPacks(repo, os.Stdout) return printPacks(repo, os.Stdout)
case "all": case "all":
fmt.Printf("snapshots:\n") fmt.Printf("snapshots:\n")
err := printSnapshots(repo, os.Stdout) err := debugPrintSnapshots(repo, os.Stdout)
if err != nil { if err != nil {
return err return err
} }

View File

@ -0,0 +1,168 @@
package main
import (
"fmt"
"io"
"restic"
"restic/backend"
"strings"
)
// CmdForget implements the 'forget' command.
type CmdForget struct {
Last int `short:"l" long:"keep-last" description:"keep the last n snapshots"`
Hourly int `short:"H" long:"keep-hourly" description:"keep the last n hourly snapshots"`
Daily int `short:"d" long:"keep-daily" description:"keep the last n daily snapshots"`
Weekly int `short:"w" long:"keep-weekly" description:"keep the last n weekly snapshots"`
Monthly int `short:"m" long:"keep-monthly" description:"keep the last n monthly snapshots"`
Yearly int `short:"y" long:"keep-yearly" description:"keep the last n yearly snapshots"`
Hostname string `long:"hostname" description:"only forget snapshots for the given hostname"`
DryRun bool `short:"n" long:"dry-run" description:"do not delete anything, just print what would be done"`
global *GlobalOptions
}
func init() {
_, err := parser.AddCommand("forget",
"removes snapshots from a repository",
`
The forget command removes snapshots according to a policy. Please note
that this command really only deletes the snapshot object in the repo, which
is a reference to data stored there. In order to remove this (now
unreferenced) data after 'forget' was run successfully, see the 'prune'
command.
`,
&CmdForget{global: &globalOpts})
if err != nil {
panic(err)
}
}
// Usage returns usage information for 'forget'.
func (cmd CmdForget) Usage() string {
return "[snapshot ID] ..."
}
func printSnapshots(w io.Writer, snapshots restic.Snapshots) {
tab := NewTable()
tab.Header = fmt.Sprintf("%-8s %-19s %-10s %s", "ID", "Date", "Host", "Directory")
tab.RowFormat = "%-8s %-19s %-10s %s"
for _, sn := range snapshots {
if len(sn.Paths) == 0 {
continue
}
id := sn.ID()
tab.Rows = append(tab.Rows, []interface{}{id.Str(), sn.Time.Format(TimeFormat), sn.Hostname, sn.Paths[0]})
if len(sn.Paths) > 1 {
for _, path := range sn.Paths[1:] {
tab.Rows = append(tab.Rows, []interface{}{"", "", "", path})
}
}
}
tab.Write(w)
}
// Execute runs the 'forget' command.
func (cmd CmdForget) Execute(args []string) error {
repo, err := cmd.global.OpenRepository()
if err != nil {
return err
}
lock, err := lockRepoExclusive(repo)
defer unlockRepo(lock)
if err != nil {
return err
}
err = repo.LoadIndex()
if err != nil {
return err
}
// first, process all snapshot IDs given as arguments
for _, s := range args {
id, err := restic.FindSnapshot(repo, s)
if err != nil {
return err
}
if !cmd.DryRun {
err = repo.Backend().Remove(backend.Snapshot, id.String())
if err != nil {
return err
}
cmd.global.Verbosef("removed snapshot %v\n", id.Str())
} else {
cmd.global.Verbosef("would removed snapshot %v\n", id.Str())
}
}
policy := restic.ExpirePolicy{
Last: cmd.Last,
Hourly: cmd.Hourly,
Daily: cmd.Daily,
Weekly: cmd.Weekly,
Monthly: cmd.Monthly,
Yearly: cmd.Yearly,
}
if policy.Empty() {
return nil
}
// then, load all remaining snapshots
snapshots, err := restic.LoadAllSnapshots(repo)
if err != nil {
return err
}
// group by hostname and dirs
type key struct {
Hostname string
Dirs string
}
snapshotGroups := make(map[key]restic.Snapshots)
for _, sn := range snapshots {
if cmd.Hostname != "" && sn.Hostname != cmd.Hostname {
continue
}
k := key{Hostname: sn.Hostname, Dirs: strings.Join(sn.Paths, ":")}
list := snapshotGroups[k]
list = append(list, sn)
snapshotGroups[k] = list
}
for key, snapshotGroup := range snapshotGroups {
cmd.global.Printf("snapshots for host %v, directories %v:\n\n", key.Hostname, key.Dirs)
keep, remove := restic.ApplyPolicy(snapshotGroup, policy)
cmd.global.Printf("keep %d snapshots:\n", len(keep))
printSnapshots(cmd.global.stdout, keep)
cmd.global.Printf("\n")
cmd.global.Printf("remove %d snapshots:\n", len(remove))
printSnapshots(cmd.global.stdout, remove)
cmd.global.Printf("\n")
if !cmd.DryRun {
for _, sn := range remove {
err = repo.Backend().Remove(backend.Snapshot, sn.ID().String())
if err != nil {
return err
}
}
}
}
return nil
}

View File

@ -0,0 +1,215 @@
package main
import (
"fmt"
"os"
"restic"
"restic/backend"
"restic/debug"
"restic/index"
"restic/pack"
"restic/repository"
"time"
"golang.org/x/crypto/ssh/terminal"
)
// CmdPrune implements the 'prune' command.
type CmdPrune struct {
global *GlobalOptions
}
func init() {
_, err := parser.AddCommand("prune",
"removes content from a repository",
`
The prune command removes redundant and unneeded data from the repository.
To remove snapshots, please see the 'forget' command, then afterwards run
'prune'.
`,
&CmdPrune{global: &globalOpts})
if err != nil {
panic(err)
}
}
// newProgressMax returns a progress that counts blobs.
func newProgressMax(show bool, max uint64, description string) *restic.Progress {
if !show {
return nil
}
p := restic.NewProgress(time.Second)
p.OnUpdate = func(s restic.Stat, d time.Duration, ticker bool) {
status := fmt.Sprintf("[%s] %s %d / %d %s",
formatDuration(d),
formatPercent(s.Blobs, max),
s.Blobs, max, description)
w, _, err := terminal.GetSize(int(os.Stdout.Fd()))
if err == nil {
if len(status) > w {
max := w - 4
status = status[:max] + "... "
}
}
fmt.Printf("\x1b[2K%s\r", status)
}
p.OnDone = func(s restic.Stat, d time.Duration, ticker bool) {
fmt.Printf("\n")
}
return p
}
// Execute runs the 'prune' command.
func (cmd CmdPrune) Execute(args []string) error {
repo, err := cmd.global.OpenRepository()
if err != nil {
return err
}
lock, err := lockRepoExclusive(repo)
defer unlockRepo(lock)
if err != nil {
return err
}
err = repo.LoadIndex()
if err != nil {
return err
}
done := make(chan struct{})
defer close(done)
var stats struct {
blobs int
packs int
snapshots int
bytes int64
}
cmd.global.Verbosef("counting files in repo\n")
for _ = range repo.List(backend.Data, done) {
stats.packs++
}
cmd.global.Verbosef("building new index for repo\n")
bar := newProgressMax(cmd.global.ShowProgress(), uint64(stats.packs), "files")
idx, err := index.New(repo, bar)
if err != nil {
return err
}
for _, pack := range idx.Packs {
stats.bytes += pack.Size
}
cmd.global.Verbosef("repository contains %v packs (%v blobs) with %v bytes\n",
len(idx.Packs), len(idx.Blobs), formatBytes(uint64(stats.bytes)))
blobCount := make(map[pack.Handle]int)
duplicateBlobs := 0
duplicateBytes := 0
// find duplicate blobs
for _, p := range idx.Packs {
for _, entry := range p.Entries {
stats.blobs++
h := pack.Handle{ID: entry.ID, Type: entry.Type}
blobCount[h]++
if blobCount[h] > 1 {
duplicateBlobs++
duplicateBytes += int(entry.Length)
}
}
}
cmd.global.Verbosef("processed %d blobs: %d duplicate blobs, %v duplicate\n",
stats.blobs, duplicateBlobs, formatBytes(uint64(duplicateBytes)))
cmd.global.Verbosef("load all snapshots\n")
// find referenced blobs
snapshots, err := restic.LoadAllSnapshots(repo)
if err != nil {
return err
}
stats.snapshots = len(snapshots)
cmd.global.Verbosef("find data that is still in use for %d snapshots\n", stats.snapshots)
usedBlobs := pack.NewBlobSet()
seenBlobs := pack.NewBlobSet()
bar = newProgressMax(cmd.global.ShowProgress(), uint64(len(snapshots)), "snapshots")
bar.Start()
for _, sn := range snapshots {
debug.Log("CmdPrune.Execute", "process snapshot %v", sn.ID().Str())
err = restic.FindUsedBlobs(repo, *sn.Tree, usedBlobs, seenBlobs)
if err != nil {
return err
}
debug.Log("CmdPrune.Execute", "found %v blobs for snapshot %v", sn.ID().Str())
bar.Report(restic.Stat{Blobs: 1})
}
bar.Done()
cmd.global.Verbosef("found %d of %d data blobs still in use\n", len(usedBlobs), stats.blobs)
// find packs that need a rewrite
rewritePacks := backend.NewIDSet()
for h, blob := range idx.Blobs {
if !usedBlobs.Has(h) {
rewritePacks.Merge(blob.Packs)
}
if blobCount[h] > 1 {
rewritePacks.Merge(blob.Packs)
}
}
cmd.global.Verbosef("will rewrite %d packs\n", len(rewritePacks))
err = repository.Repack(repo, rewritePacks, usedBlobs)
if err != nil {
return err
}
cmd.global.Verbosef("creating new index\n")
for _ = range repo.List(backend.Data, done) {
stats.packs++
}
bar = newProgressMax(cmd.global.ShowProgress(), uint64(stats.packs), "files")
idx, err = index.New(repo, bar)
if err != nil {
return err
}
var supersedes backend.IDs
for idxID := range repo.List(backend.Index, done) {
err := repo.Backend().Remove(backend.Index, idxID.String())
if err != nil {
fmt.Fprintf(os.Stderr, "unable to remove index %v: %v\n", idxID.Str(), err)
}
supersedes = append(supersedes, idxID)
}
id, err := idx.Save(repo, supersedes)
if err != nil {
return err
}
cmd.global.Verbosef("saved new index as %v\n", id.Str())
cmd.global.Verbosef("done\n")
return nil
}

View File

@ -1,14 +1,6 @@
package main
- import (
- "fmt"
- "os"
- "restic/backend"
- "restic/debug"
- "restic/pack"
- "restic/repository"
- "restic/worker"
- )
+ import "restic/repository"
type CmdRebuildIndex struct {
global *GlobalOptions
@ -26,94 +18,6 @@ func init() {
}
}
const rebuildIndexWorkers = 10
func loadBlobsFromPacks(repo *repository.Repository) (packs map[backend.ID][]pack.Blob) {
done := make(chan struct{})
defer close(done)
f := func(job worker.Job, done <-chan struct{}) (interface{}, error) {
return repo.ListPack(job.Data.(backend.ID))
}
jobCh := make(chan worker.Job)
resCh := make(chan worker.Job)
wp := worker.New(rebuildIndexWorkers, f, jobCh, resCh)
go func() {
for id := range repo.List(backend.Data, done) {
jobCh <- worker.Job{Data: id}
}
close(jobCh)
}()
packs = make(map[backend.ID][]pack.Blob)
for job := range resCh {
id := job.Data.(backend.ID)
if job.Error != nil {
fmt.Fprintf(os.Stderr, "error for pack %v: %v\n", id, job.Error)
continue
}
entries := job.Result.([]pack.Blob)
packs[id] = entries
}
wp.Wait()
return packs
}
func listIndexIDs(repo *repository.Repository) (list backend.IDs) {
done := make(chan struct{})
for id := range repo.List(backend.Index, done) {
list = append(list, id)
}
return list
}
func (cmd CmdRebuildIndex) rebuildIndex() error {
debug.Log("RebuildIndex.RebuildIndex", "start rebuilding index")
packs := loadBlobsFromPacks(cmd.repo)
cmd.global.Verbosef("loaded blobs from %d packs\n", len(packs))
idx := repository.NewIndex()
for packID, entries := range packs {
for _, entry := range entries {
pb := repository.PackedBlob{
ID: entry.ID,
Type: entry.Type,
Length: entry.Length,
Offset: entry.Offset,
PackID: packID,
}
idx.Store(pb)
}
}
oldIndexes := listIndexIDs(cmd.repo)
idx.AddToSupersedes(oldIndexes...)
cmd.global.Printf(" saving new index\n")
id, err := repository.SaveIndex(cmd.repo, idx)
if err != nil {
debug.Log("RebuildIndex.RebuildIndex", "error saving index: %v", err)
return err
}
debug.Log("RebuildIndex.RebuildIndex", "new index saved as %v", id.Str())
for _, indexID := range oldIndexes {
err := cmd.repo.Backend().Remove(backend.Index, indexID.String())
if err != nil {
cmd.global.Warnf("unable to remove index %v: %v\n", indexID.Str(), err)
}
}
return nil
}
func (cmd CmdRebuildIndex) Execute(args []string) error {
repo, err := cmd.global.OpenRepository()
if err != nil {
@ -127,5 +31,5 @@ func (cmd CmdRebuildIndex) Execute(args []string) error {
return err
}
- return cmd.rebuildIndex()
+ return repository.RebuildIndex(repo)
}

View File

@ -22,7 +22,7 @@ func saveTreeJSON(repo *repository.Repository, item interface{}) (backend.ID, er
// check if tree has been saved before
id := backend.Hash(data)
- if repo.Index().Has(id) {
+ if repo.Index().Has(id, pack.Tree) {
return id, nil
}
@ -58,7 +58,7 @@ func ArchiveReader(repo *repository.Repository, p *Progress, rd io.Reader, name
id := backend.Hash(chunk.Data)
- if !repo.Index().Has(id) {
+ if !repo.Index().Has(id, pack.Data) {
_, err := repo.SaveAndEncrypt(pack.Data, chunk.Data, nil)
if err != nil {
return nil, backend.ID{}, err

View File

@ -13,7 +13,7 @@ import (
)
func loadBlob(t *testing.T, repo *repository.Repository, id backend.ID, buf []byte) []byte {
- buf, err := repo.LoadBlob(pack.Data, id, buf)
+ buf, err := repo.LoadBlob(id, pack.Data, buf)
if err != nil {
t.Fatalf("LoadBlob(%v) returned error %v", id, err)
}

View File

@ -72,7 +72,7 @@ func NewArchiver(repo *repository.Repository) *Archiver {
// When the blob is not known, false is returned and the blob is added to the
// list. This means that the caller false is returned to is responsible to save
// the blob to the backend.
- func (arch *Archiver) isKnownBlob(id backend.ID) bool {
+ func (arch *Archiver) isKnownBlob(id backend.ID, t pack.BlobType) bool {
arch.knownBlobs.Lock()
defer arch.knownBlobs.Unlock()
@ -82,7 +82,7 @@ func (arch *Archiver) isKnownBlob(id backend.ID) bool {
arch.knownBlobs.Insert(id)
- _, err := arch.repo.Index().Lookup(id)
+ _, err := arch.repo.Index().Lookup(id, t)
if err == nil {
return true
}
@ -94,7 +94,7 @@ func (arch *Archiver) isKnownBlob(id backend.ID) bool {
func (arch *Archiver) Save(t pack.BlobType, data []byte, id backend.ID) error {
debug.Log("Archiver.Save", "Save(%v, %v)\n", t, id.Str())
- if arch.isKnownBlob(id) {
+ if arch.isKnownBlob(id, pack.Data) {
debug.Log("Archiver.Save", "blob %v is known\n", id.Str())
return nil
}
@ -119,7 +119,7 @@ func (arch *Archiver) SaveTreeJSON(item interface{}) (backend.ID, error) {
// check if tree has been saved before
id := backend.Hash(data)
- if arch.isKnownBlob(id) {
+ if arch.isKnownBlob(id, pack.Tree) {
return id, nil
}

View File

@ -101,7 +101,7 @@ func testArchiverDuplication(t *testing.T) {
id := randomID()
- if repo.Index().Has(id) {
+ if repo.Index().Has(id, pack.Data) {
continue
}

View File

@ -60,6 +60,47 @@ func (s IDSet) Equals(other IDSet) bool {
return true
}
// Merge adds the blobs in other to the current set.
func (s IDSet) Merge(other IDSet) {
for id := range other {
s.Insert(id)
}
}
// Intersect returns a new set containing the IDs that are present in both sets.
func (s IDSet) Intersect(other IDSet) (result IDSet) {
result = NewIDSet()
set1 := s
set2 := other
// iterate over the smaller set
if len(set2) < len(set1) {
set1, set2 = set2, set1
}
for id := range set1 {
if set2.Has(id) {
result.Insert(id)
}
}
return result
}
// Sub returns a new set containing all IDs that are present in s but not in
// other.
func (s IDSet) Sub(other IDSet) (result IDSet) {
result = NewIDSet()
for id := range s {
if !other.Has(id) {
result.Insert(id)
}
}
return result
}
func (s IDSet) String() string {
str := s.List().String()
if len(str) < 2 {
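To illustrate how the new set operations behave, here is a minimal sketch (the IDs `a`, `b` and `c` are hypothetical placeholders):
x := backend.NewIDSet(a, b) // x is {a, b}
y := backend.NewIDSet(b, c) // y is {b, c}
x.Intersect(y)              // returns a new set {b}; x and y are unchanged
x.Sub(y)                    // returns a new set {a}; x and y are unchanged
x.Merge(y)                  // modifies x in place, x is now {a, b, c}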

View File

@ -31,7 +31,9 @@ type Backend interface {
Lister
// Load returns the data stored in the backend for h at the given offset
- // and saves it in p. Load has the same semantics as io.ReaderAt.
+ // and saves it in p. Load has the same semantics as io.ReaderAt, except
+ // that a negative offset is also allowed. In this case it references a
+ // position relative to the end of the file (similar to Seek()).
Load(h Handle, p []byte, off int64) (int, error)
// Save stores the data in the backend under the given handle.
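A minimal sketch of the extended contract, assuming an existing backend `be` and a valid handle `h` (both hypothetical here): a negative offset addresses data relative to the end of the file, so the final 16 bytes of an object can be read with
buf := make([]byte, 16)
n, err := be.Load(h, buf, -16) // on success n == 16 and buf holds the last 16 bytes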

View File

@ -51,6 +51,13 @@ func TestLocalBackendLoad(t *testing.T) {
test.TestLoad(t)
}
func TestLocalBackendLoadNegativeOffset(t *testing.T) {
if SkipMessage != "" {
t.Skip(SkipMessage)
}
test.TestLoadNegativeOffset(t)
}
func TestLocalBackendSave(t *testing.T) {
if SkipMessage != "" {
t.Skip(SkipMessage)

View File

@ -98,9 +98,12 @@ func dirname(base string, t backend.Type, name string) string {
return filepath.Join(base, n)
}
- // Load returns the data stored in the backend for h at the given offset
- // and saves it in p. Load has the same semantics as io.ReaderAt.
+ // Load returns the data stored in the backend for h at the given offset and
+ // saves it in p. Load has the same semantics as io.ReaderAt, with one
+ // exception: when off is lower than zero, it is treated as an offset relative
+ // to the end of the file.
func (b *Local) Load(h backend.Handle, p []byte, off int64) (n int, err error) {
debug.Log("backend.local.Load", "Load %v, length %v at %v", h, len(p), off)
if err := h.Valid(); err != nil {
return 0, err
}
@ -117,11 +120,15 @@ func (b *Local) Load(h backend.Handle, p []byte, off int64) (n int, err error) {
}
}()
- if off > 0 {
+ switch {
+ case off > 0:
_, err = f.Seek(off, 0)
- if err != nil {
- return 0, err
+ case off < 0:
+ _, err = f.Seek(off, 2)
}
if err != nil {
return 0, err
}
return io.ReadFull(f, p)
@ -162,6 +169,7 @@ func writeToTempfile(tempdir string, p []byte) (filename string, err error) {
// Save stores data in the backend at the handle.
func (b *Local) Save(h backend.Handle, p []byte) (err error) {
debug.Log("backend.local.Save", "Save %v, length %v", h, len(p))
if err := h.Valid(); err != nil {
return err
}
@ -203,6 +211,7 @@ func (b *Local) Save(h backend.Handle, p []byte) (err error) {
// Stat returns information about a blob.
func (b *Local) Stat(h backend.Handle) (backend.BlobInfo, error) {
debug.Log("backend.local.Stat", "Stat %v", h)
if err := h.Valid(); err != nil {
return backend.BlobInfo{}, err
}
@ -217,6 +226,7 @@ func (b *Local) Stat(h backend.Handle) (backend.BlobInfo, error) {
// Test returns true if a blob of the given type and name exists in the backend.
func (b *Local) Test(t backend.Type, name string) (bool, error) {
debug.Log("backend.local.Test", "Test %v %v", t, name)
_, err := fs.Stat(filename(b.p, t, name))
if err != nil {
if os.IsNotExist(err) {
@ -230,6 +240,7 @@ func (b *Local) Test(t backend.Type, name string) (bool, error) {
// Remove removes the blob with the given name and type.
func (b *Local) Remove(t backend.Type, name string) error {
debug.Log("backend.local.Remove", "Remove %v %v", t, name)
fn := filename(b.p, t, name)
// reset read-only flag
@ -304,6 +315,7 @@ func listDirs(dir string) (filenames []string, err error) {
// goroutine is started for this. If the channel done is closed, sending
// stops.
func (b *Local) List(t backend.Type, done <-chan struct{}) <-chan string {
debug.Log("backend.local.List", "List %v", t)
lister := listDir
if t == backend.Data {
lister = listDirs
@ -336,11 +348,13 @@ func (b *Local) List(t backend.Type, done <-chan struct{}) <-chan string {
// Delete removes the repository and all files.
func (b *Local) Delete() error {
debug.Log("backend.local.Delete", "Delete()")
return fs.RemoveAll(b.p)
}
// Close closes all open files.
func (b *Local) Close() error {
debug.Log("backend.local.Close", "Close()")
// this does not need to do anything, all open files are closed within the
// same function.
return nil

View File

@ -51,6 +51,13 @@ func TestMemBackendLoad(t *testing.T) {
test.TestLoad(t)
}
func TestMemBackendLoadNegativeOffset(t *testing.T) {
if SkipMessage != "" {
t.Skip(SkipMessage)
}
test.TestLoadNegativeOffset(t)
}
func TestMemBackendSave(t *testing.T) {
if SkipMessage != "" {
t.Skip(SkipMessage)

View File

@ -116,8 +116,13 @@ func memLoad(be *MemoryBackend, h backend.Handle, p []byte, off int64) (int, err
}
buf := be.data[entry{h.Type, h.Name}]
- if off > int64(len(buf)) {
+ switch {
+ case off > int64(len(buf)):
return 0, errors.New("offset beyond end of file")
+ case off < -int64(len(buf)):
+ off = 0
+ case off < 0:
+ off = int64(len(buf)) + off
}
buf = buf[off:]

View File

@ -1,63 +0,0 @@
package backend
import (
"errors"
"io"
)
type readSeeker struct {
be Backend
h Handle
t Type
name string
offset int64
size int64
}
// NewReadSeeker returns an io.ReadSeeker for the given object in the backend.
func NewReadSeeker(be Backend, h Handle) io.ReadSeeker {
return &readSeeker{be: be, h: h}
}
func (rd *readSeeker) Read(p []byte) (int, error) {
n, err := rd.be.Load(rd.h, p, rd.offset)
rd.offset += int64(n)
return n, err
}
func (rd *readSeeker) Seek(offset int64, whence int) (n int64, err error) {
switch whence {
case 0:
rd.offset = offset
case 1:
rd.offset += offset
case 2:
if rd.size == 0 {
rd.size, err = rd.getSize()
if err != nil {
return 0, err
}
}
pos := rd.size + offset
if pos < 0 {
return 0, errors.New("invalid offset, before start of blob")
}
rd.offset = pos
return rd.offset, nil
default:
return 0, errors.New("invalid value for parameter whence")
}
return rd.offset, nil
}
func (rd *readSeeker) getSize() (int64, error) {
stat, err := rd.be.Stat(rd.h)
if err != nil {
return 0, err
}
return stat.Size, nil
}

View File

@ -1,114 +0,0 @@
package backend_test
import (
"bytes"
"io"
"math/rand"
"restic/backend"
"restic/backend/mem"
"testing"
. "restic/test"
)
func abs(a int) int {
if a < 0 {
return -a
}
return a
}
func loadAndCompare(t testing.TB, rd io.ReadSeeker, size int, offset int64, expected []byte) {
var (
pos int64
err error
)
if offset >= 0 {
pos, err = rd.Seek(offset, 0)
} else {
pos, err = rd.Seek(offset, 2)
}
if err != nil {
t.Errorf("Seek(%d, 0) returned error: %v", offset, err)
return
}
if offset >= 0 && pos != offset {
t.Errorf("pos after seek is wrong, want %d, got %d", offset, pos)
} else if offset < 0 && pos != int64(size)+offset {
t.Errorf("pos after relative seek is wrong, want %d, got %d", int64(size)+offset, pos)
}
buf := make([]byte, len(expected))
n, err := rd.Read(buf)
// if we requested data beyond the end of the file, ignore
// ErrUnexpectedEOF error
if offset > 0 && len(buf) > size && err == io.ErrUnexpectedEOF {
err = nil
buf = buf[:size]
}
if offset < 0 && len(buf) > abs(int(offset)) && err == io.ErrUnexpectedEOF {
err = nil
buf = buf[:abs(int(offset))]
}
if n != len(buf) {
t.Errorf("Load(%d, %d): wrong length returned, want %d, got %d",
len(buf), offset, len(buf), n)
return
}
if err != nil {
t.Errorf("Load(%d, %d): unexpected error: %v", len(buf), offset, err)
return
}
buf = buf[:n]
if !bytes.Equal(buf, expected) {
t.Errorf("Load(%d, %d) returned wrong bytes", len(buf), offset)
return
}
}
func TestReadSeeker(t *testing.T) {
b := mem.New()
length := rand.Intn(1<<24) + 2000
data := Random(23, length)
id := backend.Hash(data)
handle := backend.Handle{Type: backend.Data, Name: id.String()}
err := b.Save(handle, data)
if err != nil {
t.Fatalf("Save() error: %v", err)
}
for i := 0; i < 50; i++ {
l := rand.Intn(length + 2000)
o := rand.Intn(length + 2000)
if rand.Float32() > 0.5 {
o = -o
}
d := data
if o > 0 && o < len(d) {
d = d[o:]
} else {
o = len(d)
d = d[:0]
}
if l > 0 && l < len(d) {
d = d[:l]
}
rd := backend.NewReadSeeker(b, handle)
loadAndCompare(t, rd, len(data), int64(o), d)
}
}

View File

@ -51,6 +51,13 @@ func TestRestBackendLoad(t *testing.T) {
test.TestLoad(t)
}
func TestRestBackendLoadNegativeOffset(t *testing.T) {
if SkipMessage != "" {
t.Skip(SkipMessage)
}
test.TestLoadNegativeOffset(t)
}
func TestRestBackendSave(t *testing.T) {
if SkipMessage != "" {
t.Skip(SkipMessage)

View File

@ -75,6 +75,20 @@ func (b *restBackend) Load(h backend.Handle, p []byte, off int64) (n int, err er
return 0, err
}
// invert offset
if off < 0 {
info, err := b.Stat(h)
if err != nil {
return 0, err
}
if -off > info.Size {
off = 0
} else {
off = info.Size + off
}
}
req, err := http.NewRequest("GET", restPath(b.url, h), nil)
if err != nil {
return 0, err

View File

@ -51,6 +51,13 @@ func TestS3BackendLoad(t *testing.T) {
test.TestLoad(t)
}
func TestS3BackendLoadNegativeOffset(t *testing.T) {
if SkipMessage != "" {
t.Skip(SkipMessage)
}
test.TestLoadNegativeOffset(t)
}
func TestS3BackendSave(t *testing.T) {
if SkipMessage != "" {
t.Skip(SkipMessage)

View File

@ -77,37 +77,75 @@ func (be *s3) Location() string {
// Load returns the data stored in the backend for h at the given offset
// and saves it in p. Load has the same semantics as io.ReaderAt.
- func (be s3) Load(h backend.Handle, p []byte, off int64) (int, error) {
+ func (be s3) Load(h backend.Handle, p []byte, off int64) (n int, err error) {
+ var obj *minio.Object
debug.Log("s3.Load", "%v, offset %v, len %v", h, off, len(p))
path := be.s3path(h.Type, h.Name)
- obj, err := be.client.GetObject(be.bucketname, path)
- if err != nil {
- debug.Log("s3.GetReader", " err %v", err)
- return 0, err
- }
- if off > 0 {
- _, err = obj.Seek(off, 0)
- if err != nil {
- return 0, err
- }
- }
<-be.connChan
defer func() {
be.connChan <- struct{}{}
}()
- // This may not read the whole object, so ensure object
- // is closed to avoid duplicate connections.
- n, err := io.ReadFull(obj, p)
- if err != nil {
- obj.Close()
- } else {
- err = obj.Close()
- }
- return n, err
+ obj, err = be.client.GetObject(be.bucketname, path)
+ if err != nil {
+ debug.Log("s3.Load", " err %v", err)
+ return 0, err
+ }
// make sure that the object is closed properly.
defer func() {
e := obj.Close()
if err == nil {
err = e
}
}()
info, err := obj.Stat()
if err != nil {
return 0, err
}
// handle negative offsets
if off < 0 {
// if the negative offset is larger than the object itself, read from
// the beginning.
if -off > info.Size {
off = 0
} else {
// otherwise compute the offset from the end of the file.
off = info.Size + off
}
}
// return an error if the offset is beyond the end of the file
if off > info.Size {
return 0, io.EOF
}
var nextError error
// manually create an io.ErrUnexpectedEOF
if off+int64(len(p)) > info.Size {
newlen := info.Size - off
p = p[:newlen]
nextError = io.ErrUnexpectedEOF
debug.Log("s3.Load", " capped buffer to %v byte", len(p))
}
n, err = obj.ReadAt(p, off)
if int64(n) == info.Size-off && err == io.EOF {
err = nil
}
if err == nil {
err = nextError
}
return n, err
}
// Save stores data in the backend at the handle.
@ -116,7 +154,7 @@ func (be s3) Save(h backend.Handle, p []byte) (err error) {
return err
}
- debug.Log("s3.Save", "%v bytes at %d", len(p), h)
+ debug.Log("s3.Save", "%v with %d bytes", h, len(p))
path := be.s3path(h.Type, h.Name)

View File

@ -51,6 +51,13 @@ func TestSftpBackendLoad(t *testing.T) {
test.TestLoad(t)
}
func TestSftpBackendLoadNegativeOffset(t *testing.T) {
if SkipMessage != "" {
t.Skip(SkipMessage)
}
test.TestLoadNegativeOffset(t)
}
func TestSftpBackendSave(t *testing.T) {
if SkipMessage != "" {
t.Skip(SkipMessage)

View File

@ -308,11 +308,15 @@ func (r *SFTP) Load(h backend.Handle, p []byte, off int64) (n int, err error) {
}
}()
- if off > 0 {
+ switch {
+ case off > 0:
_, err = f.Seek(off, 0)
- if err != nil {
- return 0, err
+ case off < 0:
+ _, err = f.Seek(off, 2)
}
if err != nil {
return 0, err
}
return io.ReadFull(f, p)

View File

@ -51,6 +51,13 @@ func TestTestBackendLoad(t *testing.T) {
test.TestLoad(t)
}
func TestTestBackendLoadNegativeOffset(t *testing.T) {
if SkipMessage != "" {
t.Skip(SkipMessage)
}
test.TestLoadNegativeOffset(t)
}
func TestTestBackendSave(t *testing.T) {
if SkipMessage != "" {
t.Skip(SkipMessage)

View File

@ -220,9 +220,60 @@ func TestLoad(t testing.TB) {
buf := make([]byte, l)
n, err := b.Load(handle, buf, int64(o))
- // if we requested data beyond the end of the file, ignore
+ // if we requested data beyond the end of the file, require
// ErrUnexpectedEOF error
- if l > len(d) && err == io.ErrUnexpectedEOF {
+ if l > len(d) {
if err != io.ErrUnexpectedEOF {
t.Errorf("Load(%d, %d) did not return io.ErrUnexpectedEOF", len(buf), int64(o))
}
err = nil
buf = buf[:len(d)]
}
if err != nil {
t.Errorf("Load(%d, %d): unexpected error: %v", len(buf), int64(o), err)
continue
}
if n != len(buf) {
t.Errorf("Load(%d, %d): wrong length returned, want %d, got %d",
len(buf), int64(o), len(buf), n)
continue
}
buf = buf[:n]
if !bytes.Equal(buf, d) {
t.Errorf("Load(%d, %d) returned wrong bytes", len(buf), int64(o))
continue
}
}
// test with negative offset
for i := 0; i < 50; i++ {
l := rand.Intn(length + 2000)
o := rand.Intn(length + 2000)
d := data
if o < len(d) {
d = d[len(d)-o:]
} else {
o = 0
}
if l > 0 && l < len(d) {
d = d[:l]
}
buf := make([]byte, l)
n, err := b.Load(handle, buf, -int64(o))
// if we requested data beyond the end of the file, require
// ErrUnexpectedEOF error
if l > len(d) {
if err != io.ErrUnexpectedEOF {
t.Errorf("Load(%d, %d) did not return io.ErrUnexpectedEOF", len(buf), int64(o))
continue
}
err = nil
buf = buf[:len(d)]
}
@ -259,6 +310,62 @@ func TestLoad(t testing.TB) {
OK(t, b.Remove(backend.Data, id.String()))
}
// TestLoadNegativeOffset tests the backend's Load function with negative offsets.
func TestLoadNegativeOffset(t testing.TB) {
b := open(t)
defer close(t)
length := rand.Intn(1<<24) + 2000
data := Random(23, length)
id := backend.Hash(data)
handle := backend.Handle{Type: backend.Data, Name: id.String()}
err := b.Save(handle, data)
if err != nil {
t.Fatalf("Save() error: %v", err)
}
// test normal reads
for i := 0; i < 50; i++ {
l := rand.Intn(length + 2000)
o := -rand.Intn(length + 2000)
buf := make([]byte, l)
n, err := b.Load(handle, buf, int64(o))
// if we requested data beyond the end of the file, require
// ErrUnexpectedEOF error
if len(buf) > -o {
if err != io.ErrUnexpectedEOF {
t.Errorf("Load(%d, %d) did not return io.ErrUnexpectedEOF", len(buf), o)
continue
}
err = nil
buf = buf[:-o]
}
if err != nil {
t.Errorf("Load(%d, %d) returned error: %v", len(buf), o, err)
continue
}
if n != len(buf) {
t.Errorf("Load(%d, %d) returned short read, only got %d bytes", len(buf), o, n)
continue
}
p := len(data) + o
if !bytes.Equal(buf, data[p:p+len(buf)]) {
t.Errorf("Load(%d, %d) returned wrong bytes", len(buf), o)
continue
}
}
OK(t, b.Remove(backend.Data, id.String()))
}
// TestSave tests saving data in the backend.
func TestSave(t testing.TB) {
b := open(t)

View File

@ -0,0 +1,17 @@
package backend
import (
"crypto/rand"
"io"
)
// RandomID returns a randomly generated ID. This is mainly used for testing.
// When reading from rand fails, the function panics.
func RandomID() ID {
id := ID{}
_, err := io.ReadFull(rand.Reader, id[:])
if err != nil {
panic(err)
}
return id
}

View File

@ -1,7 +1,6 @@
package checker
import (
- "bytes"
"errors"
"fmt"
"sync"
@ -677,7 +676,7 @@ func checkPack(r *repository.Repository, id backend.ID) error {
return fmt.Errorf("Pack ID does not match, want %v, got %v", id.Str(), hash.Str())
}
- unpacker, err := pack.NewUnpacker(r.Key(), bytes.NewReader(buf))
+ unpacker, err := pack.NewUnpacker(r.Key(), pack.BufferLoader(buf))
if err != nil {
return err
}

View File

@ -1,163 +0,0 @@
package checker
import (
"errors"
"restic/backend"
"restic/debug"
"restic/repository"
)
// Repacker extracts still used blobs from packs with unused blobs and creates
// new packs.
type Repacker struct {
unusedBlobs backend.IDSet
repo *repository.Repository
}
// NewRepacker returns a new repacker that (when Repack() is run) cleans up the
// repository and creates new packs and indexes so that all blobs in unusedBlobs
// aren't used any more.
func NewRepacker(repo *repository.Repository, unusedBlobs backend.IDSet) *Repacker {
return &Repacker{
repo: repo,
unusedBlobs: unusedBlobs,
}
}
// Repack runs the process of finding still used blobs in packs with unused
// blobs, extracts them and creates new packs with just the still-in-use blobs.
func (r *Repacker) Repack() error {
debug.Log("Repacker.Repack", "searching packs for %v", r.unusedBlobs)
unneededPacks, err := FindPacksForBlobs(r.repo, r.unusedBlobs)
if err != nil {
return err
}
debug.Log("Repacker.Repack", "found packs: %v", unneededPacks)
blobs, err := FindBlobsForPacks(r.repo, unneededPacks)
if err != nil {
return err
}
debug.Log("Repacker.Repack", "found blobs: %v", blobs)
for id := range r.unusedBlobs {
debug.Log("Repacker.Repack", "remove unused blob %v", id.Str())
blobs.Delete(id)
}
debug.Log("Repacker.Repack", "need to repack blobs: %v", blobs)
err = RepackBlobs(r.repo, r.repo, blobs)
if err != nil {
return err
}
debug.Log("Repacker.Repack", "remove unneeded packs: %v", unneededPacks)
for packID := range unneededPacks {
err = r.repo.Backend().Remove(backend.Data, packID.String())
if err != nil {
return err
}
}
debug.Log("Repacker.Repack", "rebuild index, unneeded packs: %v", unneededPacks)
idx, err := r.repo.Index().RebuildIndex(unneededPacks)
newIndexID, err := repository.SaveIndex(r.repo, idx)
debug.Log("Repacker.Repack", "saved new index at %v, err %v", newIndexID.Str(), err)
if err != nil {
return err
}
debug.Log("Repacker.Repack", "remove old indexes: %v", idx.Supersedes())
for _, id := range idx.Supersedes() {
err = r.repo.Backend().Remove(backend.Index, id.String())
if err != nil {
debug.Log("Repacker.Repack", "error removing index %v: %v", id.Str(), err)
return err
}
debug.Log("Repacker.Repack", "removed index %v", id.Str())
}
return nil
}
// FindPacksForBlobs returns the set of packs that contain the blobs.
func FindPacksForBlobs(repo *repository.Repository, blobs backend.IDSet) (backend.IDSet, error) {
packs := backend.NewIDSet()
idx := repo.Index()
for id := range blobs {
blob, err := idx.Lookup(id)
if err != nil {
return nil, err
}
packs.Insert(blob.PackID)
}
return packs, nil
}
// FindBlobsForPacks returns the set of blobs contained in a pack of packs.
func FindBlobsForPacks(repo *repository.Repository, packs backend.IDSet) (backend.IDSet, error) {
blobs := backend.NewIDSet()
for packID := range packs {
for _, packedBlob := range repo.Index().ListPack(packID) {
blobs.Insert(packedBlob.ID)
}
}
return blobs, nil
}
// repackBlob loads a single blob from src and saves it in dst.
func repackBlob(src, dst *repository.Repository, id backend.ID) error {
blob, err := src.Index().Lookup(id)
if err != nil {
return err
}
debug.Log("RepackBlobs", "repacking blob %v, len %v", id.Str(), blob.PlaintextLength())
buf := make([]byte, 0, blob.PlaintextLength())
buf, err = src.LoadBlob(blob.Type, id, buf)
if err != nil {
return err
}
if uint(len(buf)) != blob.PlaintextLength() {
debug.Log("RepackBlobs", "repack blob %v: len(buf) isn't equal to length: %v = %v", id.Str(), len(buf), blob.PlaintextLength())
return errors.New("LoadBlob returned wrong data, len() doesn't match")
}
_, err = dst.SaveAndEncrypt(blob.Type, buf, &id)
if err != nil {
return err
}
return nil
}
// RepackBlobs reads all blobs in blobIDs from src and saves them into new pack
// files in dst. Source and destination repo may be the same.
func RepackBlobs(src, dst *repository.Repository, blobIDs backend.IDSet) (err error) {
for id := range blobIDs {
err = repackBlob(src, dst, id)
if err != nil {
return err
}
}
err = dst.Flush()
if err != nil {
return err
}
return nil
}

View File

@ -1,127 +0,0 @@
package checker_test
import (
"testing"
"restic/backend"
"restic/checker"
. "restic/test"
)
var findPackTests = []struct {
blobIDs backend.IDSet
packIDs backend.IDSet
}{
{
backend.IDSet{
ParseID("534f211b4fc2cf5b362a24e8eba22db5372a75b7e974603ff9263f5a471760f4"): struct{}{},
ParseID("51aa04744b518c6a85b4e7643cfa99d58789c2a6ca2a3fda831fa3032f28535c"): struct{}{},
ParseID("454515bca5f4f60349a527bd814cc2681bc3625716460cc6310771c966d8a3bf"): struct{}{},
ParseID("c01952de4d91da1b1b80bc6e06eaa4ec21523f4853b69dc8231708b9b7ec62d8"): struct{}{},
},
backend.IDSet{
ParseID("19a731a515618ec8b75fc0ff3b887d8feb83aef1001c9899f6702761142ed068"): struct{}{},
ParseID("657f7fb64f6a854fff6fe9279998ee09034901eded4e6db9bcee0e59745bbce6"): struct{}{},
},
},
}
var findBlobTests = []struct {
packIDs backend.IDSet
blobIDs backend.IDSet
}{
{
backend.IDSet{
ParseID("60e0438dcb978ec6860cc1f8c43da648170ee9129af8f650f876bad19f8f788e"): struct{}{},
},
backend.IDSet{
ParseID("356493f0b00a614d36c698591bbb2b1d801932d85328c1f508019550034549fc"): struct{}{},
ParseID("b8a6bcdddef5c0f542b4648b2ef79bc0ed4377d4109755d2fb78aff11e042663"): struct{}{},
ParseID("5714f7274a8aa69b1692916739dc3835d09aac5395946b8ec4f58e563947199a"): struct{}{},
ParseID("b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c"): struct{}{},
ParseID("08d0444e9987fa6e35ce4232b2b71473e1a8f66b2f9664cc44dc57aad3c5a63a"): struct{}{},
},
},
{
backend.IDSet{
ParseID("60e0438dcb978ec6860cc1f8c43da648170ee9129af8f650f876bad19f8f788e"): struct{}{},
ParseID("ff7e12cd66d896b08490e787d1915c641e678d7e6b4a00e60db5d13054f4def4"): struct{}{},
},
backend.IDSet{
ParseID("356493f0b00a614d36c698591bbb2b1d801932d85328c1f508019550034549fc"): struct{}{},
ParseID("b8a6bcdddef5c0f542b4648b2ef79bc0ed4377d4109755d2fb78aff11e042663"): struct{}{},
ParseID("5714f7274a8aa69b1692916739dc3835d09aac5395946b8ec4f58e563947199a"): struct{}{},
ParseID("b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c"): struct{}{},
ParseID("08d0444e9987fa6e35ce4232b2b71473e1a8f66b2f9664cc44dc57aad3c5a63a"): struct{}{},
ParseID("aa79d596dbd4c863e5400deaca869830888fe1ce9f51b4a983f532c77f16a596"): struct{}{},
ParseID("b2396c92781307111accf2ebb1cd62b58134b744d90cb6f153ca456a98dc3e76"): struct{}{},
ParseID("5249af22d3b2acd6da8048ac37b2a87fa346fabde55ed23bb866f7618843c9fe"): struct{}{},
ParseID("f41c2089a9d58a4b0bf39369fa37588e6578c928aea8e90a4490a6315b9905c1"): struct{}{},
},
},
}
func TestRepackerFindPacks(t *testing.T) {
WithTestEnvironment(t, checkerTestData, func(repodir string) {
repo := OpenLocalRepo(t, repodir)
OK(t, repo.LoadIndex())
for _, test := range findPackTests {
packIDs, err := checker.FindPacksForBlobs(repo, test.blobIDs)
OK(t, err)
Equals(t, test.packIDs, packIDs)
}
for _, test := range findBlobTests {
blobs, err := checker.FindBlobsForPacks(repo, test.packIDs)
OK(t, err)
Assert(t, test.blobIDs.Equals(blobs),
"list of blobs for packs %v does not match, expected:\n %v\ngot:\n %v",
test.packIDs, test.blobIDs, blobs)
}
})
}
func TestRepacker(t *testing.T) {
WithTestEnvironment(t, checkerTestData, func(repodir string) {
repo := OpenLocalRepo(t, repodir)
OK(t, repo.LoadIndex())
repo.Backend().Remove(backend.Snapshot, "c2b53c5e6a16db92fbb9aa08bd2794c58b379d8724d661ee30d20898bdfdff22")
unusedBlobs := backend.IDSet{
ParseID("5714f7274a8aa69b1692916739dc3835d09aac5395946b8ec4f58e563947199a"): struct{}{},
ParseID("08d0444e9987fa6e35ce4232b2b71473e1a8f66b2f9664cc44dc57aad3c5a63a"): struct{}{},
ParseID("356493f0b00a614d36c698591bbb2b1d801932d85328c1f508019550034549fc"): struct{}{},
ParseID("b8a6bcdddef5c0f542b4648b2ef79bc0ed4377d4109755d2fb78aff11e042663"): struct{}{},
}
chkr := checker.New(repo)
_, errs := chkr.LoadIndex()
OKs(t, errs)
errs = checkStruct(chkr)
OKs(t, errs)
list := backend.NewIDSet(chkr.UnusedBlobs()...)
if !unusedBlobs.Equals(list) {
t.Fatalf("expected unused blobs:\n %v\ngot:\n %v", unusedBlobs, list)
}
repacker := checker.NewRepacker(repo, unusedBlobs)
OK(t, repacker.Repack())
chkr = checker.New(repo)
_, errs = chkr.LoadIndex()
OKs(t, errs)
OKs(t, checkPacks(chkr))
OKs(t, checkStruct(chkr))
blobs := chkr.UnusedBlobs()
Assert(t, len(blobs) == 0,
"expected zero unused blobs, got %v", blobs)
})
}

View File

@ -0,0 +1,53 @@
package checker
import (
"restic/repository"
"testing"
)
// TestCheckRepo runs the checker on repo.
func TestCheckRepo(t testing.TB, repo *repository.Repository) {
chkr := New(repo)
hints, errs := chkr.LoadIndex()
if len(errs) != 0 {
t.Fatalf("errors loading index: %v", errs)
}
if len(hints) != 0 {
t.Fatalf("unexpected hints from loading index: %v", hints)
}
done := make(chan struct{})
defer close(done)
// packs
errChan := make(chan error)
go chkr.Packs(errChan, done)
for err := range errChan {
t.Error(err)
}
// structure
errChan = make(chan error)
go chkr.Structure(errChan, done)
for err := range errChan {
t.Error(err)
}
// unused blobs
blobs := chkr.UnusedBlobs()
if len(blobs) > 0 {
t.Errorf("unused blobs found: %v", blobs)
}
// read data
errChan = make(chan error)
go chkr.ReadData(nil, errChan, done)
for err := range errChan {
t.Error(err)
}
}

src/restic/find.go (new file, 43 lines)
View File

@ -0,0 +1,43 @@
package restic
import (
"restic/backend"
"restic/pack"
"restic/repository"
)
// FindUsedBlobs traverses the tree ID and adds all seen blobs (trees and data
// blobs) to the set blobs. The tree blobs in the `seen` BlobSet will not be visited
// again.
func FindUsedBlobs(repo *repository.Repository, treeID backend.ID, blobs pack.BlobSet, seen pack.BlobSet) error {
blobs.Insert(pack.Handle{ID: treeID, Type: pack.Tree})
tree, err := LoadTree(repo, treeID)
if err != nil {
return err
}
for _, node := range tree.Nodes {
switch node.Type {
case "file":
for _, blob := range node.Content {
blobs.Insert(pack.Handle{ID: blob, Type: pack.Data})
}
case "dir":
subtreeID := *node.Subtree
h := pack.Handle{ID: subtreeID, Type: pack.Tree}
if seen.Has(h) {
continue
}
seen.Insert(h)
err := FindUsedBlobs(repo, subtreeID, blobs, seen)
if err != nil {
return err
}
}
}
return nil
}

src/restic/find_test.go (new file, 138 lines)
View File

@ -0,0 +1,138 @@
package restic
import (
"bufio"
"encoding/json"
"flag"
"fmt"
"os"
"path/filepath"
"sort"
"testing"
"time"
"restic/pack"
"restic/repository"
)
func loadIDSet(t testing.TB, filename string) pack.BlobSet {
f, err := os.Open(filename)
if err != nil {
t.Logf("unable to open golden file %v: %v", filename, err)
return pack.NewBlobSet()
}
sc := bufio.NewScanner(f)
blobs := pack.NewBlobSet()
for sc.Scan() {
var h pack.Handle
err := json.Unmarshal([]byte(sc.Text()), &h)
if err != nil {
t.Errorf("file %v contained invalid blob: %#v", filename, err)
continue
}
blobs.Insert(h)
}
if err = f.Close(); err != nil {
t.Errorf("closing file %v failed with error %v", filename, err)
}
return blobs
}
func saveIDSet(t testing.TB, filename string, s pack.BlobSet) {
f, err := os.OpenFile(filename, os.O_WRONLY|os.O_CREATE, 0644)
if err != nil {
t.Fatalf("unable to update golden file %v: %v", filename, err)
return
}
var hs pack.Handles
for h := range s {
hs = append(hs, h)
}
sort.Sort(hs)
enc := json.NewEncoder(f)
for _, h := range hs {
err = enc.Encode(h)
if err != nil {
t.Fatalf("Encode() returned error: %v", err)
}
}
if err = f.Close(); err != nil {
t.Fatalf("close file %v returned error: %v", filename, err)
}
}
var updateGoldenFiles = flag.Bool("update", false, "update golden files in testdata/")
const (
findTestSnapshots = 3
findTestDepth = 2
)
var findTestTime = time.Unix(1469960361, 23)
func TestFindUsedBlobs(t *testing.T) {
repo, cleanup := repository.TestRepository(t)
defer cleanup()
var snapshots []*Snapshot
for i := 0; i < findTestSnapshots; i++ {
sn := TestCreateSnapshot(t, repo, findTestTime.Add(time.Duration(i)*time.Second), findTestDepth, 0)
t.Logf("snapshot %v saved, tree %v", sn.ID().Str(), sn.Tree.Str())
snapshots = append(snapshots, sn)
}
for i, sn := range snapshots {
usedBlobs := pack.NewBlobSet()
err := FindUsedBlobs(repo, *sn.Tree, usedBlobs, pack.NewBlobSet())
if err != nil {
t.Errorf("FindUsedBlobs returned error: %v", err)
continue
}
if len(usedBlobs) == 0 {
t.Errorf("FindUsedBlobs returned an empty set")
continue
}
goldenFilename := filepath.Join("testdata", fmt.Sprintf("used_blobs_snapshot%d", i))
want := loadIDSet(t, goldenFilename)
if !want.Equals(usedBlobs) {
t.Errorf("snapshot %d: wrong list of blobs returned:\n missing blobs: %v\n extra blobs: %v",
i, want.Sub(usedBlobs), usedBlobs.Sub(want))
}
if *updateGoldenFiles {
saveIDSet(t, goldenFilename, usedBlobs)
}
}
}
func BenchmarkFindUsedBlobs(b *testing.B) {
repo, cleanup := repository.TestRepository(b)
defer cleanup()
sn := TestCreateSnapshot(b, repo, findTestTime, findTestDepth, 0)
b.ResetTimer()
for i := 0; i < b.N; i++ {
seen := pack.NewBlobSet()
blobs := pack.NewBlobSet()
err := FindUsedBlobs(repo, *sn.Tree, blobs, seen)
if err != nil {
b.Error(err)
}
b.Logf("found %v blobs", len(blobs))
}
}

View File

@ -27,8 +27,8 @@ var _ = fs.HandleReleaser(&file{})
// BlobLoader is an abstracted repository with a reduced set of methods used
// for fuse operations.
type BlobLoader interface {
- LookupBlobSize(backend.ID) (uint, error)
+ LookupBlobSize(backend.ID, pack.BlobType) (uint, error)
- LoadBlob(pack.BlobType, backend.ID, []byte) ([]byte, error)
+ LoadBlob(backend.ID, pack.BlobType, []byte) ([]byte, error)
}
type file struct {
@ -53,7 +53,7 @@ func newFile(repo BlobLoader, node *restic.Node, ownerIsRoot bool) (*file, error
var bytes uint64
sizes := make([]uint, len(node.Content))
for i, id := range node.Content {
- size, err := repo.LookupBlobSize(id)
+ size, err := repo.LookupBlobSize(id, pack.Data)
if err != nil {
return nil, err
}
@ -110,7 +110,7 @@ func (f *file) getBlobAt(i int) (blob []byte, err error) {
buf = make([]byte, f.sizes[i])
}
- blob, err = f.repo.LoadBlob(pack.Data, f.node.Content[i], buf)
+ blob, err = f.repo.LoadBlob(f.node.Content[i], pack.Data, buf)
if err != nil {
debug.Log("file.getBlobAt", "LoadBlob(%v, %v) failed: %v", f.node.Name, f.node.Content[i], err)
return nil, err

View File

@ -26,7 +26,7 @@ func NewMockRepo(content map[backend.ID][]byte) *MockRepo {
return &MockRepo{blobs: content} return &MockRepo{blobs: content}
} }
func (m *MockRepo) LookupBlobSize(id backend.ID) (uint, error) { func (m *MockRepo) LookupBlobSize(id backend.ID, t pack.BlobType) (uint, error) {
buf, ok := m.blobs[id] buf, ok := m.blobs[id]
if !ok { if !ok {
return 0, errors.New("blob not found") return 0, errors.New("blob not found")
@ -35,8 +35,8 @@ func (m *MockRepo) LookupBlobSize(id backend.ID) (uint, error) {
return uint(len(buf)), nil
}
- func (m *MockRepo) LoadBlob(t pack.BlobType, id backend.ID, buf []byte) ([]byte, error) {
+ func (m *MockRepo) LoadBlob(id backend.ID, t pack.BlobType, buf []byte) ([]byte, error) {
- size, err := m.LookupBlobSize(id)
+ size, err := m.LookupBlobSize(id, t)
if err != nil {
return nil, err
}

src/restic/index/index.go (new file, 338 lines)
View File

@ -0,0 +1,338 @@
// Package index contains various data structures for indexing content in a repository or backend.
package index
import (
"errors"
"fmt"
"os"
"restic"
"restic/backend"
"restic/debug"
"restic/list"
"restic/pack"
"restic/types"
"restic/worker"
)
// Pack contains information about the contents of a pack.
type Pack struct {
Size int64
Entries []pack.Blob
}
// Blob contains information about a blob.
type Blob struct {
Size int64
Packs backend.IDSet
}
// Index contains information about blobs and packs stored in a repo.
type Index struct {
Packs map[backend.ID]Pack
Blobs map[pack.Handle]Blob
IndexIDs backend.IDSet
}
func newIndex() *Index {
return &Index{
Packs: make(map[backend.ID]Pack),
Blobs: make(map[pack.Handle]Blob),
IndexIDs: backend.NewIDSet(),
}
}
// New creates a new index for repo from scratch.
func New(repo types.Repository, p *restic.Progress) (*Index, error) {
done := make(chan struct{})
defer close(done)
p.Start()
defer p.Done()
ch := make(chan worker.Job)
go list.AllPacks(repo, ch, done)
idx := newIndex()
for job := range ch {
p.Report(restic.Stat{Blobs: 1})
packID := job.Data.(backend.ID)
if job.Error != nil {
fmt.Fprintf(os.Stderr, "unable to list pack %v: %v\n", packID.Str(), job.Error)
continue
}
j := job.Result.(list.Result)
debug.Log("Index.New", "pack %v contains %d blobs", packID.Str(), len(j.Entries()))
err := idx.AddPack(packID, j.Size(), j.Entries())
if err != nil {
return nil, err
}
p := Pack{Entries: j.Entries(), Size: j.Size()}
idx.Packs[packID] = p
}
return idx, nil
}
const loadIndexParallelism = 20
type packJSON struct {
ID backend.ID `json:"id"`
Blobs []blobJSON `json:"blobs"`
}
type blobJSON struct {
ID backend.ID `json:"id"`
Type pack.BlobType `json:"type"`
Offset uint `json:"offset"`
Length uint `json:"length"`
}
type indexJSON struct {
Supersedes backend.IDs `json:"supersedes,omitempty"`
Packs []*packJSON `json:"packs"`
}
func loadIndexJSON(repo types.Repository, id backend.ID) (*indexJSON, error) {
debug.Log("index.loadIndexJSON", "process index %v\n", id.Str())
var idx indexJSON
err := repo.LoadJSONUnpacked(backend.Index, id, &idx)
if err != nil {
return nil, err
}
return &idx, nil
}
// Load creates an index by loading all index files from the repo.
func Load(repo types.Repository, p *restic.Progress) (*Index, error) {
debug.Log("index.Load", "loading indexes")
p.Start()
defer p.Done()
done := make(chan struct{})
defer close(done)
supersedes := make(map[backend.ID]backend.IDSet)
results := make(map[backend.ID]map[backend.ID]Pack)
index := newIndex()
for id := range repo.List(backend.Index, done) {
p.Report(restic.Stat{Blobs: 1})
debug.Log("index.Load", "Load index %v", id.Str())
idx, err := loadIndexJSON(repo, id)
if err != nil {
return nil, err
}
res := make(map[backend.ID]Pack)
supersedes[id] = backend.NewIDSet()
for _, sid := range idx.Supersedes {
debug.Log("index.Load", " index %v supersedes %v", id.Str(), sid)
supersedes[id].Insert(sid)
}
for _, jpack := range idx.Packs {
entries := make([]pack.Blob, 0, len(jpack.Blobs))
for _, blob := range jpack.Blobs {
entry := pack.Blob{
ID: blob.ID,
Type: blob.Type,
Offset: blob.Offset,
Length: blob.Length,
}
entries = append(entries, entry)
}
if err = index.AddPack(jpack.ID, 0, entries); err != nil {
return nil, err
}
}
results[id] = res
index.IndexIDs.Insert(id)
}
for superID, list := range supersedes {
for indexID := range list {
if _, ok := results[indexID]; !ok {
continue
}
debug.Log("index.Load", " removing index %v, superseded by %v", indexID.Str(), superID.Str())
fmt.Fprintf(os.Stderr, "index %v can be removed, superseded by index %v\n", indexID.Str(), superID.Str())
delete(results, indexID)
}
}
return index, nil
}
// AddPack adds a pack to the index. If this pack is already in the index, an
// error is returned.
func (idx *Index) AddPack(id backend.ID, size int64, entries []pack.Blob) error {
if _, ok := idx.Packs[id]; ok {
return fmt.Errorf("pack %v already present in the index", id.Str())
}
idx.Packs[id] = Pack{Size: size, Entries: entries}
for _, entry := range entries {
h := pack.Handle{ID: entry.ID, Type: entry.Type}
if _, ok := idx.Blobs[h]; !ok {
idx.Blobs[h] = Blob{
Size: int64(entry.Length),
Packs: backend.NewIDSet(),
}
}
idx.Blobs[h].Packs.Insert(id)
}
return nil
}
// RemovePack deletes a pack from the index.
func (idx *Index) RemovePack(id backend.ID) error {
if _, ok := idx.Packs[id]; !ok {
return fmt.Errorf("pack %v not found in the index", id.Str())
}
for _, blob := range idx.Packs[id].Entries {
h := pack.Handle{ID: blob.ID, Type: blob.Type}
idx.Blobs[h].Packs.Delete(id)
if len(idx.Blobs[h].Packs) == 0 {
delete(idx.Blobs, h)
}
}
delete(idx.Packs, id)
return nil
}
// DuplicateBlobs returns a list of blobs that are stored more than once in the
// repo.
func (idx *Index) DuplicateBlobs() (dups pack.BlobSet) {
dups = pack.NewBlobSet()
seen := pack.NewBlobSet()
for _, p := range idx.Packs {
for _, entry := range p.Entries {
h := pack.Handle{ID: entry.ID, Type: entry.Type}
if seen.Has(h) {
dups.Insert(h)
}
seen.Insert(h)
}
}
return dups
}
// PacksForBlobs returns the set of packs in which the blobs are contained.
func (idx *Index) PacksForBlobs(blobs pack.BlobSet) (packs backend.IDSet) {
packs = backend.NewIDSet()
for h := range blobs {
blob, ok := idx.Blobs[h]
if !ok {
continue
}
for id := range blob.Packs {
packs.Insert(id)
}
}
return packs
}
// Location describes the location of a blob in a pack.
type Location struct {
PackID backend.ID
pack.Blob
}
// ErrBlobNotFound is returned by FindBlob when the blob could not be found in
// the index.
var ErrBlobNotFound = errors.New("blob not found in index")
// FindBlob returns a list of packs and positions the blob can be found in.
func (idx *Index) FindBlob(h pack.Handle) ([]Location, error) {
blob, ok := idx.Blobs[h]
if !ok {
return nil, ErrBlobNotFound
}
result := make([]Location, 0, len(blob.Packs))
for packID := range blob.Packs {
pack, ok := idx.Packs[packID]
if !ok {
return nil, fmt.Errorf("pack %v not found in index", packID.Str())
}
for _, entry := range pack.Entries {
if entry.Type != h.Type {
continue
}
if !entry.ID.Equal(h.ID) {
continue
}
loc := Location{PackID: packID, Blob: entry}
result = append(result, loc)
}
}
return result, nil
}
// Save writes the complete index to the repo.
func (idx *Index) Save(repo types.Repository, supersedes backend.IDs) (backend.ID, error) {
packs := make(map[backend.ID][]pack.Blob, len(idx.Packs))
for id, p := range idx.Packs {
packs[id] = p.Entries
}
return Save(repo, packs, supersedes)
}
// Save writes a new index containing the given packs.
func Save(repo types.Repository, packs map[backend.ID][]pack.Blob, supersedes backend.IDs) (backend.ID, error) {
idx := &indexJSON{
Supersedes: supersedes,
Packs: make([]*packJSON, 0, len(packs)),
}
for packID, blobs := range packs {
b := make([]blobJSON, 0, len(blobs))
for _, blob := range blobs {
b = append(b, blobJSON{
ID: blob.ID,
Type: blob.Type,
Offset: blob.Offset,
Length: blob.Length,
})
}
p := &packJSON{
ID: packID,
Blobs: b,
}
idx.Packs = append(idx.Packs, p)
}
return repo.SaveJSONUnpacked(backend.Index, idx)
}
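
A short usage sketch may help here. This is a hypothetical example, not part of the commit: it assumes the restic source tree on GOPATH and an already-opened repository, and uses only the functions defined above.

package example

import (
    "fmt"

    "restic/index"
    "restic/repository"
)

// reportDuplicates scans all packs, builds an index from scratch and
// reports how many blobs are stored more than once, and in which packs.
func reportDuplicates(repo *repository.Repository) error {
    idx, err := index.New(repo, nil) // nil: no progress reporting
    if err != nil {
        return err
    }

    dups := idx.DuplicateBlobs()
    packs := idx.PacksForBlobs(dups)
    fmt.Printf("%d duplicate blobs in %d packs\n", len(dups), len(packs))
    return nil
}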


@@ -0,0 +1,352 @@
package index
import (
"math/rand"
"restic"
"restic/backend"
"restic/backend/local"
"restic/pack"
"restic/repository"
. "restic/test"
"testing"
"time"
)
var (
snapshotTime = time.Unix(1470492820, 207401672)
snapshots = 3
depth = 3
)
func createFilledRepo(t testing.TB, snapshots int, dup float32) (*repository.Repository, func()) {
repo, cleanup := repository.TestRepository(t)
for i := 0; i < 3; i++ {
restic.TestCreateSnapshot(t, repo, snapshotTime.Add(time.Duration(i)*time.Second), depth, dup)
}
return repo, cleanup
}
func validateIndex(t testing.TB, repo *repository.Repository, idx *Index) {
for id := range repo.List(backend.Data, nil) {
if _, ok := idx.Packs[id]; !ok {
t.Errorf("pack %v missing from index", id.Str())
}
}
}
func TestIndexNew(t *testing.T) {
repo, cleanup := createFilledRepo(t, 3, 0)
defer cleanup()
idx, err := New(repo, nil)
if err != nil {
t.Fatalf("New() returned error %v", err)
}
if idx == nil {
t.Fatalf("New() returned nil index")
}
validateIndex(t, repo, idx)
}
func TestIndexLoad(t *testing.T) {
repo, cleanup := createFilledRepo(t, 3, 0)
defer cleanup()
loadIdx, err := Load(repo, nil)
if err != nil {
t.Fatalf("Load() returned error %v", err)
}
if loadIdx == nil {
t.Fatalf("Load() returned nil index")
}
validateIndex(t, repo, loadIdx)
newIdx, err := New(repo, nil)
if err != nil {
t.Fatalf("New() returned error %v", err)
}
if len(loadIdx.Packs) != len(newIdx.Packs) {
t.Errorf("number of packs does not match: want %v, got %v",
len(loadIdx.Packs), len(newIdx.Packs))
}
validateIndex(t, repo, newIdx)
for packID, packNew := range newIdx.Packs {
packLoad, ok := loadIdx.Packs[packID]
if !ok {
t.Errorf("loaded index does not list pack %v", packID.Str())
continue
}
if len(packNew.Entries) != len(packLoad.Entries) {
t.Errorf(" number of entries in pack %v does not match: %d != %d\n %v\n %v",
packID.Str(), len(packNew.Entries), len(packLoad.Entries),
packNew.Entries, packLoad.Entries)
continue
}
for _, entryNew := range packNew.Entries {
found := false
for _, entryLoad := range packLoad.Entries {
if !entryLoad.ID.Equal(entryNew.ID) {
continue
}
if entryLoad.Type != entryNew.Type {
continue
}
if entryLoad.Offset != entryNew.Offset {
continue
}
if entryLoad.Length != entryNew.Length {
continue
}
found = true
break
}
if !found {
t.Errorf("blob not found in loaded index: %v", entryNew)
}
}
}
}
func openRepo(t testing.TB, dir, password string) *repository.Repository {
b, err := local.Open(dir)
if err != nil {
t.Fatalf("open backend %v failed: %v", dir, err)
}
r := repository.New(b)
err = r.SearchKey(password)
if err != nil {
t.Fatalf("unable to open repo with password: %v", err)
}
return r
}
func BenchmarkIndexNew(b *testing.B) {
repo, cleanup := createFilledRepo(b, 3, 0)
defer cleanup()
b.ResetTimer()
for i := 0; i < b.N; i++ {
idx, err := New(repo, nil)
if err != nil {
b.Fatalf("New() returned error %v", err)
}
if idx == nil {
b.Fatalf("New() returned nil index")
}
}
}
func TestIndexDuplicateBlobs(t *testing.T) {
repo, cleanup := createFilledRepo(t, 3, 0.01)
defer cleanup()
idx, err := New(repo, nil)
if err != nil {
t.Fatal(err)
}
dups := idx.DuplicateBlobs()
if len(dups) == 0 {
t.Errorf("no duplicate blobs found")
}
t.Logf("%d packs, %d unique blobs", len(idx.Packs), len(idx.Blobs))
packs := idx.PacksForBlobs(dups)
if len(packs) == 0 {
t.Errorf("no packs with duplicate blobs found")
}
t.Logf("%d packs with duplicate blobs", len(packs))
}
func loadIndex(t testing.TB, repo *repository.Repository) *Index {
idx, err := Load(repo, nil)
if err != nil {
t.Fatalf("Load() returned error %v", err)
}
return idx
}
func TestIndexSave(t *testing.T) {
repo, cleanup := createFilledRepo(t, 3, 0)
defer cleanup()
idx := loadIndex(t, repo)
packs := make(map[backend.ID][]pack.Blob)
for id := range idx.Packs {
if rand.Float32() < 0.5 {
packs[id] = idx.Packs[id].Entries
}
}
t.Logf("save %d/%d packs in a new index\n", len(packs), len(idx.Packs))
id, err := Save(repo, packs, idx.IndexIDs.List())
if err != nil {
t.Fatalf("unable to save new index: %v", err)
}
t.Logf("new index saved as %v", id.Str())
for id := range idx.IndexIDs {
t.Logf("remove index %v", id.Str())
err = repo.Backend().Remove(backend.Index, id.String())
if err != nil {
t.Errorf("error removing index %v: %v", id, err)
}
}
idx2 := loadIndex(t, repo)
t.Logf("load new index with %d packs", len(idx2.Packs))
if len(idx2.Packs) != len(packs) {
t.Errorf("wrong number of packs in new index, want %d, got %d", len(packs), len(idx2.Packs))
}
for id := range packs {
if _, ok := idx2.Packs[id]; !ok {
t.Errorf("pack %v is not contained in new index", id.Str())
}
}
for id := range idx2.Packs {
if _, ok := packs[id]; !ok {
t.Errorf("pack %v is not contained in new index", id.Str())
}
}
}
func TestIndexAddRemovePack(t *testing.T) {
repo, cleanup := createFilledRepo(t, 3, 0)
defer cleanup()
idx, err := Load(repo, nil)
if err != nil {
t.Fatalf("Load() returned error %v", err)
}
done := make(chan struct{})
defer close(done)
packID := <-repo.List(backend.Data, done)
t.Logf("selected pack %v", packID.Str())
blobs := idx.Packs[packID].Entries
idx.RemovePack(packID)
if _, ok := idx.Packs[packID]; ok {
t.Errorf("removed pack %v found in index.Packs", packID.Str())
}
for _, blob := range blobs {
h := pack.Handle{ID: blob.ID, Type: blob.Type}
_, err := idx.FindBlob(h)
if err == nil {
t.Errorf("removed blob %v found in index", h)
}
if _, ok := idx.Blobs[h]; ok {
t.Errorf("removed blob %v found in index.Blobs", h)
}
}
}
// example index serialization from doc/Design.md
var docExample = []byte(`
{
"supersedes": [
"ed54ae36197f4745ebc4b54d10e0f623eaaaedd03013eb7ae90df881b7781452"
],
"packs": [
{
"id": "73d04e6125cf3c28a299cc2f3cca3b78ceac396e4fcf9575e34536b26782413c",
"blobs": [
{
"id": "3ec79977ef0cf5de7b08cd12b874cd0f62bbaf7f07f3497a5b1bbcc8cb39b1ce",
"type": "data",
"offset": 0,
"length": 25
},{
"id": "9ccb846e60d90d4eb915848add7aa7ea1e4bbabfc60e573db9f7bfb2789afbae",
"type": "tree",
"offset": 38,
"length": 100
},
{
"id": "d3dc577b4ffd38cc4b32122cabf8655a0223ed22edfd93b353dc0c3f2b0fdf66",
"type": "data",
"offset": 150,
"length": 123
}
]
}
]
}
`)
func TestIndexLoadDocReference(t *testing.T) {
repo, cleanup := repository.TestRepository(t)
defer cleanup()
id, err := repo.SaveUnpacked(backend.Index, docExample)
if err != nil {
t.Fatalf("SaveUnpacked() returned error %v", err)
}
t.Logf("index saved as %v", id.Str())
idx := loadIndex(t, repo)
blobID := ParseID("d3dc577b4ffd38cc4b32122cabf8655a0223ed22edfd93b353dc0c3f2b0fdf66")
locs, err := idx.FindBlob(pack.Handle{ID: blobID, Type: pack.Data})
if err != nil {
t.Errorf("FindBlob() returned error %v", err)
}
if len(locs) != 1 {
t.Errorf("blob found %d times, expected just one", len(locs))
}
l := locs[0]
if !l.ID.Equal(blobID) {
t.Errorf("blob IDs are not equal: %v != %v", l.ID, blobID)
}
if l.Type != pack.Data {
t.Errorf("want type %v, got %v", pack.Data, l.Type)
}
if l.Offset != 150 {
t.Errorf("wrong offset, want %d, got %v", 150, l.Offset)
}
if l.Length != 123 {
t.Errorf("wrong length, want %d, got %v", 123, l.Length)
}
}

src/restic/list/list.go (new file, 67 lines)

@@ -0,0 +1,67 @@
package list
import (
"restic/backend"
"restic/pack"
"restic/worker"
)
const listPackWorkers = 10
// Lister lists the packs in a repo and the blobs inside each pack.
type Lister interface {
List(backend.Type, <-chan struct{}) <-chan backend.ID
ListPack(backend.ID) ([]pack.Blob, int64, error)
}
// Result is returned in the channel from AllPacks.
type Result struct {
packID backend.ID
size int64
entries []pack.Blob
}
// PackID returns the pack ID of this result.
func (l Result) PackID() backend.ID {
return l.packID
}
// Size returns the size of the pack.
func (l Result) Size() int64 {
return l.size
}
// Entries returns a list of all blobs saved in the pack.
func (l Result) Entries() []pack.Blob {
return l.entries
}
// AllPacks sends the contents of all packs to ch.
func AllPacks(repo Lister, ch chan<- worker.Job, done <-chan struct{}) {
f := func(job worker.Job, done <-chan struct{}) (interface{}, error) {
packID := job.Data.(backend.ID)
entries, size, err := repo.ListPack(packID)
return Result{
packID: packID,
size: size,
entries: entries,
}, err
}
jobCh := make(chan worker.Job)
wp := worker.New(listPackWorkers, f, jobCh, ch)
go func() {
defer close(jobCh)
for id := range repo.List(backend.Data, done) {
select {
case jobCh <- worker.Job{Data: id}:
case <-done:
return
}
}
}()
wp.Wait()
}
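
As a hypothetical illustration (not part of the commit), a caller drains the channel fed by AllPacks like this; the loop mirrors the one in index.New above and assumes the restic source tree:

package example

import (
    "fmt"

    "restic/backend"
    "restic/list"
    "restic/worker"
)

// printPacks lists every pack in the repo with its blob count and size.
func printPacks(repo list.Lister) {
    done := make(chan struct{})
    defer close(done)

    ch := make(chan worker.Job)
    go list.AllPacks(repo, ch, done)

    for job := range ch {
        packID := job.Data.(backend.ID)
        if job.Error != nil {
            fmt.Printf("pack %v: %v\n", packID.Str(), job.Error)
            continue
        }

        res := job.Result.(list.Result)
        fmt.Printf("pack %v: %d blobs, %d bytes\n",
            packID.Str(), len(res.Entries()), res.Size())
    }
}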


@@ -188,7 +188,7 @@ var staleTimeout = 30 * time.Minute
 // older than 30 minutes or if it was created on the current machine and the
 // process isn't alive any more.
 func (l *Lock) Stale() bool {
-    debug.Log("Lock.Stale", "testing if lock %v for process %d is stale", l.lockID.Str(), l.PID)
+    debug.Log("Lock.Stale", "testing if lock %v for process %d is stale", l, l.PID)
     if time.Now().Sub(l.Time) > staleTimeout {
         debug.Log("Lock.Stale", "lock is stale, timestamp is too old: %v\n", l.Time)
         return true


@@ -12,12 +12,13 @@ import (
     "runtime"

-    "github.com/juju/errors"
     "restic/backend"
     "restic/debug"
     "restic/fs"
     "restic/pack"
     "restic/repository"
+
+    "github.com/juju/errors"
 )

 // Node is a file, directory or other item in a backup.
@@ -215,17 +216,17 @@ func (node Node) createFileAt(path string, repo *repository.Repository) error {
     var buf []byte
     for _, id := range node.Content {
-        blob, err := repo.Index().Lookup(id)
+        size, err := repo.LookupBlobSize(id, pack.Data)
         if err != nil {
             return err
         }

         buf = buf[:cap(buf)]
-        if uint(len(buf)) < blob.Length {
-            buf = make([]byte, blob.Length)
+        if uint(len(buf)) < size {
+            buf = make([]byte, size)
         }

-        buf, err := repo.LoadBlob(pack.Data, id, buf)
+        buf, err := repo.LoadBlob(id, pack.Data, buf)
         if err != nil {
             return errors.Annotate(err, "Load")
         }

src/restic/pack/blob_set.go (new file, 109 lines)

@@ -0,0 +1,109 @@
package pack
import "sort"
// BlobSet is a set of blobs.
type BlobSet map[Handle]struct{}
// NewBlobSet returns a new BlobSet, populated with ids.
func NewBlobSet(handles ...Handle) BlobSet {
m := make(BlobSet)
for _, h := range handles {
m[h] = struct{}{}
}
return m
}
// Has returns true iff id is contained in the set.
func (s BlobSet) Has(h Handle) bool {
_, ok := s[h]
return ok
}
// Insert adds id to the set.
func (s BlobSet) Insert(h Handle) {
s[h] = struct{}{}
}
// Delete removes id from the set.
func (s BlobSet) Delete(h Handle) {
delete(s, h)
}
// Equals returns true iff s equals other.
func (s BlobSet) Equals(other BlobSet) bool {
if len(s) != len(other) {
return false
}
for h := range s {
if _, ok := other[h]; !ok {
return false
}
}
return true
}
// Merge adds the blobs in other to the current set.
func (s BlobSet) Merge(other BlobSet) {
for h := range other {
s.Insert(h)
}
}
// Intersect returns a new set containing the handles that are present in both sets.
func (s BlobSet) Intersect(other BlobSet) (result BlobSet) {
result = NewBlobSet()
set1 := s
set2 := other
// iterate over the smaller set
if len(set2) < len(set1) {
set1, set2 = set2, set1
}
for h := range set1 {
if set2.Has(h) {
result.Insert(h)
}
}
return result
}
// Sub returns a new set containing all handles that are present in s but not in
// other.
func (s BlobSet) Sub(other BlobSet) (result BlobSet) {
result = NewBlobSet()
for h := range s {
if !other.Has(h) {
result.Insert(h)
}
}
return result
}
// List returns a slice of all Handles in the set.
func (s BlobSet) List() Handles {
list := make(Handles, 0, len(s))
for h := range s {
list = append(list, h)
}
sort.Sort(list)
return list
}
func (s BlobSet) String() string {
str := s.List().String()
if len(str) < 2 {
return "{}"
}
return "{" + str[1:len(str)-1] + "}"
}
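
A hypothetical sketch (not from the commit) of the set algebra this type enables; prune-style reasoning boils down to exactly these operations. The handles are fabricated for the example:

package example

import (
    "fmt"

    "restic/backend"
    "restic/pack"
)

func demoBlobSets() {
    a := pack.Handle{ID: backend.Hash([]byte("a")), Type: pack.Data}
    b := pack.Handle{ID: backend.Hash([]byte("b")), Type: pack.Data}
    c := pack.Handle{ID: backend.Hash([]byte("c")), Type: pack.Tree}

    inRepo := pack.NewBlobSet(a, b, c) // everything stored
    inUse := pack.NewBlobSet(a, c)     // still referenced by snapshots

    // Stored but no longer referenced: candidates for removal.
    garbage := inRepo.Sub(inUse)
    fmt.Println(garbage) // a set containing only b
}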

src/restic/pack/handle.go (new file, 51 lines)

@@ -0,0 +1,51 @@
package pack
import (
"fmt"
"restic/backend"
)
// Handle identifies a blob of a given type.
type Handle struct {
ID backend.ID
Type BlobType
}
func (h Handle) String() string {
return fmt.Sprintf("<%s/%s>", h.Type, h.ID.Str())
}
// Handles is an ordered list of Handles that implements sort.Interface.
type Handles []Handle
func (h Handles) Len() int {
return len(h)
}
func (h Handles) Less(i, j int) bool {
for k, b := range h[i].ID {
if b == h[j].ID[k] {
continue
}
if b < h[j].ID[k] {
return true
}
return false
}
return h[i].Type < h[j].Type
}
func (h Handles) Swap(i, j int) {
h[i], h[j] = h[j], h[i]
}
func (h Handles) String() string {
elements := make([]string, 0, len(h))
for _, e := range h {
elements = append(elements, e.String())
}
return fmt.Sprintf("%v", elements)
}
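
For illustration only (not part of the commit): because Handles implements sort.Interface, a list of handles can be put into a stable order, which is what BlobSet.List above relies on. Assumes the restic source tree:

package example

import (
    "fmt"
    "sort"

    "restic/backend"
    "restic/pack"
)

func sortedHandles() {
    hs := pack.Handles{
        {ID: backend.Hash([]byte("x")), Type: pack.Tree},
        {ID: backend.Hash([]byte("y")), Type: pack.Data},
    }
    sort.Sort(hs) // orders by ID bytes, then by blob type
    fmt.Println(hs)
}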

src/restic/pack/loader.go (new file, 43 lines)

@@ -0,0 +1,43 @@
package pack
import (
"errors"
"restic/backend"
)
// Loader loads data from somewhere at a given offset. In contrast to
// io.ReaderAt, off may be negative, in which case it references a position
// relative to the end of the file (similar to Seek()).
type Loader interface {
Load(p []byte, off int64) (int, error)
}
// BackendLoader creates a Loader from a Backend and a Handle.
type BackendLoader struct {
Backend backend.Backend
Handle backend.Handle
}
// Load returns data at the given offset.
func (l BackendLoader) Load(p []byte, off int64) (int, error) {
return l.Backend.Load(l.Handle, p, off)
}
// BufferLoader allows using a buffer as a Loader.
type BufferLoader []byte
// Load returns data at the given offset.
func (b BufferLoader) Load(p []byte, off int64) (int, error) {
switch {
case off > int64(len(b)):
return 0, errors.New("offset is larger than data")
case off < -int64(len(b)):
off = 0
case off < 0:
off = int64(len(b)) + off
}
b = b[off:]
return copy(p, b), nil
}
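
A hypothetical example (not in the commit) of the negative-offset convention: off = -4 reads the last four bytes, which is exactly how NewUnpacker (below) grabs a pack header without knowing the pack's length up front:

package example

import (
    "fmt"

    "restic/pack"
)

func tailRead() {
    ldr := pack.BufferLoader([]byte("0123456789"))

    buf := make([]byte, 4)
    n, err := ldr.Load(buf, -4) // offset relative to the end
    if err != nil {
        panic(err)
    }
    fmt.Println(string(buf[:n])) // prints "6789"
}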


@@ -17,8 +17,9 @@ type BlobType uint8

 // These are the blob types that can be stored in a pack.
 const (
-    Data BlobType = 0
-    Tree          = 1
+    Invalid BlobType = iota
+    Data
+    Tree
 )

 func (t BlobType) String() string {
@@ -66,15 +67,9 @@ type Blob struct {
     Offset uint
 }

-// GetReader returns an io.Reader for the blob entry e.
-func (e Blob) GetReader(rd io.ReadSeeker) (io.Reader, error) {
-    // seek to the correct location
-    _, err := rd.Seek(int64(e.Offset), 0)
-    if err != nil {
-        return nil, err
-    }
-
-    return io.LimitReader(rd, int64(e.Length)), nil
+func (b Blob) String() string {
+    return fmt.Sprintf("<Blob %v/%v len %v, off %v>",
+        b.ID.Str(), b.Type, b.Length, b.Offset)
 }

 // Packer is used to create a new Pack.
@@ -118,7 +113,7 @@ var entrySize = uint(binary.Size(BlobType(0)) + binary.Size(uint32(0)) + backend.IDSize)

 // headerEntry is used with encoding/binary to read and write header entries
 type headerEntry struct {
-    Type   BlobType
+    Type   uint8
     Length uint32
     ID     [backend.IDSize]byte
 }
@@ -176,11 +171,19 @@ func (p *Packer) Finalize() (uint, error) {
 func (p *Packer) writeHeader(wr io.Writer) (bytesWritten uint, err error) {
     for _, b := range p.blobs {
         entry := headerEntry{
-            Type:   b.Type,
             Length: uint32(b.Length),
             ID:     b.ID,
         }

+        switch b.Type {
+        case Data:
+            entry.Type = 0
+        case Tree:
+            entry.Type = 1
+        default:
+            return 0, fmt.Errorf("invalid blob type %v", b.Type)
+        }
+
         err := binary.Write(wr, binary.LittleEndian, entry)
         if err != nil {
             return bytesWritten, err
@@ -232,42 +235,61 @@ type Unpacker struct {
     k *crypto.Key
 }

+const preloadHeaderSize = 2048
+
 // NewUnpacker returns a pointer to Unpacker which can be used to read
 // individual Blobs from a pack.
-func NewUnpacker(k *crypto.Key, rd io.ReadSeeker) (*Unpacker, error) {
+func NewUnpacker(k *crypto.Key, ldr Loader) (*Unpacker, error) {
     var err error
-    ls := binary.Size(uint32(0))

-    // reset to the end to read header length
-    _, err = rd.Seek(-int64(ls), 2)
-    if err != nil {
-        return nil, fmt.Errorf("seeking to read header length failed: %v", err)
+    // read the last 2048 byte, this will mostly be enough for the header, so
+    // we do not need another round trip.
+    buf := make([]byte, preloadHeaderSize)
+    n, err := ldr.Load(buf, -int64(len(buf)))
+    if err == io.ErrUnexpectedEOF {
+        err = nil
+        buf = buf[:n]
     }

-    var length uint32
-    err = binary.Read(rd, binary.LittleEndian, &length)
     if err != nil {
-        return nil, fmt.Errorf("reading header length failed: %v", err)
+        return nil, fmt.Errorf("Load at -%d failed: %v", len(buf), err)
     }
+    buf = buf[:n]
+
+    bs := binary.Size(uint32(0))
+    p := len(buf) - bs
+
+    // read the length from the end of the buffer
+    length := int(binary.LittleEndian.Uint32(buf[p : p+bs]))
+    buf = buf[:p]
+
+    // if the header is longer than the preloaded buffer, call the loader again.
+    if length > len(buf) {
+        buf = make([]byte, length)
+        n, err := ldr.Load(buf, -int64(len(buf)+bs))
+        if err != nil {
+            return nil, fmt.Errorf("Load at -%d failed: %v", len(buf), err)
+        }
+        buf = buf[:n]
+    }

-    // reset to the beginning of the header
-    _, err = rd.Seek(-int64(ls)-int64(length), 2)
-    if err != nil {
-        return nil, fmt.Errorf("seeking to read header length failed: %v", err)
-    }
+    buf = buf[len(buf)-length:]

     // read header
-    hrd, err := crypto.DecryptFrom(k, io.LimitReader(rd, int64(length)))
+    hdr, err := crypto.Decrypt(k, buf, buf)
     if err != nil {
         return nil, err
     }

+    rd := bytes.NewReader(hdr)
+
     var entries []Blob

     pos := uint(0)
     for {
         e := headerEntry{}
-        err = binary.Read(hrd, binary.LittleEndian, &e)
+        err = binary.Read(rd, binary.LittleEndian, &e)
         if err == io.EOF {
             break
         }
@@ -276,21 +298,31 @@ func NewUnpacker(k *crypto.Key, rd io.ReadSeeker) (*Unpacker, error) {
             return nil, err
         }

-        entries = append(entries, Blob{
-            Type:   e.Type,
+        entry := Blob{
             Length: uint(e.Length),
             ID:     e.ID,
             Offset: pos,
-        })
+        }
+
+        switch e.Type {
+        case 0:
+            entry.Type = Data
+        case 1:
+            entry.Type = Tree
+        default:
+            return nil, fmt.Errorf("invalid type %d", e.Type)
+        }
+
+        entries = append(entries, entry)

         pos += uint(e.Length)
     }

-    p := &Unpacker{
+    up := &Unpacker{
         rd:      rd,
         k:       k,
         Entries: entries,
     }

-    return p, nil
+    return up, nil
 }
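
Tying Loader and Unpacker together, a hypothetical sketch (not from the commit): parse a pack held in memory and walk its entries. key is an assumed *crypto.Key; packData would normally come from a backend:

package example

import (
    "fmt"

    "restic/crypto"
    "restic/pack"
)

func listEntries(key *crypto.Key, packData []byte) error {
    // The Unpacker reads the header from the end of the buffer,
    // using at most two Load calls.
    unp, err := pack.NewUnpacker(key, pack.BufferLoader(packData))
    if err != nil {
        return err
    }

    for _, e := range unp.Entries {
        fmt.Printf("%v: %d bytes at offset %d\n", e.Type, e.Length, e.Offset)
    }
    return nil
}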


@@ -7,7 +7,6 @@ import (
     "encoding/binary"
     "encoding/json"
     "io"
-    "io/ioutil"
     "testing"

     "restic/backend"
@@ -17,14 +16,14 @@ import (
     . "restic/test"
 )

-var lengths = []int{23, 31650, 25860, 10928, 13769, 19862, 5211, 127, 13690, 30231}
+var testLens = []int{23, 31650, 25860, 10928, 13769, 19862, 5211, 127, 13690, 30231}

 type Buf struct {
     data []byte
     id   backend.ID
 }

-func newPack(t testing.TB, k *crypto.Key) ([]Buf, []byte, uint) {
+func newPack(t testing.TB, k *crypto.Key, lengths []int) ([]Buf, []byte, uint) {
     bufs := []Buf{}

     for _, l := range lengths {
@@ -48,15 +47,15 @@ func newPack(t testing.TB, k *crypto.Key) ([]Buf, []byte, uint) {
     return bufs, packData, p.Size()
 }

-func verifyBlobs(t testing.TB, bufs []Buf, k *crypto.Key, rd io.ReadSeeker, packSize uint) {
+func verifyBlobs(t testing.TB, bufs []Buf, k *crypto.Key, ldr pack.Loader, packSize uint) {
     written := 0
-    for _, l := range lengths {
-        written += l
+    for _, buf := range bufs {
+        written += len(buf.data)
     }
     // header length
     written += binary.Size(uint32(0))
     // header
-    written += len(lengths) * (binary.Size(pack.BlobType(0)) + binary.Size(uint32(0)) + backend.IDSize)
+    written += len(bufs) * (binary.Size(pack.BlobType(0)) + binary.Size(uint32(0)) + backend.IDSize)
     // header crypto
     written += crypto.Extension
@@ -64,20 +63,24 @@ func verifyBlobs(t testing.TB, bufs []Buf, k *crypto.Key, rd io.ReadSeeker, packSize uint) {
     Equals(t, uint(written), packSize)

     // read and parse it again
-    np, err := pack.NewUnpacker(k, rd)
+    np, err := pack.NewUnpacker(k, ldr)
     OK(t, err)
     Equals(t, len(np.Entries), len(bufs))

+    var buf []byte
     for i, b := range bufs {
         e := np.Entries[i]
         Equals(t, b.id, e.ID)

-        brd, err := e.GetReader(rd)
-        OK(t, err)
-        data, err := ioutil.ReadAll(brd)
+        if len(buf) < int(e.Length) {
+            buf = make([]byte, int(e.Length))
+        }
+        buf = buf[:int(e.Length)]
+        n, err := ldr.Load(buf, int64(e.Offset))
         OK(t, err)
+        buf = buf[:n]

-        Assert(t, bytes.Equal(b.data, data),
+        Assert(t, bytes.Equal(b.data, buf),
             "data for blob %v doesn't match", i)
     }
 }
@@ -86,9 +89,9 @@ func TestCreatePack(t *testing.T) {
     // create random keys
     k := crypto.NewRandomKey()

-    bufs, packData, packSize := newPack(t, k)
+    bufs, packData, packSize := newPack(t, k, testLens)
     Equals(t, uint(len(packData)), packSize)
-    verifyBlobs(t, bufs, k, bytes.NewReader(packData), packSize)
+    verifyBlobs(t, bufs, k, pack.BufferLoader(packData), packSize)
 }

 var blobTypeJSON = []struct {
@@ -118,13 +121,27 @@ func TestUnpackReadSeeker(t *testing.T) {
     // create random keys
     k := crypto.NewRandomKey()

-    bufs, packData, packSize := newPack(t, k)
+    bufs, packData, packSize := newPack(t, k, testLens)
     b := mem.New()
     id := backend.Hash(packData)

     handle := backend.Handle{Type: backend.Data, Name: id.String()}
     OK(t, b.Save(handle, packData))
-    rd := backend.NewReadSeeker(b, handle)
-    verifyBlobs(t, bufs, k, rd, packSize)
+    ldr := pack.BackendLoader{Backend: b, Handle: handle}
+    verifyBlobs(t, bufs, k, ldr, packSize)
+}
+
+func TestShortPack(t *testing.T) {
+    k := crypto.NewRandomKey()
+
+    bufs, packData, packSize := newPack(t, k, []int{23})
+
+    b := mem.New()
+    id := backend.Hash(packData)
+
+    handle := backend.Handle{Type: backend.Data, Name: id.String()}
+    OK(t, b.Save(handle, packData))
+    ldr := pack.BackendLoader{Backend: b, Handle: handle}
+    verifyBlobs(t, bufs, k, ldr, packSize)
 }


@@ -152,6 +152,7 @@ func (p *Progress) Done() {
     if p.OnDone != nil {
         p.fnM.Lock()
+        p.OnUpdate(cur, time.Since(p.start), false)
         p.OnDone(cur, time.Since(p.start), false)
         p.fnM.Unlock()
     }


@@ -6,10 +6,12 @@ import (
     "encoding/hex"
     "errors"
     "io"
+    "testing"
+
+    "github.com/restic/chunker"

     "restic/backend"
     "restic/debug"
-
-    "github.com/restic/chunker"
 )

 // Config contains the configuration for a repository.
@@ -37,8 +39,8 @@ type JSONUnpackedLoader interface {
 }

 // CreateConfig creates a config file with a randomly selected polynomial and
-// ID and saves the config in the repository.
-func CreateConfig(r JSONUnpackedSaver) (Config, error) {
+// ID.
+func CreateConfig() (Config, error) {
     var (
         err error
         cfg Config
@@ -59,9 +61,23 @@ func CreateConfig(r JSONUnpackedSaver) (Config, error) {
     cfg.Version = RepoVersion

     debug.Log("Repo.CreateConfig", "New config: %#v", cfg)
+    return cfg, nil
+}

-    _, err = r.SaveJSONUnpacked(backend.Config, cfg)
-    return cfg, err
+// TestCreateConfig creates a config for use within tests.
+func TestCreateConfig(t testing.TB, pol chunker.Pol) (cfg Config) {
+    cfg.ChunkerPolynomial = pol
+
+    newID := make([]byte, repositoryIDSize)
+    _, err := io.ReadFull(rand.Reader, newID)
+    if err != nil {
+        t.Fatalf("unable to create random ID: %v", err)
+    }
+    cfg.ID = hex.EncodeToString(newID)
+
+    cfg.Version = RepoVersion
+
+    return cfg
 }
 // LoadConfig loads, checks and returns the config for a repository.


@@ -32,9 +32,11 @@ func TestConfig(t *testing.T) {
         return backend.ID{}, nil
     }

-    cfg1, err := repository.CreateConfig(saver(save))
+    cfg1, err := repository.CreateConfig()
     OK(t, err)
+    _, err = saver(save).SaveJSONUnpacked(backend.Config, cfg1)

     load := func(tpe backend.Type, id backend.ID, arg interface{}) error {
         Assert(t, tpe == backend.Config,
             "wrong backend type: got %v, wanted %v",


@@ -18,7 +18,7 @@ import (
 // Index holds a lookup table for id -> pack.
 type Index struct {
     m    sync.Mutex
-    pack map[backend.ID]indexEntry
+    pack map[pack.Handle][]indexEntry

     final bool       // set to true for all indexes read from the backend ("finalized")
     id    backend.ID // set to the ID of the index when it's finalized
@@ -27,7 +27,6 @@ type Index struct {
 }

 type indexEntry struct {
-    tpe    pack.BlobType
     packID backend.ID
     offset uint
     length uint
@@ -36,18 +35,19 @@ type indexEntry struct {
 // NewIndex returns a new index.
 func NewIndex() *Index {
     return &Index{
-        pack:    make(map[backend.ID]indexEntry),
+        pack:    make(map[pack.Handle][]indexEntry),
         created: time.Now(),
     }
 }

 func (idx *Index) store(blob PackedBlob) {
-    idx.pack[blob.ID] = indexEntry{
-        tpe:    blob.Type,
+    newEntry := indexEntry{
         packID: blob.PackID,
         offset: blob.Offset,
         length: blob.Length,
     }
+    h := pack.Handle{ID: blob.ID, Type: blob.Type}
+    idx.pack[h] = append(idx.pack[h], newEntry)
 }

 // Final returns true iff the index is already written to the repository, it is
@@ -110,43 +110,36 @@ func (idx *Index) Store(blob PackedBlob) {
     idx.store(blob)
 }

-// StoreBlobs saves information about the blobs to the index in one atomic transaction.
-func (idx *Index) StoreBlobs(blobs []PackedBlob) {
-    idx.m.Lock()
-    defer idx.m.Unlock()
-
-    if idx.final {
-        panic("store new item in finalized index")
-    }
-
-    debug.Log("Index.StoreBlobs", "stored %d blobs", len(blobs))
-    for _, blob := range blobs {
-        idx.store(blob)
-    }
-}
-
 // Lookup queries the index for the blob ID and returns a PackedBlob.
-func (idx *Index) Lookup(id backend.ID) (pb PackedBlob, err error) {
+func (idx *Index) Lookup(id backend.ID, tpe pack.BlobType) (blobs []PackedBlob, err error) {
     idx.m.Lock()
     defer idx.m.Unlock()

-    if p, ok := idx.pack[id]; ok {
-        debug.Log("Index.Lookup", "id %v found in pack %v at %d, length %d",
-            id.Str(), p.packID.Str(), p.offset, p.length)
+    h := pack.Handle{ID: id, Type: tpe}

-        pb := PackedBlob{
-            Type:   p.tpe,
-            Length: p.length,
-            ID:     id,
-            Offset: p.offset,
-            PackID: p.packID,
+    if packs, ok := idx.pack[h]; ok {
+        blobs = make([]PackedBlob, 0, len(packs))
+
+        for _, p := range packs {
+            debug.Log("Index.Lookup", "id %v found in pack %v at %d, length %d",
+                id.Str(), p.packID.Str(), p.offset, p.length)
+
+            blob := PackedBlob{
+                Type:   tpe,
+                Length: p.length,
+                ID:     id,
+                Offset: p.offset,
+                PackID: p.packID,
+            }
+
+            blobs = append(blobs, blob)
         }
-        return pb, nil
+
+        return blobs, nil
     }

     debug.Log("Index.Lookup", "id %v not found", id.Str())
-    return PackedBlob{}, fmt.Errorf("id %v not found in index", id)
+    return nil, fmt.Errorf("id %v not found in index", id)
 }

 // ListPack returns a list of blobs contained in a pack.
@@ -154,15 +147,17 @@ func (idx *Index) ListPack(id backend.ID) (list []PackedBlob) {
     idx.m.Lock()
     defer idx.m.Unlock()

-    for blobID, entry := range idx.pack {
-        if entry.packID == id {
-            list = append(list, PackedBlob{
-                ID:     blobID,
-                Type:   entry.tpe,
-                Length: entry.length,
-                Offset: entry.offset,
-                PackID: entry.packID,
-            })
+    for h, packList := range idx.pack {
+        for _, entry := range packList {
+            if entry.packID == id {
+                list = append(list, PackedBlob{
+                    ID:     h.ID,
+                    Type:   h.Type,
+                    Length: entry.length,
+                    Offset: entry.offset,
+                    PackID: entry.packID,
+                })
+            }
         }
     }

@@ -170,8 +165,8 @@ func (idx *Index) ListPack(id backend.ID) (list []PackedBlob) {
 }

 // Has returns true iff the id is listed in the index.
-func (idx *Index) Has(id backend.ID) bool {
-    _, err := idx.Lookup(id)
+func (idx *Index) Has(id backend.ID, tpe pack.BlobType) bool {
+    _, err := idx.Lookup(id, tpe)
     if err == nil {
         return true
     }
@@ -181,28 +176,13 @@ func (idx *Index) Has(id backend.ID) bool {
 // LookupSize returns the length of the cleartext content behind the
 // given id
-func (idx *Index) LookupSize(id backend.ID) (cleartextLength uint, err error) {
-    blob, err := idx.Lookup(id)
+func (idx *Index) LookupSize(id backend.ID, tpe pack.BlobType) (cleartextLength uint, err error) {
+    blobs, err := idx.Lookup(id, tpe)
     if err != nil {
         return 0, err
     }

-    return blob.PlaintextLength(), nil
-}
-
-// Merge loads all items from other into idx.
-func (idx *Index) Merge(other *Index) {
-    debug.Log("Index.Merge", "Merge index with %p", other)
-    idx.m.Lock()
-    defer idx.m.Unlock()
-
-    for k, v := range other.pack {
-        if _, ok := idx.pack[k]; ok {
-            debug.Log("Index.Merge", "index already has key %v, updating", k.Str())
-        }
-        idx.pack[k] = v
-    }
-    debug.Log("Index.Merge", "done merging index")
+    return blobs[0].PlaintextLength(), nil
 }

 // Supersedes returns the list of indexes this index supersedes, if any.
@@ -257,17 +237,19 @@ func (idx *Index) Each(done chan struct{}) <-chan PackedBlob {
         close(ch)
     }()

-    for id, blob := range idx.pack {
-        select {
-        case <-done:
-            return
-        case ch <- PackedBlob{
-            ID:     id,
-            Offset: blob.offset,
-            Type:   blob.tpe,
-            Length: blob.length,
-            PackID: blob.packID,
-        }:
+    for h, packs := range idx.pack {
+        for _, blob := range packs {
+            select {
+            case <-done:
+                return
+            case ch <- PackedBlob{
+                ID:     h.ID,
+                Type:   h.Type,
+                Offset: blob.offset,
+                Length: blob.length,
+                PackID: blob.packID,
+            }:
+            }
         }
     }
 }()

@@ -281,8 +263,10 @@ func (idx *Index) Packs() backend.IDSet {
     defer idx.m.Unlock()

     packs := backend.NewIDSet()
-    for _, entry := range idx.pack {
-        packs.Insert(entry.packID)
+    for _, list := range idx.pack {
+        for _, entry := range list {
+            packs.Insert(entry.packID)
+        }
     }

     return packs
@@ -294,11 +278,12 @@ func (idx *Index) Count(t pack.BlobType) (n uint) {
     idx.m.Lock()
     defer idx.m.Unlock()

-    for id, blob := range idx.pack {
-        if blob.tpe == t {
-            n++
-            debug.Log("Index.Count", " blob %v counted: %v", id.Str(), blob)
+    for h, list := range idx.pack {
+        if h.Type != t {
+            continue
         }
+        n += uint(len(list))
     }

     return
@@ -330,37 +315,39 @@ func (idx *Index) generatePackList() ([]*packJSON, error) {
     list := []*packJSON{}
     packs := make(map[backend.ID]*packJSON)

-    for id, blob := range idx.pack {
-        if blob.packID.IsNull() {
-            panic("null pack id")
-        }
-
-        debug.Log("Index.generatePackList", "handle blob %v", id.Str())
-
-        if blob.packID.IsNull() {
-            debug.Log("Index.generatePackList", "blob %q has no packID! (type %v, offset %v, length %v)",
-                id.Str(), blob.tpe, blob.offset, blob.length)
-            return nil, fmt.Errorf("unable to serialize index: pack for blob %v hasn't been written yet", id)
-        }
-
-        // see if pack is already in map
-        p, ok := packs[blob.packID]
-        if !ok {
-            // else create new pack
-            p = &packJSON{ID: blob.packID}
-
-            // and append it to the list and map
-            list = append(list, p)
-            packs[p.ID] = p
-        }
-
-        // add blob
-        p.Blobs = append(p.Blobs, blobJSON{
-            ID:     id,
-            Type:   blob.tpe,
-            Offset: blob.offset,
-            Length: blob.length,
-        })
+    for h, packedBlobs := range idx.pack {
+        for _, blob := range packedBlobs {
+            if blob.packID.IsNull() {
+                panic("null pack id")
+            }
+
+            debug.Log("Index.generatePackList", "handle blob %v", h)
+
+            if blob.packID.IsNull() {
+                debug.Log("Index.generatePackList", "blob %v has no packID! (offset %v, length %v)",
+                    h, blob.offset, blob.length)
+                return nil, fmt.Errorf("unable to serialize index: pack for blob %v hasn't been written yet", h)
+            }
+
+            // see if pack is already in map
+            p, ok := packs[blob.packID]
+            if !ok {
+                // else create new pack
+                p = &packJSON{ID: blob.packID}
+
+                // and append it to the list and map
+                list = append(list, p)
+                packs[p.ID] = p
+            }
+
+            // add blob
+            p.Blobs = append(p.Blobs, blobJSON{
+                ID:     h.ID,
+                Type:   h.Type,
+                Offset: blob.offset,
+                Length: blob.length,
+            })
+        }
     }

     debug.Log("Index.generatePackList", "done")
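
The behavioral change above is that an (ID, type) pair may now resolve to several stored copies. A hypothetical caller (not part of the commit) handles the returned slice like this:

package example

import (
    "fmt"

    "restic/backend"
    "restic/pack"
    "restic/repository"
)

func whereIsBlob(idx *repository.Index, id backend.ID) {
    blobs, err := idx.Lookup(id, pack.Data)
    if err != nil {
        fmt.Println("not in index:", err)
        return
    }

    // Duplicates are legal now, so every location is reported.
    for _, pb := range blobs {
        fmt.Printf("%v in pack %v at offset %d\n",
            id.Str(), pb.PackID.Str(), pb.Offset)
    }
}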


@@ -0,0 +1,68 @@
package repository
import (
"fmt"
"os"
"restic/backend"
"restic/debug"
"restic/list"
"restic/worker"
)
// RebuildIndex lists all packs in the repo, writes a new index and removes all
// old indexes. This operation should only be done with an exclusive lock in
// place.
func RebuildIndex(repo *Repository) error {
debug.Log("RebuildIndex", "start rebuilding index")
done := make(chan struct{})
defer close(done)
ch := make(chan worker.Job)
go list.AllPacks(repo, ch, done)
idx := NewIndex()
for job := range ch {
id := job.Data.(backend.ID)
if job.Error != nil {
fmt.Fprintf(os.Stderr, "error for pack %v: %v\n", id, job.Error)
continue
}
res := job.Result.(list.Result)
for _, entry := range res.Entries() {
pb := PackedBlob{
ID: entry.ID,
Type: entry.Type,
Length: entry.Length,
Offset: entry.Offset,
PackID: res.PackID(),
}
idx.Store(pb)
}
}
oldIndexes := backend.NewIDSet()
for id := range repo.List(backend.Index, done) {
idx.AddToSupersedes(id)
oldIndexes.Insert(id)
}
id, err := SaveIndex(repo, idx)
if err != nil {
debug.Log("RebuildIndex.RebuildIndex", "error saving index: %v", err)
return err
}
debug.Log("RebuildIndex.RebuildIndex", "new index saved as %v", id.Str())
for indexID := range oldIndexes {
err := repo.Backend().Remove(backend.Index, indexID.String())
if err != nil {
fmt.Fprintf(os.Stderr, "unable to remove index %v: %v\n", indexID.Str(), err)
}
}
return nil
}


@@ -2,8 +2,6 @@ package repository_test

 import (
     "bytes"
-    "crypto/rand"
-    "io"
     "testing"

     "restic/backend"
@@ -12,15 +10,6 @@ import (
     . "restic/test"
 )

-func randomID() backend.ID {
-    id := backend.ID{}
-    _, err := io.ReadFull(rand.Reader, id[:])
-    if err != nil {
-        panic(err)
-    }
-    return id
-}
-
 func TestIndexSerialize(t *testing.T) {
     type testEntry struct {
         id backend.ID
@@ -34,11 +23,11 @@ func TestIndexSerialize(t *testing.T) {
     // create 50 packs with 20 blobs each
     for i := 0; i < 50; i++ {
-        packID := randomID()
+        packID := backend.RandomID()

         pos := uint(0)
         for j := 0; j < 20; j++ {
-            id := randomID()
+            id := backend.RandomID()
             length := uint(i*100 + j)

             idx.Store(repository.PackedBlob{
                 Type: pack.Data,
@@ -74,17 +63,27 @@ func TestIndexSerialize(t *testing.T) {
     OK(t, err)

     for _, testBlob := range tests {
-        result, err := idx.Lookup(testBlob.id)
+        list, err := idx.Lookup(testBlob.id, testBlob.tpe)
         OK(t, err)

+        if len(list) != 1 {
+            t.Errorf("expected one result for blob %v, got %v: %v", testBlob.id.Str(), len(list), list)
+        }
+        result := list[0]
+
         Equals(t, testBlob.pack, result.PackID)
         Equals(t, testBlob.tpe, result.Type)
         Equals(t, testBlob.offset, result.Offset)
         Equals(t, testBlob.length, result.Length)

-        result2, err := idx2.Lookup(testBlob.id)
+        list2, err := idx2.Lookup(testBlob.id, testBlob.tpe)
         OK(t, err)

+        if len(list2) != 1 {
+            t.Errorf("expected one result for blob %v, got %v: %v", testBlob.id.Str(), len(list2), list2)
+        }
+        result2 := list2[0]
+
         Equals(t, testBlob.pack, result2.PackID)
         Equals(t, testBlob.tpe, result2.Type)
         Equals(t, testBlob.offset, result2.Offset)
@@ -94,11 +93,11 @@ func TestIndexSerialize(t *testing.T) {
     // add more blobs to idx
     newtests := []testEntry{}
     for i := 0; i < 10; i++ {
-        packID := randomID()
+        packID := backend.RandomID()

         pos := uint(0)
         for j := 0; j < 10; j++ {
-            id := randomID()
+            id := backend.RandomID()
             length := uint(i*100 + j)

             idx.Store(repository.PackedBlob{
                 Type: pack.Data,
@@ -128,7 +127,7 @@ func TestIndexSerialize(t *testing.T) {
     Assert(t, idx.Final(),
         "index not final after encoding")

-    id := randomID()
+    id := backend.RandomID()
     OK(t, idx.SetID(id))

     id2, err := idx.ID()
     Assert(t, id2.Equal(id),
@@ -143,9 +142,15 @@ func TestIndexSerialize(t *testing.T) {
     // all new blobs must be in the index
     for _, testBlob := range newtests {
-        blob, err := idx3.Lookup(testBlob.id)
+        list, err := idx3.Lookup(testBlob.id, testBlob.tpe)
         OK(t, err)

+        if len(list) != 1 {
+            t.Errorf("expected one result for blob %v, got %v: %v", testBlob.id.Str(), len(list), list)
+        }
+        blob := list[0]
+
         Equals(t, testBlob.pack, blob.PackID)
         Equals(t, testBlob.tpe, blob.Type)
         Equals(t, testBlob.offset, blob.Offset)
@@ -159,11 +164,11 @@ func TestIndexSize(t *testing.T) {
     packs := 200
     blobs := 100
     for i := 0; i < packs; i++ {
-        packID := randomID()
+        packID := backend.RandomID()

         pos := uint(0)
         for j := 0; j < blobs; j++ {
-            id := randomID()
+            id := backend.RandomID()
             length := uint(i*100 + j)

             idx.Store(repository.PackedBlob{
                 Type: pack.Data,
@@ -265,13 +270,13 @@ var exampleTests = []struct {

 var exampleLookupTest = struct {
     packID backend.ID
-    blobs  backend.IDSet
+    blobs  map[backend.ID]pack.BlobType
 }{
     ParseID("73d04e6125cf3c28a299cc2f3cca3b78ceac396e4fcf9575e34536b26782413c"),
-    backend.IDSet{
-        ParseID("3ec79977ef0cf5de7b08cd12b874cd0f62bbaf7f07f3497a5b1bbcc8cb39b1ce"): struct{}{},
-        ParseID("9ccb846e60d90d4eb915848add7aa7ea1e4bbabfc60e573db9f7bfb2789afbae"): struct{}{},
-        ParseID("d3dc577b4ffd38cc4b32122cabf8655a0223ed22edfd93b353dc0c3f2b0fdf66"): struct{}{},
+    map[backend.ID]pack.BlobType{
+        ParseID("3ec79977ef0cf5de7b08cd12b874cd0f62bbaf7f07f3497a5b1bbcc8cb39b1ce"): pack.Data,
+        ParseID("9ccb846e60d90d4eb915848add7aa7ea1e4bbabfc60e573db9f7bfb2789afbae"): pack.Tree,
+        ParseID("d3dc577b4ffd38cc4b32122cabf8655a0223ed22edfd93b353dc0c3f2b0fdf66"): pack.Data,
     },
 }

@@ -282,9 +287,16 @@ func TestIndexUnserialize(t *testing.T) {
     OK(t, err)

     for _, test := range exampleTests {
-        blob, err := idx.Lookup(test.id)
+        list, err := idx.Lookup(test.id, test.tpe)
         OK(t, err)

+        if len(list) != 1 {
+            t.Errorf("expected one result for blob %v, got %v: %v", test.id.Str(), len(list), list)
+        }
+        blob := list[0]
+
+        t.Logf("looking for blob %v/%v, got %v", test.tpe, test.id.Str(), blob)
+
         Equals(t, test.packID, blob.PackID)
         Equals(t, test.tpe, blob.Type)
         Equals(t, test.offset, blob.Offset)
@@ -299,9 +311,13 @@ func TestIndexUnserialize(t *testing.T) {
     }

     for _, blob := range blobs {
-        if !exampleLookupTest.blobs.Has(blob.ID) {
+        b, ok := exampleLookupTest.blobs[blob.ID]
+        if !ok {
             t.Errorf("unexpected blob %v found", blob.ID.Str())
         }
+        if blob.Type != b {
+            t.Errorf("unexpected type for blob %v: want %v, got %v", blob.ID.Str(), b, blob.Type)
+        }
     }
 }

@@ -310,9 +326,14 @@ func TestIndexUnserializeOld(t *testing.T) {
     OK(t, err)

     for _, test := range exampleTests {
-        blob, err := idx.Lookup(test.id)
+        list, err := idx.Lookup(test.id, test.tpe)
         OK(t, err)

+        if len(list) != 1 {
+            t.Errorf("expected one result for blob %v, got %v: %v", test.id.Str(), len(list), list)
+        }
+        blob := list[0]
+
         Equals(t, test.packID, blob.PackID)
         Equals(t, test.tpe, blob.Type)
         Equals(t, test.offset, blob.Offset)
@@ -327,10 +348,10 @@ func TestIndexPacks(t *testing.T) {
     packs := backend.NewIDSet()
     for i := 0; i < 20; i++ {
-        packID := randomID()
+        packID := backend.RandomID()

         idx.Store(repository.PackedBlob{
             Type:   pack.Data,
-            ID:     randomID(),
+            ID:     backend.RandomID(),
             PackID: packID,
             Offset: 0,
             Length: 23,


@@ -21,32 +21,32 @@ func NewMasterIndex() *MasterIndex {
 }

 // Lookup queries all known Indexes for the ID and returns the first match.
-func (mi *MasterIndex) Lookup(id backend.ID) (blob PackedBlob, err error) {
+func (mi *MasterIndex) Lookup(id backend.ID, tpe pack.BlobType) (blobs []PackedBlob, err error) {
     mi.idxMutex.RLock()
     defer mi.idxMutex.RUnlock()

-    debug.Log("MasterIndex.Lookup", "looking up id %v", id.Str())
+    debug.Log("MasterIndex.Lookup", "looking up id %v, tpe %v", id.Str(), tpe)

     for _, idx := range mi.idx {
-        blob, err = idx.Lookup(id)
+        blobs, err = idx.Lookup(id, tpe)
         if err == nil {
             debug.Log("MasterIndex.Lookup",
-                "found id %v: %v", id.Str(), blob)
+                "found id %v: %v", id.Str(), blobs)
             return
         }
     }

     debug.Log("MasterIndex.Lookup", "id %v not found in any index", id.Str())
-    return PackedBlob{}, fmt.Errorf("id %v not found in any index", id)
+    return nil, fmt.Errorf("id %v not found in any index", id)
 }

 // LookupSize queries all known Indexes for the ID and returns the first match.
-func (mi *MasterIndex) LookupSize(id backend.ID) (uint, error) {
+func (mi *MasterIndex) LookupSize(id backend.ID, tpe pack.BlobType) (uint, error) {
     mi.idxMutex.RLock()
     defer mi.idxMutex.RUnlock()

     for _, idx := range mi.idx {
-        length, err := idx.LookupSize(id)
+        length, err := idx.LookupSize(id, tpe)
         if err == nil {
             return length, nil
         }
@@ -72,12 +72,12 @@ func (mi *MasterIndex) ListPack(id backend.ID) (list []PackedBlob) {
 }

 // Has queries all known Indexes for the ID and returns the first match.
-func (mi *MasterIndex) Has(id backend.ID) bool {
+func (mi *MasterIndex) Has(id backend.ID, tpe pack.BlobType) bool {
     mi.idxMutex.RLock()
     defer mi.idxMutex.RUnlock()

     for _, idx := range mi.idx {
-        if idx.Has(id) {
+        if idx.Has(id, tpe) {
             return true
         }
     }


@@ -83,7 +83,7 @@ func ParallelWorkFuncParseID(f ParallelIDWorkFunc) ParallelWorkFunc {
     id, err := backend.ParseID(s)
     if err != nil {
         debug.Log("repository.ParallelWorkFuncParseID", "invalid ID %q: %v", id, err)
-        return nil
+        return err
     }

     return f(id, done)


@@ -0,0 +1,79 @@
package repository
import (
"io"
"math/rand"
)
// RandReader allows reading from a rand.Rand.
type RandReader struct {
rnd *rand.Rand
buf []byte
}
// NewRandReader creates a new Reader from a random source.
func NewRandReader(rnd *rand.Rand) io.Reader {
return &RandReader{rnd: rnd, buf: make([]byte, 0, 7)}
}
func (rd *RandReader) read(p []byte) (n int, err error) {
if len(p)%7 != 0 {
panic("invalid buffer length, not multiple of 7")
}
rnd := rd.rnd
for i := 0; i < len(p); i += 7 {
val := rnd.Int63()
p[i+0] = byte(val >> 0)
p[i+1] = byte(val >> 8)
p[i+2] = byte(val >> 16)
p[i+3] = byte(val >> 24)
p[i+4] = byte(val >> 32)
p[i+5] = byte(val >> 40)
p[i+6] = byte(val >> 48)
}
return len(p), nil
}
func (rd *RandReader) Read(p []byte) (int, error) {
// first, copy buffer to p
pos := copy(p, rd.buf)
copy(rd.buf, rd.buf[pos:])
// shorten buf and p accordingly
rd.buf = rd.buf[:len(rd.buf)-pos]
p = p[pos:]
// if this is enough to fill p, return
if len(p) == 0 {
return pos, nil
}
// load multiple of 7 byte
l := (len(p) / 7) * 7
n, err := rd.read(p[:l])
pos += n
if err != nil {
return pos, err
}
p = p[n:]
// load 7 byte to temp buffer
rd.buf = rd.buf[:7]
n, err = rd.read(rd.buf)
if err != nil {
return pos, err
}
// copy the remaining bytes from the buffer to p
n = copy(p, rd.buf)
pos += n
// save the remaining bytes in rd.buf
n = copy(rd.buf, rd.buf[n:])
rd.buf = rd.buf[:n]
return pos, nil
}
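
A hypothetical usage sketch (not from the commit): seeding the source makes the stream reproducible, which is what the repack tests below rely on for content-addressed test data:

package example

import (
    "fmt"
    "io"
    "math/rand"

    "restic/repository"
)

func deterministicBytes() {
    rd := repository.NewRandReader(rand.New(rand.NewSource(23)))

    buf := make([]byte, 16)
    if _, err := io.ReadFull(rd, buf); err != nil {
        panic(err)
    }
    fmt.Printf("%x\n", buf) // same seed, same bytes
}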


@@ -0,0 +1,84 @@
package repository
import (
"io"
"restic/backend"
"restic/crypto"
"restic/debug"
"restic/pack"
)
// Repack takes a list of packs together with a list of blobs contained in
// these packs. Each pack is loaded and the blobs listed in keepBlobs is saved
// into a new pack. Afterwards, the packs are removed. This operation requires
// an exclusive lock on the repo.
func Repack(repo *Repository, packs backend.IDSet, keepBlobs pack.BlobSet) (err error) {
debug.Log("Repack", "repacking %d packs while keeping %d blobs", len(packs), len(keepBlobs))
buf := make([]byte, 0, maxPackSize)
for packID := range packs {
// load the complete pack
h := backend.Handle{Type: backend.Data, Name: packID.String()}
l, err := repo.Backend().Load(h, buf[:cap(buf)], 0)
if err == io.ErrUnexpectedEOF {
err = nil
buf = buf[:l]
}
if err != nil {
return err
}
debug.Log("Repack", "pack %v loaded (%d bytes)", packID.Str(), len(buf))
unpck, err := pack.NewUnpacker(repo.Key(), pack.BufferLoader(buf))
if err != nil {
return err
}
debug.Log("Repack", "processing pack %v, blobs: %v", packID.Str(), len(unpck.Entries))
var plaintext []byte
for _, entry := range unpck.Entries {
h := pack.Handle{ID: entry.ID, Type: entry.Type}
if !keepBlobs.Has(h) {
continue
}
ciphertext := buf[entry.Offset : entry.Offset+entry.Length]
if cap(plaintext) < len(ciphertext) {
plaintext = make([]byte, len(ciphertext))
}
plaintext, err = crypto.Decrypt(repo.Key(), plaintext, ciphertext)
if err != nil {
return err
}
_, err = repo.SaveAndEncrypt(entry.Type, plaintext, &entry.ID)
if err != nil {
return err
}
debug.Log("Repack", " saved blob %v", entry.ID.Str())
keepBlobs.Delete(h)
}
}
if err := repo.Flush(); err != nil {
return err
}
for packID := range packs {
err := repo.Backend().Remove(backend.Data, packID.String())
if err != nil {
debug.Log("Repack", "error removing pack %v: %v", packID.Str(), err)
return err
}
debug.Log("Repack", "removed pack %v", packID.Str())
}
return nil
}
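
How the pieces might fit together: a hypothetical sketch (not the actual prune command, though it loosely mirrors it) that finds packs containing unused blobs, repacks only what must be kept, and rebuilds the index. usedBlobs would come from FindUsedBlobs; note that Repack consumes entries from the set it is given:

package example

import (
    "restic/backend"
    "restic/index"
    "restic/pack"
    "restic/repository"
)

func pruneSketch(repo *repository.Repository, usedBlobs pack.BlobSet) error {
    idx, err := index.New(repo, nil)
    if err != nil {
        return err
    }

    // Collect every pack that holds at least one unused blob.
    rewritePacks := backend.NewIDSet()
    for packID, p := range idx.Packs {
        for _, blob := range p.Entries {
            h := pack.Handle{ID: blob.ID, Type: blob.Type}
            if !usedBlobs.Has(h) {
                rewritePacks.Insert(packID)
                break
            }
        }
    }

    // Rewrite those packs keeping only used blobs, then replace the
    // old indexes with one that matches the new layout.
    if err := repository.Repack(repo, rewritePacks, usedBlobs); err != nil {
        return err
    }
    return repository.RebuildIndex(repo)
}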


@@ -0,0 +1,221 @@
package repository_test
import (
"io"
"math/rand"
"restic/backend"
"restic/pack"
"restic/repository"
"testing"
)
func randomSize(min, max int) int {
return rand.Intn(max-min) + min
}
func random(t testing.TB, length int) []byte {
rd := repository.NewRandReader(rand.New(rand.NewSource(int64(length))))
buf := make([]byte, length)
_, err := io.ReadFull(rd, buf)
if err != nil {
t.Fatalf("unable to read %d random bytes: %v", length, err)
}
return buf
}
func createRandomBlobs(t testing.TB, repo *repository.Repository, blobs int, pData float32) {
for i := 0; i < blobs; i++ {
var (
tpe pack.BlobType
length int
)
if rand.Float32() < pData {
tpe = pack.Data
length = randomSize(10*1024, 1024*1024) // 10KiB to 1MiB of data
} else {
tpe = pack.Tree
length = randomSize(1*1024, 20*1024) // 1KiB to 20KiB
}
buf := random(t, length)
id := backend.Hash(buf)
if repo.Index().Has(id, pack.Data) {
t.Errorf("duplicate blob %v/%v ignored", id, pack.Data)
continue
}
_, err := repo.SaveAndEncrypt(tpe, buf, &id)
if err != nil {
t.Fatalf("SaveFrom() error %v", err)
}
if rand.Float32() < 0.2 {
if err = repo.Flush(); err != nil {
t.Fatalf("repo.Flush() returned error %v", err)
}
}
}
if err := repo.Flush(); err != nil {
t.Fatalf("repo.Flush() returned error %v", err)
}
}
// selectBlobs splits the list of all blobs randomly into two lists. A blob
// will be contained in the first one with probability p.
func selectBlobs(t *testing.T, repo *repository.Repository, p float32) (list1, list2 pack.BlobSet) {
done := make(chan struct{})
defer close(done)
list1 = pack.NewBlobSet()
list2 = pack.NewBlobSet()
blobs := pack.NewBlobSet()
for id := range repo.List(backend.Data, done) {
entries, _, err := repo.ListPack(id)
if err != nil {
t.Fatalf("error listing pack %v: %v", id, err)
}
for _, entry := range entries {
h := pack.Handle{ID: entry.ID, Type: entry.Type}
if blobs.Has(h) {
t.Errorf("ignoring duplicate blob %v", h)
continue
}
blobs.Insert(h)
if rand.Float32() <= p {
list1.Insert(pack.Handle{ID: entry.ID, Type: entry.Type})
} else {
list2.Insert(pack.Handle{ID: entry.ID, Type: entry.Type})
}
}
}
return list1, list2
}
func listPacks(t *testing.T, repo *repository.Repository) backend.IDSet {
done := make(chan struct{})
defer close(done)
list := backend.NewIDSet()
for id := range repo.List(backend.Data, done) {
list.Insert(id)
}
return list
}
func findPacksForBlobs(t *testing.T, repo *repository.Repository, blobs pack.BlobSet) backend.IDSet {
packs := backend.NewIDSet()
idx := repo.Index()
for h := range blobs {
list, err := idx.Lookup(h.ID, h.Type)
if err != nil {
t.Fatal(err)
}
for _, pb := range list {
packs.Insert(pb.PackID)
}
}
return packs
}
func repack(t *testing.T, repo *repository.Repository, packs backend.IDSet, blobs pack.BlobSet) {
err := repository.Repack(repo, packs, blobs)
if err != nil {
t.Fatal(err)
}
}
func saveIndex(t *testing.T, repo *repository.Repository) {
if err := repo.SaveIndex(); err != nil {
t.Fatalf("repo.SaveIndex() %v", err)
}
}
func rebuildIndex(t *testing.T, repo *repository.Repository) {
if err := repository.RebuildIndex(repo); err != nil {
t.Fatalf("error rebuilding index: %v", err)
}
}
func reloadIndex(t *testing.T, repo *repository.Repository) {
repo.SetIndex(repository.NewMasterIndex())
if err := repo.LoadIndex(); err != nil {
t.Fatalf("error loading new index: %v", err)
}
}
func TestRepack(t *testing.T) {
repo, cleanup := repository.TestRepository(t)
defer cleanup()
createRandomBlobs(t, repo, 100, 0.7)
packsBefore := listPacks(t, repo)
// Running repack on empty ID sets should not do anything at all.
repack(t, repo, nil, nil)
packsAfter := listPacks(t, repo)
if !packsAfter.Equals(packsBefore) {
t.Fatalf("packs are not equal, Repack modified something. Before:\n %v\nAfter:\n %v",
packsBefore, packsAfter)
}
saveIndex(t, repo)
removeBlobs, keepBlobs := selectBlobs(t, repo, 0.2)
removePacks := findPacksForBlobs(t, repo, removeBlobs)
repack(t, repo, removePacks, keepBlobs)
rebuildIndex(t, repo)
reloadIndex(t, repo)
packsAfter = listPacks(t, repo)
for id := range removePacks {
if packsAfter.Has(id) {
t.Errorf("pack %v still present although it should have been repacked and removed", id.Str())
}
}
idx := repo.Index()
for h := range keepBlobs {
list, err := idx.Lookup(h.ID, h.Type)
if err != nil {
t.Errorf("unable to find blob %v in repo", h.ID.Str())
continue
}
if len(list) != 1 {
t.Errorf("expected one pack in the list, got: %v", list)
continue
}
pb := list[0]
if removePacks.Has(pb.PackID) {
t.Errorf("lookup returned pack ID %v that should've been removed", pb.PackID)
}
}
for h := range removeBlobs {
if _, err := idx.Lookup(h.ID, h.Type); err == nil {
t.Errorf("blob %v still contained in the repo", h)
}
}
}

View File

@ -77,56 +77,70 @@ func (r *Repository) LoadAndDecrypt(t backend.Type, id backend.ID) ([]byte, erro
// LoadBlob tries to load and decrypt content identified by t and id from a
// pack from the backend, the result is stored in plaintextBuf, which must be
// large enough to hold the complete blob.
func (r *Repository) LoadBlob(id backend.ID, t pack.BlobType, plaintextBuf []byte) ([]byte, error) {
debug.Log("Repo.LoadBlob", "load %v with id %v", t, id.Str())
// lookup plaintext size of blob
size, err := r.idx.LookupSize(id, t)
if err != nil {
return nil, err
}
// make sure the plaintext buffer is large enough, extend otherwise
plaintextBufSize := uint(cap(plaintextBuf))
if size > plaintextBufSize {
debug.Log("Repo.LoadBlob", "need to expand buffer: want %d bytes, got %d",
size, plaintextBufSize)
plaintextBuf = make([]byte, size)
}
// lookup packs
blobs, err := r.idx.Lookup(id, t)
if err != nil {
debug.Log("Repo.LoadBlob", "id %v not found in index: %v", id.Str(), err)
return nil, err
}
for _, blob := range blobs {
debug.Log("Repo.LoadBlob", "id %v found: %v", id.Str(), blob)
if blob.Type != t {
debug.Log("Repo.LoadBlob", "blob %v has wrong block type, want %v", blob, t)
}
// load blob from pack
h := backend.Handle{Type: backend.Data, Name: blob.PackID.String()}
ciphertextBuf := make([]byte, blob.Length)
n, err := r.be.Load(h, ciphertextBuf, int64(blob.Offset))
if err != nil {
debug.Log("Repo.LoadBlob", "error loading blob %v: %v", blob, err)
fmt.Fprintf(os.Stderr, "error loading blob %v: %v", id, err)
continue
}
if uint(n) != blob.Length {
debug.Log("Repo.LoadBlob", "error loading blob %v: wrong length returned, want %d, got %d",
blob.Length, uint(n))
continue
}
// decrypt
plaintextBuf, err = r.decryptTo(plaintextBuf, ciphertextBuf)
if err != nil {
fmt.Fprintf(os.Stderr, "decrypting blob %v failed: %v", id, err)
continue
}
// check hash
if !backend.Hash(plaintextBuf).Equal(id) {
fmt.Fprintf(os.Stderr, "blob %v returned invalid hash", id)
continue
}
return plaintextBuf, nil
}
return nil, fmt.Errorf("loading blob %v from %v packs failed", id.Str(), len(blobs))
}
// closeOrErr calls cl.Close() and sets err to the returned error value if
@ -153,7 +167,7 @@ func (r *Repository) LoadJSONUnpacked(t backend.Type, id backend.ID, item interf
// LoadJSONPack calls LoadBlob() to load a blob from the backend, decrypt the
// data and afterwards call json.Unmarshal on the item.
func (r *Repository) LoadJSONPack(t pack.BlobType, id backend.ID, item interface{}) (err error) {
buf, err := r.LoadBlob(id, t, nil)
if err != nil {
return err
}
@ -162,8 +176,8 @@ func (r *Repository) LoadJSONPack(t pack.BlobType, id backend.ID, item interface
}
// LookupBlobSize returns the size of blob id.
func (r *Repository) LookupBlobSize(id backend.ID, tpe pack.BlobType) (uint, error) {
return r.idx.LookupSize(id, tpe)
}
// SaveAndEncrypt encrypts data and stores it to the backend as type t. If data
@ -416,6 +430,17 @@ func (r *Repository) Init(password string) error {
return errors.New("repository master key and config already initialized")
}
cfg, err := CreateConfig()
if err != nil {
return err
}
return r.init(password, cfg)
}
// init creates a new master key with the supplied password and uses it to save
// the config into the repo.
func (r *Repository) init(password string, cfg Config) error {
key, err := createMasterKey(r, password)
if err != nil {
return err
@ -424,7 +449,8 @@ func (r *Repository) Init(password string) error {
r.key = key.master
r.packerManager.key = key.master
r.keyName = key.Name()
r.Config = cfg
_, err = r.SaveJSONUnpacked(backend.Config, cfg)
return err
}
@ -518,17 +544,24 @@ func (r *Repository) List(t backend.Type, done <-chan struct{}) <-chan backend.I
return outCh
}
// ListPack returns the list of blobs saved in the pack id and the length of
// the file as stored in the backend.
func (r *Repository) ListPack(id backend.ID) ([]pack.Blob, int64, error) {
h := backend.Handle{Type: backend.Data, Name: id.String()}
blobInfo, err := r.Backend().Stat(h)
if err != nil {
return nil, 0, err
}
ldr := pack.BackendLoader{Backend: r.Backend(), Handle: h}
unpacker, err := pack.NewUnpacker(r.Key(), ldr)
if err != nil {
return nil, 0, err
}
return unpacker.Entries, blobInfo.Size, nil
}
// Delete calls backend.Delete() if implemented, and returns an error // Delete calls backend.Delete() if implemented, and returns an error
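
ListPack now also reports the pack file's size as stored in the backend, which the prune logic uses when deciding which packs to rewrite. A sketch under assumptions (repo and id are hypothetical variables in scope):

    // Sketch: print one pack's blob count and its on-disk size, using the
    // new ListPack return values.
    func printPack(repo *repository.Repository, id backend.ID) error {
        entries, size, err := repo.ListPack(id)
        if err != nil {
            return err
        }
        fmt.Printf("pack %v: %d blobs, %d bytes\n", id.Str(), len(entries), size)
        return nil
    }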

View File

@ -92,7 +92,7 @@ func TestSave(t *testing.T) {
// OK(t, repo.SaveIndex())
// read back
buf, err := repo.LoadBlob(id, pack.Data, make([]byte, size))
OK(t, err)
Assert(t, len(buf) == len(data),
@ -124,7 +124,7 @@ func TestSaveFrom(t *testing.T) {
OK(t, repo.Flush())
// read back
buf, err := repo.LoadBlob(id, pack.Data, make([]byte, size))
OK(t, err)
Assert(t, len(buf) == len(data),

View File

@ -6,6 +6,8 @@ import (
"restic/backend/local" "restic/backend/local"
"restic/backend/mem" "restic/backend/mem"
"testing" "testing"
"github.com/restic/chunker"
) )
// TestBackend returns a fully configured in-memory backend. // TestBackend returns a fully configured in-memory backend.
@ -16,8 +18,11 @@ func TestBackend(t testing.TB) (be backend.Backend, cleanup func()) {
// TestPassword is used for all repositories created by the Test* functions. // TestPassword is used for all repositories created by the Test* functions.
const TestPassword = "geheim" const TestPassword = "geheim"
const testChunkerPol = chunker.Pol(0x3DA3358B4DC173)
// TestRepositoryWithBackend returns a repository initialized with a test // TestRepositoryWithBackend returns a repository initialized with a test
// password. If be is nil, an in-memory backend is used. // password. If be is nil, an in-memory backend is used. A constant polynomial
// is used for the chunker.
func TestRepositoryWithBackend(t testing.TB, be backend.Backend) (r *Repository, cleanup func()) { func TestRepositoryWithBackend(t testing.TB, be backend.Backend) (r *Repository, cleanup func()) {
var beCleanup func() var beCleanup func()
if be == nil { if be == nil {
@ -26,9 +31,10 @@ func TestRepositoryWithBackend(t testing.TB, be backend.Backend) (r *Repository,
r = New(be) r = New(be)
err := r.Init(TestPassword) cfg := TestCreateConfig(t, testChunkerPol)
err := r.init(TestPassword, cfg)
if err != nil { if err != nil {
t.Fatalf("TestRepopository(): initialize repo failed: %v", err) t.Fatalf("TestRepository(): initialize repo failed: %v", err)
} }
return r, func() { return r, func() {
@ -41,7 +47,7 @@ func TestRepositoryWithBackend(t testing.TB, be backend.Backend) (r *Repository,
// TestRepository returns a repository initialized with a test password on an // TestRepository returns a repository initialized with a test password on an
// in-memory backend. When the environment variable RESTIC_TEST_REPO is set to // in-memory backend. When the environment variable RESTIC_TEST_REPO is set to
// a non-existing directory, a local backend is created there and this is used // a non-existing directory, a local backend is created there and this is used
// instead. The directory is not removed. // instead. The directory is not removed, but left there for inspection.
func TestRepository(t testing.TB) (r *Repository, cleanup func()) { func TestRepository(t testing.TB) (r *Repository, cleanup func()) {
dir := os.Getenv("RESTIC_TEST_REPO") dir := os.Getenv("RESTIC_TEST_REPO")
if dir != "" { if dir != "" {

View File

@ -83,7 +83,8 @@ func LoadAllSnapshots(repo *repository.Repository) (snapshots []*Snapshot, err e
}
func (sn Snapshot) String() string {
return fmt.Sprintf("<Snapshot %s of %v at %s by %s@%s>",
sn.id.Str(), sn.Paths, sn.Time, sn.Username, sn.Hostname)
}
// ID returns the snapshot's ID.
@ -125,7 +126,7 @@ func SamePaths(expected, actual []string) bool {
return true
}
// ErrNoSnapshotFound is returned when no snapshot for the given criteria could be found.
var ErrNoSnapshotFound = errors.New("no snapshot found")
// FindLatestSnapshot finds latest snapshot with optional target/directory and source filters // FindLatestSnapshot finds latest snapshot with optional target/directory and source filters

View File

@ -0,0 +1,207 @@
package restic
import (
"fmt"
"reflect"
"sort"
"time"
)
// Snapshots is a list of snapshots.
type Snapshots []*Snapshot
// Len returns the number of snapshots in sn.
func (sn Snapshots) Len() int {
return len(sn)
}
// Less returns true iff the ith snapshot has been made after the jth.
func (sn Snapshots) Less(i, j int) bool {
return sn[i].Time.After(sn[j].Time)
}
// Swap exchanges the two snapshots.
func (sn Snapshots) Swap(i, j int) {
sn[i], sn[j] = sn[j], sn[i]
}
// SnapshotFilter configures criteria for filtering snapshots before an
// ExpirePolicy can be applied.
type SnapshotFilter struct {
Hostname string
Username string
Paths []string
}
// FilterSnapshots returns the snapshots from s which match the filter f.
func FilterSnapshots(s Snapshots, f SnapshotFilter) (result Snapshots) {
for _, snap := range s {
if f.Hostname != "" && f.Hostname != snap.Hostname {
continue
}
if f.Username != "" && f.Username != snap.Username {
continue
}
if f.Paths != nil && !reflect.DeepEqual(f.Paths, snap.Paths) {
continue
}
result = append(result, snap)
}
return result
}
// ExpirePolicy configures which snapshots should be automatically removed.
type ExpirePolicy struct {
Last int // keep the last n snapshots
Hourly int // keep the last n hourly snapshots
Daily int // keep the last n daily snapshots
Weekly int // keep the last n weekly snapshots
Monthly int // keep the last n monthly snapshots
Yearly int // keep the last n yearly snapshots
}
// Sum returns the maximum number of snapshots to be kept according to this
// policy.
func (e ExpirePolicy) Sum() int {
return e.Last + e.Hourly + e.Daily + e.Weekly + e.Monthly + e.Yearly
}
// Empty returns true iff no policy has been configured (all values zero).
func (e ExpirePolicy) Empty() bool {
empty := ExpirePolicy{}
return e == empty
}
// filter is used to split a list of snapshots into those to keep and those to
// remove according to a policy.
type filter struct {
Unprocessed Snapshots
Remove Snapshots
Keep Snapshots
}
func (f filter) String() string {
return fmt.Sprintf("<filter %d todo, %d keep, %d remove>", len(f.Unprocessed), len(f.Keep), len(f.Remove))
}
// ymdh returns an integer in the form YYYYMMDDHH.
func ymdh(d time.Time) int {
return d.Year()*1000000 + int(d.Month())*10000 + d.Day()*100 + d.Hour()
}
// ymd returns an integer in the form YYYYMMDD.
func ymd(d time.Time) int {
return d.Year()*10000 + int(d.Month())*100 + d.Day()
}
// yw returns an integer in the form YYYYWW, where WW is the week number.
func yw(d time.Time) int {
year, week := d.ISOWeek()
return year*100 + week
}
// ym returns an integer in the form YYYYMM.
func ym(d time.Time) int {
return d.Year()*100 + int(d.Month())
}
// y returns the year of d.
func y(d time.Time) int {
return d.Year()
}
// apply moves snapshots from Unprocessed to either Keep or Remove. It sorts
// the snapshots into buckets according to the return value of fn, and then
// moves the newest snapshot in each bucket to Keep and all others to Remove.
// After max buckets have been processed, processing stops.
func (f *filter) apply(fn func(time.Time) int, max int) {
if max == 0 || len(f.Unprocessed) == 0 {
return
}
sameDay := Snapshots{}
lastDay := fn(f.Unprocessed[0].Time)
for len(f.Unprocessed) > 0 {
cur := f.Unprocessed[0]
day := fn(cur.Time)
// if the snapshots are from a new day, forget all but the first (=last
// in time) snapshot from the previous day.
if day != lastDay {
f.Keep = append(f.Keep, sameDay[0])
for _, snapshot := range sameDay[1:] {
f.Remove = append(f.Remove, snapshot)
}
sameDay = Snapshots{}
lastDay = day
max--
if max == 0 {
break
}
}
// collect all snapshots for the current day
sameDay = append(sameDay, cur)
f.Unprocessed = f.Unprocessed[1:]
}
if len(sameDay) > 0 {
f.Keep = append(f.Keep, sameDay[0])
for _, snapshot := range sameDay[1:] {
f.Remove = append(f.Remove, snapshot)
}
}
}
// keepLast marks the last n snapshots as to be kept.
func (f *filter) keepLast(n int) {
if n > len(f.Unprocessed) {
n = len(f.Unprocessed)
}
f.Keep = append(f.Keep, f.Unprocessed[:n]...)
f.Unprocessed = f.Unprocessed[n:]
}
// finish moves all remaining snapshots to remove.
func (f *filter) finish() {
f.Remove = append(f.Remove, f.Unprocessed...)
}
// ApplyPolicy sorts the snapshots in list (newest first) and splits them into
// those to keep and those to remove, according to the policy p.
func ApplyPolicy(list Snapshots, p ExpirePolicy) (keep, remove Snapshots) {
sort.Sort(list)
if p.Empty() {
return list, remove
}
if len(list) == 0 {
return list, remove
}
f := filter{
Unprocessed: list,
Remove: Snapshots{},
Keep: Snapshots{},
}
f.keepLast(p.Last)
f.apply(ymdh, p.Hourly)
f.apply(ymd, p.Daily)
f.apply(yw, p.Weekly)
f.apply(ym, p.Monthly)
f.apply(y, p.Yearly)
f.finish()
return f.Keep, f.Remove
}
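
For example, with Daily: 7 the call f.apply(ymd, 7) buckets snapshots by calendar day (ymd of 2016-01-04 is 20160104) and keeps only the newest snapshot from each of the seven most recent days that have one. A hypothetical sketch of how filtering and the policy compose (hostname and paths are taken from the test fixtures below; in the real forget command the snapshots come from the repository and the policy from command-line flags):

    // Hypothetical sketch: narrow the snapshot list, then split it into
    // keep/remove sets according to a policy.
    func splitSnapshots(all restic.Snapshots) (keep, remove restic.Snapshots) {
        snapshots := restic.FilterSnapshots(all, restic.SnapshotFilter{
            Hostname: "foo",
            Paths:    []string{"/usr", "/bin"},
        })
        policy := restic.ExpirePolicy{Last: 2, Daily: 7, Weekly: 4, Monthly: 6}
        return restic.ApplyPolicy(snapshots, policy)
    }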

View File

@ -0,0 +1,263 @@
package restic
import (
"encoding/json"
"fmt"
"io/ioutil"
"path/filepath"
"reflect"
"sort"
"testing"
"time"
)
func parseTime(s string) time.Time {
t, err := time.Parse("2006-01-02 15:04:05", s)
if err != nil {
panic(err)
}
return t.UTC()
}
var testFilterSnapshots = Snapshots{
{Hostname: "foo", Username: "testuser", Time: parseTime("2016-01-01 01:02:03"), Paths: []string{"/usr", "/bin"}},
{Hostname: "bar", Username: "testuser", Time: parseTime("2016-01-01 01:03:03"), Paths: []string{"/usr", "/bin"}},
{Hostname: "foo", Username: "testuser", Time: parseTime("2016-01-03 07:02:03"), Paths: []string{"/usr", "/bin"}},
{Hostname: "bar", Username: "testuser", Time: parseTime("2016-01-01 07:08:03"), Paths: []string{"/usr", "/bin"}},
{Hostname: "foo", Username: "testuser", Time: parseTime("2016-01-04 10:23:03"), Paths: []string{"/usr", "/bin"}},
{Hostname: "foo", Username: "testuser", Time: parseTime("2016-01-04 11:23:03"), Paths: []string{"/usr", "/bin"}},
{Hostname: "foo", Username: "testuser", Time: parseTime("2016-01-04 12:23:03"), Paths: []string{"/usr", "/bin"}},
{Hostname: "foo", Username: "testuser", Time: parseTime("2016-01-04 12:24:03"), Paths: []string{"/usr", "/bin"}},
{Hostname: "foo", Username: "testuser", Time: parseTime("2016-01-04 12:28:03"), Paths: []string{"/usr", "/bin"}},
{Hostname: "foo", Username: "testuser", Time: parseTime("2016-01-04 12:30:03"), Paths: []string{"/usr", "/bin"}},
{Hostname: "foo", Username: "testuser", Time: parseTime("2016-01-04 16:23:03"), Paths: []string{"/usr", "/bin"}},
{Hostname: "foo", Username: "testuser", Time: parseTime("2016-01-05 09:02:03"), Paths: []string{"/usr", "/bin"}},
{Hostname: "foo", Username: "testuser", Time: parseTime("2016-01-06 08:02:03"), Paths: []string{"/usr", "/bin"}},
{Hostname: "foo", Username: "testuser", Time: parseTime("2016-01-07 10:02:03"), Paths: []string{"/usr", "/bin"}},
{Hostname: "foo", Username: "root", Time: parseTime("2016-01-08 20:02:03"), Paths: []string{"/usr", "/sbin"}},
{Hostname: "foo", Username: "root", Time: parseTime("2016-01-09 21:02:03"), Paths: []string{"/usr", "/sbin"}},
{Hostname: "bar", Username: "root", Time: parseTime("2016-01-12 21:02:03"), Paths: []string{"/usr", "/sbin"}},
{Hostname: "foo", Username: "testuser", Time: parseTime("2016-01-12 21:08:03"), Paths: []string{"/usr", "/bin"}},
{Hostname: "foo", Username: "testuser", Time: parseTime("2016-01-18 12:02:03"), Paths: []string{"/usr", "/bin"}},
}
var filterTests = []SnapshotFilter{
{Hostname: "foo"},
{Username: "root"},
{Hostname: "foo", Username: "root"},
{Paths: []string{"/usr", "/bin"}},
{Hostname: "bar", Paths: []string{"/usr", "/bin"}},
{Hostname: "foo", Username: "root", Paths: []string{"/usr", "/sbin"}},
}
func TestFilterSnapshots(t *testing.T) {
sort.Sort(testFilterSnapshots)
for i, f := range filterTests {
res := FilterSnapshots(testFilterSnapshots, f)
goldenFilename := filepath.Join("testdata", fmt.Sprintf("filter_snapshots_%d", i))
if *updateGoldenFiles {
buf, err := json.MarshalIndent(res, "", " ")
if err != nil {
t.Fatalf("error marshaling result: %v", err)
}
if err = ioutil.WriteFile(goldenFilename, buf, 0644); err != nil {
t.Fatalf("unable to update golden file: %v", err)
}
}
buf, err := ioutil.ReadFile(goldenFilename)
if err != nil {
t.Errorf("error loading golden file %v: %v", goldenFilename, err)
continue
}
var want Snapshots
err = json.Unmarshal(buf, &want)
if !reflect.DeepEqual(res, want) {
t.Errorf("test %v: wrong result, want:\n %#v\ngot:\n %#v", i, want, res)
continue
}
}
}
var testExpireSnapshots = Snapshots{
{Time: parseTime("2014-09-01 10:20:30")},
{Time: parseTime("2014-09-02 10:20:30")},
{Time: parseTime("2014-09-05 10:20:30")},
{Time: parseTime("2014-09-06 10:20:30")},
{Time: parseTime("2014-09-08 10:20:30")},
{Time: parseTime("2014-09-09 10:20:30")},
{Time: parseTime("2014-09-10 10:20:30")},
{Time: parseTime("2014-09-11 10:20:30")},
{Time: parseTime("2014-09-20 10:20:30")},
{Time: parseTime("2014-09-22 10:20:30")},
{Time: parseTime("2014-08-08 10:20:30")},
{Time: parseTime("2014-08-10 10:20:30")},
{Time: parseTime("2014-08-12 10:20:30")},
{Time: parseTime("2014-08-13 10:20:30")},
{Time: parseTime("2014-08-13 10:20:30")},
{Time: parseTime("2014-08-15 10:20:30")},
{Time: parseTime("2014-08-18 10:20:30")},
{Time: parseTime("2014-08-20 10:20:30")},
{Time: parseTime("2014-08-21 10:20:30")},
{Time: parseTime("2014-08-22 10:20:30")},
{Time: parseTime("2014-10-01 10:20:30")},
{Time: parseTime("2014-10-02 10:20:30")},
{Time: parseTime("2014-10-05 10:20:30")},
{Time: parseTime("2014-10-06 10:20:30")},
{Time: parseTime("2014-10-08 10:20:30")},
{Time: parseTime("2014-10-09 10:20:30")},
{Time: parseTime("2014-10-10 10:20:30")},
{Time: parseTime("2014-10-11 10:20:30")},
{Time: parseTime("2014-10-20 10:20:30")},
{Time: parseTime("2014-10-22 10:20:30")},
{Time: parseTime("2014-11-08 10:20:30")},
{Time: parseTime("2014-11-10 10:20:30")},
{Time: parseTime("2014-11-12 10:20:30")},
{Time: parseTime("2014-11-13 10:20:30")},
{Time: parseTime("2014-11-13 10:20:30")},
{Time: parseTime("2014-11-15 10:20:30")},
{Time: parseTime("2014-11-18 10:20:30")},
{Time: parseTime("2014-11-20 10:20:30")},
{Time: parseTime("2014-11-21 10:20:30")},
{Time: parseTime("2014-11-22 10:20:30")},
{Time: parseTime("2015-09-01 10:20:30")},
{Time: parseTime("2015-09-02 10:20:30")},
{Time: parseTime("2015-09-05 10:20:30")},
{Time: parseTime("2015-09-06 10:20:30")},
{Time: parseTime("2015-09-08 10:20:30")},
{Time: parseTime("2015-09-09 10:20:30")},
{Time: parseTime("2015-09-10 10:20:30")},
{Time: parseTime("2015-09-11 10:20:30")},
{Time: parseTime("2015-09-20 10:20:30")},
{Time: parseTime("2015-09-22 10:20:30")},
{Time: parseTime("2015-08-08 10:20:30")},
{Time: parseTime("2015-08-10 10:20:30")},
{Time: parseTime("2015-08-12 10:20:30")},
{Time: parseTime("2015-08-13 10:20:30")},
{Time: parseTime("2015-08-13 10:20:30")},
{Time: parseTime("2015-08-15 10:20:30")},
{Time: parseTime("2015-08-18 10:20:30")},
{Time: parseTime("2015-08-20 10:20:30")},
{Time: parseTime("2015-08-21 10:20:30")},
{Time: parseTime("2015-08-22 10:20:30")},
{Time: parseTime("2015-10-01 10:20:30")},
{Time: parseTime("2015-10-02 10:20:30")},
{Time: parseTime("2015-10-05 10:20:30")},
{Time: parseTime("2015-10-06 10:20:30")},
{Time: parseTime("2015-10-08 10:20:30")},
{Time: parseTime("2015-10-09 10:20:30")},
{Time: parseTime("2015-10-10 10:20:30")},
{Time: parseTime("2015-10-11 10:20:30")},
{Time: parseTime("2015-10-20 10:20:30")},
{Time: parseTime("2015-10-22 10:20:30")},
{Time: parseTime("2015-11-08 10:20:30")},
{Time: parseTime("2015-11-10 10:20:30")},
{Time: parseTime("2015-11-12 10:20:30")},
{Time: parseTime("2015-11-13 10:20:30")},
{Time: parseTime("2015-11-13 10:20:30")},
{Time: parseTime("2015-11-15 10:20:30")},
{Time: parseTime("2015-11-18 10:20:30")},
{Time: parseTime("2015-11-20 10:20:30")},
{Time: parseTime("2015-11-21 10:20:30")},
{Time: parseTime("2015-11-22 10:20:30")},
{Time: parseTime("2016-01-01 01:02:03")},
{Time: parseTime("2016-01-01 01:03:03")},
{Time: parseTime("2016-01-01 07:08:03")},
{Time: parseTime("2016-01-03 07:02:03")},
{Time: parseTime("2016-01-04 10:23:03")},
{Time: parseTime("2016-01-04 11:23:03")},
{Time: parseTime("2016-01-04 12:23:03")},
{Time: parseTime("2016-01-04 12:24:03")},
{Time: parseTime("2016-01-04 12:28:03")},
{Time: parseTime("2016-01-04 12:30:03")},
{Time: parseTime("2016-01-04 16:23:03")},
{Time: parseTime("2016-01-05 09:02:03")},
{Time: parseTime("2016-01-06 08:02:03")},
{Time: parseTime("2016-01-07 10:02:03")},
{Time: parseTime("2016-01-08 20:02:03")},
{Time: parseTime("2016-01-09 21:02:03")},
{Time: parseTime("2016-01-12 21:02:03")},
{Time: parseTime("2016-01-12 21:08:03")},
{Time: parseTime("2016-01-18 12:02:03")},
}
var expireTests = []ExpirePolicy{
{},
{Last: 10},
{Last: 15},
{Last: 99},
{Last: 200},
{Hourly: 20},
{Daily: 3},
{Daily: 10},
{Daily: 30},
{Last: 5, Daily: 5},
{Last: 2, Daily: 10},
{Weekly: 2},
{Weekly: 4},
{Daily: 3, Weekly: 4},
{Monthly: 6},
{Daily: 2, Weekly: 2, Monthly: 6},
{Yearly: 10},
{Daily: 7, Weekly: 2, Monthly: 3, Yearly: 10},
}
func TestApplyPolicy(t *testing.T) {
for i, p := range expireTests {
keep, remove := ApplyPolicy(testExpireSnapshots, p)
t.Logf("test %d: returned keep %v, remove %v (of %v) expired snapshots for policy %v",
i, len(keep), len(remove), len(testExpireSnapshots), p)
if len(keep)+len(remove) != len(testExpireSnapshots) {
t.Errorf("test %d: len(keep)+len(remove) = %d != len(testExpireSnapshots) = %d",
i, len(keep)+len(remove), len(testExpireSnapshots))
}
if p.Sum() > 0 && len(keep) > p.Sum() {
t.Errorf("not enough snapshots removed: policy allows %v snapshots to remain, but ended up with %v",
p.Sum(), len(keep))
}
for _, sn := range keep {
t.Logf("test %d: keep snapshot at %v\n", i, sn.Time)
}
for _, sn := range remove {
t.Logf("test %d: forget snapshot at %v\n", i, sn.Time)
}
goldenFilename := filepath.Join("testdata", fmt.Sprintf("expired_snapshots_%d", i))
if *updateGoldenFiles {
buf, err := json.MarshalIndent(keep, "", " ")
if err != nil {
t.Fatalf("error marshaling result: %v", err)
}
if err = ioutil.WriteFile(goldenFilename, buf, 0644); err != nil {
t.Fatalf("unable to update golden file: %v", err)
}
}
buf, err := ioutil.ReadFile(goldenFilename)
if err != nil {
t.Errorf("error loading golden file %v: %v", goldenFilename, err)
continue
}
var want Snapshots
err = json.Unmarshal(buf, &want)
if !reflect.DeepEqual(keep, want) {
t.Errorf("test %v: wrong result, want:\n %v\ngot:\n %v", i, want, keep)
continue
}
}
}

src/restic/testdata/expired_snapshots_0 vendored Normal file
View File

@ -0,0 +1,497 @@
[
{
"time": "2016-01-18T12:02:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-12T21:08:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-12T21:02:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-09T21:02:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-08T20:02:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-07T10:02:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-06T08:02:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-05T09:02:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-04T16:23:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-04T12:30:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-04T12:28:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-04T12:24:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-04T12:23:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-04T11:23:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-04T10:23:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-03T07:02:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-01T07:08:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-01T01:03:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-01T01:02:03Z",
"tree": null,
"paths": null
},
{
"time": "2015-11-22T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-11-21T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-11-20T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-11-18T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-11-15T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-11-13T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-11-13T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-11-12T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-11-10T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-11-08T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-10-22T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-10-20T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-10-11T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-10-10T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-10-09T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-10-08T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-10-06T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-10-05T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-10-02T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-10-01T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-09-22T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-09-20T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-09-11T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-09-10T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-09-09T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-09-08T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-09-06T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-09-05T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-09-02T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-09-01T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-08-22T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-08-21T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-08-20T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-08-18T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-08-15T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-08-13T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-08-13T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-08-12T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-08-10T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-08-08T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2014-11-22T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2014-11-21T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2014-11-20T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2014-11-18T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2014-11-15T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2014-11-13T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2014-11-13T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2014-11-12T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2014-11-10T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2014-11-08T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2014-10-22T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2014-10-20T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2014-10-11T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2014-10-10T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2014-10-09T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2014-10-08T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2014-10-06T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2014-10-05T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2014-10-02T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2014-10-01T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2014-09-22T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2014-09-20T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2014-09-11T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2014-09-10T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2014-09-09T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2014-09-08T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2014-09-06T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2014-09-05T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2014-09-02T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2014-09-01T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2014-08-22T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2014-08-21T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2014-08-20T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2014-08-18T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2014-08-15T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2014-08-13T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2014-08-13T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2014-08-12T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2014-08-10T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2014-08-08T10:20:30Z",
"tree": null,
"paths": null
}
]

src/restic/testdata/expired_snapshots_1 vendored Normal file
View File

@ -0,0 +1,52 @@
[
{
"time": "2016-01-18T12:02:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-12T21:08:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-12T21:02:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-09T21:02:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-08T20:02:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-07T10:02:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-06T08:02:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-05T09:02:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-04T16:23:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-04T12:30:03Z",
"tree": null,
"paths": null
}
]

View File

@ -0,0 +1,62 @@
[
{
"time": "2016-01-18T12:02:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-12T21:08:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-12T21:02:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-09T21:02:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-08T20:02:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-07T10:02:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-06T08:02:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-05T09:02:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-04T16:23:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-03T07:02:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-01T07:08:03Z",
"tree": null,
"paths": null
},
{
"time": "2015-11-22T10:20:30Z",
"tree": null,
"paths": null
}
]

View File

@ -0,0 +1,12 @@
[
{
"time": "2016-01-18T12:02:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-12T21:08:03Z",
"tree": null,
"paths": null
}
]

View File

@ -0,0 +1,22 @@
[
{
"time": "2016-01-18T12:02:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-12T21:08:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-09T21:02:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-03T07:02:03Z",
"tree": null,
"paths": null
}
]

View File

@ -0,0 +1,37 @@
[
{
"time": "2016-01-18T12:02:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-12T21:08:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-09T21:02:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-08T20:02:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-03T07:02:03Z",
"tree": null,
"paths": null
},
{
"time": "2015-11-22T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-11-15T10:20:30Z",
"tree": null,
"paths": null
}
]

View File

@ -0,0 +1,32 @@
[
{
"time": "2016-01-18T12:02:03Z",
"tree": null,
"paths": null
},
{
"time": "2015-11-22T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-10-22T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-09-22T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-08-22T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2014-11-22T10:20:30Z",
"tree": null,
"paths": null
}
]

View File

@ -0,0 +1,52 @@
[
{
"time": "2016-01-18T12:02:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-12T21:08:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-09T21:02:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-03T07:02:03Z",
"tree": null,
"paths": null
},
{
"time": "2015-11-22T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-10-22T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-09-22T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-08-22T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2014-11-22T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2014-10-22T10:20:30Z",
"tree": null,
"paths": null
}
]

View File

@ -0,0 +1,17 @@
[
{
"time": "2016-01-18T12:02:03Z",
"tree": null,
"paths": null
},
{
"time": "2015-11-22T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2014-11-22T10:20:30Z",
"tree": null,
"paths": null
}
]

View File

@ -0,0 +1,72 @@
[
{
"time": "2016-01-18T12:02:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-12T21:08:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-09T21:02:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-08T20:02:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-07T10:02:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-06T08:02:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-05T09:02:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-04T16:23:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-03T07:02:03Z",
"tree": null,
"paths": null
},
{
"time": "2015-11-22T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-10-22T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-09-22T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-08-22T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2014-11-22T10:20:30Z",
"tree": null,
"paths": null
}
]

src/restic/testdata/expired_snapshots_2 vendored Normal file
View File

@ -0,0 +1,77 @@
[
{
"time": "2016-01-18T12:02:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-12T21:08:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-12T21:02:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-09T21:02:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-08T20:02:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-07T10:02:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-06T08:02:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-05T09:02:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-04T16:23:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-04T12:30:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-04T12:28:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-04T12:24:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-04T12:23:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-04T11:23:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-04T10:23:03Z",
"tree": null,
"paths": null
}
]

src/restic/testdata/expired_snapshots_3 vendored Normal file
View File

@ -0,0 +1,497 @@
[
{
"time": "2016-01-18T12:02:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-12T21:08:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-12T21:02:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-09T21:02:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-08T20:02:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-07T10:02:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-06T08:02:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-05T09:02:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-04T16:23:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-04T12:30:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-04T12:28:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-04T12:24:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-04T12:23:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-04T11:23:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-04T10:23:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-03T07:02:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-01T07:08:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-01T01:03:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-01T01:02:03Z",
"tree": null,
"paths": null
},
{
"time": "2015-11-22T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-11-21T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-11-20T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-11-18T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-11-15T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-11-13T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-11-13T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-11-12T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-11-10T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-11-08T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-10-22T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-10-20T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-10-11T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-10-10T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-10-09T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-10-08T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-10-06T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-10-05T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-10-02T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-10-01T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-09-22T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-09-20T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-09-11T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-09-10T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-09-09T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-09-08T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-09-06T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-09-05T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-09-02T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-09-01T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-08-22T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-08-21T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-08-20T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-08-18T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-08-15T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-08-13T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-08-13T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-08-12T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-08-10T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-08-08T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2014-11-22T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2014-11-21T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2014-11-20T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2014-11-18T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2014-11-15T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2014-11-13T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2014-11-13T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2014-11-12T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2014-11-10T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2014-11-08T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2014-10-22T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2014-10-20T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2014-10-11T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2014-10-10T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2014-10-09T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2014-10-08T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2014-10-06T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2014-10-05T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2014-10-02T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2014-10-01T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2014-09-22T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2014-09-20T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2014-09-11T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2014-09-10T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2014-09-09T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2014-09-08T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2014-09-06T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2014-09-05T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2014-09-02T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2014-09-01T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2014-08-22T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2014-08-21T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2014-08-20T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2014-08-18T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2014-08-15T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2014-08-13T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2014-08-13T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2014-08-12T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2014-08-10T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2014-08-08T10:20:30Z",
"tree": null,
"paths": null
}
]

src/restic/testdata/expired_snapshots_4 vendored Normal file
View File

@ -0,0 +1,497 @@
[
{
"time": "2016-01-18T12:02:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-12T21:08:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-12T21:02:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-09T21:02:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-08T20:02:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-07T10:02:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-06T08:02:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-05T09:02:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-04T16:23:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-04T12:30:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-04T12:28:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-04T12:24:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-04T12:23:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-04T11:23:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-04T10:23:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-03T07:02:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-01T07:08:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-01T01:03:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-01T01:02:03Z",
"tree": null,
"paths": null
},
{
"time": "2015-11-22T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-11-21T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-11-20T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-11-18T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-11-15T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-11-13T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-11-13T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-11-12T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-11-10T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-11-08T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-10-22T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-10-20T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-10-11T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-10-10T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-10-09T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-10-08T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-10-06T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-10-05T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-10-02T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-10-01T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-09-22T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-09-20T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-09-11T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-09-10T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-09-09T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-09-08T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-09-06T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-09-05T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-09-02T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-09-01T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-08-22T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-08-21T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-08-20T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-08-18T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-08-15T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-08-13T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-08-13T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-08-12T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-08-10T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-08-08T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2014-11-22T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2014-11-21T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2014-11-20T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2014-11-18T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2014-11-15T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2014-11-13T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2014-11-13T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2014-11-12T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2014-11-10T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2014-11-08T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2014-10-22T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2014-10-20T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2014-10-11T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2014-10-10T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2014-10-09T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2014-10-08T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2014-10-06T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2014-10-05T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2014-10-02T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2014-10-01T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2014-09-22T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2014-09-20T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2014-09-11T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2014-09-10T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2014-09-09T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2014-09-08T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2014-09-06T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2014-09-05T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2014-09-02T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2014-09-01T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2014-08-22T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2014-08-21T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2014-08-20T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2014-08-18T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2014-08-15T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2014-08-13T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2014-08-13T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2014-08-12T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2014-08-10T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2014-08-08T10:20:30Z",
"tree": null,
"paths": null
}
]

src/restic/testdata/expired_snapshots_5 vendored Normal file
View File

@ -0,0 +1,102 @@
[
{
"time": "2016-01-18T12:02:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-12T21:08:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-09T21:02:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-08T20:02:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-07T10:02:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-06T08:02:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-05T09:02:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-04T16:23:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-04T12:30:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-04T11:23:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-04T10:23:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-03T07:02:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-01T07:08:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-01T01:03:03Z",
"tree": null,
"paths": null
},
{
"time": "2015-11-22T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-11-21T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-11-20T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-11-18T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-11-15T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-11-13T10:20:30Z",
"tree": null,
"paths": null
}
]

src/restic/testdata/expired_snapshots_6 vendored Normal file
View File

@ -0,0 +1,17 @@
[
{
"time": "2016-01-18T12:02:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-12T21:08:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-09T21:02:03Z",
"tree": null,
"paths": null
}
]

src/restic/testdata/expired_snapshots_7 vendored Normal file
View File

@ -0,0 +1,52 @@
[
{
"time": "2016-01-18T12:02:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-12T21:08:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-09T21:02:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-08T20:02:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-07T10:02:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-06T08:02:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-05T09:02:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-04T16:23:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-03T07:02:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-01T07:08:03Z",
"tree": null,
"paths": null
}
]

152
src/restic/testdata/expired_snapshots_8 vendored Normal file
View File

@ -0,0 +1,152 @@
[
{
"time": "2016-01-18T12:02:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-12T21:08:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-09T21:02:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-08T20:02:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-07T10:02:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-06T08:02:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-05T09:02:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-04T16:23:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-03T07:02:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-01T07:08:03Z",
"tree": null,
"paths": null
},
{
"time": "2015-11-22T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-11-21T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-11-20T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-11-18T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-11-15T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-11-13T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-11-12T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-11-10T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-11-08T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-10-22T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-10-20T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-10-11T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-10-10T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-10-09T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-10-08T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-10-06T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-10-05T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-10-02T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-10-01T10:20:30Z",
"tree": null,
"paths": null
},
{
"time": "2015-09-22T10:20:30Z",
"tree": null,
"paths": null
}
]

52
src/restic/testdata/expired_snapshots_9 vendored Normal file
View File

@ -0,0 +1,52 @@
[
{
"time": "2016-01-18T12:02:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-12T21:08:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-12T21:02:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-09T21:02:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-08T20:02:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-07T10:02:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-06T08:02:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-05T09:02:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-04T16:23:03Z",
"tree": null,
"paths": null
},
{
"time": "2016-01-03T07:02:03Z",
"tree": null,
"paths": null
}
]
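The expired_snapshots_* files above are vendored test fixtures: plain JSON arrays of snapshot stubs carrying only a time, a tree and a paths field, so the expiration policy can be exercised against fixed timestamp patterns. As a minimal sketch of how such a fixture can be decoded (the struct name and file path here are illustrative, not restic's actual test code):

    package main

    import (
    	"encoding/json"
    	"fmt"
    	"io/ioutil"
    	"time"
    )

    // snapshotStub mirrors just the fields present in the fixtures.
    type snapshotStub struct {
    	Time  time.Time `json:"time"`
    	Tree  *string   `json:"tree"`
    	Paths []string  `json:"paths"`
    }

    func main() {
    	buf, err := ioutil.ReadFile("src/restic/testdata/expired_snapshots_5")
    	if err != nil {
    		panic(err)
    	}

    	var snapshots []snapshotStub
    	if err := json.Unmarshal(buf, &snapshots); err != nil {
    		panic(err)
    	}

    	// The fixtures are sorted newest-first, so index 0 is the latest.
    	fmt.Printf("loaded %d snapshot stubs, newest at %v\n",
    		len(snapshots), snapshots[0].Time)
    }

The "tree": null entries decode into a nil pointer, which is fine for these tests: only the timestamps matter to the policy.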

162
src/restic/testdata/filter_snapshots_0 vendored Normal file
View File

@ -0,0 +1,162 @@
[
{
"time": "2016-01-18T12:02:03Z",
"tree": null,
"paths": [
"/usr",
"/bin"
],
"hostname": "foo",
"username": "testuser"
},
{
"time": "2016-01-12T21:08:03Z",
"tree": null,
"paths": [
"/usr",
"/bin"
],
"hostname": "foo",
"username": "testuser"
},
{
"time": "2016-01-09T21:02:03Z",
"tree": null,
"paths": [
"/usr",
"/sbin"
],
"hostname": "foo",
"username": "root"
},
{
"time": "2016-01-08T20:02:03Z",
"tree": null,
"paths": [
"/usr",
"/sbin"
],
"hostname": "foo",
"username": "root"
},
{
"time": "2016-01-07T10:02:03Z",
"tree": null,
"paths": [
"/usr",
"/bin"
],
"hostname": "foo",
"username": "testuser"
},
{
"time": "2016-01-06T08:02:03Z",
"tree": null,
"paths": [
"/usr",
"/bin"
],
"hostname": "foo",
"username": "testuser"
},
{
"time": "2016-01-05T09:02:03Z",
"tree": null,
"paths": [
"/usr",
"/bin"
],
"hostname": "foo",
"username": "testuser"
},
{
"time": "2016-01-04T16:23:03Z",
"tree": null,
"paths": [
"/usr",
"/bin"
],
"hostname": "foo",
"username": "testuser"
},
{
"time": "2016-01-04T12:30:03Z",
"tree": null,
"paths": [
"/usr",
"/bin"
],
"hostname": "foo",
"username": "testuser"
},
{
"time": "2016-01-04T12:28:03Z",
"tree": null,
"paths": [
"/usr",
"/bin"
],
"hostname": "foo",
"username": "testuser"
},
{
"time": "2016-01-04T12:24:03Z",
"tree": null,
"paths": [
"/usr",
"/bin"
],
"hostname": "foo",
"username": "testuser"
},
{
"time": "2016-01-04T12:23:03Z",
"tree": null,
"paths": [
"/usr",
"/bin"
],
"hostname": "foo",
"username": "testuser"
},
{
"time": "2016-01-04T11:23:03Z",
"tree": null,
"paths": [
"/usr",
"/bin"
],
"hostname": "foo",
"username": "testuser"
},
{
"time": "2016-01-04T10:23:03Z",
"tree": null,
"paths": [
"/usr",
"/bin"
],
"hostname": "foo",
"username": "testuser"
},
{
"time": "2016-01-03T07:02:03Z",
"tree": null,
"paths": [
"/usr",
"/bin"
],
"hostname": "foo",
"username": "testuser"
},
{
"time": "2016-01-01T01:02:03Z",
"tree": null,
"paths": [
"/usr",
"/bin"
],
"hostname": "foo",
"username": "testuser"
}
]

32
src/restic/testdata/filter_snapshots_1 vendored Normal file
View File

@ -0,0 +1,32 @@
[
{
"time": "2016-01-12T21:02:03Z",
"tree": null,
"paths": [
"/usr",
"/sbin"
],
"hostname": "bar",
"username": "root"
},
{
"time": "2016-01-09T21:02:03Z",
"tree": null,
"paths": [
"/usr",
"/sbin"
],
"hostname": "foo",
"username": "root"
},
{
"time": "2016-01-08T20:02:03Z",
"tree": null,
"paths": [
"/usr",
"/sbin"
],
"hostname": "foo",
"username": "root"
}
]

22
src/restic/testdata/filter_snapshots_2 vendored Normal file
View File

@ -0,0 +1,22 @@
[
{
"time": "2016-01-09T21:02:03Z",
"tree": null,
"paths": [
"/usr",
"/sbin"
],
"hostname": "foo",
"username": "root"
},
{
"time": "2016-01-08T20:02:03Z",
"tree": null,
"paths": [
"/usr",
"/sbin"
],
"hostname": "foo",
"username": "root"
}
]

162
src/restic/testdata/filter_snapshots_3 vendored Normal file
View File

@ -0,0 +1,162 @@
[
{
"time": "2016-01-18T12:02:03Z",
"tree": null,
"paths": [
"/usr",
"/bin"
],
"hostname": "foo",
"username": "testuser"
},
{
"time": "2016-01-12T21:08:03Z",
"tree": null,
"paths": [
"/usr",
"/bin"
],
"hostname": "foo",
"username": "testuser"
},
{
"time": "2016-01-07T10:02:03Z",
"tree": null,
"paths": [
"/usr",
"/bin"
],
"hostname": "foo",
"username": "testuser"
},
{
"time": "2016-01-06T08:02:03Z",
"tree": null,
"paths": [
"/usr",
"/bin"
],
"hostname": "foo",
"username": "testuser"
},
{
"time": "2016-01-05T09:02:03Z",
"tree": null,
"paths": [
"/usr",
"/bin"
],
"hostname": "foo",
"username": "testuser"
},
{
"time": "2016-01-04T16:23:03Z",
"tree": null,
"paths": [
"/usr",
"/bin"
],
"hostname": "foo",
"username": "testuser"
},
{
"time": "2016-01-04T12:30:03Z",
"tree": null,
"paths": [
"/usr",
"/bin"
],
"hostname": "foo",
"username": "testuser"
},
{
"time": "2016-01-04T12:28:03Z",
"tree": null,
"paths": [
"/usr",
"/bin"
],
"hostname": "foo",
"username": "testuser"
},
{
"time": "2016-01-04T12:24:03Z",
"tree": null,
"paths": [
"/usr",
"/bin"
],
"hostname": "foo",
"username": "testuser"
},
{
"time": "2016-01-04T12:23:03Z",
"tree": null,
"paths": [
"/usr",
"/bin"
],
"hostname": "foo",
"username": "testuser"
},
{
"time": "2016-01-04T11:23:03Z",
"tree": null,
"paths": [
"/usr",
"/bin"
],
"hostname": "foo",
"username": "testuser"
},
{
"time": "2016-01-04T10:23:03Z",
"tree": null,
"paths": [
"/usr",
"/bin"
],
"hostname": "foo",
"username": "testuser"
},
{
"time": "2016-01-03T07:02:03Z",
"tree": null,
"paths": [
"/usr",
"/bin"
],
"hostname": "foo",
"username": "testuser"
},
{
"time": "2016-01-01T07:08:03Z",
"tree": null,
"paths": [
"/usr",
"/bin"
],
"hostname": "bar",
"username": "testuser"
},
{
"time": "2016-01-01T01:03:03Z",
"tree": null,
"paths": [
"/usr",
"/bin"
],
"hostname": "bar",
"username": "testuser"
},
{
"time": "2016-01-01T01:02:03Z",
"tree": null,
"paths": [
"/usr",
"/bin"
],
"hostname": "foo",
"username": "testuser"
}
]

22
src/restic/testdata/filter_snapshots_4 vendored Normal file
View File

@ -0,0 +1,22 @@
[
{
"time": "2016-01-01T07:08:03Z",
"tree": null,
"paths": [
"/usr",
"/bin"
],
"hostname": "bar",
"username": "testuser"
},
{
"time": "2016-01-01T01:03:03Z",
"tree": null,
"paths": [
"/usr",
"/bin"
],
"hostname": "bar",
"username": "testuser"
}
]

22
src/restic/testdata/filter_snapshots_5 vendored Normal file
View File

@ -0,0 +1,22 @@
[
{
"time": "2016-01-09T21:02:03Z",
"tree": null,
"paths": [
"/usr",
"/sbin"
],
"hostname": "foo",
"username": "root"
},
{
"time": "2016-01-08T20:02:03Z",
"tree": null,
"paths": [
"/usr",
"/sbin"
],
"hostname": "foo",
"username": "root"
}
]
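The filter_snapshots_* fixtures add hostname, username and paths to each stub; they serve as inputs and expected outputs for filtering snapshots by exactly those criteria, the same keys the forget policy groups by. A rough sketch of that kind of filter, with hypothetical names rather than restic's actual API:

    package main

    import (
    	"fmt"
    	"time"
    )

    // snapshotInfo mirrors the fixture fields; not restic's actual type.
    type snapshotInfo struct {
    	Time     time.Time
    	Paths    []string
    	Hostname string
    	Username string
    }

    // filterSnapshots keeps only snapshots matching host and user; an
    // empty value matches everything, mimicking optional CLI filters.
    func filterSnapshots(snaps []snapshotInfo, host, user string) []snapshotInfo {
    	var out []snapshotInfo
    	for _, sn := range snaps {
    		if host != "" && sn.Hostname != host {
    			continue
    		}
    		if user != "" && sn.Username != user {
    			continue
    		}
    		out = append(out, sn)
    	}
    	return out
    }

    func main() {
    	snaps := []snapshotInfo{
    		{Hostname: "foo", Username: "root"},
    		{Hostname: "bar", Username: "testuser"},
    	}
    	fmt.Println(len(filterSnapshots(snaps, "foo", "root"))) // 1
    }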

View File

@ -0,0 +1,37 @@
{"ID":"087e8d5f45f93a78e52a938ac0b7864f92f8910091c0da69201a156242df3b78","Type":"data"}
{"ID":"0bf505951741c44714527d252313b6959ce4f19d2e5512fca1c1b2da14424da3","Type":"data"}
{"ID":"0c82d00e6ee78b48559cda2f9cc909beeb8769183b115dfda0a5767832accc8d","Type":"data"}
{"ID":"2941bfd03b8933bb150b085a2252b69675495af64523bf8d38e67429e7cccb45","Type":"data"}
{"ID":"378a9b6862c8fa5c6915f158d16e4416243159bb9da44c564896c065bc6c1cf4","Type":"data"}
{"ID":"3ffcf5128fc404c2a363e3e8a8d4c8a7ae8c36fcacba7fdfe71ec9dabcadd567","Type":"data"}
{"ID":"40f5ca234e5eed1dc967c83fa99076ef636619148082f300cf877676728ebf14","Type":"data"}
{"ID":"42aad1ab6cc964043e53e5da13ed0f2b44a3bf6ae7702f60a805f13028377524","Type":"data"}
{"ID":"42bc8f509dbd6b9881cab4c1684d5cf74207046336f654db1b884197f15cae7b","Type":"data"}
{"ID":"47cf470c1c6de9af00b3b1ee963de8b94f51a2870b3338b3f33cfc565c0f8be4","Type":"data"}
{"ID":"587045d0ec69e47a3cc91b13c959aa80add9118ecfac47232ea992650f25f0b9","Type":"data"}
{"ID":"615e8851030f318751f3c8baf8fbfa9958e2dd7f25dc1a87dcf6d6f79d1f1a9f","Type":"data"}
{"ID":"63ec5e835e11203bbeef69095523344dd975f1ab52bdbf4a1db7a53914d967ca","Type":"tree"}
{"ID":"714f9e16404b9ec83de56715e5387b2c4c2ed0af1889166a4e767822f971bf52","Type":"data"}
{"ID":"80ba9a145bf46cae605e911c18165c02213e8d11d68dc5b7824f259d17b7b6d0","Type":"data"}
{"ID":"86af714d79d18be1c9c0ae23cca9dbd7cef44530e253e80af5bd5c34eab09714","Type":"data"}
{"ID":"8a445cf5b6313cbe3b5872a55adde52aa8d1ae188f41d56f176e40a3137ac058","Type":"data"}
{"ID":"8e171f7367d1b68012ed1ceec8f54b7b9b8654ebaf63a760017c34d761b17878","Type":"tree"}
{"ID":"8e98f35e65fb42c85eb4a2ab4793e294148e3f318252cb850a896274d2aa90bc","Type":"data"}
{"ID":"9d65ba6443863394a8c6582fef4a8aaab2fb46417eef41f1792cdbdb38ee0b4c","Type":"data"}
{"ID":"9da502ea8e7a768ee0dbafdc613db3df4a7cd9c98af08328265c4d2e953e8efa","Type":"data"}
{"ID":"9f2899688d2f23391cfd86e7b6d326a54f352bb294160878178639aab4aa378f","Type":"tree"}
{"ID":"a2f3ccf973b3600c06c42dc3b867b263a788c18aa57f4448fea2525b7cbfd784","Type":"data"}
{"ID":"b2deaf9174086129ec3b9f79e05401fdb3baf8b75335addffac1950182d779df","Type":"data"}
{"ID":"b81870ebe27b98f6b8746349e8ea444c96bf2eaac5dbd6236175150ce579f46b","Type":"tree"}
{"ID":"bd4dacd46031b2b837bc9bd06145b0571156fa496408ce728c003ae50b265aaf","Type":"data"}
{"ID":"c0775cfc822f59524b4ed714d257607fd5f2c9f0dc9f65763a86ffc33aac325b","Type":"data"}
{"ID":"c3596f717c495d20c33561e991d4295550b6d7544687f2363e999bdc0266224d","Type":"data"}
{"ID":"c54c4899c4d7dcda8b9e597aebfbaf7d65c9c7a760527d77e7fc9894283d736e","Type":"data"}
{"ID":"ca51ecf1633896f852929cb2d56ad1b5bed4ab6055bdcf370ced4011bed164aa","Type":"data"}
{"ID":"ce8b656cead478c34060510962daf97cea52abde68bbef7934dd5c5513cf6f3b","Type":"data"}
{"ID":"dafbb65569781083b627de833fb931cf98401299a62d747f03d8fc135ab57279","Type":"data"}
{"ID":"e193d395410520580e76a5b89b8d23a1d162c0e28c52cb8194d409a74a120f7d","Type":"data"}
{"ID":"e752efd93f9850ba0cafbbac01bb283c10095ac923cdb8ff027393001123d406","Type":"tree"}
{"ID":"f728e5576d4ab63248c310396d67d9afa3267dd2dea3cfba690dbd04efe181fb","Type":"data"}
{"ID":"f75b6460b68d254f2195b08c606672fb55c05fb7bed7e16699b3231104b673ea","Type":"tree"}
{"ID":"fe19f084021bdac5a9a5d270042ff53ef36357dd0743318d0480dee1a43de266","Type":"data"}

View File

@ -0,0 +1,34 @@
{"ID":"011a951a9796979c2b515ef4209662013bd1f16a20a1b35d1d950d7408bdc8b4","Type":"tree"}
{"ID":"087e8d5f45f93a78e52a938ac0b7864f92f8910091c0da69201a156242df3b78","Type":"data"}
{"ID":"0bad18b7f2d82d7c9cf8e405262ad2f3dbe57928aa242c1070b917042a99072d","Type":"data"}
{"ID":"0bf505951741c44714527d252313b6959ce4f19d2e5512fca1c1b2da14424da3","Type":"data"}
{"ID":"0c82d00e6ee78b48559cda2f9cc909beeb8769183b115dfda0a5767832accc8d","Type":"data"}
{"ID":"2941bfd03b8933bb150b085a2252b69675495af64523bf8d38e67429e7cccb45","Type":"data"}
{"ID":"3ffcf5128fc404c2a363e3e8a8d4c8a7ae8c36fcacba7fdfe71ec9dabcadd567","Type":"data"}
{"ID":"40f5ca234e5eed1dc967c83fa99076ef636619148082f300cf877676728ebf14","Type":"data"}
{"ID":"42bc8f509dbd6b9881cab4c1684d5cf74207046336f654db1b884197f15cae7b","Type":"data"}
{"ID":"47cf470c1c6de9af00b3b1ee963de8b94f51a2870b3338b3f33cfc565c0f8be4","Type":"data"}
{"ID":"4b2e91022c34c756b7bd8ece046a2bab6f0dcad89f46c52d1f84cd48e8da55df","Type":"tree"}
{"ID":"6416bc2321cdeb8758188af2b3925f2c82ffde014bf53b7a69c0f113a5c460fe","Type":"data"}
{"ID":"714f9e16404b9ec83de56715e5387b2c4c2ed0af1889166a4e767822f971bf52","Type":"data"}
{"ID":"80ba9a145bf46cae605e911c18165c02213e8d11d68dc5b7824f259d17b7b6d0","Type":"data"}
{"ID":"83bf0196cf45bbca0be7e292688a3622af7888c0e9ec01bb78edaff302cced06","Type":"data"}
{"ID":"8a445cf5b6313cbe3b5872a55adde52aa8d1ae188f41d56f176e40a3137ac058","Type":"data"}
{"ID":"8e98f35e65fb42c85eb4a2ab4793e294148e3f318252cb850a896274d2aa90bc","Type":"data"}
{"ID":"907acef01e05c3e0140858423e9284ddd3d64145ba8b0c3293371c5c7ab3d6b7","Type":"data"}
{"ID":"9d65ba6443863394a8c6582fef4a8aaab2fb46417eef41f1792cdbdb38ee0b4c","Type":"data"}
{"ID":"9da502ea8e7a768ee0dbafdc613db3df4a7cd9c98af08328265c4d2e953e8efa","Type":"data"}
{"ID":"a2f3ccf973b3600c06c42dc3b867b263a788c18aa57f4448fea2525b7cbfd784","Type":"data"}
{"ID":"b2deaf9174086129ec3b9f79e05401fdb3baf8b75335addffac1950182d779df","Type":"data"}
{"ID":"b3915971171e049292e28d7bc61fe362e94f73aa49b578f4ca1322b47d7fc39c","Type":"data"}
{"ID":"bd4dacd46031b2b837bc9bd06145b0571156fa496408ce728c003ae50b265aaf","Type":"data"}
{"ID":"c3596f717c495d20c33561e991d4295550b6d7544687f2363e999bdc0266224d","Type":"data"}
{"ID":"c54c4899c4d7dcda8b9e597aebfbaf7d65c9c7a760527d77e7fc9894283d736e","Type":"data"}
{"ID":"ca51ecf1633896f852929cb2d56ad1b5bed4ab6055bdcf370ced4011bed164aa","Type":"data"}
{"ID":"cb8001715217b4f6960aa24c1abb4b60a20c10f23abc1e5f69e0f5436bd788c8","Type":"data"}
{"ID":"d39c4c264e01ec47b0386da3775c6b0cc337974627ff55792938cca4895ac6c4","Type":"data"}
{"ID":"dafbb65569781083b627de833fb931cf98401299a62d747f03d8fc135ab57279","Type":"data"}
{"ID":"e193d395410520580e76a5b89b8d23a1d162c0e28c52cb8194d409a74a120f7d","Type":"data"}
{"ID":"e791912a7fad8954c764fae41d2958d2feeae2278e403429add9119ab43a36f5","Type":"tree"}
{"ID":"f728e5576d4ab63248c310396d67d9afa3267dd2dea3cfba690dbd04efe181fb","Type":"data"}
{"ID":"fe19f084021bdac5a9a5d270042ff53ef36357dd0743318d0480dee1a43de266","Type":"data"}

View File

@ -0,0 +1,9 @@
{"ID":"35e13e123748cd27d1634c4e07e5ff2fc86901b09b215f3125331d1226c782be","Type":"tree"}
{"ID":"378a9b6862c8fa5c6915f158d16e4416243159bb9da44c564896c065bc6c1cf4","Type":"data"}
{"ID":"42aad1ab6cc964043e53e5da13ed0f2b44a3bf6ae7702f60a805f13028377524","Type":"data"}
{"ID":"47cf470c1c6de9af00b3b1ee963de8b94f51a2870b3338b3f33cfc565c0f8be4","Type":"data"}
{"ID":"615e8851030f318751f3c8baf8fbfa9958e2dd7f25dc1a87dcf6d6f79d1f1a9f","Type":"data"}
{"ID":"83bf0196cf45bbca0be7e292688a3622af7888c0e9ec01bb78edaff302cced06","Type":"data"}
{"ID":"9d65ba6443863394a8c6582fef4a8aaab2fb46417eef41f1792cdbdb38ee0b4c","Type":"data"}
{"ID":"b3915971171e049292e28d7bc61fe362e94f73aa49b578f4ca1322b47d7fc39c","Type":"data"}
{"ID":"c0775cfc822f59524b4ed714d257607fd5f2c9f0dc9f65763a86ffc33aac325b","Type":"data"}
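The three blob lists above are newline-delimited JSON: one record of blob ID and type ("data" or "tree") per line, describing which blobs a snapshot still references, which is the information prune needs to decide what to keep. A hedged sketch of reading such a list line by line (the path and names are illustrative):

    package main

    import (
    	"bufio"
    	"encoding/json"
    	"fmt"
    	"os"
    )

    // blobRef mirrors one line of the fixture.
    type blobRef struct {
    	ID   string
    	Type string
    }

    func main() {
    	f, err := os.Open("used_blobs") // illustrative path
    	if err != nil {
    		panic(err)
    	}
    	defer f.Close()

    	trees, data := 0, 0
    	sc := bufio.NewScanner(f)
    	for sc.Scan() {
    		var ref blobRef
    		if err := json.Unmarshal(sc.Bytes(), &ref); err != nil {
    			panic(err)
    		}
    		if ref.Type == "tree" {
    			trees++
    		} else {
    			data++
    		}
    	}
    	if err := sc.Err(); err != nil {
    		panic(err)
    	}

    	fmt.Printf("%d tree blobs, %d data blobs\n", trees, data)
    }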

View File

@ -1,6 +1,7 @@
 package restic

 import (
+	"encoding/json"
 	"fmt"
 	"io"
 	"math/rand"
@ -13,86 +14,23 @@ import (
 	"github.com/restic/chunker"
 )

-type randReader struct {
-	rnd *rand.Rand
-	buf []byte
-}
-
-func newRandReader(rnd *rand.Rand) io.Reader {
-	return &randReader{rnd: rnd, buf: make([]byte, 0, 7)}
-}
-
-func (rd *randReader) read(p []byte) (n int, err error) {
-	if len(p)%7 != 0 {
-		panic("invalid buffer length, not multiple of 7")
-	}
-
-	rnd := rd.rnd
-	for i := 0; i < len(p); i += 7 {
-		val := rnd.Int63()
-
-		p[i+0] = byte(val >> 0)
-		p[i+1] = byte(val >> 8)
-		p[i+2] = byte(val >> 16)
-		p[i+3] = byte(val >> 24)
-		p[i+4] = byte(val >> 32)
-		p[i+5] = byte(val >> 40)
-		p[i+6] = byte(val >> 48)
-	}
-
-	return len(p), nil
-}
-
-func (rd *randReader) Read(p []byte) (int, error) {
-	// first, copy buffer to p
-	pos := copy(p, rd.buf)
-	copy(rd.buf, rd.buf[pos:])
-
-	// shorten buf and p accordingly
-	rd.buf = rd.buf[:len(rd.buf)-pos]
-	p = p[pos:]
-
-	// if this is enough to fill p, return
-	if len(p) == 0 {
-		return pos, nil
-	}
-
-	// load multiple of 7 byte
-	l := (len(p) / 7) * 7
-	n, err := rd.read(p[:l])
-	pos += n
-	if err != nil {
-		return pos, err
-	}
-	p = p[n:]
-
-	// load 7 byte to temp buffer
-	rd.buf = rd.buf[:7]
-	n, err = rd.read(rd.buf)
-	if err != nil {
-		return pos, err
-	}
-
-	// copy the remaining bytes from the buffer to p
-	n = copy(p, rd.buf)
-	pos += n
-
-	// save the remaining bytes in rd.buf
-	n = copy(rd.buf, rd.buf[n:])
-	rd.buf = rd.buf[:n]
-
-	return pos, nil
-}
-
 // fakeFile returns a reader which yields deterministic pseudo-random data.
 func fakeFile(t testing.TB, seed, size int64) io.Reader {
-	return io.LimitReader(newRandReader(rand.New(rand.NewSource(seed))), size)
+	return io.LimitReader(repository.NewRandReader(rand.New(rand.NewSource(seed))), size)
+}
+
+type fakeFileSystem struct {
+	t           testing.TB
+	repo        *repository.Repository
+	knownBlobs  backend.IDSet
+	duplication float32
 }

 // saveFile reads from rd and saves the blobs in the repository. The list of
 // IDs is returned.
-func saveFile(t testing.TB, repo *repository.Repository, rd io.Reader) (blobs backend.IDs) {
-	ch := chunker.New(rd, repo.Config.ChunkerPolynomial)
+func (fs fakeFileSystem) saveFile(rd io.Reader) (blobs backend.IDs) {
+	blobs = backend.IDs{}
+	ch := chunker.New(rd, fs.repo.Config.ChunkerPolynomial)

 	for {
 		chunk, err := ch.Next(getBuf())
@ -101,47 +39,107 @@ func saveFile(t testing.TB, repo *repository.Repository, rd io.Reader) (blobs ba
 		}

 		if err != nil {
-			t.Fatalf("unabel to save chunk in repo: %v", err)
+			fs.t.Fatalf("unable to save chunk in repo: %v", err)
 		}

-		id, err := repo.SaveAndEncrypt(pack.Data, chunk.Data, nil)
-		if err != nil {
-			t.Fatalf("error saving chunk: %v", err)
+		id := backend.Hash(chunk.Data)
+		if !fs.blobIsKnown(id, pack.Data) {
+			_, err := fs.repo.SaveAndEncrypt(pack.Data, chunk.Data, &id)
+			if err != nil {
+				fs.t.Fatalf("error saving chunk: %v", err)
+			}
+
+			fs.knownBlobs.Insert(id)
 		}

+		freeBuf(chunk.Data)
+
 		blobs = append(blobs, id)
 	}

 	return blobs
 }

-const maxFileSize = 1500000
-const maxSeed = 100
+const (
+	maxFileSize = 1500000
+	maxSeed     = 32
+	maxNodes    = 32
+)
+
+func (fs fakeFileSystem) treeIsKnown(tree *Tree) (bool, backend.ID) {
+	data, err := json.Marshal(tree)
+	if err != nil {
+		fs.t.Fatalf("json.Marshal(tree) returned error: %v", err)
+		return false, backend.ID{}
+	}
+	data = append(data, '\n')
+
+	id := backend.Hash(data)
+	return fs.blobIsKnown(id, pack.Tree), id
+}
+
+func (fs fakeFileSystem) blobIsKnown(id backend.ID, t pack.BlobType) bool {
+	if rand.Float32() < fs.duplication {
+		return false
+	}
+
+	if fs.knownBlobs.Has(id) {
+		return true
+	}
+
+	if fs.repo.Index().Has(id, t) {
+		return true
+	}
+
+	fs.knownBlobs.Insert(id)
+	return false
+}

 // saveTree saves a tree of fake files in the repo and returns the ID.
-func saveTree(t testing.TB, repo *repository.Repository, seed int64) backend.ID {
+func (fs fakeFileSystem) saveTree(seed int64, depth int) backend.ID {
 	rnd := rand.NewSource(seed)
-	numNodes := int(rnd.Int63() % 64)
-	t.Logf("create %v nodes", numNodes)
+	numNodes := int(rnd.Int63() % maxNodes)

 	var tree Tree
 	for i := 0; i < numNodes; i++ {
-		seed := rnd.Int63() % maxSeed
-		size := rnd.Int63() % maxFileSize
-
-		node := &Node{
-			Name: fmt.Sprintf("file-%v", seed),
-			Type: "file",
-			Mode: 0644,
-			Size: uint64(size),
+		// randomly select the type of the node, either tree (p = 1/4) or file (p = 3/4).
+		if depth > 1 && rnd.Int63()%4 == 0 {
+			treeSeed := rnd.Int63() % maxSeed
+			id := fs.saveTree(treeSeed, depth-1)
+
+			node := &Node{
+				Name:    fmt.Sprintf("dir-%v", treeSeed),
+				Type:    "dir",
+				Mode:    0755,
+				Subtree: &id,
+			}
+
+			tree.Nodes = append(tree.Nodes, node)
+			continue
 		}

-		node.Content = saveFile(t, repo, fakeFile(t, seed, size))
+		fileSeed := rnd.Int63() % maxSeed
+		fileSize := (maxFileSize / maxSeed) * fileSeed
+
+		node := &Node{
+			Name: fmt.Sprintf("file-%v", fileSeed),
+			Type: "file",
+			Mode: 0644,
+			Size: uint64(fileSize),
+		}
+
+		node.Content = fs.saveFile(fakeFile(fs.t, fileSeed, fileSize))
 		tree.Nodes = append(tree.Nodes, node)
 	}

-	id, err := repo.SaveJSON(pack.Tree, tree)
+	if known, id := fs.treeIsKnown(&tree); known {
+		return id
+	}
+
+	id, err := fs.repo.SaveJSON(pack.Tree, tree)
 	if err != nil {
-		t.Fatal(err)
+		fs.t.Fatal(err)
 	}

 	return id
@ -149,8 +147,13 @@ func saveTree(t testing.TB, repo *repository.Repository, seed int64) backend.ID

 // TestCreateSnapshot creates a snapshot filled with fake data. The
 // fake data is generated deterministically from the timestamp `at`, which is
-// also used as the snapshot's timestamp.
-func TestCreateSnapshot(t testing.TB, repo *repository.Repository, at time.Time) backend.ID {
+// also used as the snapshot's timestamp. The tree's depth can be specified
+// with the parameter depth. The parameter duplication is a probability that
+// the same blob will saved again.
+func TestCreateSnapshot(t testing.TB, repo *repository.Repository, at time.Time, depth int, duplication float32) *Snapshot {
+	seed := at.Unix()
+	t.Logf("create fake snapshot at %s with seed %d", at, seed)
+
 	fakedir := fmt.Sprintf("fakedir-at-%v", at.Format("2006-01-02 15:04:05"))
 	snapshot, err := NewSnapshot([]string{fakedir})
 	if err != nil {
@ -158,7 +161,14 @@ func TestCreateSnapshot(t testing.TB, repo *repository.Repository, at time.Time)
 	}
 	snapshot.Time = at

-	treeID := saveTree(t, repo, at.UnixNano())
+	fs := fakeFileSystem{
+		t:           t,
+		repo:        repo,
+		knownBlobs:  backend.NewIDSet(),
+		duplication: duplication,
+	}
+
+	treeID := fs.saveTree(seed, depth)
 	snapshot.Tree = &treeID

 	id, err := repo.SaveJSONUnpacked(backend.Snapshot, snapshot)
@ -166,6 +176,8 @@ func TestCreateSnapshot(t testing.TB, repo *repository.Repository, at time.Time)
 		t.Fatal(err)
 	}

+	snapshot.id = &id
+
 	t.Logf("saved snapshot %v", id.Str())

 	err = repo.Flush()
@ -178,5 +190,22 @@ func TestCreateSnapshot(t testing.TB, repo *repository.Repository, at time.Time)
 		t.Fatal(err)
 	}

-	return id
+	return snapshot
+}
+
+// TestResetRepository removes all packs and indexes from the repository.
+func TestResetRepository(t testing.TB, repo *repository.Repository) {
+	done := make(chan struct{})
+	defer close(done)
+
+	for _, tpe := range []backend.Type{backend.Snapshot, backend.Index, backend.Data} {
+		for id := range repo.Backend().List(tpe, done) {
+			err := repo.Backend().Remove(tpe, id)
+			if err != nil {
+				t.Errorf("removing %v (%v) failed: %v", id[0:12], tpe, err)
+			}
+		}
+	}
+
+	repo.SetIndex(repository.NewMasterIndex())
 }
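The reworked helper delegates random data generation to repository.NewRandReader and derives every seed from the snapshot timestamp, so the same `at` always yields the same blobs. This standalone sketch shows the property the tests rely on (readerFunc and randReader are illustrative stand-ins, not restic's implementation):

    package main

    import (
    	"crypto/sha256"
    	"fmt"
    	"io"
    	"io/ioutil"
    	"math/rand"
    )

    type readerFunc func([]byte) (int, error)

    func (f readerFunc) Read(p []byte) (int, error) { return f(p) }

    // randReader is an illustrative stand-in for repository.NewRandReader:
    // a reader over the output of a seeded math/rand source.
    func randReader(seed int64) io.Reader {
    	rnd := rand.New(rand.NewSource(seed))
    	return readerFunc(func(p []byte) (int, error) {
    		for i := range p {
    			p[i] = byte(rnd.Int63())
    		}
    		return len(p), nil
    	})
    }

    func main() {
    	// The same seed and size always produce the same bytes; this is
    	// what makes TestCreateSnapshot reproducible for a fixed timestamp.
    	a, _ := ioutil.ReadAll(io.LimitReader(randReader(7), 1024))
    	b, _ := ioutil.ReadAll(io.LimitReader(randReader(7), 1024))
    	fmt.Println(sha256.Sum256(a) == sha256.Sum256(b)) // prints true
    }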

View File

@ -10,14 +10,17 @@ import (

 var testSnapshotTime = time.Unix(1460289341, 207401672)

-const testCreateSnapshots = 3
+const (
+	testCreateSnapshots = 3
+	testDepth           = 2
+)

 func TestCreateSnapshot(t *testing.T) {
 	repo, cleanup := repository.TestRepository(t)
 	defer cleanup()

 	for i := 0; i < testCreateSnapshots; i++ {
-		restic.TestCreateSnapshot(t, repo, testSnapshotTime.Add(time.Duration(i)*time.Second))
+		restic.TestCreateSnapshot(t, repo, testSnapshotTime.Add(time.Duration(i)*time.Second), testDepth, 0)
 	}

 	snapshots, err := restic.LoadAllSnapshots(repo)
@ -42,30 +45,17 @@ func TestCreateSnapshot(t *testing.T) {
 		t.Fatalf("snapshot has zero tree ID")
 	}

-	chkr := checker.New(repo)
-
-	hints, errs := chkr.LoadIndex()
-	if len(errs) != 0 {
-		t.Fatalf("errors loading index: %v", errs)
-	}
-
-	if len(hints) != 0 {
-		t.Fatalf("errors loading index: %v", hints)
-	}
-
-	done := make(chan struct{})
-	defer close(done)
-	errChan := make(chan error)
-	go chkr.Structure(errChan, done)
-	for err := range errChan {
-		t.Error(err)
-	}
-
-	errChan = make(chan error)
-	go chkr.ReadData(nil, errChan, done)
-	for err := range errChan {
-		t.Error(err)
+	checker.TestCheckRepo(t, repo)
+}
+
+func BenchmarkCreateSnapshot(b *testing.B) {
+	repo, cleanup := repository.TestRepository(b)
+	defer cleanup()
+
+	b.ResetTimer()
+
+	for i := 0; i < b.N; i++ {
+		restic.TestCreateSnapshot(b, repo, testSnapshotTime, testDepth, 0)
+		restic.TestResetRepository(b, repo)
 	}
 }

View File

@ -0,0 +1,20 @@
package types
import (
"restic/backend"
"restic/pack"
)
// Repository manages encrypted and packed data stored in a backend.
type Repository interface {
LoadJSONUnpacked(backend.Type, backend.ID, interface{}) error
SaveJSONUnpacked(backend.Type, interface{}) (backend.ID, error)
Lister
}
// Lister combines listing packs in a repo and blobs in a pack.
type Lister interface {
List(backend.Type, <-chan struct{}) <-chan backend.ID
ListPack(backend.ID) ([]pack.Blob, int64, error)
}
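The new types package keeps these interfaces deliberately narrow, so commands like prune can be tested against lightweight fakes instead of a full repository. As an illustration with simplified stand-in types (not restic's real ones), an in-memory Lister might look like this:

    package main

    import "fmt"

    // Simplified stand-ins for backend.Type, backend.ID and pack.Blob; the
    // real definitions live in restic's backend and pack packages.
    type Type string
    type ID string
    type Blob struct{ ID ID }

    // Lister mirrors the shape of the interface above.
    type Lister interface {
    	List(t Type, done <-chan struct{}) <-chan ID
    	ListPack(id ID) ([]Blob, int64, error)
    }

    // memLister is a toy in-memory implementation, handy in tests.
    type memLister struct {
    	ids   map[Type][]ID
    	packs map[ID][]Blob
    }

    func (m *memLister) List(t Type, done <-chan struct{}) <-chan ID {
    	ch := make(chan ID)
    	go func() {
    		defer close(ch)
    		for _, id := range m.ids[t] {
    			select {
    			case ch <- id:
    			case <-done:
    				return
    			}
    		}
    	}()
    	return ch
    }

    func (m *memLister) ListPack(id ID) ([]Blob, int64, error) {
    	blobs := m.packs[id]
    	return blobs, int64(len(blobs)), nil
    }

    func main() {
    	var l Lister = &memLister{
    		ids:   map[Type][]ID{"data": {"a", "b"}},
    		packs: map[ID][]Blob{"a": {{ID: "x"}}},
    	}

    	done := make(chan struct{})
    	defer close(done)
    	for id := range l.List("data", done) {
    		fmt.Println("pack:", id)
    	}
    }

The done channel mirrors the cancellation convention used throughout restic at this point in its history: the consumer closes it to stop a listing early without leaking the producing goroutine.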