2
2
mirror of https://github.com/octoleo/restic.git synced 2024-11-16 18:15:20 +00:00

Merge pull request #203 from restic/add-locking

Add locking
This commit is contained in:
Alexander Neumann 2015-06-28 22:52:46 +02:00
commit 9e10c21c12
27 changed files with 924 additions and 196 deletions

View File

@ -3,7 +3,6 @@ package restic_test
import ( import (
"bytes" "bytes"
"crypto/sha256" "crypto/sha256"
"flag"
"io" "io"
"testing" "testing"
@ -15,7 +14,6 @@ import (
. "github.com/restic/restic/test" . "github.com/restic/restic/test"
) )
var benchArchiveDirectory = flag.String("test.benchdir", ".", "benchmark archiving a real directory (default: .)")
var testPol = chunker.Pol(0x3DA3358B4DC173) var testPol = chunker.Pol(0x3DA3358B4DC173)
type Rdr interface { type Rdr interface {
@ -48,12 +46,12 @@ func benchmarkChunkEncrypt(b testing.TB, buf, buf2 []byte, rd Rdr, key *crypto.K
} }
func BenchmarkChunkEncrypt(b *testing.B) { func BenchmarkChunkEncrypt(b *testing.B) {
repo := SetupRepo()
defer TeardownRepo(repo)
data := Random(23, 10<<20) // 10MiB data := Random(23, 10<<20) // 10MiB
rd := bytes.NewReader(data) rd := bytes.NewReader(data)
s := SetupRepo(b)
defer TeardownRepo(b, s)
buf := make([]byte, chunker.MaxSize) buf := make([]byte, chunker.MaxSize)
buf2 := make([]byte, chunker.MaxSize) buf2 := make([]byte, chunker.MaxSize)
@ -61,7 +59,7 @@ func BenchmarkChunkEncrypt(b *testing.B) {
b.SetBytes(int64(len(data))) b.SetBytes(int64(len(data)))
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
benchmarkChunkEncrypt(b, buf, buf2, rd, s.Key()) benchmarkChunkEncrypt(b, buf, buf2, rd, repo.Key())
} }
} }
@ -82,8 +80,8 @@ func benchmarkChunkEncryptP(b *testing.PB, buf []byte, rd Rdr, key *crypto.Key)
} }
func BenchmarkChunkEncryptParallel(b *testing.B) { func BenchmarkChunkEncryptParallel(b *testing.B) {
s := SetupRepo(b) repo := SetupRepo()
defer TeardownRepo(b, s) defer TeardownRepo(repo)
data := Random(23, 10<<20) // 10MiB data := Random(23, 10<<20) // 10MiB
@ -95,25 +93,25 @@ func BenchmarkChunkEncryptParallel(b *testing.B) {
b.RunParallel(func(pb *testing.PB) { b.RunParallel(func(pb *testing.PB) {
for pb.Next() { for pb.Next() {
rd := bytes.NewReader(data) rd := bytes.NewReader(data)
benchmarkChunkEncryptP(pb, buf, rd, s.Key()) benchmarkChunkEncryptP(pb, buf, rd, repo.Key())
} }
}) })
} }
func archiveDirectory(b testing.TB) { func archiveDirectory(b testing.TB) {
repo := SetupRepo(b) repo := SetupRepo()
defer TeardownRepo(b, repo) defer TeardownRepo(repo)
arch := restic.NewArchiver(repo) arch := restic.NewArchiver(repo)
_, id, err := arch.Snapshot(nil, []string{*benchArchiveDirectory}, nil) _, id, err := arch.Snapshot(nil, []string{BenchArchiveDirectory}, nil)
OK(b, err) OK(b, err)
b.Logf("snapshot archived as %v", id) b.Logf("snapshot archived as %v", id)
} }
func TestArchiveDirectory(t *testing.T) { func TestArchiveDirectory(t *testing.T) {
if *benchArchiveDirectory == "" { if BenchArchiveDirectory == "" {
t.Skip("benchdir not set, skipping TestArchiveDirectory") t.Skip("benchdir not set, skipping TestArchiveDirectory")
} }
@ -121,7 +119,7 @@ func TestArchiveDirectory(t *testing.T) {
} }
func BenchmarkArchiveDirectory(b *testing.B) { func BenchmarkArchiveDirectory(b *testing.B) {
if *benchArchiveDirectory == "" { if BenchArchiveDirectory == "" {
b.Skip("benchdir not set, skipping BenchmarkArchiveDirectory") b.Skip("benchdir not set, skipping BenchmarkArchiveDirectory")
} }
@ -131,13 +129,13 @@ func BenchmarkArchiveDirectory(b *testing.B) {
} }
func archiveWithDedup(t testing.TB) { func archiveWithDedup(t testing.TB) {
if *benchArchiveDirectory == "" { repo := SetupRepo()
defer TeardownRepo(repo)
if BenchArchiveDirectory == "" {
t.Skip("benchdir not set, skipping TestArchiverDedup") t.Skip("benchdir not set, skipping TestArchiverDedup")
} }
repo := SetupRepo(t)
defer TeardownRepo(t, repo)
var cnt struct { var cnt struct {
before, after, after2 struct { before, after, after2 struct {
packs, dataBlobs, treeBlobs uint packs, dataBlobs, treeBlobs uint
@ -145,7 +143,7 @@ func archiveWithDedup(t testing.TB) {
} }
// archive a few files // archive a few files
sn := SnapshotDir(t, repo, *benchArchiveDirectory, nil) sn := SnapshotDir(t, repo, BenchArchiveDirectory, nil)
t.Logf("archived snapshot %v", sn.ID().Str()) t.Logf("archived snapshot %v", sn.ID().Str())
// get archive stats // get archive stats
@ -156,7 +154,7 @@ func archiveWithDedup(t testing.TB) {
cnt.before.packs, cnt.before.dataBlobs, cnt.before.treeBlobs) cnt.before.packs, cnt.before.dataBlobs, cnt.before.treeBlobs)
// archive the same files again, without parent snapshot // archive the same files again, without parent snapshot
sn2 := SnapshotDir(t, repo, *benchArchiveDirectory, nil) sn2 := SnapshotDir(t, repo, BenchArchiveDirectory, nil)
t.Logf("archived snapshot %v", sn2.ID().Str()) t.Logf("archived snapshot %v", sn2.ID().Str())
// get archive stats again // get archive stats again
@ -173,7 +171,7 @@ func archiveWithDedup(t testing.TB) {
} }
// archive the same files again, with a parent snapshot // archive the same files again, with a parent snapshot
sn3 := SnapshotDir(t, repo, *benchArchiveDirectory, sn2.ID()) sn3 := SnapshotDir(t, repo, BenchArchiveDirectory, sn2.ID())
t.Logf("archived snapshot %v, parent %v", sn3.ID().Str(), sn2.ID().Str()) t.Logf("archived snapshot %v, parent %v", sn3.ID().Str(), sn2.ID().Str())
// get archive stats again // get archive stats again
@ -195,23 +193,23 @@ func TestArchiveDedup(t *testing.T) {
} }
func BenchmarkLoadTree(t *testing.B) { func BenchmarkLoadTree(t *testing.B) {
if *benchArchiveDirectory == "" { repo := SetupRepo()
defer TeardownRepo(repo)
if BenchArchiveDirectory == "" {
t.Skip("benchdir not set, skipping TestArchiverDedup") t.Skip("benchdir not set, skipping TestArchiverDedup")
} }
s := SetupRepo(t)
defer TeardownRepo(t, s)
// archive a few files // archive a few files
arch := restic.NewArchiver(s) arch := restic.NewArchiver(repo)
sn, _, err := arch.Snapshot(nil, []string{*benchArchiveDirectory}, nil) sn, _, err := arch.Snapshot(nil, []string{BenchArchiveDirectory}, nil)
OK(t, err) OK(t, err)
t.Logf("archived snapshot %v", sn.ID()) t.Logf("archived snapshot %v", sn.ID())
list := make([]backend.ID, 0, 10) list := make([]backend.ID, 0, 10)
done := make(chan struct{}) done := make(chan struct{})
for blob := range s.Index().Each(done) { for blob := range repo.Index().Each(done) {
if blob.Type != pack.Tree { if blob.Type != pack.Tree {
continue continue
} }
@ -228,7 +226,7 @@ func BenchmarkLoadTree(t *testing.B) {
for i := 0; i < t.N; i++ { for i := 0; i < t.N; i++ {
for _, id := range list { for _, id := range list {
_, err := restic.LoadTree(s, id) _, err := restic.LoadTree(repo, id)
OK(t, err) OK(t, err)
} }
} }

View File

@ -8,8 +8,8 @@ import (
) )
func TestCache(t *testing.T) { func TestCache(t *testing.T) {
repo := SetupRepo(t) repo := SetupRepo()
defer TeardownRepo(t, repo) defer TeardownRepo(repo)
_, err := restic.NewCache(repo, "") _, err := restic.NewCache(repo, "")
OK(t, err) OK(t, err)
@ -17,7 +17,7 @@ func TestCache(t *testing.T) {
arch := restic.NewArchiver(repo) arch := restic.NewArchiver(repo)
// archive some files, this should automatically cache all blobs from the snapshot // archive some files, this should automatically cache all blobs from the snapshot
_, _, err = arch.Snapshot(nil, []string{*benchArchiveDirectory}, nil) _, _, err = arch.Snapshot(nil, []string{BenchArchiveDirectory}, nil)
// TODO: test caching index // TODO: test caching index
} }

View File

@ -5,12 +5,10 @@ import (
"crypto/md5" "crypto/md5"
"crypto/sha256" "crypto/sha256"
"encoding/hex" "encoding/hex"
"flag"
"hash" "hash"
"io" "io"
"io/ioutil" "io/ioutil"
"math/rand" "math/rand"
"os"
"testing" "testing"
"time" "time"
@ -18,8 +16,6 @@ import (
. "github.com/restic/restic/test" . "github.com/restic/restic/test"
) )
var benchmarkFile = flag.String("bench.file", "", "read from this file for benchmark")
func parseDigest(s string) []byte { func parseDigest(s string) []byte {
d, err := hex.DecodeString(s) d, err := hex.DecodeString(s)
if err != nil { if err != nil {
@ -247,29 +243,8 @@ func TestChunkerWithoutHash(t *testing.T) {
} }
func benchmarkChunker(b *testing.B, hash hash.Hash) { func benchmarkChunker(b *testing.B, hash hash.Hash) {
var ( size := 10 * 1024 * 1024
rd io.ReadSeeker rd := bytes.NewReader(getRandom(23, size))
size int
)
if *benchmarkFile != "" {
b.Logf("using file %q for benchmark", *benchmarkFile)
f, err := os.Open(*benchmarkFile)
if err != nil {
b.Fatalf("open(%q): %v", *benchmarkFile, err)
}
fi, err := f.Stat()
if err != nil {
b.Fatalf("lstat(%q): %v", *benchmarkFile, err)
}
size = int(fi.Size())
rd = f
} else {
size = 10 * 1024 * 1024
rd = bytes.NewReader(getRandom(23, size))
}
b.ResetTimer() b.ResetTimer()
b.SetBytes(int64(size)) b.SetBytes(int64(size))

View File

@ -215,12 +215,18 @@ func (cmd CmdBackup) Execute(args []string) error {
target = append(target, d) target = append(target, d)
} }
s, err := cmd.global.OpenRepository() repo, err := cmd.global.OpenRepository()
if err != nil { if err != nil {
return err return err
} }
err = s.LoadIndex() lock, err := lockRepo(repo)
defer unlockRepo(lock)
if err != nil {
return err
}
err = repo.LoadIndex()
if err != nil { if err != nil {
return err return err
} }
@ -229,7 +235,7 @@ func (cmd CmdBackup) Execute(args []string) error {
// Force using a parent // Force using a parent
if !cmd.Force && cmd.Parent != "" { if !cmd.Force && cmd.Parent != "" {
parentSnapshotID, err = restic.FindSnapshot(s, cmd.Parent) parentSnapshotID, err = restic.FindSnapshot(repo, cmd.Parent)
if err != nil { if err != nil {
return fmt.Errorf("invalid id %q: %v", cmd.Parent, err) return fmt.Errorf("invalid id %q: %v", cmd.Parent, err)
} }
@ -239,7 +245,7 @@ func (cmd CmdBackup) Execute(args []string) error {
// Find last snapshot to set it as parent, if not already set // Find last snapshot to set it as parent, if not already set
if !cmd.Force && parentSnapshotID == nil { if !cmd.Force && parentSnapshotID == nil {
parentSnapshotID, err = findLatestSnapshot(s, target) parentSnapshotID, err = findLatestSnapshot(repo, target)
if err != nil { if err != nil {
return err return err
} }
@ -258,7 +264,7 @@ func (cmd CmdBackup) Execute(args []string) error {
// return true // return true
// } // }
arch := restic.NewArchiver(s) arch := restic.NewArchiver(repo)
arch.Error = func(dir string, fi os.FileInfo, err error) error { arch.Error = func(dir string, fi os.FileInfo, err error) error {
// TODO: make ignoring errors configurable // TODO: make ignoring errors configurable

View File

@ -29,18 +29,24 @@ func (cmd CmdCache) Execute(args []string) error {
// return fmt.Errorf("wrong number of parameters, Usage: %s", cmd.Usage()) // return fmt.Errorf("wrong number of parameters, Usage: %s", cmd.Usage())
// } // }
s, err := cmd.global.OpenRepository() repo, err := cmd.global.OpenRepository()
if err != nil { if err != nil {
return err return err
} }
cache, err := restic.NewCache(s, cmd.global.CacheDir) lock, err := lockRepo(repo)
defer unlockRepo(lock)
if err != nil {
return err
}
cache, err := restic.NewCache(repo, cmd.global.CacheDir)
if err != nil { if err != nil {
return err return err
} }
fmt.Printf("clear cache for old snapshots\n") fmt.Printf("clear cache for old snapshots\n")
err = cache.Clear(s) err = cache.Clear(repo)
if err != nil { if err != nil {
return err return err
} }

View File

@ -37,7 +37,13 @@ func (cmd CmdCat) Execute(args []string) error {
return fmt.Errorf("type or ID not specified, Usage: %s", cmd.Usage()) return fmt.Errorf("type or ID not specified, Usage: %s", cmd.Usage())
} }
s, err := cmd.global.OpenRepository() repo, err := cmd.global.OpenRepository()
if err != nil {
return err
}
lock, err := lockRepo(repo)
defer unlockRepo(lock)
if err != nil { if err != nil {
return err return err
} }
@ -55,7 +61,7 @@ func (cmd CmdCat) Execute(args []string) error {
} }
// find snapshot id with prefix // find snapshot id with prefix
id, err = restic.FindSnapshot(s, args[1]) id, err = restic.FindSnapshot(repo, args[1])
if err != nil { if err != nil {
return err return err
} }
@ -65,7 +71,7 @@ func (cmd CmdCat) Execute(args []string) error {
// handle all types that don't need an index // handle all types that don't need an index
switch tpe { switch tpe {
case "config": case "config":
buf, err := json.MarshalIndent(s.Config, "", " ") buf, err := json.MarshalIndent(repo.Config, "", " ")
if err != nil { if err != nil {
return err return err
} }
@ -73,7 +79,7 @@ func (cmd CmdCat) Execute(args []string) error {
fmt.Println(string(buf)) fmt.Println(string(buf))
return nil return nil
case "index": case "index":
buf, err := s.Load(backend.Index, id) buf, err := repo.Load(backend.Index, id)
if err != nil { if err != nil {
return err return err
} }
@ -83,7 +89,7 @@ func (cmd CmdCat) Execute(args []string) error {
case "snapshot": case "snapshot":
sn := &restic.Snapshot{} sn := &restic.Snapshot{}
err = s.LoadJSONUnpacked(backend.Snapshot, id, sn) err = repo.LoadJSONUnpacked(backend.Snapshot, id, sn)
if err != nil { if err != nil {
return err return err
} }
@ -97,7 +103,7 @@ func (cmd CmdCat) Execute(args []string) error {
return nil return nil
case "key": case "key":
rd, err := s.Backend().Get(backend.Key, id.String()) rd, err := repo.Backend().Get(backend.Key, id.String())
if err != nil { if err != nil {
return err return err
} }
@ -118,7 +124,7 @@ func (cmd CmdCat) Execute(args []string) error {
fmt.Println(string(buf)) fmt.Println(string(buf))
return nil return nil
case "masterkey": case "masterkey":
buf, err := json.MarshalIndent(s.Key(), "", " ") buf, err := json.MarshalIndent(repo.Key(), "", " ")
if err != nil { if err != nil {
return err return err
} }
@ -126,18 +132,30 @@ func (cmd CmdCat) Execute(args []string) error {
fmt.Println(string(buf)) fmt.Println(string(buf))
return nil return nil
case "lock": case "lock":
return errors.New("not yet implemented") lock, err := restic.LoadLock(repo, id)
if err != nil {
return err
}
buf, err := json.MarshalIndent(&lock, "", " ")
if err != nil {
return err
}
fmt.Println(string(buf))
return nil
} }
// load index, handle all the other types // load index, handle all the other types
err = s.LoadIndex() err = repo.LoadIndex()
if err != nil { if err != nil {
return err return err
} }
switch tpe { switch tpe {
case "pack": case "pack":
rd, err := s.Backend().Get(backend.Data, id.String()) rd, err := repo.Backend().Get(backend.Data, id.String())
if err != nil { if err != nil {
return err return err
} }
@ -146,7 +164,7 @@ func (cmd CmdCat) Execute(args []string) error {
return err return err
case "blob": case "blob":
data, err := s.LoadBlob(pack.Data, id) data, err := repo.LoadBlob(pack.Data, id)
if err == nil { if err == nil {
_, err = os.Stdout.Write(data) _, err = os.Stdout.Write(data)
return err return err
@ -158,7 +176,7 @@ func (cmd CmdCat) Execute(args []string) error {
case "tree": case "tree":
debug.Log("cat", "cat tree %v", id.Str()) debug.Log("cat", "cat tree %v", id.Str())
tree := restic.NewTree() tree := restic.NewTree()
err = s.LoadJSONPack(pack.Tree, id, tree) err = repo.LoadJSONPack(pack.Tree, id, tree)
if err != nil { if err != nil {
debug.Log("cat", "unable to load tree %v: %v", id.Str(), err) debug.Log("cat", "unable to load tree %v: %v", id.Str(), err)
return err return err

View File

@ -109,6 +109,12 @@ func (cmd CmdDump) Execute(args []string) error {
return err return err
} }
lock, err := lockRepo(repo)
defer unlockRepo(lock)
if err != nil {
return err
}
err = repo.LoadIndex() err = repo.LoadIndex()
if err != nil { if err != nil {
return err return err

View File

@ -157,7 +157,13 @@ func (c CmdFind) Execute(args []string) error {
} }
} }
s, err := c.global.OpenRepository() repo, err := c.global.OpenRepository()
if err != nil {
return err
}
lock, err := lockRepo(repo)
defer unlockRepo(lock)
if err != nil { if err != nil {
return err return err
} }
@ -165,18 +171,18 @@ func (c CmdFind) Execute(args []string) error {
c.pattern = args[0] c.pattern = args[0]
if c.Snapshot != "" { if c.Snapshot != "" {
snapshotID, err := restic.FindSnapshot(s, c.Snapshot) snapshotID, err := restic.FindSnapshot(repo, c.Snapshot)
if err != nil { if err != nil {
return fmt.Errorf("invalid id %q: %v", args[1], err) return fmt.Errorf("invalid id %q: %v", args[1], err)
} }
return c.findInSnapshot(s, snapshotID) return c.findInSnapshot(repo, snapshotID)
} }
done := make(chan struct{}) done := make(chan struct{})
defer close(done) defer close(done)
for snapshotID := range s.List(backend.Snapshot, done) { for snapshotID := range repo.List(backend.Snapshot, done) {
err := c.findInSnapshot(s, snapshotID) err := c.findInSnapshot(repo, snapshotID)
if err != nil { if err != nil {
return err return err

View File

@ -190,23 +190,29 @@ func (cmd CmdFsck) Execute(args []string) error {
cmd.Orphaned = true cmd.Orphaned = true
} }
s, err := cmd.global.OpenRepository() repo, err := cmd.global.OpenRepository()
if err != nil { if err != nil {
return err return err
} }
err = s.LoadIndex() lock, err := lockRepoExclusive(repo)
defer unlockRepo(lock)
if err != nil {
return err
}
err = repo.LoadIndex()
if err != nil { if err != nil {
return err return err
} }
if cmd.Snapshot != "" { if cmd.Snapshot != "" {
id, err := restic.FindSnapshot(s, cmd.Snapshot) id, err := restic.FindSnapshot(repo, cmd.Snapshot)
if err != nil { if err != nil {
return fmt.Errorf("invalid id %q: %v", cmd.Snapshot, err) return fmt.Errorf("invalid id %q: %v", cmd.Snapshot, err)
} }
err = fsckSnapshot(cmd, s, id) err = fsckSnapshot(cmd, repo, id)
if err != nil { if err != nil {
fmt.Fprintf(os.Stderr, "check for snapshot %v failed\n", id) fmt.Fprintf(os.Stderr, "check for snapshot %v failed\n", id)
} }
@ -223,8 +229,8 @@ func (cmd CmdFsck) Execute(args []string) error {
defer close(done) defer close(done)
var firstErr error var firstErr error
for id := range s.List(backend.Snapshot, done) { for id := range repo.List(backend.Snapshot, done) {
err = fsckSnapshot(cmd, s, id) err = fsckSnapshot(cmd, repo, id)
if err != nil { if err != nil {
fmt.Fprintf(os.Stderr, "check for snapshot %v failed\n", id) fmt.Fprintf(os.Stderr, "check for snapshot %v failed\n", id)
firstErr = err firstErr = err
@ -241,7 +247,7 @@ func (cmd CmdFsck) Execute(args []string) error {
cnt[pack.Data] = cmd.o_data cnt[pack.Data] = cmd.o_data
cnt[pack.Tree] = cmd.o_trees cnt[pack.Tree] = cmd.o_trees
for blob := range s.Index().Each(done) { for blob := range repo.Index().Each(done) {
debug.Log("restic.fsck", "checking %v blob %v\n", blob.Type, blob.ID) debug.Log("restic.fsck", "checking %v blob %v\n", blob.Type, blob.ID)
err = cnt[blob.Type].Find(blob.ID) err = cnt[blob.Type].Find(blob.ID)

View File

@ -116,25 +116,49 @@ func (cmd CmdKey) Execute(args []string) error {
return fmt.Errorf("wrong number of arguments, Usage: %s", cmd.Usage()) return fmt.Errorf("wrong number of arguments, Usage: %s", cmd.Usage())
} }
s, err := cmd.global.OpenRepository() repo, err := cmd.global.OpenRepository()
if err != nil { if err != nil {
return err return err
} }
switch args[0] { switch args[0] {
case "list": case "list":
return cmd.listKeys(s) lock, err := lockRepo(repo)
case "add": defer unlockRepo(lock)
return cmd.addKey(s)
case "rm":
id, err := backend.Find(s.Backend(), backend.Key, args[1])
if err != nil { if err != nil {
return err return err
} }
return cmd.deleteKey(s, id) return cmd.listKeys(repo)
case "add":
lock, err := lockRepo(repo)
defer unlockRepo(lock)
if err != nil {
return err
}
return cmd.addKey(repo)
case "rm":
lock, err := lockRepoExclusive(repo)
defer unlockRepo(lock)
if err != nil {
return err
}
id, err := backend.Find(repo.Backend(), backend.Key, args[1])
if err != nil {
return err
}
return cmd.deleteKey(repo, id)
case "passwd": case "passwd":
return cmd.changePassword(s) lock, err := lockRepoExclusive(repo)
defer unlockRepo(lock)
if err != nil {
return err
}
return cmd.changePassword(repo)
} }
return nil return nil

View File

@ -30,7 +30,13 @@ func (cmd CmdList) Execute(args []string) error {
return fmt.Errorf("type not specified, Usage: %s", cmd.Usage()) return fmt.Errorf("type not specified, Usage: %s", cmd.Usage())
} }
s, err := cmd.global.OpenRepository() repo, err := cmd.global.OpenRepository()
if err != nil {
return err
}
lock, err := lockRepo(repo)
defer unlockRepo(lock)
if err != nil { if err != nil {
return err return err
} }
@ -38,12 +44,12 @@ func (cmd CmdList) Execute(args []string) error {
var t backend.Type var t backend.Type
switch args[0] { switch args[0] {
case "blobs": case "blobs":
err = s.LoadIndex() err = repo.LoadIndex()
if err != nil { if err != nil {
return err return err
} }
for blob := range s.Index().Each(nil) { for blob := range repo.Index().Each(nil) {
cmd.global.Printf("%s\n", blob.ID) cmd.global.Printf("%s\n", blob.ID)
} }
@ -62,7 +68,7 @@ func (cmd CmdList) Execute(args []string) error {
return errors.New("invalid type") return errors.New("invalid type")
} }
for id := range s.List(t, nil) { for id := range repo.List(t, nil) {
cmd.global.Printf("%s\n", id) cmd.global.Printf("%s\n", id)
} }

View File

@ -69,27 +69,27 @@ func (cmd CmdLs) Execute(args []string) error {
return fmt.Errorf("wrong number of arguments, Usage: %s", cmd.Usage()) return fmt.Errorf("wrong number of arguments, Usage: %s", cmd.Usage())
} }
s, err := cmd.global.OpenRepository() repo, err := cmd.global.OpenRepository()
if err != nil { if err != nil {
return err return err
} }
err = s.LoadIndex() err = repo.LoadIndex()
if err != nil { if err != nil {
return err return err
} }
id, err := restic.FindSnapshot(s, args[0]) id, err := restic.FindSnapshot(repo, args[0])
if err != nil { if err != nil {
return err return err
} }
sn, err := restic.LoadSnapshot(s, id) sn, err := restic.LoadSnapshot(repo, id)
if err != nil { if err != nil {
return err return err
} }
fmt.Printf("snapshot of %v at %s:\n", sn.Paths, sn.Time) fmt.Printf("snapshot of %v at %s:\n", sn.Paths, sn.Time)
return printTree("", s, sn.Tree) return printTree("", repo, sn.Tree)
} }

View File

@ -30,17 +30,23 @@ func (cmd CmdRestore) Execute(args []string) error {
return fmt.Errorf("wrong number of arguments, Usage: %s", cmd.Usage()) return fmt.Errorf("wrong number of arguments, Usage: %s", cmd.Usage())
} }
s, err := cmd.global.OpenRepository() repo, err := cmd.global.OpenRepository()
if err != nil { if err != nil {
return err return err
} }
err = s.LoadIndex() lock, err := lockRepo(repo)
defer unlockRepo(lock)
if err != nil { if err != nil {
return err return err
} }
id, err := restic.FindSnapshot(s, args[0]) err = repo.LoadIndex()
if err != nil {
return err
}
id, err := restic.FindSnapshot(repo, args[0])
if err != nil { if err != nil {
cmd.global.Exitf(1, "invalid id %q: %v", args[0], err) cmd.global.Exitf(1, "invalid id %q: %v", args[0], err)
} }
@ -48,7 +54,7 @@ func (cmd CmdRestore) Execute(args []string) error {
target := args[1] target := args[1]
// create restorer // create restorer
res, err := restic.NewRestorer(s, id) res, err := restic.NewRestorer(repo, id)
if err != nil { if err != nil {
cmd.global.Exitf(2, "creating restorer failed: %v\n", err) cmd.global.Exitf(2, "creating restorer failed: %v\n", err)
} }

View File

@ -94,7 +94,13 @@ func (cmd CmdSnapshots) Execute(args []string) error {
return fmt.Errorf("wrong number of arguments, usage: %s", cmd.Usage()) return fmt.Errorf("wrong number of arguments, usage: %s", cmd.Usage())
} }
s, err := cmd.global.OpenRepository() repo, err := cmd.global.OpenRepository()
if err != nil {
return err
}
lock, err := lockRepo(repo)
defer unlockRepo(lock)
if err != nil { if err != nil {
return err return err
} }
@ -107,8 +113,8 @@ func (cmd CmdSnapshots) Execute(args []string) error {
defer close(done) defer close(done)
list := []*restic.Snapshot{} list := []*restic.Snapshot{}
for id := range s.List(backend.Snapshot, done) { for id := range repo.List(backend.Snapshot, done) {
sn, err := restic.LoadSnapshot(s, id) sn, err := restic.LoadSnapshot(repo, id)
if err != nil { if err != nil {
fmt.Fprintf(os.Stderr, "error loading snapshot %s: %v\n", id, err) fmt.Fprintf(os.Stderr, "error loading snapshot %s: %v\n", id, err)
continue continue
@ -127,7 +133,7 @@ func (cmd CmdSnapshots) Execute(args []string) error {
} }
} }
plen, err := s.PrefixLength(backend.Snapshot) plen, err := repo.PrefixLength(backend.Snapshot)
if err != nil { if err != nil {
return err return err
} }

43
cmd/restic/cmd_unlock.go Normal file
View File

@ -0,0 +1,43 @@
package main
import "github.com/restic/restic"
// CmdUnlock is the command that removes lock files from the repository.
type CmdUnlock struct {
	// RemoveAll causes every lock to be removed, not just stale ones.
	RemoveAll bool `long:"remove-all" description:"Remove all locks, even stale ones"`
	global *GlobalOptions // shared CLI options (repository location etc.)
}
// init registers the "unlock" subcommand with the global option parser.
func init() {
	_, err := parser.AddCommand("unlock",
		"remove locks",
		"The unlock command checks for stale locks and removes them",
		&CmdUnlock{global: &globalOpts})
	if err != nil {
		// Registration can only fail due to a programmer error (e.g. a
		// duplicate command name), so panicking here is appropriate.
		panic(err)
	}
}
// Usage returns the usage string shown in the command-line help.
func (cmd CmdUnlock) Usage() string {
	return "[unlock-options]"
}
// Execute opens the repository and removes stale locks from it. When the
// --remove-all flag is set, every lock is removed, stale or not.
func (cmd CmdUnlock) Execute(args []string) error {
	repo, err := cmd.global.OpenRepository()
	if err != nil {
		return err
	}

	// Select the removal strategy based on the flag.
	removeLocks := restic.RemoveStaleLocks
	if cmd.RemoveAll {
		removeLocks = restic.RemoveAllLocks
	}

	if err := removeLocks(repo); err != nil {
		return err
	}

	cmd.global.Verbosef("successfully removed locks\n")
	return nil
}

93
cmd/restic/lock.go Normal file
View File

@ -0,0 +1,93 @@
package main
import (
"fmt"
"os"
"os/signal"
"syscall"
"github.com/restic/restic"
"github.com/restic/restic/debug"
"github.com/restic/restic/repository"
)
var globalLocks []*restic.Lock
// lockRepo acquires a non-exclusive lock on repo.
func lockRepo(repo *repository.Repository) (*restic.Lock, error) {
	return lockRepository(repo, false)
}
// lockRepoExclusive acquires an exclusive lock on repo.
func lockRepoExclusive(repo *repository.Repository) (*restic.Lock, error) {
	return lockRepository(repo, true)
}
// lockRepository acquires a lock (exclusive if requested) on repo, registers
// it in globalLocks so the SIGINT handler can release it, and returns it.
// If another process already holds a conflicting lock, an explanatory
// message is printed to stderr and the process exits with status 1.
func lockRepository(repo *repository.Repository, exclusive bool) (*restic.Lock, error) {
	lockFn := restic.NewLock
	if exclusive {
		lockFn = restic.NewExclusiveLock
	}

	lock, err := lockFn(repo)
	if err != nil {
		if restic.IsAlreadyLocked(err) {
			tpe := ""
			if exclusive {
				tpe = " exclusive"
			}
			fmt.Fprintf(os.Stderr, "unable to acquire%s lock for operation:\n", tpe)
			fmt.Fprintln(os.Stderr, err)
			fmt.Fprintf(os.Stderr, "\nthe `unlock` command can be used to remove stale locks\n")
			os.Exit(1)
		}
		return nil, err
	}

	// NOTE(review): globalLocks is also traversed by the signal-handler
	// goroutine (CleanupHandler); access is not synchronized — confirm this
	// is acceptable for the CLI's usage pattern.
	globalLocks = append(globalLocks, lock)

	// err is necessarily nil here; return an explicit nil for clarity.
	return lock, nil
}
// unlockRepo releases lock and removes it from globalLocks so the cleanup
// handler will not try to unlock it again. A nil lock is ignored: callers
// `defer unlockRepo(lock)` before checking the error returned by lockRepo,
// so lock may legitimately be nil here.
func unlockRepo(lock *restic.Lock) error {
	if lock == nil {
		return nil
	}

	if err := lock.Unlock(); err != nil {
		return err
	}

	// Remove the released lock from the global list.
	for i := 0; i < len(globalLocks); i++ {
		if lock == globalLocks[i] {
			globalLocks = append(globalLocks[:i], globalLocks[i+1:]...)
			return nil
		}
	}

	return nil
}
// unlockAll releases every lock registered in globalLocks. It stops at and
// returns the first error encountered, leaving any remaining locks held.
//
// NOTE(review): globalLocks is not cleared afterwards, so a later call would
// attempt to unlock the same locks again — confirm Unlock tolerates being
// called twice on the same lock.
func unlockAll() error {
	debug.Log("unlockAll", "unlocking %d locks", len(globalLocks))
	for _, lock := range globalLocks {
		if err := lock.Unlock(); err != nil {
			return err
		}
	}
	return nil
}
// init installs a SIGINT handler so that interrupted runs still release
// their repository locks before exiting.
func init() {
	// os/signal.Notify does not block when delivering: the channel must be
	// buffered, otherwise a signal arriving while the handler is busy is
	// silently dropped.
	c := make(chan os.Signal, 1)
	signal.Notify(c, syscall.SIGINT)
	go CleanupHandler(c)
}
// CleanupHandler handles the SIGINT signal: it releases all locks registered
// in globalLocks, then terminates the process with exit status 0.
func CleanupHandler(c <-chan os.Signal) {
	for s := range c {
		debug.Log("CleanupHandler", "signal %v received, cleaning up", s)
		fmt.Println("\x1b[2KInterrupt received, cleaning up")

		// Report (but do not abort on) unlock errors: the process is
		// exiting either way, and silently dropping the error would hide
		// locks left behind in the repository.
		if err := unlockAll(); err != nil {
			fmt.Fprintf(os.Stderr, "error while unlocking: %v\n", err)
		}

		fmt.Println("exiting")
		os.Exit(0)
	}
}

View File

@ -2,7 +2,6 @@ package crypto_test
import ( import (
"bytes" "bytes"
"flag"
"io" "io"
"io/ioutil" "io/ioutil"
"os" "os"
@ -13,13 +12,13 @@ import (
. "github.com/restic/restic/test" . "github.com/restic/restic/test"
) )
var testLargeCrypto = flag.Bool("test.largecrypto", false, "also test crypto functions with large payloads") const testLargeCrypto = false
func TestEncryptDecrypt(t *testing.T) { func TestEncryptDecrypt(t *testing.T) {
k := crypto.NewRandomKey() k := crypto.NewRandomKey()
tests := []int{5, 23, 2<<18 + 23, 1 << 20} tests := []int{5, 23, 2<<18 + 23, 1 << 20}
if *testLargeCrypto { if testLargeCrypto {
tests = append(tests, 7<<20+123) tests = append(tests, 7<<20+123)
} }
@ -117,7 +116,7 @@ func TestCornerCases(t *testing.T) {
} }
func TestLargeEncrypt(t *testing.T) { func TestLargeEncrypt(t *testing.T) {
if !*testLargeCrypto { if !testLargeCrypto {
t.SkipNow() t.SkipNow()
} }
@ -252,7 +251,7 @@ func TestEncryptStreamWriter(t *testing.T) {
k := crypto.NewRandomKey() k := crypto.NewRandomKey()
tests := []int{5, 23, 2<<18 + 23, 1 << 20} tests := []int{5, 23, 2<<18 + 23, 1 << 20}
if *testLargeCrypto { if testLargeCrypto {
tests = append(tests, 7<<20+123) tests = append(tests, 7<<20+123)
} }
@ -286,7 +285,7 @@ func TestDecryptStreamReader(t *testing.T) {
k := crypto.NewRandomKey() k := crypto.NewRandomKey()
tests := []int{5, 23, 2<<18 + 23, 1 << 20} tests := []int{5, 23, 2<<18 + 23, 1 << 20}
if *testLargeCrypto { if testLargeCrypto {
tests = append(tests, 7<<20+123) tests = append(tests, 7<<20+123)
} }
@ -320,7 +319,7 @@ func TestEncryptWriter(t *testing.T) {
k := crypto.NewRandomKey() k := crypto.NewRandomKey()
tests := []int{5, 23, 2<<18 + 23, 1 << 20} tests := []int{5, 23, 2<<18 + 23, 1 << 20}
if *testLargeCrypto { if testLargeCrypto {
tests = append(tests, 7<<20+123) tests = append(tests, 7<<20+123)
} }

View File

@ -375,6 +375,47 @@ As can be seen from the output of the program `sha256sum`, the hash matches the
plaintext hash from the map included in the tree above, so the correct data has plaintext hash from the map included in the tree above, so the correct data has
been returned. been returned.
Locks
-----
The restic repository structure is designed in a way that allows parallel
access by multiple instances of restic, and even parallel writes. However,
some functions work more efficiently with, or even require, exclusive access
to the repository. In order to implement these functions, restic processes
are required to create a lock on the repository before doing anything.
Locks come in two types: exclusive and non-exclusive. At most one process
can hold an exclusive lock on the repository, and while it does there must
not be any other locks (exclusive or non-exclusive). Multiple non-exclusive
locks may be held in parallel.
A lock is a file in the subdir `locks` whose filename is the storage ID of
the contents. It is encrypted and authenticated the same way as other files
in the repository and contains the following JSON structure:
{
"time": "2015-06-27T12:18:51.759239612+02:00",
"exclusive": false,
"hostname": "kasimir",
"username": "fd0",
"pid": 13607,
"uid": 1000,
"gid": 100
}
The field `exclusive` defines the type of lock. When a new lock is to be
created, restic checks all locks in the repository. When a lock is found, it
is tested if the lock is stale, which is the case for locks with timestamps
older than 30 minutes. If the lock was created on the same machine, even for
younger locks it is tested whether the process is still alive by sending a
signal to it. If that fails, restic assumes that the process is dead and
considers the lock to be stale.
When a new lock is to be created and no other conflicting locks are
detected, restic creates a new lock, waits, and checks if other locks
appeared in the repository. Depending on the type of the other locks and the
lock to be created, restic either continues or fails.
Backups and Deduplication Backups and Deduplication
========================= =========================

282
lock.go Normal file
View File

@ -0,0 +1,282 @@
package restic
import (
"fmt"
"os"
"os/signal"
"os/user"
"strconv"
"sync"
"syscall"
"time"
"github.com/restic/restic/backend"
"github.com/restic/restic/debug"
"github.com/restic/restic/repository"
)
// Lock represents a process locking the repository for an operation.
//
// There are two types of locks: exclusive and non-exclusive. There may be many
// different non-exclusive locks, but at most one exclusive lock, which can
// only be acquired while no non-exclusive lock is held.
type Lock struct {
	Time      time.Time `json:"time"`          // moment the lock was created
	Exclusive bool      `json:"exclusive"`     // true for an exclusive lock
	Hostname  string    `json:"hostname"`      // host the locking process runs on (best-effort)
	Username  string    `json:"username"`      // user that created the lock (best-effort)
	PID       int       `json:"pid"`           // process ID of the locking process
	UID       uint32    `json:"uid,omitempty"` // numeric user ID, if it could be determined
	GID       uint32    `json:"gid,omitempty"` // numeric group ID, if it could be determined

	repo   *repository.Repository // repository this lock applies to
	lockID backend.ID             // storage ID of the lock file in the repository
}
// ErrAlreadyLocked is returned when NewLock or NewExclusiveLock are unable to
// acquire the desired lock.
type ErrAlreadyLocked struct {
	otherLock *Lock // the conflicting lock that prevented acquisition
}

// Error describes the conflict, including the other lock's details.
func (e ErrAlreadyLocked) Error() string {
	return fmt.Sprintf("repository is already locked by %v", e.otherLock)
}
// IsAlreadyLocked returns true iff err is an instance of ErrAlreadyLocked.
func IsAlreadyLocked(err error) bool {
	_, ok := err.(ErrAlreadyLocked)
	return ok
}
// NewLock returns a new, non-exclusive lock for the repository. If an
// exclusive lock is already held by another process, ErrAlreadyLocked is
// returned. The caller is responsible for releasing the lock via Unlock.
func NewLock(repo *repository.Repository) (*Lock, error) {
	return newLock(repo, false)
}
// NewExclusiveLock returns a new, exclusive lock for the repository. If
// another lock (normal and exclusive) is already held by another process,
// ErrAlreadyLocked is returned. The caller is responsible for releasing the
// lock via Unlock.
func NewExclusiveLock(repo *repository.Repository) (*Lock, error) {
	return newLock(repo, true)
}

// waitBeforeLockCheck is how long newLock waits after writing its lock file
// before re-checking the repository for concurrently created locks.
const waitBeforeLockCheck = 200 * time.Millisecond
// newLock creates a lock file (exclusive if excl is set) after making sure no
// conflicting locks exist. To mitigate the check-then-create race, the lock
// file is written first and the repository is re-checked after a short delay;
// if a conflict appeared in the meantime, the fresh lock is removed again.
func newLock(repo *repository.Repository, excl bool) (*Lock, error) {
	lock := &Lock{
		Time:      time.Now(),
		PID:       os.Getpid(),
		Exclusive: excl,
		repo:      repo,
	}

	// hostname is best-effort; on failure the field stays empty
	hn, err := os.Hostname()
	if err == nil {
		lock.Hostname = hn
	}

	if err = lock.fillUserInfo(); err != nil {
		return nil, err
	}

	if err = lock.checkForOtherLocks(); err != nil {
		return nil, err
	}

	err = lock.createLock()
	if err != nil {
		return nil, err
	}

	// give a concurrent locker a chance to become visible before re-checking
	// (NOTE(review): this narrows the race window but cannot eliminate it)
	time.Sleep(waitBeforeLockCheck)

	if err = lock.checkForOtherLocks(); err != nil {
		// the Unlock error is deliberately dropped: the conflict error is the
		// one that matters to the caller
		lock.Unlock()
		return nil, err
	}

	return lock, nil
}
// fillUserInfo sets the Username, UID and GID fields from the current OS
// user. A failed user lookup is silently ignored (the lock is then created
// without user information); errors parsing the numeric uid/gid are returned.
func (l *Lock) fillUserInfo() error {
	usr, err := user.Current()
	if err != nil {
		// deliberately not treated as an error: locking must still work when
		// the current user cannot be determined
		return nil
	}
	l.Username = usr.Username

	// NOTE(review): on platforms where Uid/Gid are not decimal numbers these
	// parses fail and lock creation errors out — confirm this is intended
	uid, err := strconv.ParseInt(usr.Uid, 10, 32)
	if err != nil {
		return err
	}
	l.UID = uint32(uid)

	gid, err := strconv.ParseInt(usr.Gid, 10, 32)
	if err != nil {
		return err
	}
	l.GID = uint32(gid)

	return nil
}
// checkForOtherLocks looks for other locks that currently exist in the repository.
//
// If an exclusive lock is to be created, checkForOtherLocks returns an error
// if there are any other locks, regardless if exclusive or not. If a
// non-exclusive lock is to be created, an error is only returned when an
// exclusive lock is found.
func (l *Lock) checkForOtherLocks() error {
	return eachLock(l.repo, func(id backend.ID, lock *Lock, err error) error {
		// skip our own lock file
		if id.Equal(l.lockID) {
			return nil
		}

		// locks that cannot be loaded are skipped
		if err != nil {
			return nil
		}

		// an exclusive lock conflicts with everything; a non-exclusive lock
		// conflicts only with an exclusive one
		if l.Exclusive || lock.Exclusive {
			return ErrAlreadyLocked{otherLock: lock}
		}

		return nil
	})
}
// eachLock loads every lock in the repository and invokes f with its ID, the
// decoded lock and any load error. Iteration stops at the first non-nil error
// returned by f.
func eachLock(repo *repository.Repository, f func(backend.ID, *Lock, error) error) error {
	done := make(chan struct{})
	defer close(done)

	for id := range repo.List(backend.Lock, done) {
		lock, loadErr := LoadLock(repo, id)
		if cbErr := f(id, lock, loadErr); cbErr != nil {
			return cbErr
		}
	}

	return nil
}
// createLock acquires the lock by serializing it as an unpacked JSON blob in
// the repository and remembering the resulting storage ID in l.lockID.
func (l *Lock) createLock() error {
	id, err := l.repo.SaveJSONUnpacked(backend.Lock, l)
	if err != nil {
		return err
	}

	l.lockID = id
	return nil
}
// Unlock removes the lock file from the repository. Calling Unlock on a nil
// lock or on a lock that was never created is a no-op; unlocking twice
// returns the backend's removal error for the already-deleted file.
func (l *Lock) Unlock() error {
	if l == nil || l.lockID == nil {
		return nil
	}

	return l.repo.Backend().Remove(backend.Lock, l.lockID.String())
}
// staleTimeout is the age after which a lock is considered stale regardless
// of the owning process.
var staleTimeout = 30 * time.Minute

// Stale returns true if the lock is stale. A lock is stale if the timestamp is
// older than 30 minutes or if it was created on the current machine and the
// process isn't alive any more.
//
// NOTE(review): the process check does not compare l.Hostname with the local
// hostname, so a PID from another machine may be probed locally — confirm.
func (l *Lock) Stale() bool {
	debug.Log("Lock.Stale", "testing if lock %v for process %d is stale", l.lockID.Str(), l.PID)
	if time.Now().Sub(l.Time) > staleTimeout {
		debug.Log("Lock.Stale", "lock is stale, timestamp is too old: %v\n", l.Time)
		return true
	}

	proc, err := os.FindProcess(l.PID)
	if err != nil {
		// bug fix: the error must be checked before deferring Release();
		// on failure proc may be nil and Release would panic
		debug.Log("Lock.Stale", "error searching for process %d: %v\n", l.PID, err)
		return true
	}
	defer proc.Release()

	debug.Log("Lock.Stale", "sending SIGHUP to process %d\n", l.PID)
	err = proc.Signal(syscall.SIGHUP)
	if err != nil {
		debug.Log("Lock.Stale", "signal error: %v, lock is probably stale\n", err)
		return true
	}

	debug.Log("Lock.Stale", "lock not stale\n")
	return false
}
// String returns a human-readable, multi-line description of the lock,
// including owner, age and storage ID; stale locks are marked "(stale)".
func (l Lock) String() string {
	text := fmt.Sprintf("PID %d on %s by %s (UID %d, GID %d)\nlock was created at %s (%s ago)\nstorage ID %v",
		l.PID, l.Hostname, l.Username, l.UID, l.GID,
		l.Time.Format("2006-01-02 15:04:05"), time.Since(l.Time),
		l.lockID.Str())

	if l.Stale() {
		text += " (stale)"
	}

	return text
}
// ignoreSIGHUP ensures the goroutine that listens for (and ignores) incoming
// SIGHUP is installed only once.
var ignoreSIGHUP sync.Once

// init installs a handler that receives SIGHUP and discards it, so that the
// liveness probes sent by Stale() on other restic processes do not kill us.
func init() {
	ignoreSIGHUP.Do(func() {
		go func() {
			// bug fix: signal.Notify does not block sending to c, so an
			// unbuffered channel can drop signals; give it capacity 1
			c := make(chan os.Signal, 1)
			signal.Notify(c, syscall.SIGHUP)
			for s := range c {
				debug.Log("lock.ignoreSIGHUP", "Signal received: %v\n", s)
			}
		}()
	})
}
// LoadLock loads and unserializes a lock from a repository.
func LoadLock(repo *repository.Repository, id backend.ID) (*Lock, error) {
	lock := new(Lock)
	err := repo.LoadJSONUnpacked(backend.Lock, id, lock)
	if err != nil {
		return nil, err
	}
	lock.lockID = id

	return lock, nil
}
// RemoveStaleLocks deletes all locks detected as stale from the repository.
func RemoveStaleLocks(repo *repository.Repository) error {
	return eachLock(repo, func(id backend.ID, lock *Lock, err error) error {
		// locks that cannot be loaded are left alone
		if err != nil {
			return nil
		}

		if !lock.Stale() {
			return nil
		}

		return repo.Backend().Remove(backend.Lock, id.String())
	})
}
// RemoveAllLocks removes every lock from the repository, regardless of
// whether it is stale or held by a live process.
func RemoveAllLocks(repo *repository.Repository) error {
	return eachLock(repo, func(id backend.ID, lock *Lock, err error) error {
		return repo.Backend().Remove(backend.Lock, id.String())
	})
}

197
lock_test.go Normal file
View File

@ -0,0 +1,197 @@
package restic_test
import (
"os"
"testing"
"time"
"github.com/restic/restic"
"github.com/restic/restic/backend"
"github.com/restic/restic/repository"
. "github.com/restic/restic/test"
)
// TestLock checks that a single non-exclusive lock can be taken and released.
func TestLock(t *testing.T) {
	repo := SetupRepo()
	defer TeardownRepo(repo)

	l, err := restic.NewLock(repo)
	OK(t, err)
	OK(t, l.Unlock())
}
// TestDoubleUnlock checks that releasing an already-released lock fails.
func TestDoubleUnlock(t *testing.T) {
	repo := SetupRepo()
	defer TeardownRepo(repo)

	l, err := restic.NewLock(repo)
	OK(t, err)
	OK(t, l.Unlock())

	// the lock file is gone now, so a second Unlock must report an error
	err = l.Unlock()
	Assert(t, err != nil,
		"double unlock didn't return an error, got %v", err)
}
// TestMultipleLock checks that several non-exclusive locks may coexist.
func TestMultipleLock(t *testing.T) {
	repo := SetupRepo()
	defer TeardownRepo(repo)

	first, err := restic.NewLock(repo)
	OK(t, err)

	second, err := restic.NewLock(repo)
	OK(t, err)

	OK(t, first.Unlock())
	OK(t, second.Unlock())
}
// TestLockExclusive checks that an exclusive lock on an otherwise unlocked
// repository succeeds.
func TestLockExclusive(t *testing.T) {
	repo := SetupRepo()
	defer TeardownRepo(repo)

	l, err := restic.NewExclusiveLock(repo)
	OK(t, err)
	OK(t, l.Unlock())
}
// TestLockOnExclusiveLockedRepo checks that a normal lock is refused while an
// exclusive lock is held.
func TestLockOnExclusiveLockedRepo(t *testing.T) {
	repo := SetupRepo()
	defer TeardownRepo(repo)

	excl, err := restic.NewExclusiveLock(repo)
	OK(t, err)

	normal, err := restic.NewLock(repo)
	Assert(t, err != nil,
		"create normal lock with exclusively locked repo didn't return an error")
	Assert(t, restic.IsAlreadyLocked(err),
		"create normal lock with exclusively locked repo didn't return the correct error")

	OK(t, normal.Unlock())
	OK(t, excl.Unlock())
}
// TestExclusiveLockOnLockedRepo checks that an exclusive lock is refused
// while a normal (non-exclusive) lock is held.
func TestExclusiveLockOnLockedRepo(t *testing.T) {
	repo := SetupRepo()
	defer TeardownRepo(repo)

	// hold a normal lock first; the original code misleadingly named this
	// variable elock and reused failure messages from the sibling test
	lock, err := restic.NewLock(repo)
	OK(t, err)

	elock, err := restic.NewExclusiveLock(repo)
	Assert(t, err != nil,
		"create exclusive lock with locked repo didn't return an error")
	Assert(t, restic.IsAlreadyLocked(err),
		"create exclusive lock with locked repo didn't return the correct error")

	// elock is nil here and Unlock tolerates that
	OK(t, elock.Unlock())
	OK(t, lock.Unlock())
}
// createFakeLock writes a lock entry with the given timestamp and PID
// directly into the repository, bypassing the usual conflict checks.
func createFakeLock(repo *repository.Repository, t time.Time, pid int) (backend.ID, error) {
	// newLock is already a *restic.Lock; the original passed &newLock, a
	// redundant **Lock (json marshaling hid the mistake)
	fake := &restic.Lock{Time: t, PID: pid}
	return repo.SaveJSONUnpacked(backend.Lock, fake)
}
// removeLock deletes the lock entry with the given ID straight from the
// backend.
func removeLock(repo *repository.Repository, id backend.ID) error {
	be := repo.Backend()
	return be.Remove(backend.Lock, id.String())
}
// staleLockTests enumerates timestamp/PID combinations together with the
// result expected from Lock.Stale.
var staleLockTests = []struct {
	timestamp time.Time
	stale     bool
	pid       int
}{
	{
		// fresh lock owned by this (living) process: not stale
		timestamp: time.Now(),
		stale:     false,
		pid:       os.Getpid(),
	},
	{
		// older than the 30 minute timeout: stale
		timestamp: time.Now().Add(-time.Hour),
		stale:     true,
		pid:       os.Getpid(),
	},
	{
		// timestamp slightly in the future, process alive: not stale
		timestamp: time.Now().Add(3 * time.Minute),
		stale:     false,
		pid:       os.Getpid(),
	},
	{
		// fresh timestamp but (very likely) nonexistent PID: stale
		timestamp: time.Now(),
		stale:     true,
		pid:       os.Getpid() + 500,
	},
}
// TestLockStale runs Lock.Stale over the staleLockTests table.
func TestLockStale(t *testing.T) {
	for i, test := range staleLockTests {
		l := restic.Lock{
			Time: test.timestamp,
			PID:  test.pid,
		}

		got := l.Stale()
		Assert(t, got == test.stale,
			"TestStaleLock: test %d failed: expected stale: %v, got %v",
			i, test.stale, got)
	}
}
// lockExists reports whether a lock entry with the given ID is present in
// the backend.
func lockExists(repo *repository.Repository, t testing.TB, id backend.ID) bool {
	found, err := repo.Backend().Test(backend.Lock, id.String())
	OK(t, err)

	return found
}
// TestLockWithStaleLock checks that RemoveStaleLocks deletes exactly the
// stale entries and leaves live ones alone.
func TestLockWithStaleLock(t *testing.T) {
	repo := SetupRepo()
	defer TeardownRepo(repo)

	// stale: older than the timeout
	id1, err := createFakeLock(repo, time.Now().Add(-time.Hour), os.Getpid())
	OK(t, err)

	// fresh and owned by a live process: must survive
	id2, err := createFakeLock(repo, time.Now().Add(-time.Minute), os.Getpid())
	OK(t, err)

	// fresh but owned by a (very likely) dead PID: stale
	id3, err := createFakeLock(repo, time.Now().Add(-time.Minute), os.Getpid()+500)
	OK(t, err)

	OK(t, restic.RemoveStaleLocks(repo))

	Assert(t, lockExists(repo, t, id1) == false,
		"stale lock still exists after RemoveStaleLocks was called")
	Assert(t, lockExists(repo, t, id2) == true,
		"non-stale lock was removed by RemoveStaleLocks")
	Assert(t, lockExists(repo, t, id3) == false,
		"stale lock still exists after RemoveStaleLocks was called")

	OK(t, removeLock(repo, id2))
}
// TestRemoveAllLocks checks that RemoveAllLocks deletes every lock, stale or
// not.
func TestRemoveAllLocks(t *testing.T) {
	repo := SetupRepo()
	defer TeardownRepo(repo)

	id1, err := createFakeLock(repo, time.Now().Add(-time.Hour), os.Getpid())
	OK(t, err)

	id2, err := createFakeLock(repo, time.Now().Add(-time.Minute), os.Getpid())
	OK(t, err)

	id3, err := createFakeLock(repo, time.Now().Add(-time.Minute), os.Getpid()+500)
	OK(t, err)

	OK(t, restic.RemoveAllLocks(repo))

	for _, id := range []backend.ID{id1, id2, id3} {
		Assert(t, lockExists(repo, t, id) == false,
			"lock still exists after RemoveAllLocks was called")
	}
}

View File

@ -1,7 +1,6 @@
package pipe_test package pipe_test
import ( import (
"flag"
"os" "os"
"path/filepath" "path/filepath"
"sync" "sync"
@ -12,9 +11,6 @@ import (
. "github.com/restic/restic/test" . "github.com/restic/restic/test"
) )
var testWalkerPath = flag.String("test.walkerpath", ".", "pipeline walker testpath (default: .)")
var maxWorkers = flag.Int("test.workers", 100, "max concurrency (default: 100)")
func isFile(fi os.FileInfo) bool { func isFile(fi os.FileInfo) bool {
return fi.Mode()&(os.ModeType|os.ModeCharDevice) == 0 return fi.Mode()&(os.ModeType|os.ModeCharDevice) == 0
} }
@ -27,7 +23,7 @@ func statPath(path string) (stats, error) {
var s stats var s stats
// count files and directories with filepath.Walk() // count files and directories with filepath.Walk()
err := filepath.Walk(*testWalkerPath, func(p string, fi os.FileInfo, err error) error { err := filepath.Walk(TestWalkerPath, func(p string, fi os.FileInfo, err error) error {
if fi == nil { if fi == nil {
return err return err
} }
@ -44,15 +40,17 @@ func statPath(path string) (stats, error) {
return s, err return s, err
} }
const maxWorkers = 100
func TestPipelineWalkerWithSplit(t *testing.T) { func TestPipelineWalkerWithSplit(t *testing.T) {
if *testWalkerPath == "" { if TestWalkerPath == "" {
t.Skipf("walkerpath not set, skipping TestPipelineWalker") t.Skipf("walkerpath not set, skipping TestPipelineWalker")
} }
before, err := statPath(*testWalkerPath) before, err := statPath(TestWalkerPath)
OK(t, err) OK(t, err)
t.Logf("walking path %s with %d dirs, %d files", *testWalkerPath, t.Logf("walking path %s with %d dirs, %d files", TestWalkerPath,
before.dirs, before.files) before.dirs, before.files)
// account for top level dir // account for top level dir
@ -105,7 +103,7 @@ func TestPipelineWalkerWithSplit(t *testing.T) {
entCh := make(chan pipe.Entry) entCh := make(chan pipe.Entry)
dirCh := make(chan pipe.Dir) dirCh := make(chan pipe.Dir)
for i := 0; i < *maxWorkers; i++ { for i := 0; i < maxWorkers; i++ {
wg.Add(1) wg.Add(1)
go worker(&wg, done, entCh, dirCh) go worker(&wg, done, entCh, dirCh)
} }
@ -120,7 +118,7 @@ func TestPipelineWalkerWithSplit(t *testing.T) {
}() }()
resCh := make(chan pipe.Result, 1) resCh := make(chan pipe.Result, 1)
err = pipe.Walk([]string{*testWalkerPath}, done, jobs, resCh) err = pipe.Walk([]string{TestWalkerPath}, done, jobs, resCh)
OK(t, err) OK(t, err)
// wait for all workers to terminate // wait for all workers to terminate
@ -129,21 +127,21 @@ func TestPipelineWalkerWithSplit(t *testing.T) {
// wait for top-level blob // wait for top-level blob
<-resCh <-resCh
t.Logf("walked path %s with %d dirs, %d files", *testWalkerPath, t.Logf("walked path %s with %d dirs, %d files", TestWalkerPath,
after.dirs, after.files) after.dirs, after.files)
Assert(t, before == after, "stats do not match, expected %v, got %v", before, after) Assert(t, before == after, "stats do not match, expected %v, got %v", before, after)
} }
func TestPipelineWalker(t *testing.T) { func TestPipelineWalker(t *testing.T) {
if *testWalkerPath == "" { if TestWalkerPath == "" {
t.Skipf("walkerpath not set, skipping TestPipelineWalker") t.Skipf("walkerpath not set, skipping TestPipelineWalker")
} }
before, err := statPath(*testWalkerPath) before, err := statPath(TestWalkerPath)
OK(t, err) OK(t, err)
t.Logf("walking path %s with %d dirs, %d files", *testWalkerPath, t.Logf("walking path %s with %d dirs, %d files", TestWalkerPath,
before.dirs, before.files) before.dirs, before.files)
// account for top level dir // account for top level dir
@ -194,13 +192,13 @@ func TestPipelineWalker(t *testing.T) {
done := make(chan struct{}) done := make(chan struct{})
jobs := make(chan pipe.Job) jobs := make(chan pipe.Job)
for i := 0; i < *maxWorkers; i++ { for i := 0; i < maxWorkers; i++ {
wg.Add(1) wg.Add(1)
go worker(&wg, done, jobs) go worker(&wg, done, jobs)
} }
resCh := make(chan pipe.Result, 1) resCh := make(chan pipe.Result, 1)
err = pipe.Walk([]string{*testWalkerPath}, done, jobs, resCh) err = pipe.Walk([]string{TestWalkerPath}, done, jobs, resCh)
OK(t, err) OK(t, err)
// wait for all workers to terminate // wait for all workers to terminate
@ -209,14 +207,14 @@ func TestPipelineWalker(t *testing.T) {
// wait for top-level blob // wait for top-level blob
<-resCh <-resCh
t.Logf("walked path %s with %d dirs, %d files", *testWalkerPath, t.Logf("walked path %s with %d dirs, %d files", TestWalkerPath,
after.dirs, after.files) after.dirs, after.files)
Assert(t, before == after, "stats do not match, expected %v, got %v", before, after) Assert(t, before == after, "stats do not match, expected %v, got %v", before, after)
} }
func BenchmarkPipelineWalker(b *testing.B) { func BenchmarkPipelineWalker(b *testing.B) {
if *testWalkerPath == "" { if TestWalkerPath == "" {
b.Skipf("walkerpath not set, skipping BenchPipelineWalker") b.Skipf("walkerpath not set, skipping BenchPipelineWalker")
} }
@ -283,8 +281,8 @@ func BenchmarkPipelineWalker(b *testing.B) {
dirCh := make(chan pipe.Dir, 200) dirCh := make(chan pipe.Dir, 200)
var wg sync.WaitGroup var wg sync.WaitGroup
b.Logf("starting %d workers", *maxWorkers) b.Logf("starting %d workers", maxWorkers)
for i := 0; i < *maxWorkers; i++ { for i := 0; i < maxWorkers; i++ {
wg.Add(2) wg.Add(2)
go dirWorker(&wg, done, dirCh) go dirWorker(&wg, done, dirCh)
go fileWorker(&wg, done, entCh) go fileWorker(&wg, done, entCh)
@ -300,7 +298,7 @@ func BenchmarkPipelineWalker(b *testing.B) {
}() }()
resCh := make(chan pipe.Result, 1) resCh := make(chan pipe.Result, 1)
err := pipe.Walk([]string{*testWalkerPath}, done, jobs, resCh) err := pipe.Walk([]string{TestWalkerPath}, done, jobs, resCh)
OK(b, err) OK(b, err)
// wait for all workers to terminate // wait for all workers to terminate
@ -314,13 +312,13 @@ func BenchmarkPipelineWalker(b *testing.B) {
} }
func TestPipelineWalkerMultiple(t *testing.T) { func TestPipelineWalkerMultiple(t *testing.T) {
if *testWalkerPath == "" { if TestWalkerPath == "" {
t.Skipf("walkerpath not set, skipping TestPipelineWalker") t.Skipf("walkerpath not set, skipping TestPipelineWalker")
} }
paths, err := filepath.Glob(filepath.Join(*testWalkerPath, "*")) paths, err := filepath.Glob(filepath.Join(TestWalkerPath, "*"))
before, err := statPath(*testWalkerPath) before, err := statPath(TestWalkerPath)
OK(t, err) OK(t, err)
t.Logf("walking paths %v with %d dirs, %d files", paths, t.Logf("walking paths %v with %d dirs, %d files", paths,
@ -371,7 +369,7 @@ func TestPipelineWalkerMultiple(t *testing.T) {
done := make(chan struct{}) done := make(chan struct{})
jobs := make(chan pipe.Job) jobs := make(chan pipe.Job)
for i := 0; i < *maxWorkers; i++ { for i := 0; i < maxWorkers; i++ {
wg.Add(1) wg.Add(1)
go worker(&wg, done, jobs) go worker(&wg, done, jobs)
} }

View File

@ -372,7 +372,7 @@ func (s *Repository) SaveJSONUnpacked(t backend.Type, item interface{}) (backend
if err != nil { if err != nil {
return nil, err return nil, err
} }
debug.Log("Repo.SaveJSONUnpacked", "create new file %p", blob) debug.Log("Repo.SaveJSONUnpacked", "create new blob %v", t)
// hash // hash
hw := backend.NewHashingWriter(blob, sha256.New()) hw := backend.NewHashingWriter(blob, sha256.New())
@ -396,9 +396,12 @@ func (s *Repository) SaveJSONUnpacked(t backend.Type, item interface{}) (backend
err = blob.Finalize(t, sid.String()) err = blob.Finalize(t, sid.String())
if err != nil { if err != nil {
debug.Log("Repo.SaveJSONUnpacked", "error saving blob %v as %v: %v", t, sid, err)
return nil, err return nil, err
} }
debug.Log("Repo.SaveJSONUnpacked", "new blob %v saved as %v", t, sid)
return sid, nil return sid, nil
} }

View File

@ -5,7 +5,6 @@ import (
"crypto/rand" "crypto/rand"
"crypto/sha256" "crypto/sha256"
"encoding/json" "encoding/json"
"flag"
"io" "io"
"testing" "testing"
@ -15,8 +14,6 @@ import (
. "github.com/restic/restic/test" . "github.com/restic/restic/test"
) )
var benchTestDir = flag.String("test.dir", ".", "dir used in benchmarks (default: .)")
type testJSONStruct struct { type testJSONStruct struct {
Foo uint32 Foo uint32
Bar string Bar string
@ -28,8 +25,8 @@ var repoTests = []testJSONStruct{
} }
func TestSaveJSON(t *testing.T) { func TestSaveJSON(t *testing.T) {
repo := SetupRepo(t) repo := SetupRepo()
defer TeardownRepo(t, repo) defer TeardownRepo(repo)
for _, obj := range repoTests { for _, obj := range repoTests {
data, err := json.Marshal(obj) data, err := json.Marshal(obj)
@ -47,8 +44,8 @@ func TestSaveJSON(t *testing.T) {
} }
func BenchmarkSaveJSON(t *testing.B) { func BenchmarkSaveJSON(t *testing.B) {
repo := SetupRepo(t) repo := SetupRepo()
defer TeardownRepo(t, repo) defer TeardownRepo(repo)
obj := repoTests[0] obj := repoTests[0]
@ -72,8 +69,8 @@ func BenchmarkSaveJSON(t *testing.B) {
var testSizes = []int{5, 23, 2<<18 + 23, 1 << 20} var testSizes = []int{5, 23, 2<<18 + 23, 1 << 20}
func TestSave(t *testing.T) { func TestSave(t *testing.T) {
repo := SetupRepo(t) repo := SetupRepo()
defer TeardownRepo(t, repo) defer TeardownRepo(repo)
for _, size := range testSizes { for _, size := range testSizes {
data := make([]byte, size) data := make([]byte, size)
@ -104,8 +101,8 @@ func TestSave(t *testing.T) {
} }
func TestSaveFrom(t *testing.T) { func TestSaveFrom(t *testing.T) {
repo := SetupRepo(t) repo := SetupRepo()
defer TeardownRepo(t, repo) defer TeardownRepo(repo)
for _, size := range testSizes { for _, size := range testSizes {
data := make([]byte, size) data := make([]byte, size)
@ -134,8 +131,8 @@ func TestSaveFrom(t *testing.T) {
} }
func BenchmarkSaveFrom(t *testing.B) { func BenchmarkSaveFrom(t *testing.B) {
repo := SetupRepo(t) repo := SetupRepo()
defer TeardownRepo(t, repo) defer TeardownRepo(repo)
size := 4 << 20 // 4MiB size := 4 << 20 // 4MiB
@ -156,15 +153,15 @@ func BenchmarkSaveFrom(t *testing.B) {
} }
func TestLoadJSONPack(t *testing.T) { func TestLoadJSONPack(t *testing.T) {
if *benchTestDir == "" { repo := SetupRepo()
defer TeardownRepo(repo)
if BenchArchiveDirectory == "" {
t.Skip("benchdir not set, skipping") t.Skip("benchdir not set, skipping")
} }
repo := SetupRepo(t)
defer TeardownRepo(t, repo)
// archive a few files // archive a few files
sn := SnapshotDir(t, repo, *benchTestDir, nil) sn := SnapshotDir(t, repo, BenchArchiveDirectory, nil)
OK(t, repo.Flush()) OK(t, repo.Flush())
tree := restic.NewTree() tree := restic.NewTree()
@ -173,13 +170,13 @@ func TestLoadJSONPack(t *testing.T) {
} }
func TestLoadJSONUnpacked(t *testing.T) { func TestLoadJSONUnpacked(t *testing.T) {
if *benchTestDir == "" { repo := SetupRepo()
defer TeardownRepo(repo)
if BenchArchiveDirectory == "" {
t.Skip("benchdir not set, skipping") t.Skip("benchdir not set, skipping")
} }
repo := SetupRepo(t)
defer TeardownRepo(t, repo)
// archive a snapshot // archive a snapshot
sn := restic.Snapshot{} sn := restic.Snapshot{}
sn.Hostname = "foobar" sn.Hostname = "foobar"

View File

@ -8,9 +8,6 @@ import (
) )
func TestNewSnapshot(t *testing.T) { func TestNewSnapshot(t *testing.T) {
s := SetupRepo(t)
defer TeardownRepo(t, s)
paths := []string{"/home/foobar"} paths := []string{"/home/foobar"}
_, err := restic.NewSnapshot(paths) _, err := restic.NewSnapshot(paths)

View File

@ -18,8 +18,9 @@ var (
TestCleanup = getBoolVar("RESTIC_TEST_CLEANUP", true) TestCleanup = getBoolVar("RESTIC_TEST_CLEANUP", true)
TestTempDir = getStringVar("RESTIC_TEST_TMPDIR", "") TestTempDir = getStringVar("RESTIC_TEST_TMPDIR", "")
RunIntegrationTest = getBoolVar("RESTIC_TEST_INTEGRATION", true) RunIntegrationTest = getBoolVar("RESTIC_TEST_INTEGRATION", true)
TestSFTPPath = getStringVar("RESTIC_TEST_SFTPPATH", TestSFTPPath = getStringVar("RESTIC_TEST_SFTPPATH", "/usr/lib/ssh:/usr/lib/openssh")
"/usr/lib/ssh:/usr/lib/openssh") TestWalkerPath = getStringVar("RESTIC_TEST_PATH", ".")
BenchArchiveDirectory = getStringVar("RESTIC_BENCH_DIR", ".")
) )
func getStringVar(name, defaultValue string) string { func getStringVar(name, defaultValue string) string {
@ -45,27 +46,38 @@ func getBoolVar(name string, defaultValue bool) bool {
return defaultValue return defaultValue
} }
func SetupRepo(t testing.TB) *repository.Repository { func SetupRepo() *repository.Repository {
tempdir, err := ioutil.TempDir(TestTempDir, "restic-test-") tempdir, err := ioutil.TempDir(TestTempDir, "restic-test-")
OK(t, err) if err != nil {
panic(err)
}
// create repository below temp dir // create repository below temp dir
b, err := local.Create(filepath.Join(tempdir, "repo")) b, err := local.Create(filepath.Join(tempdir, "repo"))
OK(t, err) if err != nil {
panic(err)
}
repo := repository.New(b) repo := repository.New(b)
OK(t, repo.Init(TestPassword)) err = repo.Init(TestPassword)
if err != nil {
panic(err)
}
return repo return repo
} }
func TeardownRepo(t testing.TB, repo *repository.Repository) { func TeardownRepo(repo *repository.Repository) {
if !TestCleanup { if !TestCleanup {
l := repo.Backend().(*local.Local) l := repo.Backend().(*local.Local)
t.Logf("leaving local backend at %s\n", l.Location()) fmt.Printf("leaving local backend at %s\n", l.Location())
return return
} }
OK(t, repo.Delete()) err := repo.Delete()
if err != nil {
panic(err)
}
} }
func SnapshotDir(t testing.TB, repo *repository.Repository, path string, parent backend.ID) *restic.Snapshot { func SnapshotDir(t testing.TB, repo *repository.Repository, path string, parent backend.ID) *restic.Snapshot {
@ -74,3 +86,9 @@ func SnapshotDir(t testing.TB, repo *repository.Repository, path string, parent
OK(t, err) OK(t, err)
return sn return sn
} }
func WithRepo(t testing.TB, f func(*repository.Repository)) {
repo := SetupRepo()
f(repo)
TeardownRepo(repo)
}

View File

@ -93,8 +93,8 @@ func TestNodeComparison(t *testing.T) {
} }
func TestLoadTree(t *testing.T) { func TestLoadTree(t *testing.T) {
repo := SetupRepo(t) repo := SetupRepo()
defer TeardownRepo(t, repo) defer TeardownRepo(repo)
// save tree // save tree
tree := restic.NewTree() tree := restic.NewTree()

View File

@ -1,7 +1,6 @@
package restic_test package restic_test
import ( import (
"flag"
"path/filepath" "path/filepath"
"testing" "testing"
@ -10,14 +9,12 @@ import (
. "github.com/restic/restic/test" . "github.com/restic/restic/test"
) )
var testWalkDirectory = flag.String("test.walkdir", ".", "test walking a directory (globbing pattern, default: .)")
func TestWalkTree(t *testing.T) { func TestWalkTree(t *testing.T) {
dirs, err := filepath.Glob(*testWalkDirectory) repo := SetupRepo()
OK(t, err) defer TeardownRepo(repo)
repo := SetupRepo(t) dirs, err := filepath.Glob(TestWalkerPath)
defer TeardownRepo(t, repo) OK(t, err)
// archive a few files // archive a few files
arch := restic.NewArchiver(repo) arch := restic.NewArchiver(repo)