
Move Server and Key to new sub-package

Alexander Neumann 2015-04-26 14:46:15 +02:00
parent 8498753eb7
commit d19b23d4f1
28 changed files with 317 additions and 356 deletions
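The diff below is mostly mechanical: restic.Server, restic.Key and restic.Blob become server.Server, server.Key and server.Blob, and NewServer now returns a pointer. A minimal sketch of the resulting call pattern, put together only from signatures visible in this commit (the repository path and the password are illustrative):

package main

import (
	"log"

	"github.com/restic/restic"
	"github.com/restic/restic/backend/local"
	"github.com/restic/restic/server"
)

func main() {
	// create a local backend, as the init command in cmd/restic does
	be, err := local.Create("/tmp/restic-repo") // illustrative path
	if err != nil {
		log.Fatal(err)
	}

	// NewServer now lives in the server package and returns *server.Server
	s := server.NewServer(be)

	// key handling moved with it: server.CreateKey replaces restic.CreateKey
	key, err := server.CreateKey(s, "geheim")
	if err != nil {
		log.Fatal(err)
	}
	s.SetKey(key)

	// the remaining restic types keep working against the new pointer type
	arch, err := restic.NewArchiver(s)
	if err != nil {
		log.Fatal(err)
	}
	_ = arch
}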

View File

@ -15,6 +15,7 @@ import (
"github.com/restic/restic/chunker"
"github.com/restic/restic/debug"
"github.com/restic/restic/pipe"
"github.com/restic/restic/server"
)
const (
@ -27,7 +28,7 @@ const (
)
type Archiver struct {
s Server
s *server.Server
m *Map
c *Cache
@ -37,7 +38,7 @@ type Archiver struct {
Filter func(item string, fi os.FileInfo) bool
}
func NewArchiver(s Server) (*Archiver, error) {
func NewArchiver(s *server.Server) (*Archiver, error) {
var err error
arch := &Archiver{
s: s,
@ -101,7 +102,7 @@ func (arch *Archiver) Preload() error {
return nil
}
func (arch *Archiver) Save(t backend.Type, id backend.ID, length uint, rd io.Reader) (Blob, error) {
func (arch *Archiver) Save(t backend.Type, id backend.ID, length uint, rd io.Reader) (server.Blob, error) {
debug.Log("Archiver.Save", "Save(%v, %v)\n", t, id.Str())
// test if this blob is already known
@ -127,7 +128,7 @@ func (arch *Archiver) Save(t backend.Type, id backend.ID, length uint, rd io.Rea
// TODO: implement a list of blobs in transport, so this doesn't happen so often
err = arch.s.Remove(t, blob.Storage.String())
if err != nil {
return Blob{}, err
return server.Blob{}, err
}
}
@ -136,13 +137,13 @@ func (arch *Archiver) Save(t backend.Type, id backend.ID, length uint, rd io.Rea
return smapblob, nil
}
func (arch *Archiver) SaveTreeJSON(item interface{}) (Blob, error) {
func (arch *Archiver) SaveTreeJSON(item interface{}) (server.Blob, error) {
// convert to json
data, err := json.Marshal(item)
// append newline
data = append(data, '\n')
if err != nil {
return Blob{}, err
return server.Blob{}, err
}
// check if tree has been saved before
@ -157,7 +158,7 @@ func (arch *Archiver) SaveTreeJSON(item interface{}) (Blob, error) {
// otherwise save the data
blob, err = arch.s.SaveJSON(backend.Tree, item)
if err != nil {
return Blob{}, err
return server.Blob{}, err
}
// store blob in storage map
@ -168,7 +169,7 @@ func (arch *Archiver) SaveTreeJSON(item interface{}) (Blob, error) {
// SaveFile stores the content of the file on the backend as a Blob by calling
// Save for each chunk.
func (arch *Archiver) SaveFile(p *Progress, node *Node) (Blobs, error) {
func (arch *Archiver) SaveFile(p *Progress, node *Node) (server.Blobs, error) {
file, err := node.OpenForReading()
defer file.Close()
if err != nil {
@ -197,12 +198,12 @@ func (arch *Archiver) SaveFile(p *Progress, node *Node) (Blobs, error) {
}
}
var blobs Blobs
var blobs server.Blobs
// store all chunks
chnker := GetChunker("archiver.SaveFile")
chnker.Reset(file, arch.s.ChunkerPolynomial())
chans := [](<-chan Blob){}
chans := [](<-chan server.Blob){}
defer FreeChunker("archiver.SaveFile", chnker)
chunks := 0
@ -221,9 +222,9 @@ func (arch *Archiver) SaveFile(p *Progress, node *Node) (Blobs, error) {
// acquire token, start goroutine to save chunk
token := <-arch.blobToken
resCh := make(chan Blob, 1)
resCh := make(chan server.Blob, 1)
go func(ch chan<- Blob) {
go func(ch chan<- server.Blob) {
blob, err := arch.Save(backend.Data, chunk.Digest, chunk.Length, chunk.Reader(file))
// TODO handle error
if err != nil {
@ -238,7 +239,7 @@ func (arch *Archiver) SaveFile(p *Progress, node *Node) (Blobs, error) {
chans = append(chans, resCh)
}
blobs = []Blob{}
blobs = []server.Blob{}
for _, ch := range chans {
blobs = append(blobs, <-ch)
}
@ -267,7 +268,7 @@ func (arch *Archiver) SaveFile(p *Progress, node *Node) (Blobs, error) {
return blobs, nil
}
func (arch *Archiver) saveTree(p *Progress, t *Tree) (Blob, error) {
func (arch *Archiver) saveTree(p *Progress, t *Tree) (server.Blob, error) {
debug.Log("Archiver.saveTree", "saveTree(%v)\n", t)
var wg sync.WaitGroup
@ -279,7 +280,7 @@ func (arch *Archiver) saveTree(p *Progress, t *Tree) (Blob, error) {
if node.tree != nil {
b, err := arch.saveTree(p, node.tree)
if err != nil {
return Blob{}, err
return server.Blob{}, err
}
node.Subtree = b.ID
t.Map.Insert(b)
@ -321,7 +322,7 @@ func (arch *Archiver) saveTree(p *Progress, t *Tree) (Blob, error) {
go func(n *Node) {
defer wg.Done()
var blobs Blobs
var blobs server.Blobs
blobs, n.err = arch.SaveFile(p, n)
for _, b := range blobs {
t.Map.Insert(b)
@ -340,7 +341,7 @@ func (arch *Archiver) saveTree(p *Progress, t *Tree) (Blob, error) {
// check for invalid file nodes
for _, node := range t.Nodes {
if node.Type == "file" && node.Content == nil && node.err == nil {
return Blob{}, fmt.Errorf("node %v has empty content", node.Name)
return server.Blob{}, fmt.Errorf("node %v has empty content", node.Name)
}
// remember used hashes
@ -357,7 +358,7 @@ func (arch *Archiver) saveTree(p *Progress, t *Tree) (Blob, error) {
if node.err != nil {
err := arch.Error(node.path, nil, node.err)
if err != nil {
return Blob{}, err
return server.Blob{}, err
}
// save error message in node
@ -375,7 +376,7 @@ func (arch *Archiver) saveTree(p *Progress, t *Tree) (Blob, error) {
blob, err := arch.SaveTreeJSON(t)
if err != nil {
return Blob{}, err
return server.Blob{}, err
}
return blob, nil
@ -531,7 +532,7 @@ func (arch *Archiver) dirWorker(wg *sync.WaitGroup, p *Progress, done <-chan str
debug.Log("Archiver.dirWorker", "save tree for %s: %v", dir.Path(), blob)
node.Subtree = blob.ID
node.blobs = Blobs{blob}
node.blobs = server.Blobs{blob}
dir.Result() <- node
if dir.Path() != "" {

View File

@ -9,6 +9,7 @@ import (
"github.com/restic/restic"
"github.com/restic/restic/backend"
"github.com/restic/restic/chunker"
"github.com/restic/restic/server"
. "github.com/restic/restic/test"
)
@ -22,7 +23,7 @@ type Rdr interface {
io.ReaderAt
}
func benchmarkChunkEncrypt(b testing.TB, buf, buf2 []byte, rd Rdr, key *restic.Key) {
func benchmarkChunkEncrypt(b testing.TB, buf, buf2 []byte, rd Rdr, key *server.Key) {
ch := restic.GetChunker("BenchmarkChunkEncrypt")
rd.Seek(0, 0)
ch.Reset(rd, testPol)
@ -53,9 +54,9 @@ func BenchmarkChunkEncrypt(b *testing.B) {
data := Random(23, 10<<20) // 10MiB
rd := bytes.NewReader(data)
be := setupBackend(b)
defer teardownBackend(b, be)
key := setupKey(b, be, "geheim")
be := SetupBackend(b)
defer TeardownBackend(b, be)
key := SetupKey(b, be, "geheim")
buf := restic.GetChunkBuf("BenchmarkChunkEncrypt")
buf2 := restic.GetChunkBuf("BenchmarkChunkEncrypt")
@ -71,7 +72,7 @@ func BenchmarkChunkEncrypt(b *testing.B) {
restic.FreeChunkBuf("BenchmarkChunkEncrypt", buf2)
}
func benchmarkChunkEncryptP(b *testing.PB, buf []byte, rd Rdr, key *restic.Key) {
func benchmarkChunkEncryptP(b *testing.PB, buf []byte, rd Rdr, key *server.Key) {
ch := restic.GetChunker("BenchmarkChunkEncryptP")
rd.Seek(0, 0)
ch.Reset(rd, testPol)
@ -92,9 +93,9 @@ func benchmarkChunkEncryptP(b *testing.PB, buf []byte, rd Rdr, key *restic.Key)
}
func BenchmarkChunkEncryptParallel(b *testing.B) {
be := setupBackend(b)
defer teardownBackend(b, be)
key := setupKey(b, be, "geheim")
be := SetupBackend(b)
defer TeardownBackend(b, be)
key := SetupKey(b, be, "geheim")
data := Random(23, 10<<20) // 10MiB
@ -118,9 +119,9 @@ func BenchmarkArchiveDirectory(b *testing.B) {
b.Skip("benchdir not set, skipping BenchmarkArchiveDirectory")
}
server := setupBackend(b)
defer teardownBackend(b, server)
key := setupKey(b, server, "geheim")
server := SetupBackend(b)
defer TeardownBackend(b, server)
key := SetupKey(b, server, "geheim")
server.SetKey(key)
arch, err := restic.NewArchiver(server)
@ -131,16 +132,7 @@ func BenchmarkArchiveDirectory(b *testing.B) {
b.Logf("snapshot archived as %v", id)
}
func snapshot(t testing.TB, server restic.Server, path string, parent backend.ID) *restic.Snapshot {
arch, err := restic.NewArchiver(server)
OK(t, err)
OK(t, arch.Preload())
sn, _, err := arch.Snapshot(nil, []string{path}, parent)
OK(t, err)
return sn
}
func countBlobs(t testing.TB, server restic.Server) (trees int, data int) {
func countBlobs(t testing.TB, server *server.Server) (trees int, data int) {
return server.Count(backend.Tree), server.Count(backend.Data)
}
@ -149,13 +141,13 @@ func archiveWithPreload(t testing.TB) {
t.Skip("benchdir not set, skipping TestArchiverPreload")
}
server := setupBackend(t)
defer teardownBackend(t, server)
key := setupKey(t, server, "geheim")
server := SetupBackend(t)
defer TeardownBackend(t, server)
key := SetupKey(t, server, "geheim")
server.SetKey(key)
// archive a few files
sn := snapshot(t, server, *benchArchiveDirectory, nil)
sn := SnapshotDir(t, server, *benchArchiveDirectory, nil)
t.Logf("archived snapshot %v", sn.ID().Str())
// get archive stats
@ -163,7 +155,7 @@ func archiveWithPreload(t testing.TB) {
t.Logf("found %v trees, %v data blobs", beforeTrees, beforeData)
// archive the same files again, without parent snapshot
sn2 := snapshot(t, server, *benchArchiveDirectory, nil)
sn2 := SnapshotDir(t, server, *benchArchiveDirectory, nil)
t.Logf("archived snapshot %v", sn2.ID().Str())
// get archive stats
@ -177,7 +169,7 @@ func archiveWithPreload(t testing.TB) {
}
// archive the same files again, with a parent snapshot
sn3 := snapshot(t, server, *benchArchiveDirectory, sn2.ID())
sn3 := SnapshotDir(t, server, *benchArchiveDirectory, sn2.ID())
t.Logf("archived snapshot %v, parent %v", sn3.ID().Str(), sn2.ID().Str())
// get archive stats
@ -200,9 +192,9 @@ func BenchmarkPreload(t *testing.B) {
t.Skip("benchdir not set, skipping TestArchiverPreload")
}
server := setupBackend(t)
defer teardownBackend(t, server)
key := setupKey(t, server, "geheim")
server := SetupBackend(t)
defer TeardownBackend(t, server)
key := SetupKey(t, server, "geheim")
server.SetKey(key)
// archive a few files
@ -228,13 +220,13 @@ func BenchmarkLoadTree(t *testing.B) {
t.Skip("benchdir not set, skipping TestArchiverPreload")
}
server := setupBackend(t)
defer teardownBackend(t, server)
key := setupKey(t, server, "geheim")
server.SetKey(key)
s := SetupBackend(t)
defer TeardownBackend(t, s)
key := SetupKey(t, s, "geheim")
s.SetKey(key)
// archive a few files
arch, err := restic.NewArchiver(server)
arch, err := restic.NewArchiver(s)
OK(t, err)
sn, _, err := arch.Snapshot(nil, []string{*benchArchiveDirectory}, nil)
OK(t, err)
@ -243,7 +235,7 @@ func BenchmarkLoadTree(t *testing.B) {
list := make([]backend.ID, 0, 10)
done := make(chan struct{})
for name := range server.List(backend.Tree, done) {
for name := range s.List(backend.Tree, done) {
id, err := backend.ParseID(name)
if err != nil {
t.Logf("invalid id for tree %v", name)
@ -262,7 +254,7 @@ func BenchmarkLoadTree(t *testing.B) {
for i := 0; i < t.N; i++ {
for _, id := range list {
_, err := restic.LoadTree(server, restic.Blob{Storage: id})
_, err := restic.LoadTree(s, server.Blob{Storage: id})
OK(t, err)
}
}

View File

@ -11,6 +11,7 @@ import (
"github.com/restic/restic/backend"
"github.com/restic/restic/debug"
"github.com/restic/restic/server"
)
type Cache struct {
@ -106,7 +107,7 @@ func (c *Cache) Purge(t backend.Type, subtype string, id backend.ID) error {
return err
}
func (c *Cache) Clear(s Server) error {
func (c *Cache) Clear(s *server.Server) error {
list, err := c.List(backend.Snapshot)
if err != nil {
return err
@ -211,7 +212,7 @@ func (c *Cache) filename(t backend.Type, subtype string, id backend.ID) (string,
// RefreshSnapshots loads the maps for all snapshots and saves them to the
// local cache. Old cache entries are purged.
func (c *Cache) RefreshSnapshots(s Server, p *Progress) error {
func (c *Cache) RefreshSnapshots(s *server.Server, p *Progress) error {
defer p.Done()
// list cache entries
@ -274,7 +275,7 @@ func (c *Cache) RefreshSnapshots(s Server, p *Progress) error {
// cacheSnapshotBlobs creates a cache of all the blobs used within the
// snapshot. It collects all blobs from all trees and saves the resulting map
// to the cache and returns the map.
func cacheSnapshotBlobs(p *Progress, s Server, c *Cache, id backend.ID) (*Map, error) {
func cacheSnapshotBlobs(p *Progress, s *server.Server, c *Cache, id backend.ID) (*Map, error) {
debug.Log("CacheSnapshotBlobs", "create cache for snapshot %v", id.Str())
sn, err := LoadSnapshot(s, id)
@ -338,7 +339,7 @@ func (c *Cache) StoreMap(snid backend.ID, m *Map) error {
return nil
}
func (c *Cache) LoadMap(s Server, snid backend.ID) (*Map, error) {
func (c *Cache) LoadMap(s *server.Server, snid backend.ID) (*Map, error) {
rd, err := c.Load(backend.Snapshot, "blobs", snid)
if err != nil {
return nil, err
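The cache takes the pointer type as well. A hedged sketch of refreshing the snapshot cache through the relocated server; passing a nil *Progress is an assumption borrowed from how the tests in this commit call Snapshot:

package example

import (
	"github.com/restic/restic"
	"github.com/restic/restic/server"
)

// refreshCache rebuilds the local blob cache for all snapshots stored on s.
func refreshCache(s *server.Server) error {
	cache, err := restic.NewCache(s)
	if err != nil {
		return err
	}

	// RefreshSnapshots loads the blob maps for every snapshot and purges
	// stale cache entries; nil disables progress reporting here.
	return cache.RefreshSnapshots(s, nil)
}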

View File

@ -10,9 +10,9 @@ import (
)
func TestCache(t *testing.T) {
server := setupBackend(t)
defer teardownBackend(t, server)
key := setupKey(t, server, "geheim")
server := SetupBackend(t)
defer TeardownBackend(t, server)
key := SetupKey(t, server, "geheim")
server.SetKey(key)
cache, err := restic.NewCache(server)

View File

@ -8,6 +8,7 @@ import (
"github.com/restic/restic"
"github.com/restic/restic/backend"
"github.com/restic/restic/server"
)
type CmdCat struct{}
@ -112,7 +113,7 @@ func (cmd CmdCat) Execute(args []string) error {
dec := json.NewDecoder(rd)
var key restic.Key
var key server.Key
err = dec.Decode(&key)
if err != nil {
return err

View File

@ -8,6 +8,7 @@ import (
"github.com/restic/restic"
"github.com/restic/restic/backend"
"github.com/restic/restic/debug"
"github.com/restic/restic/server"
)
type findResult struct {
@ -58,7 +59,7 @@ func parseTime(str string) (time.Time, error) {
return time.Time{}, fmt.Errorf("unable to parse time: %q", str)
}
func (c CmdFind) findInTree(s restic.Server, blob restic.Blob, path string) ([]findResult, error) {
func (c CmdFind) findInTree(s *server.Server, blob server.Blob, path string) ([]findResult, error) {
debug.Log("restic.find", "checking tree %v\n", blob)
tree, err := restic.LoadTree(s, blob)
if err != nil {
@ -109,7 +110,7 @@ func (c CmdFind) findInTree(s restic.Server, blob restic.Blob, path string) ([]f
return results, nil
}
func (c CmdFind) findInSnapshot(s restic.Server, name string) error {
func (c CmdFind) findInSnapshot(s *server.Server, name string) error {
debug.Log("restic.find", "searching in snapshot %s\n for entries within [%s %s]", name, c.oldest, c.newest)
id, err := backend.ParseID(name)

View File

@ -8,6 +8,7 @@ import (
"github.com/restic/restic"
"github.com/restic/restic/backend"
"github.com/restic/restic/debug"
"github.com/restic/restic/server"
)
type CmdFsck struct {
@ -31,7 +32,7 @@ func init() {
}
}
func fsckFile(opts CmdFsck, s restic.Server, m *restic.Map, IDs []backend.ID) (uint64, error) {
func fsckFile(opts CmdFsck, s *server.Server, m *restic.Map, IDs []backend.ID) (uint64, error) {
debug.Log("restic.fsckFile", "checking file %v", IDs)
var bytes uint64
@ -74,7 +75,7 @@ func fsckFile(opts CmdFsck, s restic.Server, m *restic.Map, IDs []backend.ID) (u
return bytes, nil
}
func fsckTree(opts CmdFsck, s restic.Server, blob restic.Blob) error {
func fsckTree(opts CmdFsck, s *server.Server, blob server.Blob) error {
debug.Log("restic.fsckTree", "checking tree %v", blob)
tree, err := restic.LoadTree(s, blob)
@ -161,7 +162,7 @@ func fsckTree(opts CmdFsck, s restic.Server, blob restic.Blob) error {
return firstErr
}
func fsckSnapshot(opts CmdFsck, s restic.Server, id backend.ID) error {
func fsckSnapshot(opts CmdFsck, s *server.Server, id backend.ID) error {
debug.Log("restic.fsck", "checking snapshot %v\n", id)
sn, err := restic.LoadSnapshot(s, id)

View File

@ -5,8 +5,8 @@ import (
"fmt"
"os"
"github.com/restic/restic"
"github.com/restic/restic/backend"
"github.com/restic/restic/server"
)
type CmdKey struct{}
@ -21,7 +21,7 @@ func init() {
}
}
func listKeys(s restic.Server) error {
func listKeys(s *server.Server) error {
tab := NewTable()
tab.Header = fmt.Sprintf(" %-10s %-10s %-10s %s", "ID", "User", "Host", "Created")
tab.RowFormat = "%s%-10s %-10s %-10s %s"
@ -35,7 +35,7 @@ func listKeys(s restic.Server) error {
defer close(done)
for name := range s.List(backend.Key, done) {
k, err := restic.LoadKey(s, name)
k, err := server.LoadKey(s, name)
if err != nil {
fmt.Fprintf(os.Stderr, "LoadKey() failed: %v\n", err)
continue
@ -56,7 +56,7 @@ func listKeys(s restic.Server) error {
return nil
}
func addKey(s restic.Server) error {
func addKey(s *server.Server) error {
pw := readPassword("RESTIC_NEWPASSWORD", "enter password for new key: ")
pw2 := readPassword("RESTIC_NEWPASSWORD", "enter password again: ")
@ -64,7 +64,7 @@ func addKey(s restic.Server) error {
return errors.New("passwords do not match")
}
id, err := restic.AddKey(s, pw, s.Key())
id, err := server.AddKey(s, pw, s.Key())
if err != nil {
return fmt.Errorf("creating new key failed: %v\n", err)
}
@ -74,7 +74,7 @@ func addKey(s restic.Server) error {
return nil
}
func deleteKey(s restic.Server, name string) error {
func deleteKey(s *server.Server, name string) error {
if name == s.Key().Name() {
return errors.New("refusing to remove key currently used to access repository")
}
@ -88,7 +88,7 @@ func deleteKey(s restic.Server, name string) error {
return nil
}
func changePassword(s restic.Server) error {
func changePassword(s *server.Server) error {
pw := readPassword("RESTIC_NEWPASSWORD", "enter password for new key: ")
pw2 := readPassword("RESTIC_NEWPASSWORD", "enter password again: ")
@ -97,7 +97,7 @@ func changePassword(s restic.Server) error {
}
// add new key
id, err := restic.AddKey(s, pw, s.Key())
id, err := server.AddKey(s, pw, s.Key())
if err != nil {
return fmt.Errorf("creating new key failed: %v\n", err)
}
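The key management command now calls straight into the server package. A small sketch of the add-key path outside cmd/restic, using only the relocated helpers shown above (the password is an illustrative caller-supplied value):

package example

import (
	"log"

	"github.com/restic/restic/server"
)

// addSecondKey stores an additional key for the repository, derived from the
// key currently loaded on s (mirrors addKey in cmd_key.go).
func addSecondKey(s *server.Server, password string) error {
	newKey, err := server.AddKey(s, password, s.Key())
	if err != nil {
		return err
	}

	log.Printf("saved new key as %v", newKey.Name())
	return nil
}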

View File

@ -7,6 +7,7 @@ import (
"github.com/restic/restic"
"github.com/restic/restic/backend"
"github.com/restic/restic/server"
)
type CmdLs struct{}
@ -37,7 +38,7 @@ func printNode(prefix string, n *restic.Node) string {
}
}
func printTree(prefix string, s restic.Server, blob restic.Blob) error {
func printTree(prefix string, s *server.Server, blob server.Blob) error {
tree, err := restic.LoadTree(s, blob)
if err != nil {
return err

View File

@ -15,6 +15,7 @@ import (
"github.com/restic/restic/backend/local"
"github.com/restic/restic/backend/sftp"
"github.com/restic/restic/debug"
"github.com/restic/restic/server"
)
var version = "compiled manually"
@ -72,9 +73,9 @@ func (cmd CmdInit) Execute(args []string) error {
os.Exit(1)
}
s := restic.NewServer(be)
s := server.NewServer(be)
_, err = restic.CreateKey(s, pw)
_, err = server.CreateKey(s, pw)
if err != nil {
fmt.Fprintf(os.Stderr, "creating key in backend at %s failed: %v\n", opts.Repo, err)
os.Exit(1)
@ -134,21 +135,21 @@ func create(u string) (backend.Backend, error) {
return sftp.Create(url.Path[1:], "ssh", args...)
}
func OpenRepo() (restic.Server, error) {
func OpenRepo() (*server.Server, error) {
if opts.Repo == "" {
return restic.Server{}, errors.New("Please specify repository location (-r)")
return nil, errors.New("Please specify repository location (-r)")
}
be, err := open(opts.Repo)
if err != nil {
return restic.Server{}, err
return nil, err
}
s := restic.NewServer(be)
s := server.NewServer(be)
err = s.SearchKey(readPassword("RESTIC_PASSWORD", "enter password for repository: "))
if err != nil {
return restic.Server{}, fmt.Errorf("unable to open repo: %v", err)
return nil, fmt.Errorf("unable to open repo: %v", err)
}
return s, nil

View File

@ -1,55 +0,0 @@
package restic_test
import (
"flag"
"io/ioutil"
"os"
"path/filepath"
"testing"
"github.com/restic/restic"
"github.com/restic/restic/backend/local"
. "github.com/restic/restic/test"
)
var testPassword = "foobar"
var testCleanup = flag.Bool("test.cleanup", true, "clean up after running tests (remove local backend directory with all content)")
var testTempDir = flag.String("test.tempdir", "", "use this directory for temporary storage (default: system temp dir)")
func setupBackend(t testing.TB) restic.Server {
tempdir, err := ioutil.TempDir(*testTempDir, "restic-test-")
OK(t, err)
// create repository below temp dir
b, err := local.Create(filepath.Join(tempdir, "repo"))
OK(t, err)
// set cache dir
err = os.Setenv("RESTIC_CACHE", filepath.Join(tempdir, "cache"))
OK(t, err)
return restic.NewServer(b)
}
func teardownBackend(t testing.TB, s restic.Server) {
if !*testCleanup {
l := s.Backend().(*local.Local)
t.Logf("leaving local backend at %s\n", l.Location())
return
}
OK(t, s.Delete())
}
func setupKey(t testing.TB, s restic.Server, password string) *restic.Key {
k, err := restic.CreateKey(s, password)
OK(t, err)
return k
}
func TestRepo(t *testing.T) {
s := setupBackend(t)
defer teardownBackend(t, s)
_ = setupKey(t, s, testPassword)
}

map.go (49 lines changed)
View File

@ -1,7 +1,6 @@
package restic
import (
"bytes"
"encoding/json"
"errors"
"sort"
@ -9,10 +8,11 @@ import (
"github.com/restic/restic/backend"
"github.com/restic/restic/debug"
"github.com/restic/restic/server"
)
type Map struct {
list []Blob
list []server.Blob
m sync.Mutex
}
@ -20,11 +20,11 @@ var ErrBlobNotFound = errors.New("Blob not found")
func NewMap() *Map {
return &Map{
list: []Blob{},
list: []server.Blob{},
}
}
func (bl *Map) find(blob Blob, checkSize bool) (int, Blob, error) {
func (bl *Map) find(blob server.Blob, checkSize bool) (int, server.Blob, error) {
pos := sort.Search(len(bl.list), func(i int) bool {
return blob.ID.Compare(bl.list[i].ID) >= 0
})
@ -36,10 +36,10 @@ func (bl *Map) find(blob Blob, checkSize bool) (int, Blob, error) {
}
}
return pos, Blob{}, ErrBlobNotFound
return pos, server.Blob{}, ErrBlobNotFound
}
func (bl *Map) Find(blob Blob) (Blob, error) {
func (bl *Map) Find(blob server.Blob) (server.Blob, error) {
bl.m.Lock()
defer bl.m.Unlock()
@ -47,11 +47,11 @@ func (bl *Map) Find(blob Blob) (Blob, error) {
return blob, err
}
func (bl *Map) FindID(id backend.ID) (Blob, error) {
func (bl *Map) FindID(id backend.ID) (server.Blob, error) {
bl.m.Lock()
defer bl.m.Unlock()
_, blob, err := bl.find(Blob{ID: id}, false)
_, blob, err := bl.find(server.Blob{ID: id}, false)
return blob, err
}
@ -66,7 +66,7 @@ func (bl *Map) Merge(other *Map) {
}
}
func (bl *Map) insert(blob Blob) Blob {
func (bl *Map) insert(blob server.Blob) server.Blob {
pos, b, err := bl.find(blob, true)
if err == nil {
// already present
@ -75,14 +75,14 @@ func (bl *Map) insert(blob Blob) Blob {
// insert blob
// https://code.google.com/p/go-wiki/wiki/SliceTricks
bl.list = append(bl.list, Blob{})
bl.list = append(bl.list, server.Blob{})
copy(bl.list[pos+1:], bl.list[pos:])
bl.list[pos] = blob
return blob
}
func (bl *Map) Insert(blob Blob) Blob {
func (bl *Map) Insert(blob server.Blob) server.Blob {
bl.m.Lock()
defer bl.m.Unlock()
@ -152,7 +152,7 @@ func (bl *Map) Equals(other *Map) bool {
// Each calls f for each blob in the Map. While Each is running, no other
// operation is possible, since a mutex is held for the whole time.
func (bl *Map) Each(f func(blob Blob)) {
func (bl *Map) Each(f func(blob server.Blob)) {
bl.m.Lock()
defer bl.m.Unlock()
@ -162,13 +162,13 @@ func (bl *Map) Each(f func(blob Blob)) {
}
// Select returns a list of blobs from the plaintext IDs given in list.
func (bl *Map) Select(list backend.IDs) (Blobs, error) {
func (bl *Map) Select(list backend.IDs) (server.Blobs, error) {
bl.m.Lock()
defer bl.m.Unlock()
blobs := make(Blobs, 0, len(list))
blobs := make(server.Blobs, 0, len(list))
for _, id := range list {
_, blob, err := bl.find(Blob{ID: id}, false)
_, blob, err := bl.find(server.Blob{ID: id}, false)
if err != nil {
return nil, err
}
@ -210,27 +210,10 @@ func (m *Map) DeleteID(id backend.ID) {
m.m.Lock()
defer m.m.Unlock()
pos, _, err := m.find(Blob{ID: id}, false)
pos, _, err := m.find(server.Blob{ID: id}, false)
if err != nil {
return
}
m.list = append(m.list[:pos], m.list[pos+1:]...)
}
// Compare compares two blobs by comparing the ID and the size. It returns -1,
// 0, or 1.
func (blob Blob) Compare(other Blob) int {
if res := bytes.Compare(other.ID, blob.ID); res != 0 {
return res
}
if blob.Size < other.Size {
return -1
}
if blob.Size > other.Size {
return 1
}
return 0
}
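Map keeps its blobs in a slice sorted by ID and inserts with the SliceTricks pattern referenced in the comment above. A self-contained sketch of that insertion on a plain int slice standing in for []server.Blob:

package main

import (
	"fmt"
	"sort"
)

// insertSorted places x into an already sorted slice, mirroring the slice
// handling in Map.insert: binary-search the position, grow the slice by one,
// shift the tail, store x.
func insertSorted(list []int, x int) []int {
	pos := sort.Search(len(list), func(i int) bool { return list[i] >= x })
	list = append(list, 0)         // grow by one element
	copy(list[pos+1:], list[pos:]) // shift the tail one position to the right
	list[pos] = x                  // put the new element in place
	return list
}

func main() {
	fmt.Println(insertSorted([]int{1, 3, 7}, 5)) // [1 3 5 7]
}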

View File

@ -12,6 +12,7 @@ import (
"github.com/restic/restic"
"github.com/restic/restic/backend"
"github.com/restic/restic/server"
. "github.com/restic/restic/test"
)
@ -26,8 +27,8 @@ func randomID() []byte {
return buf
}
func newBlob() restic.Blob {
return restic.Blob{
func newBlob() server.Blob {
return server.Blob{
ID: randomID(),
Size: uint64(mrand.Uint32()),
Storage: randomID(),
@ -46,7 +47,7 @@ func TestMap(t *testing.T) {
bl.Insert(newBlob())
}
b2, err := bl.Find(restic.Blob{ID: b.ID, Size: b.Size})
b2, err := bl.Find(server.Blob{ID: b.ID, Size: b.Size})
OK(t, err)
Assert(t, b2.Compare(b) == 0, "items are not equal: want %v, got %v", b, b2)
@ -78,7 +79,7 @@ func TestMap(t *testing.T) {
// Test JSON encode/decode
func TestMapJSON(t *testing.T) {
bl := restic.NewMap()
b := restic.Blob{ID: randomID()}
b := server.Blob{ID: randomID()}
bl.Insert(b)
b2, err := bl.Find(b)

View File

@ -10,6 +10,7 @@ import (
"github.com/juju/arrar"
"github.com/restic/restic/backend"
"github.com/restic/restic/server"
)
type Node struct {
@ -37,7 +38,7 @@ type Node struct {
path string
err error
blobs Blobs
blobs server.Blobs
}
func (n Node) String() string {
@ -95,7 +96,7 @@ func nodeTypeFromFileInfo(path string, fi os.FileInfo) string {
return ""
}
func CreateNodeAt(node *Node, m *Map, s Server, path string) error {
func CreateNodeAt(node *Node, m *Map, s *server.Server, path string) error {
switch node.Type {
case "dir":
err := os.Mkdir(path, node.Mode)

View File

@ -9,10 +9,11 @@ import (
"github.com/juju/arrar"
"github.com/restic/restic/backend"
"github.com/restic/restic/server"
)
type Restorer struct {
s Server
s *server.Server
sn *Snapshot
Error func(dir string, node *Node, err error) error
@ -20,7 +21,7 @@ type Restorer struct {
}
// NewRestorer creates a restorer preloaded with the content from the snapshot snid.
func NewRestorer(s Server, snid backend.ID) (*Restorer, error) {
func NewRestorer(s *server.Server, snid backend.ID) (*Restorer, error) {
r := &Restorer{s: s}
var err error
@ -36,7 +37,7 @@ func NewRestorer(s Server, snid backend.ID) (*Restorer, error) {
return r, nil
}
func (res *Restorer) to(dst string, dir string, treeBlob Blob) error {
func (res *Restorer) to(dst string, dir string, treeBlob server.Blob) error {
tree, err := LoadTree(res.s, treeBlob)
if err != nil {
return res.Error(dir, nil, arrar.Annotate(err, "LoadTree"))

View File

@ -1,6 +1,7 @@
package restic
package server
import (
"bytes"
"fmt"
"github.com/restic/restic/backend"
@ -28,3 +29,20 @@ func (b Blob) String() string {
b.ID.Str(), b.Size,
b.Storage.Str(), b.StorageSize)
}
// Compare compares two blobs by comparing the ID and the size. It returns -1,
// 0, or 1.
func (blob Blob) Compare(other Blob) int {
if res := bytes.Compare(other.ID, blob.ID); res != 0 {
return res
}
if blob.Size < other.Size {
return -1
}
if blob.Size > other.Size {
return 1
}
return 0
}
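Blob moved into the server package, so its ordering helper moved with it (the Compare function deleted from map.go above reappears here). A trivial usage sketch:

package example

import "github.com/restic/restic/server"

// sameBlob reports whether two blobs are equal under the ordering defined by
// Compare: identical plaintext ID and identical size.
func sameBlob(a, b server.Blob) bool {
	return a.Compare(b) == 0
}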

View File

@ -1,4 +1,4 @@
package restic
package server
import (
"crypto/rand"
@ -52,12 +52,12 @@ type Key struct {
// CreateKey initializes a master key in the given backend and encrypts it with
// the password.
func CreateKey(s Server, password string) (*Key, error) {
func CreateKey(s *Server, password string) (*Key, error) {
return AddKey(s, password, nil)
}
// OpenKey tries to decrypt the key specified by name with the given password.
func OpenKey(s Server, name string, password string) (*Key, error) {
func OpenKey(s *Server, name string, password string) (*Key, error) {
k, err := LoadKey(s, name)
if err != nil {
return nil, err
@ -104,7 +104,7 @@ func OpenKey(s Server, name string, password string) (*Key, error) {
// SearchKey tries to decrypt all keys in the backend with the given password.
// If none could be found, ErrNoKeyFound is returned.
func SearchKey(s Server, password string) (*Key, error) {
func SearchKey(s *Server, password string) (*Key, error) {
// try all keys in repo
done := make(chan struct{})
defer close(done)
@ -121,7 +121,7 @@ func SearchKey(s Server, password string) (*Key, error) {
}
// LoadKey loads a key from the backend.
func LoadKey(s Server, name string) (*Key, error) {
func LoadKey(s *Server, name string) (*Key, error) {
// extract data from repo
rd, err := s.be.Get(backend.Key, name)
if err != nil {
@ -141,7 +141,7 @@ func LoadKey(s Server, name string) (*Key, error) {
}
// AddKey adds a new key to an already existing repository.
func AddKey(s Server, password string, template *Key) (*Key, error) {
func AddKey(s *Server, password string, template *Key) (*Key, error) {
// fill meta data about key
newkey := &Key{
Created: time.Now(),
@ -196,7 +196,7 @@ func AddKey(s Server, password string, template *Key) (*Key, error) {
return nil, err
}
newkey.Data, err = crypto.Encrypt(newkey.user, GetChunkBuf("key"), buf)
newkey.Data, err = crypto.Encrypt(newkey.user, nil, buf)
// dump as json
buf, err = json.Marshal(newkey)
@ -226,8 +226,6 @@ func AddKey(s Server, password string, template *Key) (*Key, error) {
newkey.name = name
FreeChunkBuf("key", newkey.Data)
return newkey, nil
}
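A hedged sketch of opening a repository with the relocated key lookup; it assumes an already constructed *server.Server and uses only functions from this file:

package example

import "github.com/restic/restic/server"

// openWithPassword tries the password against every key stored in the
// repository and attaches the first match to the server.
func openWithPassword(s *server.Server, password string) error {
	key, err := server.SearchKey(s, password)
	if err != nil {
		return err // no stored key could be decrypted with this password
	}

	s.SetKey(key)
	return nil
}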

server/key_test.go (new file, 13 lines)
View File

@ -0,0 +1,13 @@
package server_test
import (
"testing"
. "github.com/restic/restic/test"
)
func TestRepo(t *testing.T) {
s := SetupBackend(t)
defer TeardownBackend(t, s)
_ = SetupKey(t, s, TestPassword)
}

server/pool.go (new file, 21 lines)
View File

@ -0,0 +1,21 @@
package server
import (
"sync"
"github.com/restic/restic/chunker"
)
var bufPool = sync.Pool{
New: func() interface{} {
return make([]byte, chunker.MinSize)
},
}
func getBuf() []byte {
return bufPool.Get().([]byte)
}
func freeBuf(data []byte) {
bufPool.Put(data)
}
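server/pool.go replaces the old chunk-buffer helpers with a package-private sync.Pool. A standalone sketch of the same pattern; the concrete buffer size stands in for chunker.MinSize and is an assumption for illustration:

package main

import (
	"fmt"
	"sync"
)

// bufPool hands out reusable byte slices instead of allocating a fresh buffer
// for every encryption, like the pool introduced in server/pool.go.
var bufPool = sync.Pool{
	New: func() interface{} {
		return make([]byte, 512*1024) // stands in for chunker.MinSize
	},
}

func getBuf() []byte   { return bufPool.Get().([]byte) }
func freeBuf(b []byte) { bufPool.Put(b) }

func main() {
	buf := getBuf()
	defer freeBuf(buf)
	fmt.Printf("got a reusable buffer of %d bytes\n", len(buf))
}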

View File

@ -1,4 +1,4 @@
package restic
package server
import (
"crypto/sha256"
@ -7,12 +7,9 @@ import (
"fmt"
"io"
"io/ioutil"
"sync"
"github.com/restic/restic/backend"
"github.com/restic/restic/chunker"
"github.com/restic/restic/crypto"
"github.com/restic/restic/debug"
)
type Server struct {
@ -20,8 +17,8 @@ type Server struct {
key *Key
}
func NewServer(be backend.Backend) Server {
return Server{be: be}
func NewServer(be backend.Backend) *Server {
return &Server{be: be}
}
func (s *Server) SetKey(k *Key) {
@ -36,19 +33,19 @@ func (s *Server) ChunkerPolynomial() chunker.Pol {
// Find loads the list of all blobs of type t and searches for names which start
// with prefix. If none is found, nil and ErrNoIDPrefixFound is returned. If
// more than one is found, nil and ErrMultipleIDMatches is returned.
func (s Server) Find(t backend.Type, prefix string) (string, error) {
func (s *Server) Find(t backend.Type, prefix string) (string, error) {
return backend.Find(s.be, t, prefix)
}
// FindSnapshot takes a string and tries to find a snapshot whose ID matches
// the string as closely as possible.
func (s Server) FindSnapshot(name string) (string, error) {
func (s *Server) FindSnapshot(name string) (string, error) {
return backend.FindSnapshot(s.be, name)
}
// PrefixLength returns the number of bytes required so that all prefixes of
// all IDs of type t are unique.
func (s Server) PrefixLength(t backend.Type) (int, error) {
func (s *Server) PrefixLength(t backend.Type) (int, error) {
return backend.PrefixLength(s.be, t)
}
@ -56,7 +53,7 @@ func (s Server) PrefixLength(t backend.Type) (int, error) {
// backend. If the blob specifies an ID, the decrypted plaintext is checked
// against this ID. The same goes for blob.Size and blob.StorageSize: If they
// are set to a value > 0, this value is checked.
func (s Server) Load(t backend.Type, blob Blob) ([]byte, error) {
func (s *Server) Load(t backend.Type, blob Blob) ([]byte, error) {
// load data
rd, err := s.be.Get(t, blob.Storage.String())
if err != nil {
@ -101,13 +98,13 @@ func (s Server) Load(t backend.Type, blob Blob) ([]byte, error) {
}
// Load tries to load and decrypt content identified by t and id from the backend.
func (s Server) LoadID(t backend.Type, storageID backend.ID) ([]byte, error) {
func (s *Server) LoadID(t backend.Type, storageID backend.ID) ([]byte, error) {
return s.Load(t, Blob{Storage: storageID})
}
// LoadJSON calls Load() to get content from the backend and afterwards calls
// json.Unmarshal on the item.
func (s Server) LoadJSON(t backend.Type, blob Blob, item interface{}) error {
func (s *Server) LoadJSON(t backend.Type, blob Blob, item interface{}) error {
buf, err := s.Load(t, blob)
if err != nil {
return err
@ -118,7 +115,7 @@ func (s Server) LoadJSON(t backend.Type, blob Blob, item interface{}) error {
// LoadJSONID calls Load() to get content from the backend and afterwards calls
// json.Unmarshal on the item.
func (s Server) LoadJSONID(t backend.Type, id backend.ID, item interface{}) error {
func (s *Server) LoadJSONID(t backend.Type, id backend.ID, item interface{}) error {
// read
rd, err := s.be.Get(t, id.String())
if err != nil {
@ -144,7 +141,7 @@ func (s Server) LoadJSONID(t backend.Type, id backend.ID, item interface{}) erro
}
// Save encrypts data and stores it to the backend as type t.
func (s Server) Save(t backend.Type, data []byte, id backend.ID) (Blob, error) {
func (s *Server) Save(t backend.Type, data []byte, id backend.ID) (Blob, error) {
if id == nil {
// compute plaintext hash
id = backend.Hash(data)
@ -156,20 +153,8 @@ func (s Server) Save(t backend.Type, data []byte, id backend.ID) (Blob, error) {
Size: uint64(len(data)),
}
var ciphertext []byte
// if the data is small enough, use a slice from the pool
if len(data) <= maxCiphertextSize-crypto.Extension {
ciphertext = GetChunkBuf("ch.Save()")
defer FreeChunkBuf("ch.Save()", ciphertext)
} else {
l := len(data) + crypto.Extension
debug.Log("Server.Save", "create large slice of %d bytes for ciphertext", l)
// use a new slice
ciphertext = make([]byte, l)
}
ciphertext := getBuf()
defer freeBuf(ciphertext)
// encrypt blob
ciphertext, err := s.Encrypt(ciphertext, data)
@ -203,7 +188,7 @@ func (s Server) Save(t backend.Type, data []byte, id backend.ID) (Blob, error) {
}
// SaveFrom encrypts data read from rd and stores it to the backend as type t.
func (s Server) SaveFrom(t backend.Type, id backend.ID, length uint, rd io.Reader) (Blob, error) {
func (s *Server) SaveFrom(t backend.Type, id backend.ID, length uint, rd io.Reader) (Blob, error) {
if id == nil {
return Blob{}, errors.New("id is nil")
}
@ -244,7 +229,7 @@ func (s Server) SaveFrom(t backend.Type, id backend.ID, length uint, rd io.Reade
// SaveJSON serialises item as JSON and encrypts and saves it in the backend as
// type t.
func (s Server) SaveJSON(t backend.Type, item interface{}) (Blob, error) {
func (s *Server) SaveJSON(t backend.Type, item interface{}) (Blob, error) {
backendBlob, err := s.be.Create()
if err != nil {
return Blob{}, fmt.Errorf("Create: %v", err)
@ -284,12 +269,12 @@ func (s Server) SaveJSON(t backend.Type, item interface{}) (Blob, error) {
}
// Returns the backend used for this server.
func (s Server) Backend() backend.Backend {
func (s *Server) Backend() backend.Backend {
return s.be
}
func (s *Server) SearchKey(password string) error {
key, err := SearchKey(*s, password)
key, err := SearchKey(s, password)
if err != nil {
return err
}
@ -299,7 +284,7 @@ func (s *Server) SearchKey(password string) error {
return nil
}
func (s Server) Decrypt(ciphertext []byte) ([]byte, error) {
func (s *Server) Decrypt(ciphertext []byte) ([]byte, error) {
if s.key == nil {
return nil, errors.New("key for server not set")
}
@ -307,7 +292,7 @@ func (s Server) Decrypt(ciphertext []byte) ([]byte, error) {
return s.key.Decrypt([]byte{}, ciphertext)
}
func (s Server) Encrypt(ciphertext, plaintext []byte) ([]byte, error) {
func (s *Server) Encrypt(ciphertext, plaintext []byte) ([]byte, error) {
if s.key == nil {
return nil, errors.New("key for server not set")
}
@ -315,67 +300,12 @@ func (s Server) Encrypt(ciphertext, plaintext []byte) ([]byte, error) {
return s.key.Encrypt(ciphertext, plaintext)
}
func (s Server) Key() *Key {
func (s *Server) Key() *Key {
return s.key
}
type ServerStats struct {
Blobs, Trees uint
}
// Stats returns statistics for this backend and the server.
func (s Server) Stats() (ServerStats, error) {
blobs := backend.NewIDSet()
// load all trees, in parallel
worker := func(wg *sync.WaitGroup, b <-chan Blob) {
for blob := range b {
tree, err := LoadTree(s, blob)
// ignore error and advance to next tree
if err != nil {
return
}
for _, id := range tree.Map.StorageIDs() {
blobs.Insert(id)
}
}
wg.Done()
}
blobCh := make(chan Blob)
// start workers
var wg sync.WaitGroup
for i := 0; i < maxConcurrency; i++ {
wg.Add(1)
go worker(&wg, blobCh)
}
// list ids
trees := 0
done := make(chan struct{})
defer close(done)
for name := range s.List(backend.Tree, done) {
trees++
id, err := backend.ParseID(name)
if err != nil {
debug.Log("Server.Stats", "unable to parse name %v as id: %v", name, err)
continue
}
blobCh <- Blob{Storage: id}
}
close(blobCh)
// wait for workers
wg.Wait()
return ServerStats{Blobs: uint(blobs.Len()), Trees: uint(trees)}, nil
}
// Count returns the number of blobs of a given type in the backend.
func (s Server) Count(t backend.Type) (n int) {
func (s *Server) Count(t backend.Type) (n int) {
for _ = range s.List(t, nil) {
n++
}
@ -385,23 +315,27 @@ func (s Server) Count(t backend.Type) (n int) {
// Proxy methods to backend
func (s Server) List(t backend.Type, done <-chan struct{}) <-chan string {
func (s *Server) Get(t backend.Type, name string) (io.ReadCloser, error) {
return s.be.Get(t, name)
}
func (s *Server) List(t backend.Type, done <-chan struct{}) <-chan string {
return s.be.List(t, done)
}
func (s Server) Test(t backend.Type, name string) (bool, error) {
func (s *Server) Test(t backend.Type, name string) (bool, error) {
return s.be.Test(t, name)
}
func (s Server) Remove(t backend.Type, name string) error {
func (s *Server) Remove(t backend.Type, name string) error {
return s.be.Remove(t, name)
}
func (s Server) Close() error {
func (s *Server) Close() error {
return s.be.Close()
}
func (s Server) Delete() error {
func (s *Server) Delete() error {
if b, ok := s.be.(backend.Deleter); ok {
return b.Delete()
}
@ -409,10 +343,10 @@ func (s Server) Delete() error {
return errors.New("Delete() called for backend that does not implement this method")
}
func (s Server) ID() string {
func (s *Server) ID() string {
return s.be.ID()
}
func (s Server) Location() string {
func (s *Server) Location() string {
return s.be.Location()
}
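Apart from the pointer receivers and the new Get proxy, the Server API is unchanged. A sketch of the encrypted JSON round-trip, mirroring TestSaveJSON further down (the payload struct is illustrative):

package example

import (
	"github.com/restic/restic/backend"
	"github.com/restic/restic/server"
)

type payload struct {
	Foo uint32
	Bar string
}

// roundTrip stores an item as an encrypted JSON blob and reads it back.
func roundTrip(s *server.Server) (payload, error) {
	in := payload{Foo: 23, Bar: "restic"}

	// SaveJSON marshals, encrypts and uploads the item, returning a server.Blob
	blob, err := s.SaveJSON(backend.Tree, in)
	if err != nil {
		return payload{}, err
	}

	// LoadJSON fetches, decrypts and unmarshals it again
	var out payload
	err = s.LoadJSON(backend.Tree, blob, &out)
	return out, err
}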

View File

@ -1,10 +1,11 @@
package restic_test
package server_test
import (
"bytes"
"crypto/rand"
"crypto/sha256"
"encoding/json"
"flag"
"io"
"testing"
@ -13,6 +14,8 @@ import (
. "github.com/restic/restic/test"
)
var benchTestDir = flag.String("test.dir", ".", "dir used in benchmarks (default: .)")
type testJSONStruct struct {
Foo uint32
Bar string
@ -24,9 +27,9 @@ var serverTests = []testJSONStruct{
}
func TestSaveJSON(t *testing.T) {
server := setupBackend(t)
defer teardownBackend(t, server)
key := setupKey(t, server, "geheim")
server := SetupBackend(t)
defer TeardownBackend(t, server)
key := SetupKey(t, server, "geheim")
server.SetKey(key)
for _, obj := range serverTests {
@ -45,9 +48,9 @@ func TestSaveJSON(t *testing.T) {
}
func BenchmarkSaveJSON(t *testing.B) {
server := setupBackend(t)
defer teardownBackend(t, server)
key := setupKey(t, server, "geheim")
server := SetupBackend(t)
defer TeardownBackend(t, server)
key := SetupKey(t, server, "geheim")
server.SetKey(key)
obj := serverTests[0]
@ -72,9 +75,9 @@ func BenchmarkSaveJSON(t *testing.B) {
var testSizes = []int{5, 23, 2<<18 + 23, 1 << 20}
func TestSaveFrom(t *testing.T) {
server := setupBackend(t)
defer teardownBackend(t, server)
key := setupKey(t, server, "geheim")
server := SetupBackend(t)
defer TeardownBackend(t, server)
key := SetupKey(t, server, "geheim")
server.SetKey(key)
for _, size := range testSizes {
@ -102,9 +105,9 @@ func TestSaveFrom(t *testing.T) {
}
func BenchmarkSaveFrom(t *testing.B) {
server := setupBackend(t)
defer teardownBackend(t, server)
key := setupKey(t, server, "geheim")
server := SetupBackend(t)
defer TeardownBackend(t, server)
key := SetupKey(t, server, "geheim")
server.SetKey(key)
size := 4 << 20 // 4MiB
@ -125,37 +128,18 @@ func BenchmarkSaveFrom(t *testing.B) {
}
}
func TestServerStats(t *testing.T) {
if *benchArchiveDirectory == "" {
t.Skip("benchdir not set, skipping TestServerStats")
}
server := setupBackend(t)
defer teardownBackend(t, server)
key := setupKey(t, server, "geheim")
server.SetKey(key)
// archive a few files
sn := snapshot(t, server, *benchArchiveDirectory, nil)
t.Logf("archived snapshot %v", sn.ID())
stats, err := server.Stats()
OK(t, err)
t.Logf("stats: %v", stats)
}
func TestLoadJSONID(t *testing.T) {
if *benchArchiveDirectory == "" {
if *benchTestDir == "" {
t.Skip("benchdir not set, skipping TestServerStats")
}
server := setupBackend(t)
defer teardownBackend(t, server)
key := setupKey(t, server, "geheim")
server := SetupBackend(t)
defer TeardownBackend(t, server)
key := SetupKey(t, server, "geheim")
server.SetKey(key)
// archive a few files
sn := snapshot(t, server, *benchArchiveDirectory, nil)
sn := SnapshotDir(t, server, *benchTestDir, nil)
t.Logf("archived snapshot %v", sn.ID())
// benchmark loading first tree
@ -173,17 +157,17 @@ func TestLoadJSONID(t *testing.T) {
}
func BenchmarkLoadJSONID(t *testing.B) {
if *benchArchiveDirectory == "" {
if *benchTestDir == "" {
t.Skip("benchdir not set, skipping TestServerStats")
}
server := setupBackend(t)
defer teardownBackend(t, server)
key := setupKey(t, server, "geheim")
server := SetupBackend(t)
defer TeardownBackend(t, server)
key := SetupKey(t, server, "geheim")
server.SetKey(key)
// archive a few files
sn := snapshot(t, server, *benchArchiveDirectory, nil)
sn := SnapshotDir(t, server, *benchTestDir, nil)
t.Logf("archived snapshot %v", sn.ID())
t.ResetTimer()

View File

@ -7,12 +7,13 @@ import (
"time"
"github.com/restic/restic/backend"
"github.com/restic/restic/server"
)
type Snapshot struct {
Time time.Time `json:"time"`
Parent backend.ID `json:"parent,omitempty"`
Tree Blob `json:"tree"`
Tree server.Blob `json:"tree"`
Paths []string `json:"paths"`
Hostname string `json:"hostname,omitempty"`
Username string `json:"username,omitempty"`
@ -49,7 +50,7 @@ func NewSnapshot(paths []string) (*Snapshot, error) {
return sn, nil
}
func LoadSnapshot(s Server, id backend.ID) (*Snapshot, error) {
func LoadSnapshot(s *server.Server, id backend.ID) (*Snapshot, error) {
sn := &Snapshot{id: id}
err := s.LoadJSONID(backend.Snapshot, id, sn)
if err != nil {

View File

@ -5,10 +5,11 @@ import (
"time"
"github.com/restic/restic"
"github.com/restic/restic/server"
. "github.com/restic/restic/test"
)
func testSnapshot(t *testing.T, s restic.Server) {
func testSnapshot(t *testing.T, s *server.Server) {
var err error
sn, err := restic.NewSnapshot([]string{"/home/foobar"})
OK(t, err)
@ -22,8 +23,8 @@ func testSnapshot(t *testing.T, s restic.Server) {
}
func TestSnapshot(t *testing.T) {
repo := setupBackend(t)
defer teardownBackend(t, repo)
s := SetupBackend(t)
defer TeardownBackend(t, s)
testSnapshot(t, repo)
testSnapshot(t, s)
}

test/backend.go (new file, 59 lines)
View File

@ -0,0 +1,59 @@
package test_helper
import (
"flag"
"io/ioutil"
"os"
"path/filepath"
"testing"
"github.com/restic/restic"
"github.com/restic/restic/backend"
"github.com/restic/restic/backend/local"
"github.com/restic/restic/server"
)
var TestPassword = "foobar"
var TestCleanup = flag.Bool("test.cleanup", true, "clean up after running tests (remove local backend directory with all content)")
var TestTempDir = flag.String("test.tempdir", "", "use this directory for temporary storage (default: system temp dir)")
func SetupBackend(t testing.TB) *server.Server {
tempdir, err := ioutil.TempDir(*TestTempDir, "restic-test-")
OK(t, err)
// create repository below temp dir
b, err := local.Create(filepath.Join(tempdir, "repo"))
OK(t, err)
// set cache dir
err = os.Setenv("RESTIC_CACHE", filepath.Join(tempdir, "cache"))
OK(t, err)
return server.NewServer(b)
}
func TeardownBackend(t testing.TB, s *server.Server) {
if !*TestCleanup {
l := s.Backend().(*local.Local)
t.Logf("leaving local backend at %s\n", l.Location())
return
}
OK(t, s.Delete())
}
func SetupKey(t testing.TB, s *server.Server, password string) *server.Key {
k, err := server.CreateKey(s, password)
OK(t, err)
return k
}
func SnapshotDir(t testing.TB, server *server.Server, path string, parent backend.ID) *restic.Snapshot {
arch, err := restic.NewArchiver(server)
OK(t, err)
OK(t, arch.Preload())
sn, _, err := arch.Snapshot(nil, []string{path}, parent)
OK(t, err)
return sn
}
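These helpers are exported so tests in any package (restic_test, server_test, ...) can share the same setup. A sketch of a test written against them; the directory passed to SnapshotDir is an illustrative placeholder:

package restic_test

import (
	"testing"

	. "github.com/restic/restic/test"
)

func TestHelpersSketch(t *testing.T) {
	s := SetupBackend(t)
	defer TeardownBackend(t, s)
	s.SetKey(SetupKey(t, s, TestPassword))

	// archive a directory and report the resulting snapshot ID
	sn := SnapshotDir(t, s, "testdata", nil)
	t.Logf("created snapshot %v", sn.ID())
}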

View File

@ -7,6 +7,7 @@ import (
"github.com/restic/restic/backend"
"github.com/restic/restic/debug"
"github.com/restic/restic/server"
)
type Tree struct {
@ -30,7 +31,7 @@ func (t Tree) String() string {
return fmt.Sprintf("Tree<%d nodes, %d blobs>", len(t.Nodes), len(t.Map.list))
}
func LoadTree(s Server, blob Blob) (*Tree, error) {
func LoadTree(s *server.Server, blob server.Blob) (*Tree, error) {
tree := &Tree{}
err := s.LoadJSON(backend.Tree, blob, tree)
if err != nil {

View File

@ -22,7 +22,7 @@ var testFiles = []struct {
// prepareDir creates a temporary directory and returns it.
func prepareDir(t *testing.T) string {
tempdir, err := ioutil.TempDir(*testTempDir, "restic-test-")
tempdir, err := ioutil.TempDir(*TestTempDir, "restic-test-")
OK(t, err)
for _, test := range testFiles {
@ -49,7 +49,7 @@ func prepareDir(t *testing.T) string {
func TestTree(t *testing.T) {
dir := prepareDir(t)
defer func() {
if *testCleanup {
if *TestCleanup {
OK(t, os.RemoveAll(dir))
}
}()

View File

@ -4,6 +4,7 @@ import (
"path/filepath"
"github.com/restic/restic/debug"
"github.com/restic/restic/server"
)
type WalkTreeJob struct {
@ -14,7 +15,7 @@ type WalkTreeJob struct {
Tree *Tree
}
func walkTree(s Server, path string, treeBlob Blob, done chan struct{}, jobCh chan<- WalkTreeJob) {
func walkTree(s *server.Server, path string, treeBlob server.Blob, done chan struct{}, jobCh chan<- WalkTreeJob) {
debug.Log("walkTree", "start on %q (%v)", path, treeBlob)
// load tree
t, err := LoadTree(s, treeBlob)
@ -49,7 +50,7 @@ func walkTree(s Server, path string, treeBlob Blob, done chan struct{}, jobCh ch
// WalkTree walks the tree specified by ID recursively and sends a job for each
// file and directory it finds. When the channel done is closed, processing
// stops.
func WalkTree(server Server, blob Blob, done chan struct{}, jobCh chan<- WalkTreeJob) {
func WalkTree(server *server.Server, blob server.Blob, done chan struct{}, jobCh chan<- WalkTreeJob) {
debug.Log("WalkTree", "start on %v", blob)
walkTree(server, "", blob, done, jobCh)
close(jobCh)
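A sketch of driving the walker after the move: WalkTree streams one WalkTreeJob per file and directory into the channel and closes it when it is done, so a plain range loop is enough on the consumer side. The snapshot is assumed to be loaded already.

package example

import (
	"github.com/restic/restic"
	"github.com/restic/restic/server"
)

// countTreeEntries walks the tree of a loaded snapshot and returns how many
// jobs (files and directories) the walker emitted.
func countTreeEntries(s *server.Server, sn *restic.Snapshot) int {
	done := make(chan struct{})
	defer close(done)

	jobCh := make(chan restic.WalkTreeJob)
	go restic.WalkTree(s, sn.Tree, done, jobCh) // WalkTree closes jobCh when finished

	n := 0
	for range jobCh {
		n++
	}
	return n
}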

View File

@ -16,9 +16,9 @@ func TestWalkTree(t *testing.T) {
dirs, err := filepath.Glob(*testWalkDirectory)
OK(t, err)
server := setupBackend(t)
defer teardownBackend(t, server)
key := setupKey(t, server, "geheim")
server := SetupBackend(t)
defer TeardownBackend(t, server)
key := SetupKey(t, server, "geheim")
server.SetKey(key)
// archive a few files