Mirror of https://github.com/octoleo/restic.git (synced 2024-11-22 21:05:10 +00:00)

Commit c57c4f0b8f: Merge branch 'add_config'
@@ -184,7 +184,7 @@ func (arch *Archiver) SaveFile(p *Progress, node *Node) error {
 	}

 	chnker := GetChunker("archiver.SaveFile")
-	chnker.Reset(file, arch.s.ChunkerPolynomial())
+	chnker.Reset(file, arch.s.Config.ChunkerPolynomial)
 	resultChannels := [](<-chan saveResult){}
 	defer FreeChunker("archiver.SaveFile", chnker)
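
This is the core change of the commit: the chunker polynomial is read from the repository Config instead of the master key. Below is a minimal sketch (not part of the diff) of how a caller might reach the same value after opening a repository; the repository path and file name are placeholders, and the sketch assumes only identifiers visible elsewhere in this commit (local.Open, server.NewServer, Server.SearchKey, restic.GetChunker).

    package main

    import (
        "fmt"
        "os"

        "github.com/restic/restic"
        "github.com/restic/restic/backend/local"
        "github.com/restic/restic/server"
    )

    func main() {
        // Open an existing local repository (the path is only an example).
        be, err := local.Open("/tmp/restic-repo")
        if err != nil {
            fmt.Fprintln(os.Stderr, err)
            os.Exit(1)
        }

        s := server.NewServer(be)

        // SearchKey loads the master key and, after this commit, the config,
        // so s.Config.ChunkerPolynomial is populated from here on.
        if err := s.SearchKey("geheim"); err != nil {
            fmt.Fprintln(os.Stderr, err)
            os.Exit(1)
        }

        f, err := os.Open("some-large-file")
        if err != nil {
            fmt.Fprintln(os.Stderr, err)
            os.Exit(1)
        }
        defer f.Close()

        // Same pattern as Archiver.SaveFile above: reset a pooled chunker
        // with the per-repository polynomial taken from the config.
        ch := restic.GetChunker("example")
        defer restic.FreeChunker("example", ch)
        ch.Reset(f, s.Config.ChunkerPolynomial)
    }
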
@ -9,8 +9,8 @@ import (
|
||||
"github.com/restic/restic"
|
||||
"github.com/restic/restic/backend"
|
||||
"github.com/restic/restic/chunker"
|
||||
"github.com/restic/restic/crypto"
|
||||
"github.com/restic/restic/pack"
|
||||
"github.com/restic/restic/server"
|
||||
. "github.com/restic/restic/test"
|
||||
)
|
||||
|
||||
@ -24,7 +24,7 @@ type Rdr interface {
|
||||
io.ReaderAt
|
||||
}
|
||||
|
||||
func benchmarkChunkEncrypt(b testing.TB, buf, buf2 []byte, rd Rdr, key *server.Key) {
|
||||
func benchmarkChunkEncrypt(b testing.TB, buf, buf2 []byte, rd Rdr, key *crypto.Key) {
|
||||
ch := restic.GetChunker("BenchmarkChunkEncrypt")
|
||||
rd.Seek(0, 0)
|
||||
ch.Reset(rd, testPol)
|
||||
@ -44,7 +44,7 @@ func benchmarkChunkEncrypt(b testing.TB, buf, buf2 []byte, rd Rdr, key *server.K
|
||||
OK(b, err)
|
||||
Assert(b, uint(n) == chunk.Length, "invalid length: got %d, expected %d", n, chunk.Length)
|
||||
|
||||
_, err = key.Encrypt(buf2, buf)
|
||||
_, err = crypto.Encrypt(key, buf2, buf)
|
||||
OK(b, err)
|
||||
}
|
||||
|
||||
@ -55,9 +55,8 @@ func BenchmarkChunkEncrypt(b *testing.B) {
|
||||
data := Random(23, 10<<20) // 10MiB
|
||||
rd := bytes.NewReader(data)
|
||||
|
||||
be := SetupBackend(b)
|
||||
defer TeardownBackend(b, be)
|
||||
key := SetupKey(b, be, "geheim")
|
||||
s := SetupBackend(b)
|
||||
defer TeardownBackend(b, s)
|
||||
|
||||
buf := restic.GetChunkBuf("BenchmarkChunkEncrypt")
|
||||
buf2 := restic.GetChunkBuf("BenchmarkChunkEncrypt")
|
||||
@ -66,14 +65,14 @@ func BenchmarkChunkEncrypt(b *testing.B) {
|
||||
b.SetBytes(int64(len(data)))
|
||||
|
||||
for i := 0; i < b.N; i++ {
|
||||
benchmarkChunkEncrypt(b, buf, buf2, rd, key)
|
||||
benchmarkChunkEncrypt(b, buf, buf2, rd, s.Key())
|
||||
}
|
||||
|
||||
restic.FreeChunkBuf("BenchmarkChunkEncrypt", buf)
|
||||
restic.FreeChunkBuf("BenchmarkChunkEncrypt", buf2)
|
||||
}
|
||||
|
||||
func benchmarkChunkEncryptP(b *testing.PB, buf []byte, rd Rdr, key *server.Key) {
|
||||
func benchmarkChunkEncryptP(b *testing.PB, buf []byte, rd Rdr, key *crypto.Key) {
|
||||
ch := restic.GetChunker("BenchmarkChunkEncryptP")
|
||||
rd.Seek(0, 0)
|
||||
ch.Reset(rd, testPol)
|
||||
@ -87,16 +86,15 @@ func benchmarkChunkEncryptP(b *testing.PB, buf []byte, rd Rdr, key *server.Key)
|
||||
// reduce length of chunkBuf
|
||||
buf = buf[:chunk.Length]
|
||||
io.ReadFull(chunk.Reader(rd), buf)
|
||||
key.Encrypt(buf, buf)
|
||||
crypto.Encrypt(key, buf, buf)
|
||||
}
|
||||
|
||||
restic.FreeChunker("BenchmarkChunkEncryptP", ch)
|
||||
}
|
||||
|
||||
func BenchmarkChunkEncryptParallel(b *testing.B) {
|
||||
be := SetupBackend(b)
|
||||
defer TeardownBackend(b, be)
|
||||
key := SetupKey(b, be, "geheim")
|
||||
s := SetupBackend(b)
|
||||
defer TeardownBackend(b, s)
|
||||
|
||||
data := Random(23, 10<<20) // 10MiB
|
||||
|
||||
@ -108,7 +106,7 @@ func BenchmarkChunkEncryptParallel(b *testing.B) {
|
||||
b.RunParallel(func(pb *testing.PB) {
|
||||
for pb.Next() {
|
||||
rd := bytes.NewReader(data)
|
||||
benchmarkChunkEncryptP(pb, buf, rd, key)
|
||||
benchmarkChunkEncryptP(pb, buf, rd, s.Key())
|
||||
}
|
||||
})
|
||||
|
||||
@ -118,8 +116,6 @@ func BenchmarkChunkEncryptParallel(b *testing.B) {
|
||||
func archiveDirectory(b testing.TB) {
|
||||
server := SetupBackend(b)
|
||||
defer TeardownBackend(b, server)
|
||||
key := SetupKey(b, server, "geheim")
|
||||
server.SetKey(key)
|
||||
|
||||
arch := restic.NewArchiver(server)
|
||||
|
||||
@ -154,8 +150,6 @@ func archiveWithDedup(t testing.TB) {
|
||||
|
||||
server := SetupBackend(t)
|
||||
defer TeardownBackend(t, server)
|
||||
key := SetupKey(t, server, "geheim")
|
||||
server.SetKey(key)
|
||||
|
||||
var cnt struct {
|
||||
before, after, after2 struct {
|
||||
@ -220,8 +214,6 @@ func BenchmarkLoadTree(t *testing.B) {
|
||||
|
||||
s := SetupBackend(t)
|
||||
defer TeardownBackend(t, s)
|
||||
key := SetupKey(t, s, "geheim")
|
||||
s.SetKey(key)
|
||||
|
||||
// archive a few files
|
||||
arch := restic.NewArchiver(s)
|
||||
|
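
The benchmark changes above reflect a pattern applied throughout this commit: encryption moves from methods on the server key to package-level crypto functions that take a *crypto.Key. A hedged sketch of the new call shape follows; encryptBlob is a hypothetical helper, not part of restic, and passing nil as the destination buffer is an assumption based on the removed comment "extended if necessary".

    package example

    import "github.com/restic/restic/crypto"

    // encryptBlob shows the call shape used after this commit: the key is passed
    // to package-level crypto functions instead of being a method receiver on a
    // server key. Passing nil as the destination lets Encrypt allocate a buffer
    // (assumption); the benchmarks above reuse preallocated chunk buffers.
    func encryptBlob(key *crypto.Key, plaintext []byte) ([]byte, error) {
        ciphertext, err := crypto.Encrypt(key, nil, plaintext)
        if err != nil {
            return nil, err
        }

        // Round trip: Decrypt expects the IV || Ciphertext || MAC layout that
        // Encrypt produces.
        if _, err := crypto.Decrypt(key, nil, ciphertext); err != nil {
            return nil, err
        }

        return ciphertext, nil
    }
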
@ -11,10 +11,7 @@ const (
|
||||
Lock = "lock"
|
||||
Snapshot = "snapshot"
|
||||
Index = "index"
|
||||
)
|
||||
|
||||
const (
|
||||
Version = 1
|
||||
Config = "config"
|
||||
)
|
||||
|
||||
// A Backend manages data stored somewhere.
|
||||
@ -43,17 +40,9 @@ type Backend interface {
|
||||
// Close the backend
|
||||
Close() error
|
||||
|
||||
Identifier
|
||||
Lister
|
||||
}
|
||||
|
||||
type Identifier interface {
|
||||
// ID returns a unique ID for a specific repository. This means restic can
|
||||
// recognize repositories accessed via different methods (e.g. local file
|
||||
// access and sftp).
|
||||
ID() string
|
||||
}
|
||||
|
||||
type Lister interface {
|
||||
// List returns a channel that yields all names of blobs of type t in
|
||||
// lexicographic order. A goroutine is started for this. If the channel
|
||||
|
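
backend.Config is the new blob type the server uses to store the repository configuration; the local and sftp backends further down map it to a single file named `config`. The following sketch is an assumption-laden illustration (the empty name argument in particular is inferred, not shown in the diff) of fetching the raw, still-encrypted blob through the backend interface:

    package example

    import (
        "io"
        "os"

        "github.com/restic/restic/backend"
    )

    // dumpRawConfig copies the raw (still encrypted) config blob to stdout. The
    // empty name is an assumption: the local and sftp backends shown below map
    // backend.Config to the single well-known file `config`, so no ID is needed.
    func dumpRawConfig(be backend.Backend) error {
        rd, err := be.Get(backend.Config, "")
        if err != nil {
            return err
        }
        defer rd.Close()

        _, err = io.Copy(os.Stdout, rd)
        return err
    }
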
@ -1,9 +1,6 @@
|
||||
package local
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
@ -11,7 +8,6 @@ import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/restic/restic/backend"
|
||||
)
|
||||
@ -19,9 +15,7 @@ import (
|
||||
var ErrWrongData = errors.New("wrong data returned by backend, checksum does not match")
|
||||
|
||||
type Local struct {
|
||||
p string
|
||||
ver uint
|
||||
id string
|
||||
p string
|
||||
}
|
||||
|
||||
// Open opens the local backend at dir.
|
||||
@ -36,68 +30,19 @@ func Open(dir string) (*Local, error) {
|
||||
filepath.Join(dir, backend.Paths.Temp),
|
||||
}
|
||||
|
||||
// test if all necessary dirs and files are there
|
||||
// test if all necessary dirs are there
|
||||
for _, d := range items {
|
||||
if _, err := os.Stat(d); err != nil {
|
||||
return nil, fmt.Errorf("%s does not exist", d)
|
||||
}
|
||||
}
|
||||
|
||||
// read version file
|
||||
f, err := os.Open(filepath.Join(dir, backend.Paths.Version))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to read version file: %v\n", err)
|
||||
}
|
||||
|
||||
var version uint
|
||||
n, err := fmt.Fscanf(f, "%d", &version)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if n != 1 {
|
||||
return nil, errors.New("could not read version from file")
|
||||
}
|
||||
|
||||
err = f.Close()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// check version
|
||||
if version != backend.Version {
|
||||
return nil, fmt.Errorf("wrong version %d", version)
|
||||
}
|
||||
|
||||
// read ID
|
||||
f, err = os.Open(filepath.Join(dir, backend.Paths.ID))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
buf, err := ioutil.ReadAll(f)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = f.Close()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
id := strings.TrimSpace(string(buf))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &Local{p: dir, ver: version, id: id}, nil
|
||||
return &Local{p: dir}, nil
|
||||
}
|
||||
|
||||
// Create creates all the necessary files and directories for a new local
|
||||
// backend at dir.
|
||||
// backend at dir. Afterwards a new config blob should be created.
|
||||
func Create(dir string) (*Local, error) {
|
||||
versionFile := filepath.Join(dir, backend.Paths.Version)
|
||||
idFile := filepath.Join(dir, backend.Paths.ID)
|
||||
dirs := []string{
|
||||
dir,
|
||||
filepath.Join(dir, backend.Paths.Data),
|
||||
@ -108,15 +53,10 @@ func Create(dir string) (*Local, error) {
|
||||
filepath.Join(dir, backend.Paths.Temp),
|
||||
}
|
||||
|
||||
// test if files already exist
|
||||
_, err := os.Lstat(versionFile)
|
||||
// test if config file already exists
|
||||
_, err := os.Lstat(backend.Paths.Config)
|
||||
if err == nil {
|
||||
return nil, errors.New("version file already exists")
|
||||
}
|
||||
|
||||
_, err = os.Lstat(idFile)
|
||||
if err == nil {
|
||||
return nil, errors.New("id file already exists")
|
||||
return nil, errors.New("config file already exists")
|
||||
}
|
||||
|
||||
// test if directories already exist
|
||||
@ -134,44 +74,6 @@ func Create(dir string) (*Local, error) {
|
||||
}
|
||||
}
|
||||
|
||||
// create version file
|
||||
f, err := os.Create(versionFile)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
_, err = fmt.Fprintf(f, "%d\n", backend.Version)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = f.Close()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// create ID file
|
||||
id := make([]byte, sha256.Size)
|
||||
_, err = rand.Read(id)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
f, err = os.Create(idFile)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
_, err = fmt.Fprintln(f, hex.EncodeToString(id))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = f.Close()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// open backend
|
||||
return Open(dir)
|
||||
}
|
||||
@ -265,6 +167,10 @@ func (b *Local) Create() (backend.Blob, error) {
|
||||
|
||||
// Construct path for given Type and name.
|
||||
func filename(base string, t backend.Type, name string) string {
|
||||
if t == backend.Config {
|
||||
return filepath.Join(base, "config")
|
||||
}
|
||||
|
||||
return filepath.Join(dirname(base, t, name), name)
|
||||
}
|
||||
|
||||
@ -376,16 +282,6 @@ func (b *Local) List(t backend.Type, done <-chan struct{}) <-chan string {
|
||||
return ch
|
||||
}
|
||||
|
||||
// Version returns the version of this local backend.
|
||||
func (b *Local) Version() uint {
|
||||
return b.ver
|
||||
}
|
||||
|
||||
// ID returns the ID of this local backend.
|
||||
func (b *Local) ID() string {
|
||||
return b.id
|
||||
}
|
||||
|
||||
// Delete removes the repository and all files.
|
||||
func (b *Local) Delete() error { return os.RemoveAll(b.p) }
|
||||
|
||||
|
@ -10,8 +10,7 @@ var Paths = struct {
|
||||
Locks string
|
||||
Keys string
|
||||
Temp string
|
||||
Version string
|
||||
ID string
|
||||
Config string
|
||||
}{
|
||||
"data",
|
||||
"snapshots",
|
||||
@ -19,8 +18,7 @@ var Paths = struct {
|
||||
"locks",
|
||||
"keys",
|
||||
"tmp",
|
||||
"version",
|
||||
"id",
|
||||
"config",
|
||||
}
|
||||
|
||||
// Default modes for file-based backends
|
||||
|
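
After this hunk, the Paths variable carries a `config` entry and no longer has `version` or `id`. For orientation, this is how the variable reads once the change is applied; the Data, Snapshots and Index entries sit outside the visible context and are inferred from the directory names used elsewhere in this commit:

    // backend/paths.go as it reads after this hunk. The Data, Snapshots and
    // Index entries are assumed from directories referenced elsewhere in the
    // commit; the remaining fields and values are taken from the hunk above.
    var Paths = struct {
        Data      string
        Snapshots string
        Index     string
        Locks     string
        Keys      string
        Temp      string
        Config    string
    }{
        "data",
        "snapshots",
        "index",
        "locks",
        "keys",
        "tmp",
        "config",
    }
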
@ -2,17 +2,14 @@ package sftp
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/juju/errors"
|
||||
"github.com/pkg/sftp"
|
||||
@ -24,10 +21,8 @@ const (
|
||||
)
|
||||
|
||||
type SFTP struct {
|
||||
c *sftp.Client
|
||||
p string
|
||||
ver uint
|
||||
id string
|
||||
c *sftp.Client
|
||||
p string
|
||||
|
||||
cmd *exec.Cmd
|
||||
}
|
||||
@ -81,8 +76,7 @@ func Open(dir string, program string, args ...string) (*SFTP, error) {
|
||||
filepath.Join(dir, backend.Paths.Index),
|
||||
filepath.Join(dir, backend.Paths.Locks),
|
||||
filepath.Join(dir, backend.Paths.Keys),
|
||||
filepath.Join(dir, backend.Paths.Version),
|
||||
filepath.Join(dir, backend.Paths.ID),
|
||||
filepath.Join(dir, backend.Paths.Temp),
|
||||
}
|
||||
for _, d := range items {
|
||||
if _, err := sftp.c.Lstat(d); err != nil {
|
||||
@ -90,64 +84,18 @@ func Open(dir string, program string, args ...string) (*SFTP, error) {
|
||||
}
|
||||
}
|
||||
|
||||
// read version file
|
||||
f, err := sftp.c.Open(filepath.Join(dir, backend.Paths.Version))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to read version file: %v\n", err)
|
||||
}
|
||||
|
||||
var version uint
|
||||
n, err := fmt.Fscanf(f, "%d", &version)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if n != 1 {
|
||||
return nil, errors.New("could not read version from file")
|
||||
}
|
||||
|
||||
err = f.Close()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// check version
|
||||
if version != backend.Version {
|
||||
return nil, fmt.Errorf("wrong version %d", version)
|
||||
}
|
||||
|
||||
// read ID
|
||||
f, err = sftp.c.Open(filepath.Join(dir, backend.Paths.ID))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
buf, err := ioutil.ReadAll(f)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = f.Close()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
sftp.id = strings.TrimSpace(string(buf))
|
||||
sftp.p = dir
|
||||
|
||||
return sftp, nil
|
||||
}
|
||||
|
||||
// Create creates all the necessary files and directories for a new sftp
|
||||
// backend at dir.
|
||||
// backend at dir. Afterwards a new config blob should be created.
|
||||
func Create(dir string, program string, args ...string) (*SFTP, error) {
|
||||
sftp, err := startClient(program, args...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
versionFile := filepath.Join(dir, backend.Paths.Version)
|
||||
idFile := filepath.Join(dir, backend.Paths.ID)
|
||||
dirs := []string{
|
||||
dir,
|
||||
filepath.Join(dir, backend.Paths.Data),
|
||||
@ -158,15 +106,10 @@ func Create(dir string, program string, args ...string) (*SFTP, error) {
|
||||
filepath.Join(dir, backend.Paths.Temp),
|
||||
}
|
||||
|
||||
// test if files already exist
|
||||
_, err = sftp.c.Lstat(versionFile)
|
||||
// test if config file already exists
|
||||
_, err = sftp.c.Lstat(backend.Paths.Config)
|
||||
if err == nil {
|
||||
return nil, errors.New("version file already exists")
|
||||
}
|
||||
|
||||
_, err = sftp.c.Lstat(idFile)
|
||||
if err == nil {
|
||||
return nil, errors.New("id file already exists")
|
||||
return nil, errors.New("config file already exists")
|
||||
}
|
||||
|
||||
// test if directories already exist
|
||||
@ -184,44 +127,6 @@ func Create(dir string, program string, args ...string) (*SFTP, error) {
|
||||
}
|
||||
}
|
||||
|
||||
// create version file
|
||||
f, err := sftp.c.Create(versionFile)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
_, err = fmt.Fprintf(f, "%d\n", backend.Version)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = f.Close()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// create ID file
|
||||
id := make([]byte, sha256.Size)
|
||||
_, err = rand.Read(id)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
f, err = sftp.c.Create(idFile)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
_, err = fmt.Fprintln(f, hex.EncodeToString(id))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = f.Close()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = sftp.c.Close()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@ -387,6 +292,10 @@ func (r *SFTP) Create() (backend.Blob, error) {
|
||||
|
||||
// Construct path for given backend.Type and name.
|
||||
func (r *SFTP) filename(t backend.Type, name string) string {
|
||||
if t == backend.Config {
|
||||
return filepath.Join(r.p, "config")
|
||||
}
|
||||
|
||||
return filepath.Join(r.dirname(t, name), name)
|
||||
}
|
||||
|
||||
@ -540,16 +449,6 @@ func (r *SFTP) List(t backend.Type, done <-chan struct{}) <-chan string {
|
||||
|
||||
}
|
||||
|
||||
// Version returns the version of this local backend.
|
||||
func (r *SFTP) Version() uint {
|
||||
return r.ver
|
||||
}
|
||||
|
||||
// ID returns the ID of this local backend.
|
||||
func (r *SFTP) ID() string {
|
||||
return r.id
|
||||
}
|
||||
|
||||
// Close closes the sftp connection and terminates the underlying command.
|
||||
func (s *SFTP) Close() error {
|
||||
if s == nil {
|
||||
|
cache.go (4 lines changed)
@ -18,13 +18,13 @@ type Cache struct {
|
||||
base string
|
||||
}
|
||||
|
||||
func NewCache(be backend.Identifier) (*Cache, error) {
|
||||
func NewCache(s *server.Server) (*Cache, error) {
|
||||
cacheDir, err := getCacheDir()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
basedir := filepath.Join(cacheDir, be.ID())
|
||||
basedir := filepath.Join(cacheDir, s.Config.ID)
|
||||
debug.Log("Cache.New", "opened cache at %v", basedir)
|
||||
|
||||
return &Cache{base: basedir}, nil
|
||||
|
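
NewCache now takes the whole *server.Server and derives the cache directory from the repository ID in the config, replacing the backend.Identifier parameter. A minimal hedged sketch of the new call (openCache is a hypothetical wrapper):

    package example

    import (
        "github.com/restic/restic"
        "github.com/restic/restic/server"
    )

    // openCache shows the post-change constructor: the cache location is derived
    // from s.Config.ID, so the server must already have been opened (SearchKey)
    // or initialized (Init) when this is called.
    func openCache(s *server.Server) (*restic.Cache, error) {
        return restic.NewCache(s)
    }
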
@ -10,8 +10,6 @@ import (
|
||||
func TestCache(t *testing.T) {
|
||||
server := SetupBackend(t)
|
||||
defer TeardownBackend(t, server)
|
||||
key := SetupKey(t, server, "geheim")
|
||||
server.SetKey(key)
|
||||
|
||||
_, err := restic.NewCache(server)
|
||||
OK(t, err)
|
||||
|
@ -276,6 +276,22 @@ func TestPolIrreducible(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkPolIrreducible(b *testing.B) {
|
||||
// find first irreducible polynomial
|
||||
var pol chunker.Pol
|
||||
for _, test := range polIrredTests {
|
||||
if test.irred {
|
||||
pol = test.f
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
for i := 0; i < b.N; i++ {
|
||||
Assert(b, pol.Irreducible(),
|
||||
"Irreducibility test for Polynomial %v failed", pol)
|
||||
}
|
||||
}
|
||||
|
||||
var polGCDTests = []struct {
|
||||
f1 chunker.Pol
|
||||
f2 chunker.Pol
|
||||
|
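
The new benchmark exercises Pol.Irreducible, the same check the server now performs on the chunker_polynomial it reads from the config (see loadConfig in server/server.go below). A short hedged sketch combining the two calls this commit relies on; newChunkerPolynomial is a hypothetical helper:

    package example

    import (
        "fmt"

        "github.com/restic/restic/chunker"
    )

    // newChunkerPolynomial generates a random irreducible polynomial the way
    // server.createConfig does on `restic init`, and re-checks it the way
    // server.loadConfig does when an existing repository is opened.
    func newChunkerPolynomial() (chunker.Pol, error) {
        pol, err := chunker.RandomPolynomial()
        if err != nil {
            return 0, err
        }

        if !pol.Irreducible() {
            return 0, fmt.Errorf("generated polynomial %v is not irreducible", pol)
        }

        return pol, nil
    }
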
@ -27,11 +27,11 @@ func init() {
|
||||
}
|
||||
|
||||
func (cmd CmdCat) Usage() string {
|
||||
return "[pack|blob|tree|snapshot|key|masterkey|lock] ID"
|
||||
return "[pack|blob|tree|snapshot|key|masterkey|config|lock] ID"
|
||||
}
|
||||
|
||||
func (cmd CmdCat) Execute(args []string) error {
|
||||
if len(args) < 1 || (args[0] != "masterkey" && len(args) != 2) {
|
||||
if len(args) < 1 || (args[0] != "masterkey" && args[0] != "config" && len(args) != 2) {
|
||||
return fmt.Errorf("type or ID not specified, Usage: %s", cmd.Usage())
|
||||
}
|
||||
|
||||
@ -43,7 +43,7 @@ func (cmd CmdCat) Execute(args []string) error {
|
||||
tpe := args[0]
|
||||
|
||||
var id backend.ID
|
||||
if tpe != "masterkey" {
|
||||
if tpe != "masterkey" && tpe != "config" {
|
||||
id, err = backend.ParseID(args[1])
|
||||
if err != nil {
|
||||
id = nil
|
||||
@ -67,6 +67,14 @@ func (cmd CmdCat) Execute(args []string) error {
|
||||
|
||||
// handle all types that don't need an index
|
||||
switch tpe {
|
||||
case "config":
|
||||
buf, err := json.MarshalIndent(s.Config, "", " ")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fmt.Println(string(buf))
|
||||
return nil
|
||||
case "index":
|
||||
buf, err := s.Load(backend.Index, id)
|
||||
if err != nil {
|
||||
@ -78,7 +86,7 @@ func (cmd CmdCat) Execute(args []string) error {
|
||||
|
||||
case "snapshot":
|
||||
sn := &restic.Snapshot{}
|
||||
err = s.LoadJSONEncrypted(backend.Snapshot, id, sn)
|
||||
err = s.LoadJSONUnpacked(backend.Snapshot, id, sn)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -113,7 +121,7 @@ func (cmd CmdCat) Execute(args []string) error {
|
||||
fmt.Println(string(buf))
|
||||
return nil
|
||||
case "masterkey":
|
||||
buf, err := json.MarshalIndent(s.Key().Master(), "", " ")
|
||||
buf, err := json.MarshalIndent(s.Key(), "", " ")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -42,7 +42,7 @@ func listKeys(s *server.Server) error {
|
||||
}
|
||||
|
||||
var current string
|
||||
if name == s.Key().Name() {
|
||||
if name == s.KeyName() {
|
||||
current = "*"
|
||||
} else {
|
||||
current = " "
|
||||
@ -75,7 +75,7 @@ func addKey(s *server.Server) error {
|
||||
}
|
||||
|
||||
func deleteKey(s *server.Server, name string) error {
|
||||
if name == s.Key().Name() {
|
||||
if name == s.KeyName() {
|
||||
return errors.New("refusing to remove key currently used to access repository")
|
||||
}
|
||||
|
||||
@ -103,7 +103,7 @@ func changePassword(s *server.Server) error {
|
||||
}
|
||||
|
||||
// remove old key
|
||||
err = s.Remove(backend.Key, s.Key().Name())
|
||||
err = s.Remove(backend.Key, s.KeyName())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -74,14 +74,13 @@ func (cmd CmdInit) Execute(args []string) error {
|
||||
}
|
||||
|
||||
s := server.NewServer(be)
|
||||
|
||||
_, err = server.CreateKey(s, pw)
|
||||
err = s.Init(pw)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "creating key in backend at %s failed: %v\n", opts.Repo, err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
fmt.Printf("created restic backend %v at %s\n", s.ID()[:10], opts.Repo)
|
||||
fmt.Printf("created restic backend %v at %s\n", s.Config.ID[:10], opts.Repo)
|
||||
|
||||
fmt.Println("Please note that knowledge of your password is required to access the repository.")
|
||||
fmt.Println("Losing your password means that your data is irrecoverably lost.")
|
||||
|
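
Init replaces the old CreateKey call in `restic init`: it creates the master key and then writes the config blob. A hedged sketch of initializing a repository programmatically with the calls visible in this diff (initRepo is a hypothetical helper):

    package example

    import (
        "github.com/restic/restic/backend/local"
        "github.com/restic/restic/server"
    )

    // initRepo creates the directory structure for a new local repository and
    // lets the server create the master key plus the config blob, which is what
    // `restic init` does after this commit.
    func initRepo(dir, password string) (*server.Server, error) {
        be, err := local.Create(dir)
        if err != nil {
            return nil, err
        }

        s := server.NewServer(be)
        if err := s.Init(password); err != nil {
            return nil, err
        }

        // The repository ID now lives in s.Config.ID instead of an `id` file.
        return s, nil
    }
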
@ -8,7 +8,6 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"github.com/restic/restic/chunker"
|
||||
"golang.org/x/crypto/poly1305"
|
||||
"golang.org/x/crypto/scrypt"
|
||||
)
|
||||
@ -35,12 +34,10 @@ var (
|
||||
|
||||
// Key holds encryption and message authentication keys for a repository. It is stored
|
||||
// encrypted and authenticated as a JSON data structure in the Data field of the Key
|
||||
// structure. For the master key, the secret random polynomial used for content
|
||||
// defined chunking is included.
|
||||
// structure.
|
||||
type Key struct {
|
||||
MAC MACKey `json:"mac"`
|
||||
Encrypt EncryptionKey `json:"encrypt"`
|
||||
ChunkerPolynomial chunker.Pol `json:"chunker_polynomial,omitempty"`
|
||||
MAC MACKey `json:"mac"`
|
||||
Encrypt EncryptionKey `json:"encrypt"`
|
||||
}
|
||||
|
||||
type EncryptionKey [32]byte
|
||||
@ -340,9 +337,5 @@ func KDF(N, R, P int, salt []byte, password string) (*Key, error) {
|
||||
|
||||
// Valid tests if the key is valid.
|
||||
func (k *Key) Valid() bool {
|
||||
if k.ChunkerPolynomial != 0 && !k.ChunkerPolynomial.Irreducible() {
|
||||
return false
|
||||
}
|
||||
|
||||
return k.Encrypt.Valid() && k.MAC.Valid()
|
||||
}
|
||||
|
doc/Design.md (119 lines changed)
@ -21,49 +21,64 @@ been backed up at some point in time. The state here means the content and meta
|
||||
data like the name and modification time for the file or the directory and its
|
||||
contents.
|
||||
|
||||
*Storage ID*: A storage ID is the SHA-256 hash of the content stored in the
|
||||
repository. This ID is needed in order to load the file from the repository.
|
||||
|
||||
Repository Format
|
||||
=================
|
||||
|
||||
All data is stored in a restic repository. A repository is able to store data
|
||||
of several different types, which can later be requested based on an ID. The ID
|
||||
is the hash (SHA-256) of the content of a file. All files in a repository are
|
||||
only written once and never modified afterwards. This allows accessing and even
|
||||
writing to the repository with multiple clients in parallel. Only the delete
|
||||
operation changes data in the repository.
|
||||
of several different types, which can later be requested based on an ID. This
|
||||
so-called "storage ID" is the SHA-256 hash of the content of a file. All files
|
||||
in a repository are only written once and never modified afterwards. This
|
||||
allows accessing and even writing to the repository with multiple clients in
|
||||
parallel. Only the delete operation removes data from the repository.
|
||||
|
||||
At the time of writing, the only implemented repository type is based on
|
||||
directories and files. Such repositories can be accessed locally on the same
|
||||
system or via the integrated SFTP client. The directory layout is the same for
|
||||
both access methods. This repository type is described in the following.
|
||||
|
||||
Repositories consists of several directories and a file called `version`. This
|
||||
file contains the version number of the repository. At the moment, this file
|
||||
is expected to hold the string `1`, with an optional newline character.
|
||||
Additionally there is a file named `id` which contains 32 random bytes, encoded
|
||||
in hexadecimal. This uniquely identifies the repository, regardless if it is
|
||||
accessed via SFTP or locally.
|
||||
Repositories consist of several directories and a file called `config`. For
|
||||
all other files stored in the repository, the name for the file is the lower
|
||||
case hexadecimal representation of the storage ID, which is the SHA-256 hash of
|
||||
the file's contents. This allows easily checking all files for accidental
|
||||
modifications like disk read errors by simply running the program `sha256sum`
|
||||
and comparing its output to the file name. If the prefix of a filename is
|
||||
unique amongst all the other files in the same directory, the prefix may be
|
||||
used instead of the complete filename.
|
||||
|
||||
For all other files stored in the repository, the name for the file is the
|
||||
lower case hexadecimal representation of the SHA-256 hash of the file's
|
||||
contents. This allows easily checking all files for accidental modifications
|
||||
like disk read errors by simply running the program `sha256sum` and comparing
|
||||
its output to the file name. If the prefix of a filename is unique amongst all
|
||||
the other files in the same directory, the prefix may be used instead of the
|
||||
complete filename.
|
||||
|
||||
Apart from the files `version`, `id` and the files stored below the `keys`
|
||||
directory, all files are encrypted with AES-256 in counter mode (CTR). The
|
||||
integrity of the encrypted data is secured by a Poly1305-AES message
|
||||
authentication code (sometimes also referred to as a "signature").
|
||||
Apart from the files stored below the `keys` directory, all files are encrypted
|
||||
with AES-256 in counter mode (CTR). The integrity of the encrypted data is
|
||||
secured by a Poly1305-AES message authentication code (sometimes also referred
|
||||
to as a "signature").
|
||||
|
||||
In the first 16 bytes of each encrypted file the initialisation vector (IV) is
|
||||
stored. It is followed by the encrypted data and completed by the 16 byte
|
||||
MAC. The format is: `IV || CIPHERTEXT || MAC`. The complete encryption
|
||||
overhead is 32 byte. For each file, a new random IV is selected.
|
||||
overhead is 32 bytes. For each file, a new random IV is selected.
|
||||
|
||||
The basic layout of a sample restic repository is shown below:
|
||||
The file `config` is encrypted this way and contains a JSON document like the
following:

    {
      "version": 1,
      "id": "5956a3f67a6230d4a92cefb29529f10196c7d92582ec305fd71ff6d331d6271b",
      "chunker_polynomial": "25b468838dcb75"
    }

After decryption, restic first checks that the version field contains a version
|
||||
number that it understands, otherwise it aborts. At the moment, the version is
|
||||
expected to be 1. The field `id` holds a unique ID which consists of 32
|
||||
random bytes, encoded in hexadecimal. This uniquely identifies the repository,
|
||||
regardless if it is accessed via SFTP or locally. The field
|
||||
`chunker_polynomial` contains a parameter that is used for splitting large
|
||||
files into smaller chunks (see below).
|
||||
|
||||
The basic layout of a sample restic repository is shown here:
|
||||
|
||||
/tmp/restic-repo
|
||||
├── config
|
||||
├── data
|
||||
│ ├── 21
|
||||
│ │ └── 2159dd48f8a24f33c307b750592773f8b71ff8d11452132a7b2e2a6a01611be1
|
||||
@ -74,7 +89,6 @@ The basic layout of a sample restic repository is shown below:
|
||||
│ ├── 73
|
||||
│ │ └── 73d04e6125cf3c28a299cc2f3cca3b78ceac396e4fcf9575e34536b26782413c
|
||||
│ [...]
|
||||
├── id
|
||||
├── index
|
||||
│ ├── c38f5fb68307c6a3e3aa945d556e325dc38f5fb68307c6a3e3aa945d556e325d
|
||||
│ └── ca171b1b7394d90d330b265d90f506f9984043b342525f019788f97e745c71fd
|
||||
@ -83,8 +97,7 @@ The basic layout of a sample restic repository is shown below:
|
||||
├── locks
|
||||
├── snapshots
|
||||
│ └── 22a5af1bdc6e616f8a29579458c49627e01b32210d09adb288d1ecda7c5711ec
|
||||
├── tmp
|
||||
└── version
|
||||
└── tmp
|
||||
|
||||
A repository can be initialized with the `restic init` command, e.g.:
|
||||
|
||||
@ -93,21 +106,21 @@ A repository can be initialized with the `restic init` command, e.g.:
|
||||
Pack Format
|
||||
-----------
|
||||
|
||||
All files in the repository except Key and Data files just contain raw data,
|
||||
stored as `IV || Ciphertext || MAC`. Data files may contain one or more Blobs
|
||||
of data. The format is described in the following.
|
||||
All files in the repository except Key and Pack files just contain raw data,
|
||||
stored as `IV || Ciphertext || MAC`. Pack files may contain one or more Blobs
|
||||
of data.
|
||||
|
||||
The Pack's structure is as follows:
|
||||
A Pack's structure is as follows:
|
||||
|
||||
EncryptedBlob1 || ... || EncryptedBlobN || EncryptedHeader || Header_Length
|
||||
|
||||
At the end of the Pack is a header, which describes the content. The header is
|
||||
encrypted and authenticated. `Header_Length` is the length of the encrypted header
|
||||
encoded as a four byte integer in little-endian encoding. Placing the header at
|
||||
the end of a file allows writing the blobs in a continuous stream as soon as
|
||||
they are read during the backup phase. This reduces code complexity and avoids
|
||||
having to re-write a file once the pack is complete and the content and length
|
||||
of the header is known.
|
||||
At the end of the Pack file is a header, which describes the content. The
|
||||
header is encrypted and authenticated. `Header_Length` is the length of the
|
||||
encrypted header encoded as a four byte integer in little-endian encoding.
|
||||
Placing the header at the end of a file allows writing the blobs in a
|
||||
continuous stream as soon as they are read during the backup phase. This
|
||||
reduces code complexity and avoids having to re-write a file once the pack is
|
||||
complete and the content and length of the header is known.
|
||||
|
||||
All the blobs (`EncryptedBlob1`, `EncryptedBlobN` etc.) are authenticated and
|
||||
encrypted independently. This enables repository reorganisation without having
|
||||
@ -178,7 +191,7 @@ listed afterwards.
|
||||
|
||||
There may be an arbitrary number of index files, containing information on
|
||||
non-disjoint sets of Packs. The number of packs described in a single file is
|
||||
chosen so that the file size is kep below 8 MiB.
|
||||
chosen so that the file size is kept below 8 MiB.
|
||||
|
||||
Keys, Encryption and MAC
|
||||
------------------------
|
||||
@ -230,9 +243,8 @@ tampered with, the computed MAC will not match the last 16 bytes of the data,
|
||||
and restic exits with an error. Otherwise, the data is decrypted with the
|
||||
encryption key derived from `scrypt`. This yields a JSON document which
|
||||
contains the master encryption and message authentication keys for this
|
||||
repository (encoded in Base64) and the polynomial that is used for CDC. The
|
||||
command `restic cat masterkey` can be used as follows to decrypt and
|
||||
pretty-print the master key:
|
||||
repository (encoded in Base64). The command `restic cat masterkey` can be used
|
||||
as follows to decrypt and pretty-print the master key:
|
||||
|
||||
$ restic -r /tmp/restic-repo cat masterkey
|
||||
{
|
||||
@ -241,7 +253,6 @@ pretty-print the master key:
|
||||
"r": "E9eEDnSJZgqwTOkDtOp+Dw=="
|
||||
},
|
||||
"encrypt": "UQCqa0lKZ94PygPxMRqkePTZnHRYh1k1pX2k2lM2v3Q=",
|
||||
"chunker_polynomial": "2f0797d9c2363f"
|
||||
}
|
||||
|
||||
All data in the repository is encrypted and authenticated with these master keys.
|
||||
@ -257,9 +268,8 @@ Snapshots
|
||||
A snapshots represents a directory with all files and sub-directories at a
|
||||
given point in time. For each backup that is made, a new snapshot is created. A
|
||||
snapshot is a JSON document that is stored in an encrypted file below the
|
||||
directory `snapshots` in the repository. The filename is the SHA-256 hash of
|
||||
the (encrypted) contents. This string is unique and used within restic to
|
||||
uniquely identify a snapshot.
|
||||
directory `snapshots` in the repository. The filename is the storage ID. This
|
||||
string is unique and used within restic to uniquely identify a snapshot.
|
||||
|
||||
The command `restic cat snapshot` can be used as follows to decrypt and
|
||||
pretty-print the contents of a snapshot file:
|
||||
@ -284,9 +294,9 @@ hash. Before saving, each file is split into variable sized Blobs of data. The
|
||||
SHA-256 hashes of all Blobs are saved in an ordered list which then represents
|
||||
the content of the file.
|
||||
|
||||
In order to relate these plain text hashes to the actual encrypted storage
|
||||
hashes (which vary due to random IVs), an index is used. If the index is not
|
||||
available, the header of all data Blobs can be read.
|
||||
In order to relate these plaintext hashes to the actual location within a Pack
|
||||
file , an index is used. If the index is not available, the header of all data
|
||||
Blobs can be read.
|
||||
|
||||
Trees and Data
|
||||
--------------
|
||||
@ -355,9 +365,9 @@ This tree contains a file entry. This time, the `subtree` field is not present
|
||||
and the `content` field contains a list with one plain text SHA-256 hash.
|
||||
|
||||
The command `restic cat data` can be used to extract and decrypt data given a
|
||||
storage hash, e.g. for the data mentioned above:
|
||||
plaintext ID, e.g. for the data mentioned above:
|
||||
|
||||
$ restic -r /tmp/restic-repo cat blob 00634c46e5f7c055c341acd1201cf8289cabe769f991d6e350f8cd8ce2a52ac3 | sha256sum
|
||||
$ restic -r /tmp/restic-repo cat blob 50f77b3b4291e8411a027b9f9b9e64658181cc676ce6ba9958b95f268cb1109d | sha256sum
|
||||
enter password for repository:
|
||||
50f77b3b4291e8411a027b9f9b9e64658181cc676ce6ba9958b95f268cb1109d -
|
||||
|
||||
@ -372,8 +382,9 @@ For creating a backup, restic scans the source directory for all files,
|
||||
sub-directories and other entries. The data from each file is split into
|
||||
variable length Blobs cut at offsets defined by a sliding window of 64 byte.
|
||||
The implementation uses Rabin Fingerprints for implementing this Content
|
||||
Defined Chunking (CDC). An irreducible polynomial is selected at random when a
|
||||
repository is initialized.
|
||||
Defined Chunking (CDC). An irreducible polynomial is selected at random and
|
||||
saved in the file `config` when a repository is initialized, so that watermark
|
||||
attacks are much harder.
|
||||
|
||||
Files smaller than 512 KiB are not split, Blobs are of 512 KiB to 8 MiB in
|
||||
size. The implementation aims for 1 MiB Blob size on average.
|
||||
|
@ -6,15 +6,12 @@ import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"os/user"
|
||||
"time"
|
||||
|
||||
"github.com/restic/restic/backend"
|
||||
"github.com/restic/restic/chunker"
|
||||
"github.com/restic/restic/crypto"
|
||||
"github.com/restic/restic/debug"
|
||||
)
|
||||
|
||||
var (
|
||||
@ -50,9 +47,9 @@ type Key struct {
|
||||
name string
|
||||
}
|
||||
|
||||
// CreateKey initializes a master key in the given backend and encrypts it with
|
||||
// the password.
|
||||
func CreateKey(s *Server, password string) (*Key, error) {
|
||||
// createMasterKey creates a new master key in the given backend and encrypts
|
||||
// it with the password.
|
||||
func createMasterKey(s *Server, password string) (*Key, error) {
|
||||
return AddKey(s, password, nil)
|
||||
}
|
||||
|
||||
@ -92,13 +89,6 @@ func OpenKey(s *Server, name string, password string) (*Key, error) {
|
||||
return nil, errors.New("Invalid key for repository")
|
||||
}
|
||||
|
||||
// test if the chunker polynomial is present in the master key
|
||||
if k.master.ChunkerPolynomial == 0 {
|
||||
return nil, errors.New("Polynomial for content defined chunking is zero")
|
||||
}
|
||||
|
||||
debug.Log("OpenKey", "Master keys loaded, polynomial %v", k.master.ChunkerPolynomial)
|
||||
|
||||
return k, nil
|
||||
}
|
||||
|
||||
@ -141,7 +131,7 @@ func LoadKey(s *Server, name string) (*Key, error) {
|
||||
}
|
||||
|
||||
// AddKey adds a new key to an already existing repository.
|
||||
func AddKey(s *Server, password string, template *Key) (*Key, error) {
|
||||
func AddKey(s *Server, password string, template *crypto.Key) (*Key, error) {
|
||||
// fill meta data about key
|
||||
newkey := &Key{
|
||||
Created: time.Now(),
|
||||
@ -177,17 +167,9 @@ func AddKey(s *Server, password string, template *Key) (*Key, error) {
|
||||
if template == nil {
|
||||
// generate new random master keys
|
||||
newkey.master = crypto.NewRandomKey()
|
||||
// generate random polynomial for cdc
|
||||
p, err := chunker.RandomPolynomial()
|
||||
if err != nil {
|
||||
debug.Log("AddKey", "error generating new polynomial for cdc: %v", err)
|
||||
return nil, err
|
||||
}
|
||||
debug.Log("AddKey", "generated new polynomial for cdc: %v", p)
|
||||
newkey.master.ChunkerPolynomial = p
|
||||
} else {
|
||||
// copy master keys from old key
|
||||
newkey.master = template.master
|
||||
newkey.master = template
|
||||
}
|
||||
|
||||
// encrypt master keys (as json) with user key
|
||||
@ -229,46 +211,6 @@ func AddKey(s *Server, password string, template *Key) (*Key, error) {
|
||||
return newkey, nil
|
||||
}
|
||||
|
||||
// Encrypt encrypts and authenticates data with the master key. Stored in
|
||||
// ciphertext is IV || Ciphertext || MAC. Returns the ciphertext, which is
|
||||
// extended if necessary.
|
||||
func (k *Key) Encrypt(ciphertext, plaintext []byte) ([]byte, error) {
|
||||
return crypto.Encrypt(k.master, ciphertext, plaintext)
|
||||
}
|
||||
|
||||
// EncryptTo encrypts and authenticates data with the master key. The returned
|
||||
// io.Writer writes IV || Ciphertext || MAC.
|
||||
func (k *Key) EncryptTo(wr io.Writer) io.WriteCloser {
|
||||
return crypto.EncryptTo(k.master, wr)
|
||||
}
|
||||
|
||||
// Decrypt verifes and decrypts the ciphertext with the master key. Ciphertext
|
||||
// must be in the form IV || Ciphertext || MAC.
|
||||
func (k *Key) Decrypt(plaintext, ciphertext []byte) ([]byte, error) {
|
||||
return crypto.Decrypt(k.master, plaintext, ciphertext)
|
||||
}
|
||||
|
||||
// DecryptFrom verifies and decrypts the ciphertext read from rd and makes it
|
||||
// available on the returned Reader. Ciphertext must be in the form IV ||
|
||||
// Ciphertext || MAC. In order to correctly verify the ciphertext, rd is
|
||||
// drained, locally buffered and made available on the returned Reader
|
||||
// afterwards. If a MAC verification failure is observed, it is returned
|
||||
// immediately.
|
||||
func (k *Key) DecryptFrom(rd io.Reader) (io.ReadCloser, error) {
|
||||
return crypto.DecryptFrom(k.master, rd)
|
||||
}
|
||||
|
||||
// Master returns the master keys for this repository. Only included for
|
||||
// debug purposes.
|
||||
func (k *Key) Master() *crypto.Key {
|
||||
return k.master
|
||||
}
|
||||
|
||||
// User returns the user keys for this key. Only included for debug purposes.
|
||||
func (k *Key) User() *crypto.Key {
|
||||
return k.user
|
||||
}
|
||||
|
||||
func (k *Key) String() string {
|
||||
if k == nil {
|
||||
return "<Key nil>"
|
||||
|
@ -1,13 +0,0 @@
|
||||
package server_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
. "github.com/restic/restic/test"
|
||||
)
|
||||
|
||||
func TestRepo(t *testing.T) {
|
||||
s := SetupBackend(t)
|
||||
defer TeardownBackend(t, s)
|
||||
_ = SetupKey(t, s, TestPassword)
|
||||
}
|
server/server.go (130 lines changed)
@ -2,7 +2,9 @@ package server
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/rand"
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
@ -12,14 +14,25 @@ import (
|
||||
|
||||
"github.com/restic/restic/backend"
|
||||
"github.com/restic/restic/chunker"
|
||||
"github.com/restic/restic/crypto"
|
||||
"github.com/restic/restic/debug"
|
||||
"github.com/restic/restic/pack"
|
||||
)
|
||||
|
||||
// Config contains the configuration for a repository.
type Config struct {
	Version           uint        `json:"version"`
	ID                string      `json:"id"`
	ChunkerPolynomial chunker.Pol `json:"chunker_polynomial"`
}
|
||||
|
||||
// Server is used to access a repository in a backend.
|
||||
type Server struct {
|
||||
be backend.Backend
|
||||
key *Key
|
||||
idx *Index
|
||||
be backend.Backend
|
||||
Config Config
|
||||
key *crypto.Key
|
||||
keyName string
|
||||
idx *Index
|
||||
|
||||
pm sync.Mutex
|
||||
packs []*pack.Packer
|
||||
@ -32,15 +45,6 @@ func NewServer(be backend.Backend) *Server {
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Server) SetKey(k *Key) {
|
||||
s.key = k
|
||||
}
|
||||
|
||||
// ChunkerPolynomial returns the secret polynomial used for content defined chunking.
|
||||
func (s *Server) ChunkerPolynomial() chunker.Pol {
|
||||
return chunker.Pol(s.key.Master().ChunkerPolynomial)
|
||||
}
|
||||
|
||||
// Find loads the list of all blobs of type t and searches for names which start
|
||||
// with prefix. If none is found, nil and ErrNoIDPrefixFound is returned. If
|
||||
// more than one is found, nil and ErrMultipleIDMatches is returned.
|
||||
@ -145,9 +149,9 @@ func (s *Server) LoadBlob(t pack.BlobType, id backend.ID) ([]byte, error) {
|
||||
return plain, nil
|
||||
}
|
||||
|
||||
// LoadJSONEncrypted decrypts the data and afterwards calls json.Unmarshal on
|
||||
// LoadJSONUnpacked decrypts the data and afterwards calls json.Unmarshal on
|
||||
// the item.
|
||||
func (s *Server) LoadJSONEncrypted(t backend.Type, id backend.ID, item interface{}) error {
|
||||
func (s *Server) LoadJSONUnpacked(t backend.Type, id backend.ID, item interface{}) error {
|
||||
// load blob from backend
|
||||
rd, err := s.be.Get(t, id.String())
|
||||
if err != nil {
|
||||
@ -156,7 +160,7 @@ func (s *Server) LoadJSONEncrypted(t backend.Type, id backend.ID, item interface
|
||||
defer rd.Close()
|
||||
|
||||
// decrypt
|
||||
decryptRd, err := s.key.DecryptFrom(rd)
|
||||
decryptRd, err := crypto.DecryptFrom(s.key, rd)
|
||||
defer decryptRd.Close()
|
||||
if err != nil {
|
||||
return err
|
||||
@ -189,7 +193,7 @@ func (s *Server) LoadJSONPack(t pack.BlobType, id backend.ID, item interface{})
|
||||
defer rd.Close()
|
||||
|
||||
// decrypt
|
||||
decryptRd, err := s.key.DecryptFrom(rd)
|
||||
decryptRd, err := crypto.DecryptFrom(s.key, rd)
|
||||
defer decryptRd.Close()
|
||||
if err != nil {
|
||||
return err
|
||||
@ -234,7 +238,7 @@ func (s *Server) findPacker(size uint) (*pack.Packer, error) {
|
||||
return nil, err
|
||||
}
|
||||
debug.Log("Server.findPacker", "create new pack %p", blob)
|
||||
return pack.NewPacker(s.key.Master(), blob), nil
|
||||
return pack.NewPacker(s.key, blob), nil
|
||||
}
|
||||
|
||||
// insertPacker appends p to s.packs.
|
||||
@ -369,18 +373,18 @@ func (s *Server) SaveJSON(t pack.BlobType, item interface{}) (backend.ID, error)
|
||||
// SaveJSONUnpacked serialises item as JSON and encrypts and saves it in the
|
||||
// backend as type t, without a pack. It returns the storage hash.
|
||||
func (s *Server) SaveJSONUnpacked(t backend.Type, item interface{}) (backend.ID, error) {
|
||||
// create blob
|
||||
// create file
|
||||
blob, err := s.be.Create()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
debug.Log("Server.SaveJSONUnpacked", "create new pack %p", blob)
|
||||
debug.Log("Server.SaveJSONUnpacked", "create new file %p", blob)
|
||||
|
||||
// hash
|
||||
hw := backend.NewHashingWriter(blob, sha256.New())
|
||||
|
||||
// encrypt blob
|
||||
ewr := s.key.EncryptTo(hw)
|
||||
ewr := crypto.EncryptTo(s.key, hw)
|
||||
|
||||
enc := json.NewEncoder(ewr)
|
||||
err = enc.Encode(item)
|
||||
@ -452,7 +456,7 @@ func (s *Server) SaveIndex() (backend.ID, error) {
|
||||
hw := backend.NewHashingWriter(blob, sha256.New())
|
||||
|
||||
// encrypt blob
|
||||
ewr := s.key.EncryptTo(hw)
|
||||
ewr := crypto.EncryptTo(s.key, hw)
|
||||
|
||||
err = s.idx.Encode(ewr)
|
||||
if err != nil {
|
||||
@ -505,7 +509,7 @@ func (s *Server) loadIndex(id string) error {
|
||||
}
|
||||
|
||||
// decrypt
|
||||
decryptRd, err := s.key.DecryptFrom(rd)
|
||||
decryptRd, err := crypto.DecryptFrom(s.key, rd)
|
||||
defer decryptRd.Close()
|
||||
if err != nil {
|
||||
return err
|
||||
@ -525,15 +529,79 @@ func (s *Server) loadIndex(id string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
const repositoryIDSize = sha256.Size
|
||||
const RepositoryVersion = 1
|
||||
|
||||
func createConfig(s *Server) (err error) {
|
||||
s.Config.ChunkerPolynomial, err = chunker.RandomPolynomial()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
newID := make([]byte, repositoryIDSize)
|
||||
_, err = io.ReadFull(rand.Reader, newID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
s.Config.ID = hex.EncodeToString(newID)
|
||||
s.Config.Version = RepositoryVersion
|
||||
|
||||
debug.Log("Server.createConfig", "New config: %#v", s.Config)
|
||||
|
||||
_, err = s.SaveJSONUnpacked(backend.Config, s.Config)
|
||||
return err
|
||||
}
|
||||
|
||||
func (s *Server) loadConfig(cfg *Config) error {
|
||||
err := s.LoadJSONUnpacked(backend.Config, nil, cfg)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if cfg.Version != RepositoryVersion {
|
||||
return errors.New("unsupported repository version")
|
||||
}
|
||||
|
||||
if !cfg.ChunkerPolynomial.Irreducible() {
|
||||
return errors.New("invalid chunker polynomial")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// SearchKey finds a key with the supplied password, afterwards the config is
|
||||
// read and parsed.
|
||||
func (s *Server) SearchKey(password string) error {
|
||||
key, err := SearchKey(s, password)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
s.key = key
|
||||
s.key = key.master
|
||||
s.keyName = key.Name()
|
||||
return s.loadConfig(&s.Config)
|
||||
}
|
||||
|
||||
return nil
|
||||
// Init creates a new master key with the supplied password and initializes the
|
||||
// repository config.
|
||||
func (s *Server) Init(password string) error {
|
||||
has, err := s.Test(backend.Config, "")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if has {
|
||||
return errors.New("repository master key and config already initialized")
|
||||
}
|
||||
|
||||
key, err := createMasterKey(s, password)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
s.key = key.master
|
||||
s.keyName = key.Name()
|
||||
return createConfig(s)
|
||||
}
|
||||
|
||||
func (s *Server) Decrypt(ciphertext []byte) ([]byte, error) {
|
||||
@ -541,7 +609,7 @@ func (s *Server) Decrypt(ciphertext []byte) ([]byte, error) {
|
||||
return nil, errors.New("key for server not set")
|
||||
}
|
||||
|
||||
return s.key.Decrypt(nil, ciphertext)
|
||||
return crypto.Decrypt(s.key, nil, ciphertext)
|
||||
}
|
||||
|
||||
func (s *Server) Encrypt(ciphertext, plaintext []byte) ([]byte, error) {
|
||||
@ -549,13 +617,17 @@ func (s *Server) Encrypt(ciphertext, plaintext []byte) ([]byte, error) {
|
||||
return nil, errors.New("key for server not set")
|
||||
}
|
||||
|
||||
return s.key.Encrypt(ciphertext, plaintext)
|
||||
return crypto.Encrypt(s.key, ciphertext, plaintext)
|
||||
}
|
||||
|
||||
func (s *Server) Key() *Key {
|
||||
func (s *Server) Key() *crypto.Key {
|
||||
return s.key
|
||||
}
|
||||
|
||||
func (s *Server) KeyName() string {
|
||||
return s.keyName
|
||||
}
|
||||
|
||||
// Count returns the number of blobs of a given type in the backend.
|
||||
func (s *Server) Count(t backend.Type) (n uint) {
|
||||
for _ = range s.be.List(t, nil) {
|
||||
@ -595,10 +667,6 @@ func (s *Server) Delete() error {
|
||||
return errors.New("Delete() called for backend that does not implement this method")
|
||||
}
|
||||
|
||||
func (s *Server) ID() string {
|
||||
return s.be.ID()
|
||||
}
|
||||
|
||||
func (s *Server) Location() string {
|
||||
return s.be.Location()
|
||||
}
|
||||
|
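
Taken together, the server changes make SearchKey the single entry point for opening a repository: it finds a key for the given password, sets key and keyName, then loads and validates the config. A hedged sketch of the resulting open path (openRepo is a hypothetical helper; the printed fields are those added to Config in this commit):

    package example

    import (
        "fmt"

        "github.com/restic/restic/backend/local"
        "github.com/restic/restic/server"
    )

    // openRepo opens an existing repository. Once SearchKey returns, the config
    // has been loaded and validated: loadConfig rejects a version other than
    // RepositoryVersion and a chunker polynomial that is not irreducible.
    func openRepo(dir, password string) (*server.Server, error) {
        be, err := local.Open(dir)
        if err != nil {
            return nil, err
        }

        s := server.NewServer(be)
        if err := s.SearchKey(password); err != nil {
            return nil, err
        }

        fmt.Printf("opened repository %v (version %d)\n", s.Config.ID[:10], s.Config.Version)
        return s, nil
    }
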
@ -30,8 +30,6 @@ var serverTests = []testJSONStruct{
|
||||
func TestSaveJSON(t *testing.T) {
|
||||
server := SetupBackend(t)
|
||||
defer TeardownBackend(t, server)
|
||||
key := SetupKey(t, server, "geheim")
|
||||
server.SetKey(key)
|
||||
|
||||
for _, obj := range serverTests {
|
||||
data, err := json.Marshal(obj)
|
||||
@ -51,8 +49,6 @@ func TestSaveJSON(t *testing.T) {
|
||||
func BenchmarkSaveJSON(t *testing.B) {
|
||||
server := SetupBackend(t)
|
||||
defer TeardownBackend(t, server)
|
||||
key := SetupKey(t, server, "geheim")
|
||||
server.SetKey(key)
|
||||
|
||||
obj := serverTests[0]
|
||||
|
||||
@ -78,8 +74,6 @@ var testSizes = []int{5, 23, 2<<18 + 23, 1 << 20}
|
||||
func TestSave(t *testing.T) {
|
||||
server := SetupBackend(t)
|
||||
defer TeardownBackend(t, server)
|
||||
key := SetupKey(t, server, "geheim")
|
||||
server.SetKey(key)
|
||||
|
||||
for _, size := range testSizes {
|
||||
data := make([]byte, size)
|
||||
@ -112,8 +106,6 @@ func TestSave(t *testing.T) {
|
||||
func TestSaveFrom(t *testing.T) {
|
||||
server := SetupBackend(t)
|
||||
defer TeardownBackend(t, server)
|
||||
key := SetupKey(t, server, "geheim")
|
||||
server.SetKey(key)
|
||||
|
||||
for _, size := range testSizes {
|
||||
data := make([]byte, size)
|
||||
@ -144,8 +136,6 @@ func TestSaveFrom(t *testing.T) {
|
||||
func BenchmarkSaveFrom(t *testing.B) {
|
||||
server := SetupBackend(t)
|
||||
defer TeardownBackend(t, server)
|
||||
key := SetupKey(t, server, "geheim")
|
||||
server.SetKey(key)
|
||||
|
||||
size := 4 << 20 // 4MiB
|
||||
|
||||
@ -172,8 +162,6 @@ func TestLoadJSONPack(t *testing.T) {
|
||||
|
||||
server := SetupBackend(t)
|
||||
defer TeardownBackend(t, server)
|
||||
key := SetupKey(t, server, "geheim")
|
||||
server.SetKey(key)
|
||||
|
||||
// archive a few files
|
||||
sn := SnapshotDir(t, server, *benchTestDir, nil)
|
||||
@ -184,15 +172,13 @@ func TestLoadJSONPack(t *testing.T) {
|
||||
OK(t, err)
|
||||
}
|
||||
|
||||
func TestLoadJSONEncrypted(t *testing.T) {
|
||||
func TestLoadJSONUnpacked(t *testing.T) {
|
||||
if *benchTestDir == "" {
|
||||
t.Skip("benchdir not set, skipping TestServerStats")
|
||||
}
|
||||
|
||||
server := SetupBackend(t)
|
||||
defer TeardownBackend(t, server)
|
||||
key := SetupKey(t, server, "geheim")
|
||||
server.SetKey(key)
|
||||
|
||||
// archive a snapshot
|
||||
sn := restic.Snapshot{}
|
||||
@ -205,7 +191,7 @@ func TestLoadJSONEncrypted(t *testing.T) {
|
||||
var sn2 restic.Snapshot
|
||||
|
||||
// restore
|
||||
err = server.LoadJSONEncrypted(backend.Snapshot, id, &sn2)
|
||||
err = server.LoadJSONUnpacked(backend.Snapshot, id, &sn2)
|
||||
OK(t, err)
|
||||
|
||||
Equals(t, sn.Hostname, sn2.Hostname)
|
||||
|
@ -52,7 +52,7 @@ func NewSnapshot(paths []string) (*Snapshot, error) {
|
||||
|
||||
func LoadSnapshot(s *server.Server, id backend.ID) (*Snapshot, error) {
|
||||
sn := &Snapshot{id: id}
|
||||
err := s.LoadJSONEncrypted(backend.Snapshot, id, sn)
|
||||
err := s.LoadJSONUnpacked(backend.Snapshot, id, sn)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
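
LoadJSONEncrypted is renamed to LoadJSONUnpacked here as well, matching its counterpart SaveJSONUnpacked. A hedged sketch of the pair used together for a snapshot (roundTripSnapshot is a hypothetical helper):

    package example

    import (
        "github.com/restic/restic"
        "github.com/restic/restic/backend"
        "github.com/restic/restic/server"
    )

    // roundTripSnapshot stores a snapshot outside a pack and loads it back,
    // pairing SaveJSONUnpacked with the renamed LoadJSONUnpacked (formerly
    // LoadJSONEncrypted).
    func roundTripSnapshot(s *server.Server, sn *restic.Snapshot) (*restic.Snapshot, error) {
        id, err := s.SaveJSONUnpacked(backend.Snapshot, sn)
        if err != nil {
            return nil, err
        }

        sn2 := &restic.Snapshot{}
        if err := s.LoadJSONUnpacked(backend.Snapshot, id, sn2); err != nil {
            return nil, err
        }

        return sn2, nil
    }
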
@ -13,7 +13,7 @@ import (
|
||||
"github.com/restic/restic/server"
|
||||
)
|
||||
|
||||
var TestPassword = "foobar"
|
||||
var TestPassword = flag.String("test.password", "", `use this password for repositories created during tests (default: "geheim")`)
|
||||
var TestCleanup = flag.Bool("test.cleanup", true, "clean up after running tests (remove local backend directory with all content)")
|
||||
var TestTempDir = flag.String("test.tempdir", "", "use this directory for temporary storage (default: system temp dir)")
|
||||
|
||||
@ -25,11 +25,13 @@ func SetupBackend(t testing.TB) *server.Server {
|
||||
b, err := local.Create(filepath.Join(tempdir, "repo"))
|
||||
OK(t, err)
|
||||
|
||||
// set cache dir
|
||||
// set cache dir below temp dir
|
||||
err = os.Setenv("RESTIC_CACHE", filepath.Join(tempdir, "cache"))
|
||||
OK(t, err)
|
||||
|
||||
return server.NewServer(b)
|
||||
s := server.NewServer(b)
|
||||
OK(t, s.Init(*TestPassword))
|
||||
return s
|
||||
}
|
||||
|
||||
func TeardownBackend(t testing.TB, s *server.Server) {
|
||||
@ -42,13 +44,6 @@ func TeardownBackend(t testing.TB, s *server.Server) {
|
||||
OK(t, s.Delete())
|
||||
}
|
||||
|
||||
func SetupKey(t testing.TB, s *server.Server, password string) *server.Key {
|
||||
k, err := server.CreateKey(s, password)
|
||||
OK(t, err)
|
||||
|
||||
return k
|
||||
}
|
||||
|
||||
func SnapshotDir(t testing.TB, server *server.Server, path string, parent backend.ID) *restic.Snapshot {
|
||||
arch := restic.NewArchiver(server)
|
||||
sn, _, err := arch.Snapshot(nil, []string{path}, parent)
|
||||
|
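
With SetupBackend now calling s.Init(*TestPassword) itself, the SetupKey/SetKey boilerplate disappears from the tests below. A hedged sketch of a test written against the new helpers (TestRepoSetup is hypothetical):

    package example_test

    import (
        "testing"

        . "github.com/restic/restic/test"
    )

    // TestRepoSetup mirrors the pattern used by the updated tests in this commit:
    // SetupBackend returns a *server.Server that is already initialized (master
    // key plus config), so the former SetupKey/SetKey boilerplate is gone.
    func TestRepoSetup(t *testing.T) {
        s := SetupBackend(t)
        defer TeardownBackend(t, s)

        if s.Config.ID == "" {
            t.Fatal("repository config was not created")
        }
    }
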
@ -95,8 +95,6 @@ func TestNodeComparison(t *testing.T) {
|
||||
func TestLoadTree(t *testing.T) {
|
||||
server := SetupBackend(t)
|
||||
defer TeardownBackend(t, server)
|
||||
key := SetupKey(t, server, "geheim")
|
||||
server.SetKey(key)
|
||||
|
||||
// save tree
|
||||
tree := restic.NewTree()
|
||||
|
@ -18,8 +18,6 @@ func TestWalkTree(t *testing.T) {
|
||||
|
||||
server := SetupBackend(t)
|
||||
defer TeardownBackend(t, server)
|
||||
key := SetupKey(t, server, "geheim")
|
||||
server.SetKey(key)
|
||||
|
||||
// archive a few files
|
||||
arch := restic.NewArchiver(server)
|
||||
|