2018-03-30 20:43:18 +00:00
|
|
|
package archiver
|
|
|
|
|
|
|
|
import (
|
2019-03-20 01:27:37 +00:00
|
|
|
"bytes"
|
2018-03-30 20:43:18 +00:00
|
|
|
"context"
|
2020-12-28 19:45:53 +00:00
|
|
|
"io"
|
2018-03-30 20:43:18 +00:00
|
|
|
"io/ioutil"
|
|
|
|
"os"
|
|
|
|
"path/filepath"
|
|
|
|
"runtime"
|
|
|
|
"strings"
|
|
|
|
"sync"
|
2018-05-12 21:08:00 +00:00
|
|
|
"sync/atomic"
|
2018-03-30 20:43:18 +00:00
|
|
|
"syscall"
|
|
|
|
"testing"
|
|
|
|
"time"
|
|
|
|
|
2019-05-04 08:34:28 +00:00
|
|
|
"github.com/google/go-cmp/cmp"
|
2020-12-28 19:45:53 +00:00
|
|
|
"github.com/restic/restic/internal/backend/mem"
|
2018-03-30 20:43:18 +00:00
|
|
|
"github.com/restic/restic/internal/checker"
|
2018-05-12 21:08:00 +00:00
|
|
|
"github.com/restic/restic/internal/errors"
|
2018-03-30 20:43:18 +00:00
|
|
|
"github.com/restic/restic/internal/fs"
|
|
|
|
"github.com/restic/restic/internal/repository"
|
|
|
|
"github.com/restic/restic/internal/restic"
|
|
|
|
restictest "github.com/restic/restic/internal/test"
|
2018-05-08 20:28:37 +00:00
|
|
|
tomb "gopkg.in/tomb.v2"
|
2018-03-30 20:43:18 +00:00
|
|
|
)
|
|
|
|
|
|
|
|
func prepareTempdirRepoSrc(t testing.TB, src TestDir) (tempdir string, repo restic.Repository, cleanup func()) {
|
|
|
|
tempdir, removeTempdir := restictest.TempDir(t)
|
|
|
|
repo, removeRepository := repository.TestRepository(t)
|
|
|
|
|
|
|
|
TestCreateFiles(t, tempdir, src)
|
|
|
|
|
|
|
|
cleanup = func() {
|
|
|
|
removeRepository()
|
|
|
|
removeTempdir()
|
|
|
|
}
|
|
|
|
|
|
|
|
return tempdir, repo, cleanup
|
|
|
|
}
|
|
|
|
|
|
|
|
func saveFile(t testing.TB, repo restic.Repository, filename string, filesystem fs.FS) (*restic.Node, ItemStats) {
|
2018-05-08 20:28:37 +00:00
|
|
|
var tmb tomb.Tomb
|
|
|
|
ctx := tmb.Context(context.Background())
|
2018-03-30 20:43:18 +00:00
|
|
|
|
|
|
|
arch := New(repo, filesystem, Options{})
|
2018-05-08 20:28:37 +00:00
|
|
|
arch.runWorkers(ctx, &tmb)
|
2018-03-30 20:43:18 +00:00
|
|
|
|
2018-05-20 14:11:36 +00:00
|
|
|
arch.Error = func(item string, fi os.FileInfo, err error) error {
|
|
|
|
t.Errorf("archiver error for %v: %v", item, err)
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2018-03-30 20:43:18 +00:00
|
|
|
var (
|
|
|
|
completeCallbackNode *restic.Node
|
|
|
|
completeCallbackStats ItemStats
|
|
|
|
completeCallback bool
|
|
|
|
|
|
|
|
startCallback bool
|
|
|
|
)
|
|
|
|
|
|
|
|
complete := func(node *restic.Node, stats ItemStats) {
|
|
|
|
completeCallback = true
|
|
|
|
completeCallbackNode = node
|
|
|
|
completeCallbackStats = stats
|
|
|
|
}
|
|
|
|
|
|
|
|
start := func() {
|
|
|
|
startCallback = true
|
|
|
|
}
|
|
|
|
|
|
|
|
file, err := arch.FS.OpenFile(filename, fs.O_RDONLY|fs.O_NOFOLLOW, 0)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
fi, err := file.Stat()
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
res := arch.fileSaver.Save(ctx, "/", file, fi, start, complete)
|
2018-05-12 19:40:31 +00:00
|
|
|
|
|
|
|
res.Wait(ctx)
|
2018-03-30 20:43:18 +00:00
|
|
|
if res.Err() != nil {
|
|
|
|
t.Fatal(res.Err())
|
|
|
|
}
|
|
|
|
|
2018-05-08 20:28:37 +00:00
|
|
|
tmb.Kill(nil)
|
|
|
|
err = tmb.Wait()
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
|
2020-11-08 07:24:24 +00:00
|
|
|
err = repo.Flush(context.Background())
|
2018-03-30 20:43:18 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
if !startCallback {
|
|
|
|
t.Errorf("start callback did not happen")
|
|
|
|
}
|
|
|
|
|
|
|
|
if !completeCallback {
|
|
|
|
t.Errorf("complete callback did not happen")
|
|
|
|
}
|
|
|
|
|
|
|
|
if completeCallbackNode == nil {
|
|
|
|
t.Errorf("no node returned for complete callback")
|
|
|
|
}
|
|
|
|
|
|
|
|
if completeCallbackNode != nil && !res.Node().Equals(*completeCallbackNode) {
|
|
|
|
t.Errorf("different node returned for complete callback")
|
|
|
|
}
|
|
|
|
|
|
|
|
if completeCallbackStats != res.Stats() {
|
|
|
|
t.Errorf("different stats return for complete callback, want:\n %v\ngot:\n %v", res.Stats(), completeCallbackStats)
|
|
|
|
}
|
|
|
|
|
|
|
|
return res.Node(), res.Stats()
|
|
|
|
}
|
|
|
|
|
|
|
|
func TestArchiverSaveFile(t *testing.T) {
|
|
|
|
var tests = []TestFile{
|
2019-04-28 01:19:02 +00:00
|
|
|
{Content: ""},
|
|
|
|
{Content: "foo"},
|
|
|
|
{Content: string(restictest.Random(23, 12*1024*1024+1287898))},
|
2018-03-30 20:43:18 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
for _, testfile := range tests {
|
|
|
|
t.Run("", func(t *testing.T) {
|
|
|
|
ctx, cancel := context.WithCancel(context.Background())
|
|
|
|
defer cancel()
|
|
|
|
|
|
|
|
tempdir, repo, cleanup := prepareTempdirRepoSrc(t, TestDir{"file": testfile})
|
|
|
|
defer cleanup()
|
|
|
|
|
2018-09-05 12:04:55 +00:00
|
|
|
node, stats := saveFile(t, repo, filepath.Join(tempdir, "file"), fs.Track{FS: fs.Local{}})
|
2018-03-30 20:43:18 +00:00
|
|
|
|
|
|
|
TestEnsureFileContent(ctx, t, repo, "file", node, testfile)
|
|
|
|
if stats.DataSize != uint64(len(testfile.Content)) {
|
|
|
|
t.Errorf("wrong stats returned in DataSize, want %d, got %d", len(testfile.Content), stats.DataSize)
|
|
|
|
}
|
|
|
|
if stats.DataBlobs <= 0 && len(testfile.Content) > 0 {
|
|
|
|
t.Errorf("wrong stats returned in DataBlobs, want > 0, got %d", stats.DataBlobs)
|
|
|
|
}
|
|
|
|
if stats.TreeSize != 0 {
|
|
|
|
t.Errorf("wrong stats returned in TreeSize, want 0, got %d", stats.TreeSize)
|
|
|
|
}
|
|
|
|
if stats.TreeBlobs != 0 {
|
|
|
|
t.Errorf("wrong stats returned in DataBlobs, want 0, got %d", stats.DataBlobs)
|
|
|
|
}
|
|
|
|
})
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func TestArchiverSaveFileReaderFS(t *testing.T) {
|
|
|
|
var tests = []struct {
|
|
|
|
Data string
|
|
|
|
}{
|
|
|
|
{Data: "foo"},
|
|
|
|
{Data: string(restictest.Random(23, 12*1024*1024+1287898))},
|
|
|
|
}
|
|
|
|
|
|
|
|
for _, test := range tests {
|
|
|
|
t.Run("", func(t *testing.T) {
|
|
|
|
ctx, cancel := context.WithCancel(context.Background())
|
|
|
|
defer cancel()
|
|
|
|
|
|
|
|
repo, cleanup := repository.TestRepository(t)
|
|
|
|
defer cleanup()
|
|
|
|
|
|
|
|
ts := time.Now()
|
|
|
|
filename := "xx"
|
|
|
|
readerFs := &fs.Reader{
|
|
|
|
ModTime: ts,
|
|
|
|
Mode: 0123,
|
|
|
|
Name: filename,
|
|
|
|
ReadCloser: ioutil.NopCloser(strings.NewReader(test.Data)),
|
|
|
|
}
|
|
|
|
|
|
|
|
node, stats := saveFile(t, repo, filename, readerFs)
|
|
|
|
|
|
|
|
TestEnsureFileContent(ctx, t, repo, "file", node, TestFile{Content: test.Data})
|
|
|
|
if stats.DataSize != uint64(len(test.Data)) {
|
2018-05-20 14:11:36 +00:00
|
|
|
t.Errorf("wrong stats returned in DataSize, want %d, got %d", len(test.Data), stats.DataSize)
|
|
|
|
}
|
|
|
|
if stats.DataBlobs <= 0 && len(test.Data) > 0 {
|
|
|
|
t.Errorf("wrong stats returned in DataBlobs, want > 0, got %d", stats.DataBlobs)
|
|
|
|
}
|
|
|
|
if stats.TreeSize != 0 {
|
|
|
|
t.Errorf("wrong stats returned in TreeSize, want 0, got %d", stats.TreeSize)
|
|
|
|
}
|
|
|
|
if stats.TreeBlobs != 0 {
|
|
|
|
t.Errorf("wrong stats returned in DataBlobs, want 0, got %d", stats.DataBlobs)
|
|
|
|
}
|
|
|
|
})
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func TestArchiverSave(t *testing.T) {
|
|
|
|
var tests = []TestFile{
|
2019-04-28 01:19:02 +00:00
|
|
|
{Content: ""},
|
|
|
|
{Content: "foo"},
|
|
|
|
{Content: string(restictest.Random(23, 12*1024*1024+1287898))},
|
2018-05-20 14:11:36 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
for _, testfile := range tests {
|
|
|
|
t.Run("", func(t *testing.T) {
|
|
|
|
ctx, cancel := context.WithCancel(context.Background())
|
|
|
|
defer cancel()
|
|
|
|
|
|
|
|
tempdir, repo, cleanup := prepareTempdirRepoSrc(t, TestDir{"file": testfile})
|
|
|
|
defer cleanup()
|
|
|
|
|
|
|
|
var tmb tomb.Tomb
|
|
|
|
|
2018-09-05 12:04:55 +00:00
|
|
|
arch := New(repo, fs.Track{FS: fs.Local{}}, Options{})
|
2018-05-20 14:11:36 +00:00
|
|
|
arch.Error = func(item string, fi os.FileInfo, err error) error {
|
|
|
|
t.Errorf("archiver error for %v: %v", item, err)
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
arch.runWorkers(tmb.Context(ctx), &tmb)
|
|
|
|
|
|
|
|
node, excluded, err := arch.Save(ctx, "/", filepath.Join(tempdir, "file"), nil)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
if excluded {
|
|
|
|
t.Errorf("Save() excluded the node, that's unexpected")
|
|
|
|
}
|
|
|
|
|
|
|
|
node.wait(ctx)
|
|
|
|
if node.err != nil {
|
|
|
|
t.Fatal(node.err)
|
|
|
|
}
|
|
|
|
|
|
|
|
if node.node == nil {
|
|
|
|
t.Fatalf("returned node is nil")
|
|
|
|
}
|
|
|
|
|
|
|
|
stats := node.stats
|
|
|
|
|
|
|
|
err = repo.Flush(ctx)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
TestEnsureFileContent(ctx, t, repo, "file", node.node, testfile)
|
|
|
|
if stats.DataSize != uint64(len(testfile.Content)) {
|
|
|
|
t.Errorf("wrong stats returned in DataSize, want %d, got %d", len(testfile.Content), stats.DataSize)
|
|
|
|
}
|
|
|
|
if stats.DataBlobs <= 0 && len(testfile.Content) > 0 {
|
|
|
|
t.Errorf("wrong stats returned in DataBlobs, want > 0, got %d", stats.DataBlobs)
|
|
|
|
}
|
|
|
|
if stats.TreeSize != 0 {
|
|
|
|
t.Errorf("wrong stats returned in TreeSize, want 0, got %d", stats.TreeSize)
|
|
|
|
}
|
|
|
|
if stats.TreeBlobs != 0 {
|
|
|
|
t.Errorf("wrong stats returned in DataBlobs, want 0, got %d", stats.DataBlobs)
|
|
|
|
}
|
|
|
|
})
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func TestArchiverSaveReaderFS(t *testing.T) {
|
|
|
|
var tests = []struct {
|
|
|
|
Data string
|
|
|
|
}{
|
|
|
|
{Data: "foo"},
|
|
|
|
{Data: string(restictest.Random(23, 12*1024*1024+1287898))},
|
|
|
|
}
|
|
|
|
|
|
|
|
for _, test := range tests {
|
|
|
|
t.Run("", func(t *testing.T) {
|
|
|
|
ctx, cancel := context.WithCancel(context.Background())
|
|
|
|
defer cancel()
|
|
|
|
|
|
|
|
repo, cleanup := repository.TestRepository(t)
|
|
|
|
defer cleanup()
|
|
|
|
|
|
|
|
ts := time.Now()
|
|
|
|
filename := "xx"
|
|
|
|
readerFs := &fs.Reader{
|
|
|
|
ModTime: ts,
|
|
|
|
Mode: 0123,
|
|
|
|
Name: filename,
|
|
|
|
ReadCloser: ioutil.NopCloser(strings.NewReader(test.Data)),
|
|
|
|
}
|
|
|
|
|
|
|
|
var tmb tomb.Tomb
|
|
|
|
|
|
|
|
arch := New(repo, readerFs, Options{})
|
|
|
|
arch.Error = func(item string, fi os.FileInfo, err error) error {
|
|
|
|
t.Errorf("archiver error for %v: %v", item, err)
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
arch.runWorkers(tmb.Context(ctx), &tmb)
|
|
|
|
|
|
|
|
node, excluded, err := arch.Save(ctx, "/", filename, nil)
|
|
|
|
t.Logf("Save returned %v %v", node, err)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
if excluded {
|
|
|
|
t.Errorf("Save() excluded the node, that's unexpected")
|
|
|
|
}
|
|
|
|
|
|
|
|
node.wait(ctx)
|
|
|
|
if node.err != nil {
|
|
|
|
t.Fatal(node.err)
|
|
|
|
}
|
|
|
|
|
|
|
|
if node.node == nil {
|
|
|
|
t.Fatalf("returned node is nil")
|
|
|
|
}
|
|
|
|
|
|
|
|
stats := node.stats
|
|
|
|
|
|
|
|
err = repo.Flush(ctx)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
TestEnsureFileContent(ctx, t, repo, "file", node.node, TestFile{Content: test.Data})
|
|
|
|
if stats.DataSize != uint64(len(test.Data)) {
|
2018-03-30 20:43:18 +00:00
|
|
|
t.Errorf("wrong stats returned in DataSize, want %d, got %d", len(test.Data), stats.DataSize)
|
|
|
|
}
|
|
|
|
if stats.DataBlobs <= 0 && len(test.Data) > 0 {
|
|
|
|
t.Errorf("wrong stats returned in DataBlobs, want > 0, got %d", stats.DataBlobs)
|
|
|
|
}
|
|
|
|
if stats.TreeSize != 0 {
|
|
|
|
t.Errorf("wrong stats returned in TreeSize, want 0, got %d", stats.TreeSize)
|
|
|
|
}
|
|
|
|
if stats.TreeBlobs != 0 {
|
|
|
|
t.Errorf("wrong stats returned in DataBlobs, want 0, got %d", stats.DataBlobs)
|
|
|
|
}
|
|
|
|
})
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func BenchmarkArchiverSaveFileSmall(b *testing.B) {
|
|
|
|
const fileSize = 4 * 1024
|
|
|
|
d := TestDir{"file": TestFile{
|
|
|
|
Content: string(restictest.Random(23, fileSize)),
|
|
|
|
}}
|
|
|
|
|
|
|
|
b.SetBytes(fileSize)
|
|
|
|
|
|
|
|
for i := 0; i < b.N; i++ {
|
|
|
|
b.StopTimer()
|
|
|
|
tempdir, repo, cleanup := prepareTempdirRepoSrc(b, d)
|
|
|
|
b.StartTimer()
|
|
|
|
|
2018-09-05 12:04:55 +00:00
|
|
|
_, stats := saveFile(b, repo, filepath.Join(tempdir, "file"), fs.Track{FS: fs.Local{}})
|
2018-03-30 20:43:18 +00:00
|
|
|
|
|
|
|
b.StopTimer()
|
|
|
|
if stats.DataSize != fileSize {
|
|
|
|
b.Errorf("wrong stats returned in DataSize, want %d, got %d", fileSize, stats.DataSize)
|
|
|
|
}
|
|
|
|
if stats.DataBlobs <= 0 {
|
|
|
|
b.Errorf("wrong stats returned in DataBlobs, want > 0, got %d", stats.DataBlobs)
|
|
|
|
}
|
|
|
|
if stats.TreeSize != 0 {
|
|
|
|
b.Errorf("wrong stats returned in TreeSize, want 0, got %d", stats.TreeSize)
|
|
|
|
}
|
|
|
|
if stats.TreeBlobs != 0 {
|
|
|
|
b.Errorf("wrong stats returned in DataBlobs, want 0, got %d", stats.DataBlobs)
|
|
|
|
}
|
|
|
|
cleanup()
|
|
|
|
b.StartTimer()
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func BenchmarkArchiverSaveFileLarge(b *testing.B) {
|
|
|
|
const fileSize = 40*1024*1024 + 1287898
|
|
|
|
d := TestDir{"file": TestFile{
|
|
|
|
Content: string(restictest.Random(23, fileSize)),
|
|
|
|
}}
|
|
|
|
|
|
|
|
b.SetBytes(fileSize)
|
|
|
|
|
|
|
|
for i := 0; i < b.N; i++ {
|
|
|
|
b.StopTimer()
|
|
|
|
tempdir, repo, cleanup := prepareTempdirRepoSrc(b, d)
|
|
|
|
b.StartTimer()
|
|
|
|
|
2018-09-05 12:04:55 +00:00
|
|
|
_, stats := saveFile(b, repo, filepath.Join(tempdir, "file"), fs.Track{FS: fs.Local{}})
|
2018-03-30 20:43:18 +00:00
|
|
|
|
|
|
|
b.StopTimer()
|
|
|
|
if stats.DataSize != fileSize {
|
|
|
|
b.Errorf("wrong stats returned in DataSize, want %d, got %d", fileSize, stats.DataSize)
|
|
|
|
}
|
|
|
|
if stats.DataBlobs <= 0 {
|
|
|
|
b.Errorf("wrong stats returned in DataBlobs, want > 0, got %d", stats.DataBlobs)
|
|
|
|
}
|
|
|
|
if stats.TreeSize != 0 {
|
|
|
|
b.Errorf("wrong stats returned in TreeSize, want 0, got %d", stats.TreeSize)
|
|
|
|
}
|
|
|
|
if stats.TreeBlobs != 0 {
|
|
|
|
b.Errorf("wrong stats returned in DataBlobs, want 0, got %d", stats.DataBlobs)
|
|
|
|
}
|
|
|
|
cleanup()
|
|
|
|
b.StartTimer()
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
type blobCountingRepo struct {
|
|
|
|
restic.Repository
|
|
|
|
|
|
|
|
m sync.Mutex
|
|
|
|
saved map[restic.BlobHandle]uint
|
|
|
|
}
|
|
|
|
|
2020-06-06 20:20:44 +00:00
|
|
|
func (repo *blobCountingRepo) SaveBlob(ctx context.Context, t restic.BlobType, buf []byte, id restic.ID, storeDuplicate bool) (restic.ID, bool, error) {
|
|
|
|
id, exists, err := repo.Repository.SaveBlob(ctx, t, buf, id, false)
|
|
|
|
if exists {
|
|
|
|
return id, exists, err
|
|
|
|
}
|
2018-03-30 20:43:18 +00:00
|
|
|
h := restic.BlobHandle{ID: id, Type: t}
|
|
|
|
repo.m.Lock()
|
|
|
|
repo.saved[h]++
|
|
|
|
repo.m.Unlock()
|
2020-06-06 20:20:44 +00:00
|
|
|
return id, exists, err
|
2018-03-30 20:43:18 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
func (repo *blobCountingRepo) SaveTree(ctx context.Context, t *restic.Tree) (restic.ID, error) {
|
|
|
|
id, err := repo.Repository.SaveTree(ctx, t)
|
|
|
|
h := restic.BlobHandle{ID: id, Type: restic.TreeBlob}
|
|
|
|
repo.m.Lock()
|
|
|
|
repo.saved[h]++
|
|
|
|
repo.m.Unlock()
|
|
|
|
return id, err
|
|
|
|
}
|
|
|
|
|
|
|
|
func appendToFile(t testing.TB, filename string, data []byte) {
|
|
|
|
f, err := os.OpenFile(filename, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0644)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
_, err = f.Write(data)
|
|
|
|
if err != nil {
|
|
|
|
_ = f.Close()
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
err = f.Close()
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func TestArchiverSaveFileIncremental(t *testing.T) {
|
|
|
|
tempdir, removeTempdir := restictest.TempDir(t)
|
|
|
|
defer removeTempdir()
|
|
|
|
|
|
|
|
testRepo, removeRepository := repository.TestRepository(t)
|
|
|
|
defer removeRepository()
|
|
|
|
|
|
|
|
repo := &blobCountingRepo{
|
|
|
|
Repository: testRepo,
|
|
|
|
saved: make(map[restic.BlobHandle]uint),
|
|
|
|
}
|
|
|
|
|
|
|
|
data := restictest.Random(23, 512*1024+887898)
|
|
|
|
testfile := filepath.Join(tempdir, "testfile")
|
|
|
|
|
|
|
|
for i := 0; i < 3; i++ {
|
|
|
|
appendToFile(t, testfile, data)
|
2018-09-05 12:04:55 +00:00
|
|
|
node, _ := saveFile(t, repo, testfile, fs.Track{FS: fs.Local{}})
|
2018-03-30 20:43:18 +00:00
|
|
|
|
|
|
|
t.Logf("node blobs: %v", node.Content)
|
|
|
|
|
|
|
|
for h, n := range repo.saved {
|
|
|
|
if n > 1 {
|
|
|
|
t.Errorf("iteration %v: blob %v saved more than once (%d times)", i, h, n)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func save(t testing.TB, filename string, data []byte) {
|
|
|
|
f, err := os.Create(filename)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
_, err = f.Write(data)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
err = f.Sync()
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
err = f.Close()
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-07-08 07:59:00 +00:00
|
|
|
func chmodTwice(t testing.TB, name string) {
|
|
|
|
// POSIX says that ctime is updated "even if the file status does not
|
|
|
|
// change", but let's make sure it does change, just in case.
|
|
|
|
err := os.Chmod(name, 0700)
|
|
|
|
restictest.OK(t, err)
|
|
|
|
|
|
|
|
sleep()
|
|
|
|
|
|
|
|
err = os.Chmod(name, 0600)
|
|
|
|
restictest.OK(t, err)
|
|
|
|
}
|
|
|
|
|
2018-03-30 20:43:18 +00:00
|
|
|
func lstat(t testing.TB, name string) os.FileInfo {
|
|
|
|
fi, err := os.Lstat(name)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
return fi
|
|
|
|
}
|
|
|
|
|
|
|
|
func setTimestamp(t testing.TB, filename string, atime, mtime time.Time) {
|
|
|
|
var utimes = [...]syscall.Timespec{
|
|
|
|
syscall.NsecToTimespec(atime.UnixNano()),
|
|
|
|
syscall.NsecToTimespec(mtime.UnixNano()),
|
|
|
|
}
|
|
|
|
|
|
|
|
err := syscall.UtimesNano(filename, utimes[:])
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func remove(t testing.TB, filename string) {
|
|
|
|
err := os.Remove(filename)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-07-08 07:59:00 +00:00
|
|
|
func rename(t testing.TB, oldname, newname string) {
|
|
|
|
err := os.Rename(oldname, newname)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-03-30 20:43:18 +00:00
|
|
|
func nodeFromFI(t testing.TB, filename string, fi os.FileInfo) *restic.Node {
|
|
|
|
node, err := restic.NodeFromFileInfo(filename, fi)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
return node
|
|
|
|
}
|
|
|
|
|
2020-07-08 07:59:00 +00:00
|
|
|
// sleep sleeps long enough to ensure a timestamp change.
|
|
|
|
func sleep() {
|
|
|
|
d := 50 * time.Millisecond
|
2018-03-30 20:43:18 +00:00
|
|
|
if runtime.GOOS == "darwin" {
|
2020-07-08 07:59:00 +00:00
|
|
|
// On older Darwin instances, the file system only supports one second
|
|
|
|
// granularity.
|
|
|
|
d = 1500 * time.Millisecond
|
2018-03-30 20:43:18 +00:00
|
|
|
}
|
2020-07-08 07:59:00 +00:00
|
|
|
time.Sleep(d)
|
|
|
|
}
|
2018-03-30 20:43:18 +00:00
|
|
|
|
2020-07-08 07:59:00 +00:00
|
|
|
func TestFileChanged(t *testing.T) {
|
|
|
|
var defaultContent = []byte("foobar")
|
2018-03-30 20:43:18 +00:00
|
|
|
|
|
|
|
var tests = []struct {
|
2019-05-05 10:51:26 +00:00
|
|
|
Name string
|
|
|
|
SkipForWindows bool
|
|
|
|
Content []byte
|
|
|
|
Modify func(t testing.TB, filename string)
|
2020-07-08 07:59:00 +00:00
|
|
|
ChangeIgnore uint
|
2019-05-05 10:51:26 +00:00
|
|
|
SameFile bool
|
2018-03-30 20:43:18 +00:00
|
|
|
}{
|
|
|
|
{
|
|
|
|
Name: "same-content-new-file",
|
|
|
|
Modify: func(t testing.TB, filename string) {
|
|
|
|
remove(t, filename)
|
|
|
|
sleep()
|
|
|
|
save(t, filename, defaultContent)
|
|
|
|
},
|
|
|
|
},
|
|
|
|
{
|
|
|
|
Name: "same-content-new-timestamp",
|
|
|
|
Modify: func(t testing.TB, filename string) {
|
|
|
|
sleep()
|
|
|
|
save(t, filename, defaultContent)
|
|
|
|
},
|
|
|
|
},
|
2019-03-20 01:27:37 +00:00
|
|
|
{
|
|
|
|
Name: "new-content-same-timestamp",
|
2019-05-05 10:51:26 +00:00
|
|
|
// on Windows, there's no "create time" field users cannot modify,
|
|
|
|
// so we're unable to detect if a file has been modified when the
|
|
|
|
// timestamps are reset, so we skip this test for Windows
|
|
|
|
SkipForWindows: true,
|
2019-03-20 01:27:37 +00:00
|
|
|
Modify: func(t testing.TB, filename string) {
|
2019-04-24 03:39:13 +00:00
|
|
|
fi, err := os.Stat(filename)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
2019-03-20 01:27:37 +00:00
|
|
|
extFI := fs.ExtendedStat(fi)
|
|
|
|
save(t, filename, bytes.ToUpper(defaultContent))
|
|
|
|
sleep()
|
2019-04-24 03:39:13 +00:00
|
|
|
setTimestamp(t, filename, extFI.AccessTime, extFI.ModTime)
|
2019-03-20 01:27:37 +00:00
|
|
|
},
|
|
|
|
},
|
2018-03-30 20:43:18 +00:00
|
|
|
{
|
|
|
|
Name: "other-content",
|
|
|
|
Modify: func(t testing.TB, filename string) {
|
|
|
|
remove(t, filename)
|
|
|
|
sleep()
|
|
|
|
save(t, filename, []byte("xxxxxx"))
|
|
|
|
},
|
|
|
|
},
|
|
|
|
{
|
|
|
|
Name: "longer-content",
|
|
|
|
Modify: func(t testing.TB, filename string) {
|
|
|
|
save(t, filename, []byte("xxxxxxxxxxxxxxxxxxxxxx"))
|
|
|
|
},
|
|
|
|
},
|
|
|
|
{
|
|
|
|
Name: "new-file",
|
|
|
|
Modify: func(t testing.TB, filename string) {
|
|
|
|
remove(t, filename)
|
|
|
|
sleep()
|
|
|
|
save(t, filename, defaultContent)
|
|
|
|
},
|
|
|
|
},
|
2020-07-08 07:59:00 +00:00
|
|
|
{
|
|
|
|
Name: "ctime-change",
|
|
|
|
Modify: chmodTwice,
|
|
|
|
SameFile: false,
|
|
|
|
SkipForWindows: true, // No ctime on Windows, so this test would fail.
|
|
|
|
},
|
|
|
|
{
|
|
|
|
Name: "ignore-ctime-change",
|
|
|
|
Modify: chmodTwice,
|
|
|
|
ChangeIgnore: ChangeIgnoreCtime,
|
|
|
|
SameFile: true,
|
|
|
|
SkipForWindows: true, // No ctime on Windows, so this test is meaningless.
|
|
|
|
},
|
2019-03-10 20:22:54 +00:00
|
|
|
{
|
|
|
|
Name: "ignore-inode",
|
|
|
|
Modify: func(t testing.TB, filename string) {
|
|
|
|
fi := lstat(t, filename)
|
2020-07-08 07:59:00 +00:00
|
|
|
// First create the new file, then remove the old one,
|
|
|
|
// so that the old file retains its inode number.
|
|
|
|
tempname := filename + ".old"
|
|
|
|
rename(t, filename, tempname)
|
2019-03-10 20:22:54 +00:00
|
|
|
save(t, filename, defaultContent)
|
2020-07-08 07:59:00 +00:00
|
|
|
remove(t, tempname)
|
2019-03-10 20:22:54 +00:00
|
|
|
setTimestamp(t, filename, fi.ModTime(), fi.ModTime())
|
|
|
|
},
|
2020-07-08 07:59:00 +00:00
|
|
|
ChangeIgnore: ChangeIgnoreCtime | ChangeIgnoreInode,
|
|
|
|
SameFile: true,
|
2019-03-10 20:22:54 +00:00
|
|
|
},
|
2018-03-30 20:43:18 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
for _, test := range tests {
|
|
|
|
t.Run(test.Name, func(t *testing.T) {
|
2019-05-05 10:51:26 +00:00
|
|
|
if runtime.GOOS == "windows" && test.SkipForWindows {
|
|
|
|
t.Skip("don't run test on Windows")
|
|
|
|
}
|
|
|
|
|
2018-03-30 20:43:18 +00:00
|
|
|
tempdir, cleanup := restictest.TempDir(t)
|
|
|
|
defer cleanup()
|
|
|
|
|
|
|
|
filename := filepath.Join(tempdir, "file")
|
|
|
|
content := defaultContent
|
|
|
|
if test.Content != nil {
|
|
|
|
content = test.Content
|
|
|
|
}
|
|
|
|
save(t, filename, content)
|
|
|
|
|
|
|
|
fiBefore := lstat(t, filename)
|
|
|
|
node := nodeFromFI(t, filename, fiBefore)
|
|
|
|
|
2020-07-08 07:59:00 +00:00
|
|
|
if fileChanged(fiBefore, node, 0) {
|
2018-03-30 20:43:18 +00:00
|
|
|
t.Fatalf("unchanged file detected as changed")
|
|
|
|
}
|
|
|
|
|
|
|
|
test.Modify(t, filename)
|
|
|
|
|
|
|
|
fiAfter := lstat(t, filename)
|
2019-05-05 10:50:47 +00:00
|
|
|
|
|
|
|
if test.SameFile {
|
|
|
|
// file should be detected as unchanged
|
2020-07-08 07:59:00 +00:00
|
|
|
if fileChanged(fiAfter, node, test.ChangeIgnore) {
|
2019-03-10 20:22:54 +00:00
|
|
|
t.Fatalf("unmodified file detected as changed")
|
2019-05-05 10:50:47 +00:00
|
|
|
}
|
|
|
|
} else {
|
|
|
|
// file should be detected as changed
|
2020-07-08 07:59:00 +00:00
|
|
|
if !fileChanged(fiAfter, node, test.ChangeIgnore) && !test.SameFile {
|
2019-03-10 20:22:54 +00:00
|
|
|
t.Fatalf("modified file detected as unchanged")
|
|
|
|
}
|
2018-03-30 20:43:18 +00:00
|
|
|
}
|
|
|
|
})
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func TestFilChangedSpecialCases(t *testing.T) {
|
|
|
|
tempdir, cleanup := restictest.TempDir(t)
|
|
|
|
defer cleanup()
|
|
|
|
|
|
|
|
filename := filepath.Join(tempdir, "file")
|
|
|
|
content := []byte("foobar")
|
|
|
|
save(t, filename, content)
|
|
|
|
|
|
|
|
t.Run("nil-node", func(t *testing.T) {
|
|
|
|
fi := lstat(t, filename)
|
2020-07-08 07:59:00 +00:00
|
|
|
if !fileChanged(fi, nil, 0) {
|
2018-03-30 20:43:18 +00:00
|
|
|
t.Fatal("nil node detected as unchanged")
|
|
|
|
}
|
|
|
|
})
|
|
|
|
|
|
|
|
t.Run("type-change", func(t *testing.T) {
|
|
|
|
fi := lstat(t, filename)
|
|
|
|
node := nodeFromFI(t, filename, fi)
|
|
|
|
node.Type = "symlink"
|
2020-07-08 07:59:00 +00:00
|
|
|
if !fileChanged(fi, node, 0) {
|
2018-03-30 20:43:18 +00:00
|
|
|
t.Fatal("node with changed type detected as unchanged")
|
|
|
|
}
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
|
|
|
func TestArchiverSaveDir(t *testing.T) {
|
|
|
|
const targetNodeName = "targetdir"
|
|
|
|
|
|
|
|
var tests = []struct {
|
|
|
|
src TestDir
|
|
|
|
chdir string
|
|
|
|
target string
|
|
|
|
want TestDir
|
|
|
|
}{
|
|
|
|
{
|
|
|
|
src: TestDir{
|
|
|
|
"targetfile": TestFile{Content: string(restictest.Random(888, 2*1024*1024+5000))},
|
|
|
|
},
|
|
|
|
target: ".",
|
|
|
|
want: TestDir{
|
|
|
|
"targetdir": TestDir{
|
|
|
|
"targetfile": TestFile{Content: string(restictest.Random(888, 2*1024*1024+5000))},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
{
|
|
|
|
src: TestDir{
|
|
|
|
"targetdir": TestDir{
|
|
|
|
"foo": TestFile{Content: "foo"},
|
|
|
|
"emptyfile": TestFile{Content: ""},
|
|
|
|
"bar": TestFile{Content: "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"},
|
|
|
|
"largefile": TestFile{Content: string(restictest.Random(888, 2*1024*1024+5000))},
|
|
|
|
"largerfile": TestFile{Content: string(restictest.Random(234, 5*1024*1024+5000))},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
target: "targetdir",
|
|
|
|
},
|
|
|
|
{
|
|
|
|
src: TestDir{
|
|
|
|
"foo": TestFile{Content: "foo"},
|
|
|
|
"emptyfile": TestFile{Content: ""},
|
|
|
|
"bar": TestFile{Content: "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"},
|
|
|
|
},
|
|
|
|
target: ".",
|
|
|
|
want: TestDir{
|
|
|
|
"targetdir": TestDir{
|
|
|
|
"foo": TestFile{Content: "foo"},
|
|
|
|
"emptyfile": TestFile{Content: ""},
|
|
|
|
"bar": TestFile{Content: "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
{
|
|
|
|
src: TestDir{
|
|
|
|
"foo": TestDir{
|
|
|
|
"subdir": TestDir{
|
|
|
|
"x": TestFile{Content: "xxx"},
|
|
|
|
"y": TestFile{Content: "yyyyyyyyyyyyyyyy"},
|
|
|
|
"z": TestFile{Content: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"},
|
|
|
|
},
|
|
|
|
"file": TestFile{Content: "just a test"},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
chdir: "foo/subdir",
|
|
|
|
target: "../../",
|
|
|
|
want: TestDir{
|
|
|
|
"targetdir": TestDir{
|
|
|
|
"foo": TestDir{
|
|
|
|
"subdir": TestDir{
|
|
|
|
"x": TestFile{Content: "xxx"},
|
|
|
|
"y": TestFile{Content: "yyyyyyyyyyyyyyyy"},
|
|
|
|
"z": TestFile{Content: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"},
|
|
|
|
},
|
|
|
|
"file": TestFile{Content: "just a test"},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
{
|
|
|
|
src: TestDir{
|
|
|
|
"foo": TestDir{
|
|
|
|
"file": TestFile{Content: "just a test"},
|
|
|
|
"file2": TestFile{Content: "again"},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
target: "./foo",
|
|
|
|
want: TestDir{
|
|
|
|
"targetdir": TestDir{
|
|
|
|
"file": TestFile{Content: "just a test"},
|
|
|
|
"file2": TestFile{Content: "again"},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
}
|
|
|
|
|
|
|
|
for _, test := range tests {
|
|
|
|
t.Run("", func(t *testing.T) {
|
2018-05-08 20:28:37 +00:00
|
|
|
var tmb tomb.Tomb
|
|
|
|
ctx := tmb.Context(context.Background())
|
2018-03-30 20:43:18 +00:00
|
|
|
|
|
|
|
tempdir, repo, cleanup := prepareTempdirRepoSrc(t, test.src)
|
|
|
|
defer cleanup()
|
|
|
|
|
2018-09-05 12:04:55 +00:00
|
|
|
arch := New(repo, fs.Track{FS: fs.Local{}}, Options{})
|
2018-05-08 20:28:37 +00:00
|
|
|
arch.runWorkers(ctx, &tmb)
|
2018-03-30 20:43:18 +00:00
|
|
|
|
|
|
|
chdir := tempdir
|
|
|
|
if test.chdir != "" {
|
|
|
|
chdir = filepath.Join(chdir, test.chdir)
|
|
|
|
}
|
|
|
|
|
2020-02-17 12:24:09 +00:00
|
|
|
back := restictest.Chdir(t, chdir)
|
2018-03-30 20:43:18 +00:00
|
|
|
defer back()
|
|
|
|
|
|
|
|
fi, err := fs.Lstat(test.target)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
|
2020-04-22 20:23:02 +00:00
|
|
|
ft, err := arch.SaveDir(ctx, "/", fi, test.target, nil, nil)
|
2018-04-30 13:13:03 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
|
2018-05-12 19:40:31 +00:00
|
|
|
ft.Wait(ctx)
|
2018-05-08 20:28:37 +00:00
|
|
|
node, stats := ft.Node(), ft.Stats()
|
|
|
|
|
|
|
|
tmb.Kill(nil)
|
|
|
|
err = tmb.Wait()
|
2018-03-30 20:43:18 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
t.Logf("stats: %v", stats)
|
|
|
|
if stats.DataSize != 0 {
|
|
|
|
t.Errorf("wrong stats returned in DataSize, want 0, got %d", stats.DataSize)
|
|
|
|
}
|
|
|
|
if stats.DataBlobs != 0 {
|
|
|
|
t.Errorf("wrong stats returned in DataBlobs, want 0, got %d", stats.DataBlobs)
|
|
|
|
}
|
2020-03-07 20:48:59 +00:00
|
|
|
if stats.TreeSize == 0 {
|
2018-03-30 20:43:18 +00:00
|
|
|
t.Errorf("wrong stats returned in TreeSize, want > 0, got %d", stats.TreeSize)
|
|
|
|
}
|
|
|
|
if stats.TreeBlobs <= 0 {
|
|
|
|
t.Errorf("wrong stats returned in TreeBlobs, want > 0, got %d", stats.TreeBlobs)
|
|
|
|
}
|
|
|
|
|
2020-11-08 07:24:24 +00:00
|
|
|
ctx = context.Background()
|
2018-03-30 20:43:18 +00:00
|
|
|
node.Name = targetNodeName
|
|
|
|
tree := &restic.Tree{Nodes: []*restic.Node{node}}
|
|
|
|
treeID, err := repo.SaveTree(ctx, tree)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
err = repo.Flush(ctx)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
want := test.want
|
|
|
|
if want == nil {
|
|
|
|
want = test.src
|
|
|
|
}
|
|
|
|
TestEnsureTree(ctx, t, "/", repo, treeID, want)
|
|
|
|
})
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func TestArchiverSaveDirIncremental(t *testing.T) {
|
|
|
|
tempdir, removeTempdir := restictest.TempDir(t)
|
|
|
|
defer removeTempdir()
|
|
|
|
|
|
|
|
testRepo, removeRepository := repository.TestRepository(t)
|
|
|
|
defer removeRepository()
|
|
|
|
|
|
|
|
repo := &blobCountingRepo{
|
|
|
|
Repository: testRepo,
|
|
|
|
saved: make(map[restic.BlobHandle]uint),
|
|
|
|
}
|
|
|
|
|
|
|
|
appendToFile(t, filepath.Join(tempdir, "testfile"), []byte("foobar"))
|
|
|
|
|
|
|
|
// save the empty directory several times in a row, then have a look if the
|
|
|
|
// archiver did save the same tree several times
|
|
|
|
for i := 0; i < 5; i++ {
|
2018-05-08 20:28:37 +00:00
|
|
|
var tmb tomb.Tomb
|
|
|
|
ctx := tmb.Context(context.Background())
|
2018-03-30 20:43:18 +00:00
|
|
|
|
2018-09-05 12:04:55 +00:00
|
|
|
arch := New(repo, fs.Track{FS: fs.Local{}}, Options{})
|
2018-05-08 20:28:37 +00:00
|
|
|
arch.runWorkers(ctx, &tmb)
|
2018-03-30 20:43:18 +00:00
|
|
|
|
|
|
|
fi, err := fs.Lstat(tempdir)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
|
2020-04-22 20:23:02 +00:00
|
|
|
ft, err := arch.SaveDir(ctx, "/", fi, tempdir, nil, nil)
|
2018-04-30 13:13:03 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
|
2018-05-12 19:40:31 +00:00
|
|
|
ft.Wait(ctx)
|
2018-05-08 20:28:37 +00:00
|
|
|
node, stats := ft.Node(), ft.Stats()
|
|
|
|
|
|
|
|
tmb.Kill(nil)
|
|
|
|
err = tmb.Wait()
|
2018-03-30 20:43:18 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
if i == 0 {
|
|
|
|
// operation must have added new tree data
|
|
|
|
if stats.DataSize != 0 {
|
|
|
|
t.Errorf("wrong stats returned in DataSize, want 0, got %d", stats.DataSize)
|
|
|
|
}
|
|
|
|
if stats.DataBlobs != 0 {
|
|
|
|
t.Errorf("wrong stats returned in DataBlobs, want 0, got %d", stats.DataBlobs)
|
|
|
|
}
|
2020-03-07 20:48:59 +00:00
|
|
|
if stats.TreeSize == 0 {
|
2018-03-30 20:43:18 +00:00
|
|
|
t.Errorf("wrong stats returned in TreeSize, want > 0, got %d", stats.TreeSize)
|
|
|
|
}
|
|
|
|
if stats.TreeBlobs <= 0 {
|
|
|
|
t.Errorf("wrong stats returned in TreeBlobs, want > 0, got %d", stats.TreeBlobs)
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
// operation must not have added any new data
|
|
|
|
if stats.DataSize != 0 {
|
|
|
|
t.Errorf("wrong stats returned in DataSize, want 0, got %d", stats.DataSize)
|
|
|
|
}
|
|
|
|
if stats.DataBlobs != 0 {
|
|
|
|
t.Errorf("wrong stats returned in DataBlobs, want 0, got %d", stats.DataBlobs)
|
|
|
|
}
|
|
|
|
if stats.TreeSize != 0 {
|
|
|
|
t.Errorf("wrong stats returned in TreeSize, want 0, got %d", stats.TreeSize)
|
|
|
|
}
|
|
|
|
if stats.TreeBlobs != 0 {
|
|
|
|
t.Errorf("wrong stats returned in TreeBlobs, want 0, got %d", stats.TreeBlobs)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
t.Logf("node subtree %v", node.Subtree)
|
|
|
|
|
2020-11-08 07:24:24 +00:00
|
|
|
err = repo.Flush(context.Background())
|
2018-03-30 20:43:18 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
for h, n := range repo.saved {
|
|
|
|
if n > 1 {
|
|
|
|
t.Errorf("iteration %v: blob %v saved more than once (%d times)", i, h, n)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-04-22 20:23:02 +00:00
|
|
|
// bothZeroOrNeither fails the test if only one of exp, act is zero.
|
|
|
|
func bothZeroOrNeither(tb testing.TB, exp, act uint64) {
|
|
|
|
if (exp == 0 && act != 0) || (exp != 0 && act == 0) {
|
|
|
|
_, file, line, _ := runtime.Caller(1)
|
|
|
|
tb.Fatalf("\033[31m%s:%d:\n\n\texp: %#v\n\n\tgot: %#v\033[39m\n\n", filepath.Base(file), line, exp, act)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-03-30 20:43:18 +00:00
|
|
|
func TestArchiverSaveTree(t *testing.T) {
|
|
|
|
symlink := func(from, to string) func(t testing.TB) {
|
|
|
|
return func(t testing.TB) {
|
|
|
|
err := os.Symlink(from, to)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-04-22 20:23:02 +00:00
|
|
|
// The toplevel directory is not counted in the ItemStats
|
2018-03-30 20:43:18 +00:00
|
|
|
var tests = []struct {
|
|
|
|
src TestDir
|
|
|
|
prepare func(t testing.TB)
|
|
|
|
targets []string
|
|
|
|
want TestDir
|
2020-04-22 20:23:02 +00:00
|
|
|
stat ItemStats
|
2018-03-30 20:43:18 +00:00
|
|
|
}{
|
|
|
|
{
|
|
|
|
src: TestDir{
|
|
|
|
"targetfile": TestFile{Content: string("foobar")},
|
|
|
|
},
|
|
|
|
targets: []string{"targetfile"},
|
|
|
|
want: TestDir{
|
|
|
|
"targetfile": TestFile{Content: string("foobar")},
|
|
|
|
},
|
2020-04-22 20:23:02 +00:00
|
|
|
stat: ItemStats{1, 6, 0, 0},
|
2018-03-30 20:43:18 +00:00
|
|
|
},
|
|
|
|
{
|
|
|
|
src: TestDir{
|
|
|
|
"targetfile": TestFile{Content: string("foobar")},
|
|
|
|
},
|
|
|
|
prepare: symlink("targetfile", "filesymlink"),
|
|
|
|
targets: []string{"targetfile", "filesymlink"},
|
|
|
|
want: TestDir{
|
|
|
|
"targetfile": TestFile{Content: string("foobar")},
|
|
|
|
"filesymlink": TestSymlink{Target: "targetfile"},
|
|
|
|
},
|
2020-04-22 20:23:02 +00:00
|
|
|
stat: ItemStats{1, 6, 0, 0},
|
2018-03-30 20:43:18 +00:00
|
|
|
},
|
|
|
|
{
|
|
|
|
src: TestDir{
|
|
|
|
"dir": TestDir{
|
|
|
|
"subdir": TestDir{
|
|
|
|
"subsubdir": TestDir{
|
|
|
|
"targetfile": TestFile{Content: string("foobar")},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
"otherfile": TestFile{Content: string("xxx")},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
prepare: symlink("subdir", filepath.FromSlash("dir/symlink")),
|
|
|
|
targets: []string{filepath.FromSlash("dir/symlink")},
|
|
|
|
want: TestDir{
|
|
|
|
"dir": TestDir{
|
|
|
|
"symlink": TestSymlink{Target: "subdir"},
|
|
|
|
},
|
|
|
|
},
|
2020-04-22 20:23:02 +00:00
|
|
|
stat: ItemStats{0, 0, 1, 0x154},
|
2018-03-30 20:43:18 +00:00
|
|
|
},
|
|
|
|
{
|
|
|
|
src: TestDir{
|
|
|
|
"dir": TestDir{
|
|
|
|
"subdir": TestDir{
|
|
|
|
"subsubdir": TestDir{
|
|
|
|
"targetfile": TestFile{Content: string("foobar")},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
"otherfile": TestFile{Content: string("xxx")},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
prepare: symlink("subdir", filepath.FromSlash("dir/symlink")),
|
|
|
|
targets: []string{filepath.FromSlash("dir/symlink/subsubdir")},
|
|
|
|
want: TestDir{
|
|
|
|
"dir": TestDir{
|
|
|
|
"symlink": TestDir{
|
|
|
|
"subsubdir": TestDir{
|
|
|
|
"targetfile": TestFile{Content: string("foobar")},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
2020-04-22 20:23:02 +00:00
|
|
|
stat: ItemStats{1, 6, 3, 0x47f},
|
2018-03-30 20:43:18 +00:00
|
|
|
},
|
|
|
|
}
|
|
|
|
|
|
|
|
for _, test := range tests {
|
|
|
|
t.Run("", func(t *testing.T) {
|
2018-05-08 20:28:37 +00:00
|
|
|
var tmb tomb.Tomb
|
|
|
|
ctx := tmb.Context(context.Background())
|
2018-03-30 20:43:18 +00:00
|
|
|
|
|
|
|
tempdir, repo, cleanup := prepareTempdirRepoSrc(t, test.src)
|
|
|
|
defer cleanup()
|
|
|
|
|
2018-09-05 12:04:55 +00:00
|
|
|
testFS := fs.Track{FS: fs.Local{}}
|
2018-03-30 20:43:18 +00:00
|
|
|
|
|
|
|
arch := New(repo, testFS, Options{})
|
2020-04-22 20:23:02 +00:00
|
|
|
|
|
|
|
var stat ItemStats
|
|
|
|
lock := &sync.Mutex{}
|
|
|
|
arch.CompleteItem = func(item string, previous, current *restic.Node, s ItemStats, d time.Duration) {
|
|
|
|
lock.Lock()
|
|
|
|
defer lock.Unlock()
|
|
|
|
stat.Add(s)
|
|
|
|
}
|
|
|
|
|
2018-05-08 20:28:37 +00:00
|
|
|
arch.runWorkers(ctx, &tmb)
|
2018-03-30 20:43:18 +00:00
|
|
|
|
2020-02-17 12:24:09 +00:00
|
|
|
back := restictest.Chdir(t, tempdir)
|
2018-03-30 20:43:18 +00:00
|
|
|
defer back()
|
|
|
|
|
|
|
|
if test.prepare != nil {
|
|
|
|
test.prepare(t)
|
|
|
|
}
|
|
|
|
|
|
|
|
atree, err := NewTree(testFS, test.targets)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
tree, err := arch.SaveTree(ctx, "/", atree, nil)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
treeID, err := repo.SaveTree(ctx, tree)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
|
2018-05-08 20:28:37 +00:00
|
|
|
tmb.Kill(nil)
|
|
|
|
err = tmb.Wait()
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
|
2020-11-08 07:24:24 +00:00
|
|
|
ctx = context.Background()
|
2018-03-30 20:43:18 +00:00
|
|
|
err = repo.Flush(ctx)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
want := test.want
|
|
|
|
if want == nil {
|
|
|
|
want = test.src
|
|
|
|
}
|
|
|
|
TestEnsureTree(ctx, t, "/", repo, treeID, want)
|
2020-04-22 20:23:02 +00:00
|
|
|
bothZeroOrNeither(t, uint64(test.stat.DataBlobs), uint64(stat.DataBlobs))
|
|
|
|
bothZeroOrNeither(t, uint64(test.stat.TreeBlobs), uint64(stat.TreeBlobs))
|
|
|
|
bothZeroOrNeither(t, test.stat.DataSize, stat.DataSize)
|
|
|
|
bothZeroOrNeither(t, test.stat.TreeSize, stat.TreeSize)
|
2018-03-30 20:43:18 +00:00
|
|
|
})
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func TestArchiverSnapshot(t *testing.T) {
|
|
|
|
var tests = []struct {
|
|
|
|
name string
|
|
|
|
src TestDir
|
|
|
|
want TestDir
|
|
|
|
chdir string
|
|
|
|
targets []string
|
|
|
|
}{
|
|
|
|
{
|
|
|
|
name: "single-file",
|
|
|
|
src: TestDir{
|
|
|
|
"foo": TestFile{Content: "foo"},
|
|
|
|
},
|
|
|
|
targets: []string{"foo"},
|
|
|
|
},
|
|
|
|
{
|
|
|
|
name: "file-current-dir",
|
|
|
|
src: TestDir{
|
|
|
|
"foo": TestFile{Content: "foo"},
|
|
|
|
},
|
|
|
|
targets: []string{"./foo"},
|
|
|
|
},
|
|
|
|
{
|
|
|
|
name: "dir",
|
|
|
|
src: TestDir{
|
|
|
|
"target": TestDir{
|
|
|
|
"foo": TestFile{Content: "foo"},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
targets: []string{"target"},
|
|
|
|
},
|
|
|
|
{
|
|
|
|
name: "dir-current-dir",
|
|
|
|
src: TestDir{
|
|
|
|
"target": TestDir{
|
|
|
|
"foo": TestFile{Content: "foo"},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
targets: []string{"./target"},
|
|
|
|
},
|
|
|
|
{
|
|
|
|
name: "content-dir-current-dir",
|
|
|
|
src: TestDir{
|
|
|
|
"target": TestDir{
|
|
|
|
"foo": TestFile{Content: "foo"},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
targets: []string{"./target/."},
|
|
|
|
},
|
|
|
|
{
|
|
|
|
name: "current-dir",
|
|
|
|
src: TestDir{
|
|
|
|
"target": TestDir{
|
|
|
|
"foo": TestFile{Content: "foo"},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
targets: []string{"."},
|
|
|
|
},
|
|
|
|
{
|
|
|
|
name: "subdir",
|
|
|
|
src: TestDir{
|
|
|
|
"subdir": TestDir{
|
|
|
|
"foo": TestFile{Content: "foo"},
|
|
|
|
"subsubdir": TestDir{
|
|
|
|
"foo": TestFile{Content: "foo in subsubdir"},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
"other": TestFile{Content: "another file"},
|
|
|
|
},
|
|
|
|
targets: []string{"subdir"},
|
|
|
|
want: TestDir{
|
|
|
|
"subdir": TestDir{
|
|
|
|
"foo": TestFile{Content: "foo"},
|
|
|
|
"subsubdir": TestDir{
|
|
|
|
"foo": TestFile{Content: "foo in subsubdir"},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
{
|
|
|
|
name: "subsubdir",
|
|
|
|
src: TestDir{
|
|
|
|
"subdir": TestDir{
|
|
|
|
"foo": TestFile{Content: "foo"},
|
|
|
|
"subsubdir": TestDir{
|
|
|
|
"foo": TestFile{Content: "foo in subsubdir"},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
"other": TestFile{Content: "another file"},
|
|
|
|
},
|
|
|
|
targets: []string{"subdir/subsubdir"},
|
|
|
|
want: TestDir{
|
|
|
|
"subdir": TestDir{
|
|
|
|
"subsubdir": TestDir{
|
|
|
|
"foo": TestFile{Content: "foo in subsubdir"},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
{
|
|
|
|
name: "parent-dir",
|
|
|
|
src: TestDir{
|
|
|
|
"subdir": TestDir{
|
|
|
|
"foo": TestFile{Content: "foo"},
|
|
|
|
},
|
|
|
|
"other": TestFile{Content: "another file"},
|
|
|
|
},
|
|
|
|
chdir: "subdir",
|
|
|
|
targets: []string{".."},
|
|
|
|
},
|
|
|
|
{
|
|
|
|
name: "parent-parent-dir",
|
|
|
|
src: TestDir{
|
|
|
|
"subdir": TestDir{
|
|
|
|
"foo": TestFile{Content: "foo"},
|
|
|
|
"subsubdir": TestDir{
|
|
|
|
"empty": TestFile{Content: ""},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
"other": TestFile{Content: "another file"},
|
|
|
|
},
|
|
|
|
chdir: "subdir/subsubdir",
|
|
|
|
targets: []string{"../.."},
|
|
|
|
},
|
|
|
|
{
|
|
|
|
name: "parent-parent-dir-slash",
|
|
|
|
src: TestDir{
|
|
|
|
"subdir": TestDir{
|
|
|
|
"subsubdir": TestDir{
|
|
|
|
"foo": TestFile{Content: "foo"},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
"other": TestFile{Content: "another file"},
|
|
|
|
},
|
|
|
|
chdir: "subdir/subsubdir",
|
|
|
|
targets: []string{"../../"},
|
|
|
|
want: TestDir{
|
|
|
|
"subdir": TestDir{
|
|
|
|
"subsubdir": TestDir{
|
|
|
|
"foo": TestFile{Content: "foo"},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
"other": TestFile{Content: "another file"},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
{
|
|
|
|
name: "parent-subdir",
|
|
|
|
src: TestDir{
|
|
|
|
"subdir": TestDir{
|
|
|
|
"foo": TestFile{Content: "foo"},
|
|
|
|
},
|
|
|
|
"other": TestFile{Content: "another file"},
|
|
|
|
},
|
|
|
|
chdir: "subdir",
|
|
|
|
targets: []string{"../subdir"},
|
|
|
|
want: TestDir{
|
|
|
|
"subdir": TestDir{
|
|
|
|
"foo": TestFile{Content: "foo"},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
{
|
|
|
|
name: "parent-parent-dir-subdir",
|
|
|
|
src: TestDir{
|
|
|
|
"subdir": TestDir{
|
|
|
|
"subsubdir": TestDir{
|
|
|
|
"foo": TestFile{Content: "foo"},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
"other": TestFile{Content: "another file"},
|
|
|
|
},
|
|
|
|
chdir: "subdir/subsubdir",
|
|
|
|
targets: []string{"../../subdir/subsubdir"},
|
|
|
|
want: TestDir{
|
|
|
|
"subdir": TestDir{
|
|
|
|
"subsubdir": TestDir{
|
|
|
|
"foo": TestFile{Content: "foo"},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
{
|
|
|
|
name: "included-multiple1",
|
|
|
|
src: TestDir{
|
|
|
|
"subdir": TestDir{
|
|
|
|
"subsubdir": TestDir{
|
|
|
|
"foo": TestFile{Content: "foo"},
|
|
|
|
},
|
|
|
|
"other": TestFile{Content: "another file"},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
targets: []string{"subdir", "subdir/subsubdir"},
|
|
|
|
},
|
|
|
|
{
|
|
|
|
name: "included-multiple2",
|
|
|
|
src: TestDir{
|
|
|
|
"subdir": TestDir{
|
|
|
|
"subsubdir": TestDir{
|
|
|
|
"foo": TestFile{Content: "foo"},
|
|
|
|
},
|
|
|
|
"other": TestFile{Content: "another file"},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
targets: []string{"subdir/subsubdir", "subdir"},
|
|
|
|
},
|
|
|
|
{
|
|
|
|
name: "collision",
|
|
|
|
src: TestDir{
|
|
|
|
"subdir": TestDir{
|
|
|
|
"foo": TestFile{Content: "foo in subdir"},
|
|
|
|
"subsubdir": TestDir{
|
|
|
|
"foo": TestFile{Content: "foo in subsubdir"},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
"foo": TestFile{Content: "another file"},
|
|
|
|
},
|
|
|
|
chdir: "subdir",
|
|
|
|
targets: []string{".", "../foo"},
|
|
|
|
want: TestDir{
|
|
|
|
|
|
|
|
"foo": TestFile{Content: "foo in subdir"},
|
|
|
|
"subsubdir": TestDir{
|
|
|
|
"foo": TestFile{Content: "foo in subsubdir"},
|
|
|
|
},
|
|
|
|
"foo-1": TestFile{Content: "another file"},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
}
|
|
|
|
|
|
|
|
for _, test := range tests {
|
|
|
|
t.Run(test.name, func(t *testing.T) {
|
|
|
|
ctx, cancel := context.WithCancel(context.Background())
|
|
|
|
defer cancel()
|
|
|
|
|
|
|
|
tempdir, repo, cleanup := prepareTempdirRepoSrc(t, test.src)
|
|
|
|
defer cleanup()
|
|
|
|
|
2018-09-05 12:04:55 +00:00
|
|
|
arch := New(repo, fs.Track{FS: fs.Local{}}, Options{})
|
2018-03-30 20:43:18 +00:00
|
|
|
|
|
|
|
chdir := tempdir
|
|
|
|
if test.chdir != "" {
|
|
|
|
chdir = filepath.Join(chdir, filepath.FromSlash(test.chdir))
|
|
|
|
}
|
|
|
|
|
2020-02-17 12:24:09 +00:00
|
|
|
back := restictest.Chdir(t, chdir)
|
2018-03-30 20:43:18 +00:00
|
|
|
defer back()
|
|
|
|
|
|
|
|
var targets []string
|
|
|
|
for _, target := range test.targets {
|
|
|
|
targets = append(targets, os.ExpandEnv(target))
|
|
|
|
}
|
|
|
|
|
|
|
|
t.Logf("targets: %v", targets)
|
|
|
|
sn, snapshotID, err := arch.Snapshot(ctx, targets, SnapshotOptions{Time: time.Now()})
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
t.Logf("saved as %v", snapshotID.Str())
|
|
|
|
|
|
|
|
want := test.want
|
|
|
|
if want == nil {
|
|
|
|
want = test.src
|
|
|
|
}
|
|
|
|
TestEnsureSnapshot(t, repo, snapshotID, want)
|
|
|
|
|
|
|
|
checker.TestCheckRepo(t, repo)
|
|
|
|
|
|
|
|
// check that the snapshot contains the targets with absolute paths
|
|
|
|
for i, target := range sn.Paths {
|
|
|
|
atarget, err := filepath.Abs(test.targets[i])
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
if target != atarget {
|
|
|
|
t.Errorf("wrong path in snapshot: want %v, got %v", atarget, target)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
})
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func TestArchiverSnapshotSelect(t *testing.T) {
|
|
|
|
var tests = []struct {
|
|
|
|
name string
|
|
|
|
src TestDir
|
|
|
|
want TestDir
|
|
|
|
selFn SelectFunc
|
2018-05-20 13:58:55 +00:00
|
|
|
err string
|
2018-03-30 20:43:18 +00:00
|
|
|
}{
|
|
|
|
{
|
|
|
|
name: "include-all",
|
|
|
|
src: TestDir{
|
|
|
|
"work": TestDir{
|
|
|
|
"foo": TestFile{Content: "foo"},
|
|
|
|
"foo.txt": TestFile{Content: "foo text file"},
|
|
|
|
"subdir": TestDir{
|
|
|
|
"other": TestFile{Content: "other in subdir"},
|
|
|
|
"bar.txt": TestFile{Content: "bar.txt in subdir"},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
"other": TestFile{Content: "another file"},
|
|
|
|
},
|
|
|
|
selFn: func(item string, fi os.FileInfo) bool {
|
|
|
|
return true
|
|
|
|
},
|
|
|
|
},
|
|
|
|
{
|
|
|
|
name: "exclude-all",
|
|
|
|
src: TestDir{
|
|
|
|
"work": TestDir{
|
|
|
|
"foo": TestFile{Content: "foo"},
|
|
|
|
"foo.txt": TestFile{Content: "foo text file"},
|
|
|
|
"subdir": TestDir{
|
|
|
|
"other": TestFile{Content: "other in subdir"},
|
|
|
|
"bar.txt": TestFile{Content: "bar.txt in subdir"},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
"other": TestFile{Content: "another file"},
|
|
|
|
},
|
|
|
|
selFn: func(item string, fi os.FileInfo) bool {
|
|
|
|
return false
|
|
|
|
},
|
2018-05-20 13:58:55 +00:00
|
|
|
err: "snapshot is empty",
|
2018-03-30 20:43:18 +00:00
|
|
|
},
|
|
|
|
{
|
|
|
|
name: "exclude-txt-files",
|
|
|
|
src: TestDir{
|
|
|
|
"work": TestDir{
|
|
|
|
"foo": TestFile{Content: "foo"},
|
|
|
|
"foo.txt": TestFile{Content: "foo text file"},
|
|
|
|
"subdir": TestDir{
|
|
|
|
"other": TestFile{Content: "other in subdir"},
|
|
|
|
"bar.txt": TestFile{Content: "bar.txt in subdir"},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
"other": TestFile{Content: "another file"},
|
|
|
|
},
|
|
|
|
want: TestDir{
|
|
|
|
"work": TestDir{
|
|
|
|
"foo": TestFile{Content: "foo"},
|
|
|
|
"subdir": TestDir{
|
|
|
|
"other": TestFile{Content: "other in subdir"},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
"other": TestFile{Content: "another file"},
|
|
|
|
},
|
|
|
|
selFn: func(item string, fi os.FileInfo) bool {
|
2020-03-06 22:35:09 +00:00
|
|
|
return filepath.Ext(item) != ".txt"
|
2018-03-30 20:43:18 +00:00
|
|
|
},
|
|
|
|
},
|
|
|
|
{
|
|
|
|
name: "exclude-dir",
|
|
|
|
src: TestDir{
|
|
|
|
"work": TestDir{
|
|
|
|
"foo": TestFile{Content: "foo"},
|
|
|
|
"foo.txt": TestFile{Content: "foo text file"},
|
|
|
|
"subdir": TestDir{
|
|
|
|
"other": TestFile{Content: "other in subdir"},
|
|
|
|
"bar.txt": TestFile{Content: "bar.txt in subdir"},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
"other": TestFile{Content: "another file"},
|
|
|
|
},
|
|
|
|
want: TestDir{
|
|
|
|
"work": TestDir{
|
|
|
|
"foo": TestFile{Content: "foo"},
|
|
|
|
"foo.txt": TestFile{Content: "foo text file"},
|
|
|
|
},
|
|
|
|
"other": TestFile{Content: "another file"},
|
|
|
|
},
|
|
|
|
selFn: func(item string, fi os.FileInfo) bool {
|
2020-03-06 22:35:09 +00:00
|
|
|
return filepath.Base(item) != "subdir"
|
2018-03-30 20:43:18 +00:00
|
|
|
},
|
|
|
|
},
|
|
|
|
{
|
|
|
|
name: "select-absolute-paths",
|
|
|
|
src: TestDir{
|
|
|
|
"foo": TestFile{Content: "foo"},
|
|
|
|
},
|
|
|
|
selFn: func(item string, fi os.FileInfo) bool {
|
|
|
|
return filepath.IsAbs(item)
|
|
|
|
},
|
|
|
|
},
|
|
|
|
}
|
|
|
|
|
|
|
|
for _, test := range tests {
|
|
|
|
t.Run(test.name, func(t *testing.T) {
|
|
|
|
ctx, cancel := context.WithCancel(context.Background())
|
|
|
|
defer cancel()
|
|
|
|
|
|
|
|
tempdir, repo, cleanup := prepareTempdirRepoSrc(t, test.src)
|
|
|
|
defer cleanup()
|
|
|
|
|
2018-09-05 12:04:55 +00:00
|
|
|
arch := New(repo, fs.Track{FS: fs.Local{}}, Options{})
|
2018-03-30 20:43:18 +00:00
|
|
|
arch.Select = test.selFn
|
|
|
|
|
2020-02-17 12:24:09 +00:00
|
|
|
back := restictest.Chdir(t, tempdir)
|
2018-03-30 20:43:18 +00:00
|
|
|
defer back()
|
|
|
|
|
|
|
|
targets := []string{"."}
|
|
|
|
_, snapshotID, err := arch.Snapshot(ctx, targets, SnapshotOptions{Time: time.Now()})
|
2018-05-20 13:58:55 +00:00
|
|
|
if test.err != "" {
|
|
|
|
if err == nil {
|
|
|
|
t.Fatalf("expected error not found, got %v, wanted %q", err, test.err)
|
|
|
|
}
|
|
|
|
|
|
|
|
if err.Error() != test.err {
|
|
|
|
t.Fatalf("unexpected error, want %q, got %q", test.err, err)
|
|
|
|
}
|
|
|
|
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2018-03-30 20:43:18 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
t.Logf("saved as %v", snapshotID.Str())
|
|
|
|
|
|
|
|
want := test.want
|
|
|
|
if want == nil {
|
|
|
|
want = test.src
|
|
|
|
}
|
|
|
|
TestEnsureSnapshot(t, repo, snapshotID, want)
|
|
|
|
|
|
|
|
checker.TestCheckRepo(t, repo)
|
|
|
|
})
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// MockFS keeps track which files are read.
|
|
|
|
type MockFS struct {
|
|
|
|
fs.FS
|
|
|
|
|
|
|
|
m sync.Mutex
|
|
|
|
bytesRead map[string]int // tracks bytes read from all opened files
|
|
|
|
}
|
|
|
|
|
|
|
|
func (m *MockFS) Open(name string) (fs.File, error) {
|
|
|
|
f, err := m.FS.Open(name)
|
|
|
|
if err != nil {
|
|
|
|
return f, err
|
|
|
|
}
|
|
|
|
|
|
|
|
return MockFile{File: f, fs: m, filename: name}, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func (m *MockFS) OpenFile(name string, flag int, perm os.FileMode) (fs.File, error) {
|
|
|
|
f, err := m.FS.OpenFile(name, flag, perm)
|
|
|
|
if err != nil {
|
|
|
|
return f, err
|
|
|
|
}
|
|
|
|
|
|
|
|
return MockFile{File: f, fs: m, filename: name}, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
type MockFile struct {
|
|
|
|
fs.File
|
|
|
|
filename string
|
|
|
|
|
|
|
|
fs *MockFS
|
|
|
|
}
|
|
|
|
|
|
|
|
func (f MockFile) Read(p []byte) (int, error) {
|
|
|
|
n, err := f.File.Read(p)
|
|
|
|
if n > 0 {
|
|
|
|
f.fs.m.Lock()
|
|
|
|
f.fs.bytesRead[f.filename] += n
|
|
|
|
f.fs.m.Unlock()
|
|
|
|
}
|
|
|
|
return n, err
|
|
|
|
}
|
|
|
|
|
|
|
|
func TestArchiverParent(t *testing.T) {
|
|
|
|
var tests = []struct {
|
|
|
|
src TestDir
|
|
|
|
read map[string]int // tracks number of times a file must have been read
|
|
|
|
}{
|
|
|
|
{
|
|
|
|
src: TestDir{
|
|
|
|
"targetfile": TestFile{Content: string(restictest.Random(888, 2*1024*1024+5000))},
|
|
|
|
},
|
|
|
|
read: map[string]int{
|
|
|
|
"targetfile": 1,
|
|
|
|
},
|
|
|
|
},
|
|
|
|
}
|
|
|
|
|
|
|
|
for _, test := range tests {
|
|
|
|
t.Run("", func(t *testing.T) {
|
|
|
|
ctx, cancel := context.WithCancel(context.Background())
|
|
|
|
defer cancel()
|
|
|
|
|
|
|
|
tempdir, repo, cleanup := prepareTempdirRepoSrc(t, test.src)
|
|
|
|
defer cleanup()
|
|
|
|
|
|
|
|
testFS := &MockFS{
|
2018-09-05 12:04:55 +00:00
|
|
|
FS: fs.Track{FS: fs.Local{}},
|
2018-03-30 20:43:18 +00:00
|
|
|
bytesRead: make(map[string]int),
|
|
|
|
}
|
|
|
|
|
|
|
|
arch := New(repo, testFS, Options{})
|
|
|
|
|
2020-02-17 12:24:09 +00:00
|
|
|
back := restictest.Chdir(t, tempdir)
|
2018-03-30 20:43:18 +00:00
|
|
|
defer back()
|
|
|
|
|
|
|
|
_, firstSnapshotID, err := arch.Snapshot(ctx, []string{"."}, SnapshotOptions{Time: time.Now()})
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
t.Logf("first backup saved as %v", firstSnapshotID.Str())
|
|
|
|
t.Logf("testfs: %v", testFS)
|
|
|
|
|
|
|
|
// check that all files have been read exactly once
|
|
|
|
TestWalkFiles(t, ".", test.src, func(filename string, item interface{}) error {
|
|
|
|
file, ok := item.(TestFile)
|
|
|
|
if !ok {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
n, ok := testFS.bytesRead[filename]
|
|
|
|
if !ok {
|
|
|
|
t.Fatalf("file %v was not read at all", filename)
|
|
|
|
}
|
|
|
|
|
|
|
|
if n != len(file.Content) {
|
|
|
|
t.Fatalf("file %v: read %v bytes, wanted %v bytes", filename, n, len(file.Content))
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
})
|
|
|
|
|
|
|
|
opts := SnapshotOptions{
|
|
|
|
Time: time.Now(),
|
|
|
|
ParentSnapshot: firstSnapshotID,
|
|
|
|
}
|
|
|
|
_, secondSnapshotID, err := arch.Snapshot(ctx, []string{"."}, opts)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// check that all files still been read exactly once
|
|
|
|
TestWalkFiles(t, ".", test.src, func(filename string, item interface{}) error {
|
|
|
|
file, ok := item.(TestFile)
|
|
|
|
if !ok {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
n, ok := testFS.bytesRead[filename]
|
|
|
|
if !ok {
|
|
|
|
t.Fatalf("file %v was not read at all", filename)
|
|
|
|
}
|
|
|
|
|
|
|
|
if n != len(file.Content) {
|
|
|
|
t.Fatalf("file %v: read %v bytes, wanted %v bytes", filename, n, len(file.Content))
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
})
|
|
|
|
|
|
|
|
t.Logf("second backup saved as %v", secondSnapshotID.Str())
|
|
|
|
t.Logf("testfs: %v", testFS)
|
|
|
|
|
|
|
|
checker.TestCheckRepo(t, repo)
|
|
|
|
})
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func TestArchiverErrorReporting(t *testing.T) {
|
|
|
|
ignoreErrorForBasename := func(basename string) ErrorFunc {
|
|
|
|
return func(item string, fi os.FileInfo, err error) error {
|
|
|
|
if filepath.Base(item) == "targetfile" {
|
|
|
|
t.Logf("ignoring error for targetfile: %v", err)
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
t.Errorf("error handler called for unexpected file %v: %v", item, err)
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
}

	chmodUnreadable := func(filename string) func(testing.TB) {
		return func(t testing.TB) {
			if runtime.GOOS == "windows" {
				t.Skip("Skipping this test for windows")
			}

			err := os.Chmod(filepath.FromSlash(filename), 0004)
			if err != nil {
				t.Fatal(err)
			}
		}
	}

	var tests = []struct {
		name      string
		src       TestDir
		want      TestDir
		prepare   func(t testing.TB)
		errFn     ErrorFunc
		mustError bool
	}{
		{
			name: "no-error",
			src: TestDir{
				"targetfile": TestFile{Content: "foobar"},
			},
		},
		{
			name: "file-unreadable",
			src: TestDir{
				"targetfile": TestFile{Content: "foobar"},
			},
			prepare:   chmodUnreadable("targetfile"),
			mustError: true,
		},
		{
			name: "file-unreadable-ignore-error",
			src: TestDir{
				"targetfile": TestFile{Content: "foobar"},
				"other":      TestFile{Content: "xxx"},
			},
			want: TestDir{
				"other": TestFile{Content: "xxx"},
			},
			prepare: chmodUnreadable("targetfile"),
			errFn:   ignoreErrorForBasename("targetfile"),
		},
		{
			name: "file-subdir-unreadable",
			src: TestDir{
				"subdir": TestDir{
					"targetfile": TestFile{Content: "foobar"},
				},
			},
			prepare:   chmodUnreadable("subdir/targetfile"),
			mustError: true,
		},
		{
			name: "file-subdir-unreadable-ignore-error",
			src: TestDir{
				"subdir": TestDir{
					"targetfile": TestFile{Content: "foobar"},
					"other":      TestFile{Content: "xxx"},
				},
			},
			want: TestDir{
				"subdir": TestDir{
					"other": TestFile{Content: "xxx"},
				},
			},
			prepare: chmodUnreadable("subdir/targetfile"),
			errFn:   ignoreErrorForBasename("targetfile"),
		},
	}

	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			ctx, cancel := context.WithCancel(context.Background())
			defer cancel()

			tempdir, repo, cleanup := prepareTempdirRepoSrc(t, test.src)
			defer cleanup()

			back := restictest.Chdir(t, tempdir)
			defer back()

			if test.prepare != nil {
				test.prepare(t)
			}

			arch := New(repo, fs.Track{FS: fs.Local{}}, Options{})
			arch.Error = test.errFn

			_, snapshotID, err := arch.Snapshot(ctx, []string{"."}, SnapshotOptions{Time: time.Now()})
			if test.mustError {
				if err != nil {
					t.Logf("found expected error (%v), skipping further checks", err)
					return
				}

				t.Fatalf("expected error not returned by archiver")
				return
			}

			if err != nil {
				t.Fatalf("unexpected error of type %T found: %v", err, err)
			}

			t.Logf("saved as %v", snapshotID.Str())

			want := test.want
			if want == nil {
				want = test.src
			}
			TestEnsureSnapshot(t, repo, snapshotID, want)

			checker.TestCheckRepo(t, repo)
		})
	}
}

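// noCancelBackend wraps a restic.Backend and forwards every call with a fresh
// context.Background(), so that a canceled context never reaches the backend.
// This makes sure the test below exercises the archiver's own cancellation
// handling rather than the backend's.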
type noCancelBackend struct {
	restic.Backend
}

func (c *noCancelBackend) Test(ctx context.Context, h restic.Handle) (bool, error) {
	return c.Backend.Test(context.Background(), h)
}

func (c *noCancelBackend) Remove(ctx context.Context, h restic.Handle) error {
	return c.Backend.Remove(context.Background(), h)
}

func (c *noCancelBackend) Save(ctx context.Context, h restic.Handle, rd restic.RewindReader) error {
	return c.Backend.Save(context.Background(), h, rd)
}

func (c *noCancelBackend) Load(ctx context.Context, h restic.Handle, length int, offset int64, fn func(rd io.Reader) error) error {
	return c.Backend.Load(context.Background(), h, length, offset, fn)
}

func (c *noCancelBackend) Stat(ctx context.Context, h restic.Handle) (restic.FileInfo, error) {
	return c.Backend.Stat(context.Background(), h)
}

func (c *noCancelBackend) List(ctx context.Context, t restic.FileType, fn func(restic.FileInfo) error) error {
	return c.Backend.List(context.Background(), t, fn)
}

func (c *noCancelBackend) Delete(ctx context.Context) error {
	return c.Backend.Delete(context.Background())
}

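// TestArchiverContextCanceled runs Snapshot with an already canceled context
// and expects the archiver itself to report an error.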
func TestArchiverContextCanceled(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	cancel()

	tempdir, removeTempdir := restictest.TempDir(t)
	TestCreateFiles(t, tempdir, TestDir{
		"targetfile": TestFile{Content: "foobar"},
	})
	defer removeTempdir()

	// Ensure that the archiver itself reports the canceled context and not just the backend
	repo, _ := repository.TestRepositoryWithBackend(t, &noCancelBackend{mem.New()})

	back := restictest.Chdir(t, tempdir)
	defer back()

	arch := New(repo, fs.Track{FS: fs.Local{}}, Options{})

	_, snapshotID, err := arch.Snapshot(ctx, []string{"."}, SnapshotOptions{Time: time.Now()})

	if err != nil {
		t.Logf("found expected error (%v)", err)
		return
	}
	if snapshotID.IsNull() {
		t.Fatalf("no error returned but found null id")
	}

	t.Fatalf("expected error not returned by archiver")
}

// TrackFS keeps track of which files are opened. For some files, an error is injected.
type TrackFS struct {
	fs.FS

	errorOn map[string]error

	opened map[string]uint
	m      sync.Mutex
}

func (m *TrackFS) Open(name string) (fs.File, error) {
	m.m.Lock()
	m.opened[name]++
	m.m.Unlock()

	return m.FS.Open(name)
}

func (m *TrackFS) OpenFile(name string, flag int, perm os.FileMode) (fs.File, error) {
	m.m.Lock()
	m.opened[name]++
	m.m.Unlock()

	return m.FS.OpenFile(name, flag, perm)
}

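// failSaveRepo wraps a restic.Repository so that SaveBlob returns err from
// the failAfter-th call onwards.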
type failSaveRepo struct {
	restic.Repository
	failAfter int32
	cnt       int32
	err       error
}

func (f *failSaveRepo) SaveBlob(ctx context.Context, t restic.BlobType, buf []byte, id restic.ID, storeDuplicate bool) (restic.ID, bool, error) {
	val := atomic.AddInt32(&f.cnt, 1)
	if val >= f.failAfter {
		return restic.ID{}, false, f.err
	}

	return f.Repository.SaveBlob(ctx, t, buf, id, storeDuplicate)
}

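// TestArchiverAbortEarlyOnError lets SaveBlob fail after a number of blobs
// have been saved and uses TrackFS to verify that the archiver stops opening
// further files soon after the error occurs.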
func TestArchiverAbortEarlyOnError(t *testing.T) {
	var testErr = errors.New("test error")

	var tests = []struct {
		src       TestDir
		wantOpen  map[string]uint
		failAfter uint // error after so many blobs have been saved to the repo
		err       error
	}{
		{
			src: TestDir{
				"dir": TestDir{
					"bar": TestFile{Content: "foobar"},
					"baz": TestFile{Content: "foobar"},
					"foo": TestFile{Content: "foobar"},
				},
			},
			wantOpen: map[string]uint{
				filepath.FromSlash("dir/bar"): 1,
				filepath.FromSlash("dir/baz"): 1,
				filepath.FromSlash("dir/foo"): 1,
			},
		},
		{
			src: TestDir{
				"dir": TestDir{
					"file1": TestFile{Content: string(restictest.Random(1, 1024))},
					"file2": TestFile{Content: string(restictest.Random(2, 1024))},
					"file3": TestFile{Content: string(restictest.Random(3, 1024))},
					"file4": TestFile{Content: string(restictest.Random(4, 1024))},
					"file5": TestFile{Content: string(restictest.Random(5, 1024))},
					"file6": TestFile{Content: string(restictest.Random(6, 1024))},
					"file7": TestFile{Content: string(restictest.Random(7, 1024))},
					"file8": TestFile{Content: string(restictest.Random(8, 1024))},
					"file9": TestFile{Content: string(restictest.Random(9, 1024))},
				},
			},
			wantOpen: map[string]uint{
				filepath.FromSlash("dir/file1"): 1,
				filepath.FromSlash("dir/file2"): 1,
				filepath.FromSlash("dir/file3"): 1,
				filepath.FromSlash("dir/file4"): 1,
				filepath.FromSlash("dir/file7"): 0,
				filepath.FromSlash("dir/file8"): 0,
				filepath.FromSlash("dir/file9"): 0,
			},
			// fail after four blobs have been saved; four to six files may be
			// opened by then, as FileReadConcurrency allows for two queued files
			failAfter: 4,
			err:       testErr,
		},
	}

|
|
for _, test := range tests {
|
|
|
|
t.Run("", func(t *testing.T) {
|
|
|
|
ctx, cancel := context.WithCancel(context.Background())
|
|
|
|
defer cancel()
|
|
|
|
|
|
|
|
tempdir, repo, cleanup := prepareTempdirRepoSrc(t, test.src)
|
|
|
|
defer cleanup()
|
|
|
|
|
2020-02-17 12:24:09 +00:00
|
|
|
back := restictest.Chdir(t, tempdir)
|
2018-05-12 21:08:00 +00:00
|
|
|
defer back()
|
|
|
|
|
|
|
|
testFS := &TrackFS{
|
2018-09-05 12:04:55 +00:00
|
|
|
FS: fs.Track{FS: fs.Local{}},
|
2018-05-12 21:08:00 +00:00
|
|
|
opened: make(map[string]uint),
|
|
|
|
}
|
|
|
|
|
|
|
|
if testFS.errorOn == nil {
|
|
|
|
testFS.errorOn = make(map[string]error)
|
|
|
|
}
|
|
|
|
|
|
|
|
testRepo := &failSaveRepo{
|
|
|
|
Repository: repo,
|
|
|
|
failAfter: int32(test.failAfter),
|
|
|
|
err: test.err,
|
|
|
|
}
|
|
|
|
|
2020-02-14 22:16:13 +00:00
|
|
|
// at most two files may be queued
|
|
|
|
arch := New(testRepo, testFS, Options{
|
|
|
|
FileReadConcurrency: 2,
|
|
|
|
})
|
2018-05-12 21:08:00 +00:00
|
|
|
|
|
|
|
_, _, err := arch.Snapshot(ctx, []string{"."}, SnapshotOptions{Time: time.Now()})
|
|
|
|
if errors.Cause(err) != test.err {
|
|
|
|
t.Errorf("expected error (%v) not found, got %v", test.err, errors.Cause(err))
|
|
|
|
}
|
|
|
|
|
|
|
|
t.Logf("Snapshot return error: %v", err)
|
|
|
|
|
|
|
|
t.Logf("track fs: %v", testFS.opened)
|
|
|
|
|
|
|
|
for k, v := range test.wantOpen {
|
|
|
|
if testFS.opened[k] != v {
|
|
|
|
t.Errorf("opened %v %d times, want %d", k, testFS.opened[k], v)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
})
|
|
|
|
}
|
|
|
|
}
|
2019-04-23 20:23:35 +00:00
|
|
|
|
|
|
|
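// snapshot creates a snapshot of filename (with parent as the parent
// snapshot) and returns the snapshot ID together with the tree node for
// filename.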
func snapshot(t testing.TB, repo restic.Repository, fs fs.FS, parent restic.ID, filename string) (restic.ID, *restic.Node) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	arch := New(repo, fs, Options{})

	sopts := SnapshotOptions{
		Time:           time.Now(),
		ParentSnapshot: parent,
	}
	snapshot, snapshotID, err := arch.Snapshot(ctx, []string{filename}, sopts)
	if err != nil {
		t.Fatal(err)
	}

	tree, err := repo.LoadTree(ctx, *snapshot.Tree)
	if err != nil {
		t.Fatal(err)
	}

	node := tree.Find(filename)
	if node == nil {
		t.Fatalf("unable to find node for testfile in snapshot")
	}

	return snapshotID, node
}

// StatFS allows overwriting what is returned by the Lstat function.
type StatFS struct {
	fs.FS

	OverrideLstat    map[string]os.FileInfo
	OnlyOverrideStat bool
}

func (fs *StatFS) Lstat(name string) (os.FileInfo, error) {
	if !fs.OnlyOverrideStat {
		if fi, ok := fs.OverrideLstat[fixpath(name)]; ok {
			return fi, nil
		}
	}

	return fs.FS.Lstat(name)
}

func (fs *StatFS) OpenFile(name string, flags int, perm os.FileMode) (fs.File, error) {
	if fi, ok := fs.OverrideLstat[fixpath(name)]; ok {
		f, err := fs.FS.OpenFile(name, flags, perm)
		if err != nil {
			return nil, err
		}

		wrappedFile := fileStat{
			File: f,
			fi:   fi,
		}
		return wrappedFile, nil
	}

	return fs.FS.OpenFile(name, flags, perm)
}

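// fileStat wraps an fs.File and overrides the result of Stat with a fixed
// os.FileInfo.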
type fileStat struct {
	fs.File
	fi os.FileInfo
}

func (f fileStat) Stat() (os.FileInfo, error) {
	return f.fi, nil
}

// used by wrapFileInfo, use untyped const in order to avoid having a version
// of wrapFileInfo for each OS
const (
	mockFileInfoMode = 0400
	mockFileInfoUID  = 51234
	mockFileInfoGID  = 51235
)

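// TestMetadataChanged checks that a change in file metadata (mode, UID, GID)
// reported by Lstat is recorded in a subsequent snapshot even though the file
// content is unchanged.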
func TestMetadataChanged(t *testing.T) {
	files := TestDir{
		"testfile": TestFile{
			Content: "foo bar test file",
		},
	}

	tempdir, repo, cleanup := prepareTempdirRepoSrc(t, files)
	defer cleanup()

	back := restictest.Chdir(t, tempdir)
	defer back()

	// get metadata
	fi := lstat(t, "testfile")
	want, err := restic.NodeFromFileInfo("testfile", fi)
	if err != nil {
		t.Fatal(err)
	}

	fs := &StatFS{
		FS: fs.Local{},
		OverrideLstat: map[string]os.FileInfo{
			"testfile": fi,
		},
	}

	snapshotID, node2 := snapshot(t, repo, fs, restic.ID{}, "testfile")

	// set some values so we can then compare the nodes
	want.Content = node2.Content
	want.Path = ""
	if len(want.ExtendedAttributes) == 0 {
		want.ExtendedAttributes = nil
	}

	want.AccessTime = want.ModTime

	// make sure that metadata was recorded successfully
	if !cmp.Equal(want, node2) {
		t.Fatalf("metadata does not match:\n%v", cmp.Diff(want, node2))
	}

	// modify the mode by wrapping it in a new struct, uses the consts defined above
	fs.OverrideLstat["testfile"] = wrapFileInfo(t, fi)

	// set the override values in the 'want' node
	want.Mode = 0400
	// ignore UID and GID on Windows
	if runtime.GOOS != "windows" {
		want.UID = 51234
		want.GID = 51235
	}
	// no user and group name
	want.User = ""
	want.Group = ""

	// make another snapshot
	_, node3 := snapshot(t, repo, fs, snapshotID, "testfile")
	// Override username and group to empty string - in case underlying system has user with UID 51234
	// See https://github.com/restic/restic/issues/2372
	node3.User = ""
	node3.Group = ""

	// make sure that metadata was recorded successfully
	if !cmp.Equal(want, node3) {
		t.Fatalf("metadata does not match:\n%v", cmp.Diff(want, node3))
	}

	// make sure the content matches
	TestEnsureFileContent(context.Background(), t, repo, "testfile", node3, files["testfile"].(TestFile))

	checker.TestCheckRepo(t, repo)
}

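// TestRacyFileSwap simulates a file whose metadata changes between Lstat and
// the Stat call on the opened file handle (here the opened file suddenly
// reports the metadata of the containing directory). It verifies that Save
// fails, does not exclude the node, and closes the file handle (fs.Track
// panics otherwise).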
func TestRacyFileSwap(t *testing.T) {
	files := TestDir{
		"file": TestFile{
			Content: "foo bar test file",
		},
	}

	tempdir, repo, cleanup := prepareTempdirRepoSrc(t, files)
	defer cleanup()

	back := restictest.Chdir(t, tempdir)
	defer back()

	// get metadata of current folder
	fi := lstat(t, ".")
	tempfile := filepath.Join(tempdir, "file")

	statfs := &StatFS{
		FS: fs.Local{},
		OverrideLstat: map[string]os.FileInfo{
			tempfile: fi,
		},
		OnlyOverrideStat: true,
	}

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	var tmb tomb.Tomb

	arch := New(repo, fs.Track{FS: statfs}, Options{})
	arch.Error = func(item string, fi os.FileInfo, err error) error {
		t.Logf("archiver error as expected for %v: %v", item, err)
		return err
	}
	arch.runWorkers(tmb.Context(ctx), &tmb)

	// fs.Track will panic if the file was not closed
	_, excluded, err := arch.Save(ctx, "/", tempfile, nil)
	if err == nil {
		t.Errorf("Save() should have failed")
	}

	if excluded {
		t.Errorf("Save() excluded the node, that's unexpected")
	}
}