c206a101a3: There is no real difference between the FutureTree and FutureFile structs. However, keeping them separate increases the size of the FutureNode struct. The FutureNode struct is now only 16 bytes large on 64-bit platforms, so it has very low overhead if the corresponding file/directory has not been processed yet. There is a special case for nodes that were reused from the parent snapshot, as a Go channel appears to have about 96 bytes of overhead, which would otherwise result in a memory usage regression.
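A minimal standalone sketch of the size argument above, assuming a unified future type with one channel field and one result pointer (the type and field names here are illustrative, not restic's actual definitions):

package main

import (
	"fmt"
	"unsafe"
)

// nodeResult is a hypothetical stand-in for the value a worker produces.
type nodeResult struct{}

// futureNode sketches a unified future: either a channel to wait on (the
// file/directory is still being processed) or an already-known result (the
// node was reused from the parent snapshot). A channel header and a pointer
// are each 8 bytes on 64-bit platforms, so the struct stays at 16 bytes.
type futureNode struct {
	ch  <-chan nodeResult // nil once the result is known
	res *nodeResult       // set directly for reused nodes, avoiding a channel
}

func main() {
	// Prints 16 on amd64/arm64: 8-byte channel header + 8-byte pointer.
	fmt.Println(unsafe.Sizeof(futureNode{}))
	// Note: unsafe.Sizeof does not count the channel's backing runtime
	// object (~96 bytes per the note above), which is a separate heap
	// allocation. Storing the result pointer directly for reused nodes
	// avoids allocating a closed channel per node.
}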
package archiver

import (
	"context"
	"fmt"
	"io/ioutil"
	"path/filepath"
	"runtime"
	"testing"

	"github.com/restic/chunker"
	"github.com/restic/restic/internal/fs"
	"github.com/restic/restic/internal/restic"
	"github.com/restic/restic/internal/test"
	"golang.org/x/sync/errgroup"
)

// createTestFiles writes num small files into a temporary directory and
// returns their paths together with a cleanup function.
func createTestFiles(t testing.TB, num int) (files []string, cleanup func()) {
	tempdir, cleanup := test.TempDir(t)

	for i := 0; i < num; i++ {
		filename := fmt.Sprintf("testfile-%d", i)
		err := ioutil.WriteFile(filepath.Join(tempdir, filename), []byte(filename), 0600)
		if err != nil {
			t.Fatal(err)
		}
		files = append(files, filepath.Join(tempdir, filename))
	}

	return files, cleanup
}

// startFileSaver creates a FileSaver whose saveBlob callback resolves
// immediately without storing anything, so the test only exercises the
// file-reading and chunking path.
func startFileSaver(ctx context.Context, t testing.TB) (*FileSaver, context.Context, *errgroup.Group) {
	wg, ctx := errgroup.WithContext(ctx)

	saveBlob := func(ctx context.Context, tpe restic.BlobType, buf *Buffer) FutureBlob {
		// a closed channel yields the zero response right away
		ch := make(chan saveBlobResponse)
		close(ch)
		return FutureBlob{ch: ch}
	}

	workers := uint(runtime.NumCPU())
	pol, err := chunker.RandomPolynomial()
	if err != nil {
		t.Fatal(err)
	}

	s := NewFileSaver(ctx, wg, saveBlob, pol, workers, workers)
	s.NodeFromFileInfo = restic.NodeFromFileInfo

	return s, ctx, wg
}

func TestFileSaver(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	files, cleanup := createTestFiles(t, 15)
	defer cleanup()

	startFn := func() {}
	completeFn := func(*restic.Node, ItemStats) {}

	testFs := fs.Local{}
	s, ctx, wg := startFileSaver(ctx, t)

	var results []FutureNode

	// queue every test file for saving
	for _, filename := range files {
		f, err := testFs.Open(filename)
		if err != nil {
			t.Fatal(err)
		}

		fi, err := f.Stat()
		if err != nil {
			t.Fatal(err)
		}

		ff := s.Save(ctx, filename, filename, f, fi, startFn, completeFn)
		results = append(results, ff)
	}

	// wait for all files to be processed
	for _, file := range results {
		fnr := file.take(ctx)
		if fnr.err != nil {
			t.Errorf("unable to save file: %v", fnr.err)
		}
	}

	s.TriggerShutdown()

	err := wg.Wait()
	if err != nil {
		t.Fatal(err)
	}
}