Mirror of https://github.com/octoleo/restic.git (synced 2024-11-30 00:33:57 +00:00)

tests: Remove more flags

This commit is contained in:
Alexander Neumann 2015-06-28 13:15:35 +02:00
parent 189a33730a
commit 26e4d2e019
8 changed files with 52 additions and 87 deletions

View File

@ -3,7 +3,6 @@ package restic_test
import ( import (
"bytes" "bytes"
"crypto/sha256" "crypto/sha256"
"flag"
"io" "io"
"testing" "testing"
@ -15,7 +14,6 @@ import (
. "github.com/restic/restic/test" . "github.com/restic/restic/test"
) )
var benchArchiveDirectory = flag.String("test.benchdir", ".", "benchmark archiving a real directory (default: .)")
var testPol = chunker.Pol(0x3DA3358B4DC173) var testPol = chunker.Pol(0x3DA3358B4DC173)
type Rdr interface { type Rdr interface {
@ -106,14 +104,14 @@ func archiveDirectory(b testing.TB) {
arch := restic.NewArchiver(repo) arch := restic.NewArchiver(repo)
_, id, err := arch.Snapshot(nil, []string{*benchArchiveDirectory}, nil) _, id, err := arch.Snapshot(nil, []string{BenchArchiveDirectory}, nil)
OK(b, err) OK(b, err)
b.Logf("snapshot archived as %v", id) b.Logf("snapshot archived as %v", id)
} }
func TestArchiveDirectory(t *testing.T) { func TestArchiveDirectory(t *testing.T) {
if *benchArchiveDirectory == "" { if BenchArchiveDirectory == "" {
t.Skip("benchdir not set, skipping TestArchiveDirectory") t.Skip("benchdir not set, skipping TestArchiveDirectory")
} }
@ -121,7 +119,7 @@ func TestArchiveDirectory(t *testing.T) {
} }
func BenchmarkArchiveDirectory(b *testing.B) { func BenchmarkArchiveDirectory(b *testing.B) {
if *benchArchiveDirectory == "" { if BenchArchiveDirectory == "" {
b.Skip("benchdir not set, skipping BenchmarkArchiveDirectory") b.Skip("benchdir not set, skipping BenchmarkArchiveDirectory")
} }
@ -134,7 +132,7 @@ func archiveWithDedup(t testing.TB) {
repo := SetupRepo() repo := SetupRepo()
defer TeardownRepo(repo) defer TeardownRepo(repo)
if *benchArchiveDirectory == "" { if BenchArchiveDirectory == "" {
t.Skip("benchdir not set, skipping TestArchiverDedup") t.Skip("benchdir not set, skipping TestArchiverDedup")
} }
@ -145,7 +143,7 @@ func archiveWithDedup(t testing.TB) {
} }
// archive a few files // archive a few files
sn := SnapshotDir(t, repo, *benchArchiveDirectory, nil) sn := SnapshotDir(t, repo, BenchArchiveDirectory, nil)
t.Logf("archived snapshot %v", sn.ID().Str()) t.Logf("archived snapshot %v", sn.ID().Str())
// get archive stats // get archive stats
@ -156,7 +154,7 @@ func archiveWithDedup(t testing.TB) {
cnt.before.packs, cnt.before.dataBlobs, cnt.before.treeBlobs) cnt.before.packs, cnt.before.dataBlobs, cnt.before.treeBlobs)
// archive the same files again, without parent snapshot // archive the same files again, without parent snapshot
sn2 := SnapshotDir(t, repo, *benchArchiveDirectory, nil) sn2 := SnapshotDir(t, repo, BenchArchiveDirectory, nil)
t.Logf("archived snapshot %v", sn2.ID().Str()) t.Logf("archived snapshot %v", sn2.ID().Str())
// get archive stats again // get archive stats again
@ -173,7 +171,7 @@ func archiveWithDedup(t testing.TB) {
} }
// archive the same files again, with a parent snapshot // archive the same files again, with a parent snapshot
sn3 := SnapshotDir(t, repo, *benchArchiveDirectory, sn2.ID()) sn3 := SnapshotDir(t, repo, BenchArchiveDirectory, sn2.ID())
t.Logf("archived snapshot %v, parent %v", sn3.ID().Str(), sn2.ID().Str()) t.Logf("archived snapshot %v, parent %v", sn3.ID().Str(), sn2.ID().Str())
// get archive stats again // get archive stats again
@ -198,13 +196,13 @@ func BenchmarkLoadTree(t *testing.B) {
repo := SetupRepo() repo := SetupRepo()
defer TeardownRepo(repo) defer TeardownRepo(repo)
if *benchArchiveDirectory == "" { if BenchArchiveDirectory == "" {
t.Skip("benchdir not set, skipping TestArchiverDedup") t.Skip("benchdir not set, skipping TestArchiverDedup")
} }
// archive a few files // archive a few files
arch := restic.NewArchiver(repo) arch := restic.NewArchiver(repo)
sn, _, err := arch.Snapshot(nil, []string{*benchArchiveDirectory}, nil) sn, _, err := arch.Snapshot(nil, []string{BenchArchiveDirectory}, nil)
OK(t, err) OK(t, err)
t.Logf("archived snapshot %v", sn.ID()) t.Logf("archived snapshot %v", sn.ID())

View File

@ -17,7 +17,7 @@ func TestCache(t *testing.T) {
arch := restic.NewArchiver(repo) arch := restic.NewArchiver(repo)
// archive some files, this should automatically cache all blobs from the snapshot // archive some files, this should automatically cache all blobs from the snapshot
_, _, err = arch.Snapshot(nil, []string{*benchArchiveDirectory}, nil) _, _, err = arch.Snapshot(nil, []string{BenchArchiveDirectory}, nil)
// TODO: test caching index // TODO: test caching index
} }

View File

@ -5,12 +5,10 @@ import (
"crypto/md5" "crypto/md5"
"crypto/sha256" "crypto/sha256"
"encoding/hex" "encoding/hex"
"flag"
"hash" "hash"
"io" "io"
"io/ioutil" "io/ioutil"
"math/rand" "math/rand"
"os"
"testing" "testing"
"time" "time"
@ -18,8 +16,6 @@ import (
. "github.com/restic/restic/test" . "github.com/restic/restic/test"
) )
var benchmarkFile = flag.String("bench.file", "", "read from this file for benchmark")
func parseDigest(s string) []byte { func parseDigest(s string) []byte {
d, err := hex.DecodeString(s) d, err := hex.DecodeString(s)
if err != nil { if err != nil {
@ -247,29 +243,8 @@ func TestChunkerWithoutHash(t *testing.T) {
} }
func benchmarkChunker(b *testing.B, hash hash.Hash) { func benchmarkChunker(b *testing.B, hash hash.Hash) {
var ( size := 10 * 1024 * 1024
rd io.ReadSeeker rd := bytes.NewReader(getRandom(23, size))
size int
)
if *benchmarkFile != "" {
b.Logf("using file %q for benchmark", *benchmarkFile)
f, err := os.Open(*benchmarkFile)
if err != nil {
b.Fatalf("open(%q): %v", *benchmarkFile, err)
}
fi, err := f.Stat()
if err != nil {
b.Fatalf("lstat(%q): %v", *benchmarkFile, err)
}
size = int(fi.Size())
rd = f
} else {
size = 10 * 1024 * 1024
rd = bytes.NewReader(getRandom(23, size))
}
b.ResetTimer() b.ResetTimer()
b.SetBytes(int64(size)) b.SetBytes(int64(size))

View File

@ -2,7 +2,6 @@ package crypto_test
import ( import (
"bytes" "bytes"
"flag"
"io" "io"
"io/ioutil" "io/ioutil"
"os" "os"
@ -13,13 +12,13 @@ import (
. "github.com/restic/restic/test" . "github.com/restic/restic/test"
) )
var testLargeCrypto = flag.Bool("test.largecrypto", false, "also test crypto functions with large payloads") const testLargeCrypto = false
func TestEncryptDecrypt(t *testing.T) { func TestEncryptDecrypt(t *testing.T) {
k := crypto.NewRandomKey() k := crypto.NewRandomKey()
tests := []int{5, 23, 2<<18 + 23, 1 << 20} tests := []int{5, 23, 2<<18 + 23, 1 << 20}
if *testLargeCrypto { if testLargeCrypto {
tests = append(tests, 7<<20+123) tests = append(tests, 7<<20+123)
} }
@ -117,7 +116,7 @@ func TestCornerCases(t *testing.T) {
} }
func TestLargeEncrypt(t *testing.T) { func TestLargeEncrypt(t *testing.T) {
if !*testLargeCrypto { if !testLargeCrypto {
t.SkipNow() t.SkipNow()
} }
@ -252,7 +251,7 @@ func TestEncryptStreamWriter(t *testing.T) {
k := crypto.NewRandomKey() k := crypto.NewRandomKey()
tests := []int{5, 23, 2<<18 + 23, 1 << 20} tests := []int{5, 23, 2<<18 + 23, 1 << 20}
if *testLargeCrypto { if testLargeCrypto {
tests = append(tests, 7<<20+123) tests = append(tests, 7<<20+123)
} }
@ -286,7 +285,7 @@ func TestDecryptStreamReader(t *testing.T) {
k := crypto.NewRandomKey() k := crypto.NewRandomKey()
tests := []int{5, 23, 2<<18 + 23, 1 << 20} tests := []int{5, 23, 2<<18 + 23, 1 << 20}
if *testLargeCrypto { if testLargeCrypto {
tests = append(tests, 7<<20+123) tests = append(tests, 7<<20+123)
} }
@ -320,7 +319,7 @@ func TestEncryptWriter(t *testing.T) {
k := crypto.NewRandomKey() k := crypto.NewRandomKey()
tests := []int{5, 23, 2<<18 + 23, 1 << 20} tests := []int{5, 23, 2<<18 + 23, 1 << 20}
if *testLargeCrypto { if testLargeCrypto {
tests = append(tests, 7<<20+123) tests = append(tests, 7<<20+123)
} }

View File

@ -1,7 +1,6 @@
package pipe_test package pipe_test
import ( import (
"flag"
"os" "os"
"path/filepath" "path/filepath"
"sync" "sync"
@ -12,9 +11,6 @@ import (
. "github.com/restic/restic/test" . "github.com/restic/restic/test"
) )
var testWalkerPath = flag.String("test.walkerpath", ".", "pipeline walker testpath (default: .)")
var maxWorkers = flag.Int("test.workers", 100, "max concurrency (default: 100)")
func isFile(fi os.FileInfo) bool { func isFile(fi os.FileInfo) bool {
return fi.Mode()&(os.ModeType|os.ModeCharDevice) == 0 return fi.Mode()&(os.ModeType|os.ModeCharDevice) == 0
} }
@ -27,7 +23,7 @@ func statPath(path string) (stats, error) {
var s stats var s stats
// count files and directories with filepath.Walk() // count files and directories with filepath.Walk()
err := filepath.Walk(*testWalkerPath, func(p string, fi os.FileInfo, err error) error { err := filepath.Walk(TestWalkerPath, func(p string, fi os.FileInfo, err error) error {
if fi == nil { if fi == nil {
return err return err
} }
@ -44,15 +40,17 @@ func statPath(path string) (stats, error) {
return s, err return s, err
} }
const maxWorkers = 100
func TestPipelineWalkerWithSplit(t *testing.T) { func TestPipelineWalkerWithSplit(t *testing.T) {
if *testWalkerPath == "" { if TestWalkerPath == "" {
t.Skipf("walkerpath not set, skipping TestPipelineWalker") t.Skipf("walkerpath not set, skipping TestPipelineWalker")
} }
before, err := statPath(*testWalkerPath) before, err := statPath(TestWalkerPath)
OK(t, err) OK(t, err)
t.Logf("walking path %s with %d dirs, %d files", *testWalkerPath, t.Logf("walking path %s with %d dirs, %d files", TestWalkerPath,
before.dirs, before.files) before.dirs, before.files)
// account for top level dir // account for top level dir
@ -105,7 +103,7 @@ func TestPipelineWalkerWithSplit(t *testing.T) {
entCh := make(chan pipe.Entry) entCh := make(chan pipe.Entry)
dirCh := make(chan pipe.Dir) dirCh := make(chan pipe.Dir)
for i := 0; i < *maxWorkers; i++ { for i := 0; i < maxWorkers; i++ {
wg.Add(1) wg.Add(1)
go worker(&wg, done, entCh, dirCh) go worker(&wg, done, entCh, dirCh)
} }
@ -120,7 +118,7 @@ func TestPipelineWalkerWithSplit(t *testing.T) {
}() }()
resCh := make(chan pipe.Result, 1) resCh := make(chan pipe.Result, 1)
err = pipe.Walk([]string{*testWalkerPath}, done, jobs, resCh) err = pipe.Walk([]string{TestWalkerPath}, done, jobs, resCh)
OK(t, err) OK(t, err)
// wait for all workers to terminate // wait for all workers to terminate
@ -129,21 +127,21 @@ func TestPipelineWalkerWithSplit(t *testing.T) {
// wait for top-level blob // wait for top-level blob
<-resCh <-resCh
t.Logf("walked path %s with %d dirs, %d files", *testWalkerPath, t.Logf("walked path %s with %d dirs, %d files", TestWalkerPath,
after.dirs, after.files) after.dirs, after.files)
Assert(t, before == after, "stats do not match, expected %v, got %v", before, after) Assert(t, before == after, "stats do not match, expected %v, got %v", before, after)
} }
func TestPipelineWalker(t *testing.T) { func TestPipelineWalker(t *testing.T) {
if *testWalkerPath == "" { if TestWalkerPath == "" {
t.Skipf("walkerpath not set, skipping TestPipelineWalker") t.Skipf("walkerpath not set, skipping TestPipelineWalker")
} }
before, err := statPath(*testWalkerPath) before, err := statPath(TestWalkerPath)
OK(t, err) OK(t, err)
t.Logf("walking path %s with %d dirs, %d files", *testWalkerPath, t.Logf("walking path %s with %d dirs, %d files", TestWalkerPath,
before.dirs, before.files) before.dirs, before.files)
// account for top level dir // account for top level dir
@ -194,13 +192,13 @@ func TestPipelineWalker(t *testing.T) {
done := make(chan struct{}) done := make(chan struct{})
jobs := make(chan pipe.Job) jobs := make(chan pipe.Job)
for i := 0; i < *maxWorkers; i++ { for i := 0; i < maxWorkers; i++ {
wg.Add(1) wg.Add(1)
go worker(&wg, done, jobs) go worker(&wg, done, jobs)
} }
resCh := make(chan pipe.Result, 1) resCh := make(chan pipe.Result, 1)
err = pipe.Walk([]string{*testWalkerPath}, done, jobs, resCh) err = pipe.Walk([]string{TestWalkerPath}, done, jobs, resCh)
OK(t, err) OK(t, err)
// wait for all workers to terminate // wait for all workers to terminate
@ -209,14 +207,14 @@ func TestPipelineWalker(t *testing.T) {
// wait for top-level blob // wait for top-level blob
<-resCh <-resCh
t.Logf("walked path %s with %d dirs, %d files", *testWalkerPath, t.Logf("walked path %s with %d dirs, %d files", TestWalkerPath,
after.dirs, after.files) after.dirs, after.files)
Assert(t, before == after, "stats do not match, expected %v, got %v", before, after) Assert(t, before == after, "stats do not match, expected %v, got %v", before, after)
} }
func BenchmarkPipelineWalker(b *testing.B) { func BenchmarkPipelineWalker(b *testing.B) {
if *testWalkerPath == "" { if TestWalkerPath == "" {
b.Skipf("walkerpath not set, skipping BenchPipelineWalker") b.Skipf("walkerpath not set, skipping BenchPipelineWalker")
} }
@ -283,8 +281,8 @@ func BenchmarkPipelineWalker(b *testing.B) {
dirCh := make(chan pipe.Dir, 200) dirCh := make(chan pipe.Dir, 200)
var wg sync.WaitGroup var wg sync.WaitGroup
b.Logf("starting %d workers", *maxWorkers) b.Logf("starting %d workers", maxWorkers)
for i := 0; i < *maxWorkers; i++ { for i := 0; i < maxWorkers; i++ {
wg.Add(2) wg.Add(2)
go dirWorker(&wg, done, dirCh) go dirWorker(&wg, done, dirCh)
go fileWorker(&wg, done, entCh) go fileWorker(&wg, done, entCh)
@ -300,7 +298,7 @@ func BenchmarkPipelineWalker(b *testing.B) {
}() }()
resCh := make(chan pipe.Result, 1) resCh := make(chan pipe.Result, 1)
err := pipe.Walk([]string{*testWalkerPath}, done, jobs, resCh) err := pipe.Walk([]string{TestWalkerPath}, done, jobs, resCh)
OK(b, err) OK(b, err)
// wait for all workers to terminate // wait for all workers to terminate
@ -314,13 +312,13 @@ func BenchmarkPipelineWalker(b *testing.B) {
} }
func TestPipelineWalkerMultiple(t *testing.T) { func TestPipelineWalkerMultiple(t *testing.T) {
if *testWalkerPath == "" { if TestWalkerPath == "" {
t.Skipf("walkerpath not set, skipping TestPipelineWalker") t.Skipf("walkerpath not set, skipping TestPipelineWalker")
} }
paths, err := filepath.Glob(filepath.Join(*testWalkerPath, "*")) paths, err := filepath.Glob(filepath.Join(TestWalkerPath, "*"))
before, err := statPath(*testWalkerPath) before, err := statPath(TestWalkerPath)
OK(t, err) OK(t, err)
t.Logf("walking paths %v with %d dirs, %d files", paths, t.Logf("walking paths %v with %d dirs, %d files", paths,
@ -371,7 +369,7 @@ func TestPipelineWalkerMultiple(t *testing.T) {
done := make(chan struct{}) done := make(chan struct{})
jobs := make(chan pipe.Job) jobs := make(chan pipe.Job)
for i := 0; i < *maxWorkers; i++ { for i := 0; i < maxWorkers; i++ {
wg.Add(1) wg.Add(1)
go worker(&wg, done, jobs) go worker(&wg, done, jobs)
} }

View File

@ -5,7 +5,6 @@ import (
"crypto/rand" "crypto/rand"
"crypto/sha256" "crypto/sha256"
"encoding/json" "encoding/json"
"flag"
"io" "io"
"testing" "testing"
@ -15,8 +14,6 @@ import (
. "github.com/restic/restic/test" . "github.com/restic/restic/test"
) )
var benchTestDir = flag.String("test.dir", ".", "dir used in benchmarks (default: .)")
type testJSONStruct struct { type testJSONStruct struct {
Foo uint32 Foo uint32
Bar string Bar string
@ -159,12 +156,12 @@ func TestLoadJSONPack(t *testing.T) {
repo := SetupRepo() repo := SetupRepo()
defer TeardownRepo(repo) defer TeardownRepo(repo)
if *benchTestDir == "" { if BenchArchiveDirectory == "" {
t.Skip("benchdir not set, skipping") t.Skip("benchdir not set, skipping")
} }
// archive a few files // archive a few files
sn := SnapshotDir(t, repo, *benchTestDir, nil) sn := SnapshotDir(t, repo, BenchArchiveDirectory, nil)
OK(t, repo.Flush()) OK(t, repo.Flush())
tree := restic.NewTree() tree := restic.NewTree()
@ -176,7 +173,7 @@ func TestLoadJSONUnpacked(t *testing.T) {
repo := SetupRepo() repo := SetupRepo()
defer TeardownRepo(repo) defer TeardownRepo(repo)
if *benchTestDir == "" { if BenchArchiveDirectory == "" {
t.Skip("benchdir not set, skipping") t.Skip("benchdir not set, skipping")
} }

View File

@ -18,8 +18,9 @@ var (
TestCleanup = getBoolVar("RESTIC_TEST_CLEANUP", true) TestCleanup = getBoolVar("RESTIC_TEST_CLEANUP", true)
TestTempDir = getStringVar("RESTIC_TEST_TMPDIR", "") TestTempDir = getStringVar("RESTIC_TEST_TMPDIR", "")
RunIntegrationTest = getBoolVar("RESTIC_TEST_INTEGRATION", true) RunIntegrationTest = getBoolVar("RESTIC_TEST_INTEGRATION", true)
TestSFTPPath = getStringVar("RESTIC_TEST_SFTPPATH", TestSFTPPath = getStringVar("RESTIC_TEST_SFTPPATH", "/usr/lib/ssh:/usr/lib/openssh")
"/usr/lib/ssh:/usr/lib/openssh") TestWalkerPath = getStringVar("RESTIC_TEST_PATH", ".")
BenchArchiveDirectory = getStringVar("RESTIC_BENCH_DIR", ".")
) )
func getStringVar(name, defaultValue string) string { func getStringVar(name, defaultValue string) string {

View File

@ -1,7 +1,6 @@
package restic_test package restic_test
import ( import (
"flag"
"path/filepath" "path/filepath"
"testing" "testing"
@ -10,13 +9,11 @@ import (
. "github.com/restic/restic/test" . "github.com/restic/restic/test"
) )
var testWalkDirectory = flag.String("test.walkdir", ".", "test walking a directory (globbing pattern, default: .)")
func TestWalkTree(t *testing.T) { func TestWalkTree(t *testing.T) {
repo := SetupRepo() repo := SetupRepo()
defer TeardownRepo(repo) defer TeardownRepo(repo)
dirs, err := filepath.Glob(*testWalkDirectory) dirs, err := filepath.Glob(TestWalkerPath)
OK(t, err) OK(t, err)
// archive a few files // archive a few files