Mirror of https://github.com/octoleo/restic.git (synced 2024-11-21 20:35:12 +00:00)
Remove unused context or testing parameters
This commit is contained in:
parent 5e4e268bdc
commit 1514593f22
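Every hunk below applies the same refactoring: a parameter that a function accepted but never read (usually a context.Context or a testing.TB) is dropped from the signature, and each call site is updated to match. A minimal sketch of the pattern, using made-up names rather than restic code:

package main

import "fmt"

// Before the refactor this helper would have looked like
//   func greet(ctx context.Context, name string) string
// with ctx never read inside the body; dropping the dead parameter
// simplifies both the signature and every caller.
func greet(name string) string {
	return "hello, " + name
}

func main() {
	fmt.Println(greet("restic")) // callers simply stop passing the unused argument
}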
@@ -501,7 +501,7 @@ func (f *Finder) indexPacksToBlobs(ctx context.Context, packIDs map[string]struc
return packIDs
}

-func (f *Finder) findObjectPack(ctx context.Context, id string, t restic.BlobType) {
+func (f *Finder) findObjectPack(id string, t restic.BlobType) {
idx := f.repo.Index()

rid, err := restic.ParseID(id)
@@ -524,13 +524,13 @@ func (f *Finder) findObjectPack(ctx context.Context, id string, t restic.BlobTyp
}
}

-func (f *Finder) findObjectsPacks(ctx context.Context) {
+func (f *Finder) findObjectsPacks() {
for i := range f.blobIDs {
-f.findObjectPack(ctx, i, restic.DataBlob)
+f.findObjectPack(i, restic.DataBlob)
}

for i := range f.treeIDs {
-f.findObjectPack(ctx, i, restic.TreeBlob)
+f.findObjectPack(i, restic.TreeBlob)
}
}
@@ -632,7 +632,7 @@ func runFind(ctx context.Context, opts FindOptions, gopts GlobalOptions, args []
f.out.Finish()

if opts.ShowPackID && (f.blobIDs != nil || f.treeIDs != nil) {
-f.findObjectsPacks(ctx)
+f.findObjectsPacks()
}

return nil
@@ -180,7 +180,7 @@ func runRestore(ctx context.Context, opts RestoreOptions, gopts GlobalOptions,
progress = restoreui.NewProgress(restoreui.NewProgressPrinter(term), calculateProgressInterval(!gopts.Quiet, gopts.JSON))
}

-res := restorer.NewRestorer(ctx, repo, sn, opts.Sparse, progress)
+res := restorer.NewRestorer(repo, sn, opts.Sparse, progress)

totalErrors := 0
res.Error = func(location string, err error) error {
@@ -70,28 +70,28 @@ func TestRestoreFailsWhenUsingInvalidPatterns(t *testing.T) {
var err error

// Test --exclude
-err = testRunRestoreAssumeFailure(t, "latest", RestoreOptions{Exclude: []string{"*[._]log[.-][0-9]", "!*[._]log[.-][0-9]"}}, env.gopts)
+err = testRunRestoreAssumeFailure("latest", RestoreOptions{Exclude: []string{"*[._]log[.-][0-9]", "!*[._]log[.-][0-9]"}}, env.gopts)

rtest.Equals(t, `Fatal: --exclude: invalid pattern(s) provided:
*[._]log[.-][0-9]
!*[._]log[.-][0-9]`, err.Error())

// Test --iexclude
-err = testRunRestoreAssumeFailure(t, "latest", RestoreOptions{InsensitiveExclude: []string{"*[._]log[.-][0-9]", "!*[._]log[.-][0-9]"}}, env.gopts)
+err = testRunRestoreAssumeFailure("latest", RestoreOptions{InsensitiveExclude: []string{"*[._]log[.-][0-9]", "!*[._]log[.-][0-9]"}}, env.gopts)

rtest.Equals(t, `Fatal: --iexclude: invalid pattern(s) provided:
*[._]log[.-][0-9]
!*[._]log[.-][0-9]`, err.Error())

// Test --include
-err = testRunRestoreAssumeFailure(t, "latest", RestoreOptions{Include: []string{"*[._]log[.-][0-9]", "!*[._]log[.-][0-9]"}}, env.gopts)
+err = testRunRestoreAssumeFailure("latest", RestoreOptions{Include: []string{"*[._]log[.-][0-9]", "!*[._]log[.-][0-9]"}}, env.gopts)

rtest.Equals(t, `Fatal: --include: invalid pattern(s) provided:
*[._]log[.-][0-9]
!*[._]log[.-][0-9]`, err.Error())

// Test --iinclude
-err = testRunRestoreAssumeFailure(t, "latest", RestoreOptions{InsensitiveInclude: []string{"*[._]log[.-][0-9]", "!*[._]log[.-][0-9]"}}, env.gopts)
+err = testRunRestoreAssumeFailure("latest", RestoreOptions{InsensitiveInclude: []string{"*[._]log[.-][0-9]", "!*[._]log[.-][0-9]"}}, env.gopts)

rtest.Equals(t, `Fatal: --iinclude: invalid pattern(s) provided:
*[._]log[.-][0-9]
@@ -64,7 +64,7 @@ func testRunMount(t testing.TB, gopts GlobalOptions, dir string, wg *sync.WaitGr
rtest.OK(t, runMount(context.TODO(), opts, gopts, []string{dir}))
}

-func testRunUmount(t testing.TB, gopts GlobalOptions, dir string) {
+func testRunUmount(t testing.TB, dir string) {
var err error
for i := 0; i < mountWait; i++ {
if err = umount(dir); err == nil {
@@ -95,7 +95,7 @@ func checkSnapshots(t testing.TB, global GlobalOptions, repo *repository.Reposit
go testRunMount(t, global, mountpoint, &wg)
waitForMount(t, mountpoint)
defer wg.Wait()
-defer testRunUmount(t, global, mountpoint)
+defer testRunUmount(t, mountpoint)

if !snapshotsDirExists(t, mountpoint) {
t.Fatal(`virtual directory "snapshots" doesn't exist`)
@@ -141,7 +141,7 @@ func testRunRestoreIncludes(t testing.TB, gopts GlobalOptions, dir string, snaps
rtest.OK(t, runRestore(context.TODO(), opts, gopts, nil, []string{snapshotID.String()}))
}

-func testRunRestoreAssumeFailure(t testing.TB, snapshotID string, opts RestoreOptions, gopts GlobalOptions) error {
+func testRunRestoreAssumeFailure(snapshotID string, opts RestoreOptions, gopts GlobalOptions) error {
err := runRestore(context.TODO(), opts, gopts, nil, []string{snapshotID})

return err
@@ -2166,7 +2166,7 @@ func TestMetadataChanged(t *testing.T) {
}

// modify the mode by wrapping it in a new struct, uses the consts defined above
-fs.OverrideLstat["testfile"] = wrapFileInfo(t, fi)
+fs.OverrideLstat["testfile"] = wrapFileInfo(fi)

// set the override values in the 'want' node which
want.Mode = 0400
@@ -6,7 +6,6 @@ package archiver
import (
"os"
"syscall"
-"testing"
)

type wrappedFileInfo struct {

@@ -24,7 +23,7 @@ func (fi wrappedFileInfo) Mode() os.FileMode {
}

// wrapFileInfo returns a new os.FileInfo with the mode, owner, and group fields changed.
-func wrapFileInfo(t testing.TB, fi os.FileInfo) os.FileInfo {
+func wrapFileInfo(fi os.FileInfo) os.FileInfo {
// get the underlying stat_t and modify the values
stat := fi.Sys().(*syscall.Stat_t)
stat.Mode = mockFileInfoMode
@@ -5,7 +5,6 @@ package archiver

import (
"os"
-"testing"
)

type wrappedFileInfo struct {

@@ -18,7 +17,7 @@ func (fi wrappedFileInfo) Mode() os.FileMode {
}

// wrapFileInfo returns a new os.FileInfo with the mode, owner, and group fields changed.
-func wrapFileInfo(t testing.TB, fi os.FileInfo) os.FileInfo {
+func wrapFileInfo(fi os.FileInfo) os.FileInfo {
// wrap the os.FileInfo and return the modified mode, uid and gid are ignored on Windows
res := wrappedFileInfo{
FileInfo: fi,
@@ -124,7 +124,7 @@ func (s *Suite) TestLoad(t *testing.T) {
b := s.open(t)
defer s.close(t, b)

-err := testLoad(b, restic.Handle{Type: restic.PackFile, Name: "foobar"}, 0, 0)
+err := testLoad(b, restic.Handle{Type: restic.PackFile, Name: "foobar"})
if err == nil {
t.Fatalf("Load() did not return an error for non-existing blob")
}
@@ -672,7 +672,7 @@ func store(t testing.TB, b restic.Backend, tpe restic.FileType, data []byte) res
}

// testLoad loads a blob (but discards its contents).
-func testLoad(b restic.Backend, h restic.Handle, length int, offset int64) error {
+func testLoad(b restic.Backend, h restic.Handle) error {
return b.Load(context.TODO(), h, 0, 0, func(rd io.Reader) (ierr error) {
_, ierr = io.Copy(io.Discard, rd)
return ierr
@@ -773,7 +773,7 @@ func (s *Suite) TestBackend(t *testing.T) {
test.Assert(t, b.IsNotExist(err), "IsNotExist() did not recognize Stat() error: %v", err)

// try to read not existing blob
-err = testLoad(b, h, 0, 0)
+err = testLoad(b, h)
test.Assert(t, err != nil, "blob could be read before creation")
test.Assert(t, b.IsNotExist(err), "IsNotExist() did not recognize Load() error: %v", err)
internal/cache/backend.go (vendored, 6 changed lines)
@@ -133,7 +133,7 @@ func (b *Backend) cacheFile(ctx context.Context, h restic.Handle) error {
}

// loadFromCache will try to load the file from the cache.
-func (b *Backend) loadFromCache(ctx context.Context, h restic.Handle, length int, offset int64, consumer func(rd io.Reader) error) (bool, error) {
+func (b *Backend) loadFromCache(h restic.Handle, length int, offset int64, consumer func(rd io.Reader) error) (bool, error) {
rd, err := b.Cache.load(h, length, offset)
if err != nil {
return false, err
@@ -160,7 +160,7 @@ func (b *Backend) Load(ctx context.Context, h restic.Handle, length int, offset
}

// try loading from cache without checking that the handle is actually cached
-inCache, err := b.loadFromCache(ctx, h, length, offset, consumer)
+inCache, err := b.loadFromCache(h, length, offset, consumer)
if inCache {
if err == nil {
return nil
@@ -183,7 +183,7 @@ func (b *Backend) Load(ctx context.Context, h restic.Handle, length int, offset
return err
}

-inCache, err = b.loadFromCache(ctx, h, length, offset, consumer)
+inCache, err = b.loadFromCache(h, length, offset, consumer)
if inCache {
return err
}
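Note that in the internal/cache/backend.go hunks above the exported Load keeps its ctx parameter; only the private loadFromCache helper loses it, since the cache read never used it. A rough sketch of that shape, with assumed names rather than the actual restic types:

package main

import (
	"bytes"
	"context"
	"fmt"
	"io"
)

// cacheBackend is a stand-in for a cache-backed backend; only the shape of
// the two methods matters for this illustration.
type cacheBackend struct {
	cache map[string][]byte
}

// loadFromCache does purely local work, so it takes no context.
func (b *cacheBackend) loadFromCache(name string, consumer func(rd io.Reader) error) (bool, error) {
	data, ok := b.cache[name]
	if !ok {
		return false, nil
	}
	return true, consumer(bytes.NewReader(data))
}

// Load keeps the context parameter because the non-cached path (fetching
// from a remote backend) would need it for cancellation.
func (b *cacheBackend) Load(ctx context.Context, name string, consumer func(rd io.Reader) error) error {
	if inCache, err := b.loadFromCache(name, consumer); inCache {
		return err
	}
	// a real implementation would fall back to the remote backend here, passing ctx along
	return fmt.Errorf("%q not cached", name)
}

func main() {
	b := &cacheBackend{cache: map[string][]byte{"config": []byte("hello")}}
	err := b.Load(context.Background(), "config", func(rd io.Reader) error {
		_, err := io.Copy(io.Discard, rd)
		return err
	})
	fmt.Println(err) // <nil>
}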
@@ -427,7 +427,7 @@ func testRepositoryIncrementalIndex(t *testing.T, version uint) {
}

// buildPackfileWithoutHeader returns a manually built pack file without a header.
-func buildPackfileWithoutHeader(t testing.TB, blobSizes []int, key *crypto.Key, compress bool) (blobs []restic.Blob, packfile []byte) {
+func buildPackfileWithoutHeader(blobSizes []int, key *crypto.Key, compress bool) (blobs []restic.Blob, packfile []byte) {
opts := []zstd.EOption{
// Set the compression level configured.
zstd.WithEncoderLevel(zstd.SpeedDefault),
@@ -524,7 +524,7 @@ func testStreamPack(t *testing.T, version uint) {
t.Fatal("test does not suport repository version", version)
}

-packfileBlobs, packfile := buildPackfileWithoutHeader(t, blobSizes, &key, compress)
+packfileBlobs, packfile := buildPackfileWithoutHeader(blobSizes, &key, compress)

loadCalls := 0
load := func(ctx context.Context, h restic.Handle, length int, offset int64, fn func(rd io.Reader) error) error {
@@ -30,7 +30,7 @@ type Restorer struct {

var restorerAbortOnAllErrors = func(location string, err error) error { return err }

// NewRestorer creates a restorer preloaded with the content from the snapshot id.
-func NewRestorer(ctx context.Context, repo restic.Repository, sn *restic.Snapshot, sparse bool,
+func NewRestorer(repo restic.Repository, sn *restic.Snapshot, sparse bool,
progress *restoreui.Progress) *Restorer {
r := &Restorer{
repo: repo,
@@ -325,7 +325,7 @@ func TestRestorer(t *testing.T) {
sn, id := saveSnapshot(t, repo, test.Snapshot)
t.Logf("snapshot saved as %v", id.Str())

-res := NewRestorer(context.TODO(), repo, sn, false, nil)
+res := NewRestorer(repo, sn, false, nil)

tempdir := rtest.TempDir(t)
// make sure we're creating a new subdir of the tempdir
@@ -442,7 +442,7 @@ func TestRestorerRelative(t *testing.T) {
sn, id := saveSnapshot(t, repo, test.Snapshot)
t.Logf("snapshot saved as %v", id.Str())

-res := NewRestorer(context.TODO(), repo, sn, false, nil)
+res := NewRestorer(repo, sn, false, nil)

tempdir := rtest.TempDir(t)
cleanup := rtest.Chdir(t, tempdir)
@@ -671,7 +671,7 @@ func TestRestorerTraverseTree(t *testing.T) {
repo := repository.TestRepository(t)
sn, _ := saveSnapshot(t, repo, test.Snapshot)

-res := NewRestorer(context.TODO(), repo, sn, false, nil)
+res := NewRestorer(repo, sn, false, nil)

res.SelectFilter = test.Select
@@ -747,7 +747,7 @@ func TestRestorerConsistentTimestampsAndPermissions(t *testing.T) {
},
})

-res := NewRestorer(context.TODO(), repo, sn, false, nil)
+res := NewRestorer(repo, sn, false, nil)

res.SelectFilter = func(item string, dstpath string, node *restic.Node) (selectedForRestore bool, childMayBeSelected bool) {
switch filepath.ToSlash(item) {
@@ -802,7 +802,7 @@ func TestVerifyCancel(t *testing.T) {
repo := repository.TestRepository(t)
sn, _ := saveSnapshot(t, repo, snapshot)

-res := NewRestorer(context.TODO(), repo, sn, false, nil)
+res := NewRestorer(repo, sn, false, nil)

tempdir := rtest.TempDir(t)
ctx, cancel := context.WithCancel(context.Background())
@@ -844,7 +844,7 @@ func TestRestorerSparseFiles(t *testing.T) {
archiver.SnapshotOptions{})
rtest.OK(t, err)

-res := NewRestorer(context.TODO(), repo, sn, true, nil)
+res := NewRestorer(repo, sn, true, nil)

tempdir := rtest.TempDir(t)
ctx, cancel := context.WithCancel(context.Background())
@@ -31,7 +31,7 @@ func TestRestorerRestoreEmptyHardlinkedFileds(t *testing.T) {
},
})

-res := NewRestorer(context.TODO(), repo, sn, false, nil)
+res := NewRestorer(repo, sn, false, nil)

res.SelectFilter = func(item string, dstpath string, node *restic.Node) (selectedForRestore bool, childMayBeSelected bool) {
return true, true
@@ -99,7 +99,7 @@ func TestRestorerProgressBar(t *testing.T) {

mock := &printerMock{}
progress := restoreui.NewProgress(mock, 0)
-res := NewRestorer(context.TODO(), repo, sn, false, progress)
+res := NewRestorer(repo, sn, false, progress)
res.SelectFilter = func(item string, dstpath string, node *restic.Node) (selectedForRestore bool, childMayBeSelected bool) {
return true, true
}