mirror of https://github.com/octoleo/restic.git, synced 2024-11-22 21:05:10 +00:00

Merge pull request #4333 from MichaelEischer/staticcheck

Update golangci-lint and resolve all errors

This commit is contained in commit 998cf5a7f8.
.github/workflows/tests.yml (vendored): 6 changes
@@ -258,10 +258,8 @@ jobs:
 uses: golangci/golangci-lint-action@v3
 with:
 # Required: the version of golangci-lint is required and must be specified without patch version: we always use the latest patch version.
-version: v1.51
-# Optional: show only new issues if it's a pull request. The default value is `false`.
-only-new-issues: true
-args: --verbose --timeout 10m
+version: v1.52.2
+args: --verbose --timeout 5m

 # only run golangci-lint for pull requests, otherwise ALL hints get
 # reported. We need to slowly address all issues until we can enable
@@ -10,13 +10,10 @@ linters:
 # make sure all errors returned by functions are handled
 - errcheck

-# find unused code
-- deadcode
-
 # show how code can be simplified
 - gosimple

-# # make sure code is formatted
+# make sure code is formatted
 - gofmt

 # examine code and report suspicious constructs, such as Printf calls whose
@@ -35,12 +32,6 @@ linters:
 # find unused variables, functions, structs, types, etc.
 - unused

-# find unused struct fields
-- structcheck
-
-# find unused global variables
-- varcheck
-
 # parse and typecheck code
 - typecheck

@@ -57,3 +48,6 @@ issues:
 - don't use ALL_CAPS in Go names; use CamelCase
 # revive: lots of packages don't have such a comment
 - "package-comments: should have a package comment"
+# staticcheck: there's no easy way to replace these packages
+- "SA1019: \"golang.org/x/crypto/poly1305\" is deprecated"
+- "SA1019: \"golang.org/x/crypto/openpgp\" is deprecated"
changelog/unreleased/pull-4333 (new file): 3 changes
@@ -0,0 +1,3 @@
+Bugfix: `generate` and `init` no longer silently ignore unexpected arguments
+
+https://github.com/restic/restic/pull/4333
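The fix referenced in this changelog entry is the explicit argument check added to runGenerate and runInit in the hunks below. A minimal, self-contained sketch of that pattern (using a plain error instead of restic's internal errors.Fatal, and a made-up command name):

package main

import (
    "errors"
    "fmt"
)

// runExample rejects positional arguments instead of silently ignoring them.
func runExample(args []string) error {
    if len(args) > 0 {
        return errors.New("this command expects no arguments, only options")
    }
    return nil
}

func main() {
    fmt.Println(runExample([]string{"unexpected"}))
}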
@@ -308,7 +308,7 @@ func (opts BackupOptions) Check(gopts GlobalOptions, args []string) error {

 // collectRejectByNameFuncs returns a list of all functions which may reject data
 // from being saved in a snapshot based on path only
-func collectRejectByNameFuncs(opts BackupOptions, repo *repository.Repository, targets []string) (fs []RejectByNameFunc, err error) {
+func collectRejectByNameFuncs(opts BackupOptions, repo *repository.Repository) (fs []RejectByNameFunc, err error) {
 // exclude restic cache
 if repo.Cache != nil {
 f, err := rejectResticCache(repo)
@@ -343,7 +343,7 @@ func collectRejectByNameFuncs(opts BackupOptions, repo *repository.Repository, t

 // collectRejectFuncs returns a list of all functions which may reject data
 // from being saved in a snapshot based on path and file info
-func collectRejectFuncs(opts BackupOptions, repo *repository.Repository, targets []string) (fs []RejectFunc, err error) {
+func collectRejectFuncs(opts BackupOptions, targets []string) (fs []RejectFunc, err error) {
 // allowed devices
 if opts.ExcludeOtherFS && !opts.Stdin {
 f, err := rejectByDevice(targets)
@@ -513,13 +513,13 @@ func runBackup(ctx context.Context, opts BackupOptions, gopts GlobalOptions, ter
 }

 // rejectByNameFuncs collect functions that can reject items from the backup based on path only
-rejectByNameFuncs, err := collectRejectByNameFuncs(opts, repo, targets)
+rejectByNameFuncs, err := collectRejectByNameFuncs(opts, repo)
 if err != nil {
 return err
 }

 // rejectFuncs collect functions that can reject items from the backup based on path and file info
-rejectFuncs, err := collectRejectFuncs(opts, repo, targets)
+rejectFuncs, err := collectRejectFuncs(opts, targets)
 if err != nil {
 return err
 }
@@ -501,7 +501,7 @@ func (f *Finder) indexPacksToBlobs(ctx context.Context, packIDs map[string]struc
 return packIDs
 }

-func (f *Finder) findObjectPack(ctx context.Context, id string, t restic.BlobType) {
+func (f *Finder) findObjectPack(id string, t restic.BlobType) {
 idx := f.repo.Index()

 rid, err := restic.ParseID(id)
@@ -524,13 +524,13 @@ func (f *Finder) findObjectPack(ctx context.Context, id string, t restic.BlobTyp
 }
 }

-func (f *Finder) findObjectsPacks(ctx context.Context) {
+func (f *Finder) findObjectsPacks() {
 for i := range f.blobIDs {
-f.findObjectPack(ctx, i, restic.DataBlob)
+f.findObjectPack(i, restic.DataBlob)
 }

 for i := range f.treeIDs {
-f.findObjectPack(ctx, i, restic.TreeBlob)
+f.findObjectPack(i, restic.TreeBlob)
 }
 }

@@ -632,7 +632,7 @@ func runFind(ctx context.Context, opts FindOptions, gopts GlobalOptions, args []
 f.out.Finish()

 if opts.ShowPackID && (f.blobIDs != nil || f.treeIDs != nil) {
-f.findObjectsPacks(ctx)
+f.findObjectsPacks()
 }

 return nil
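The hunks above and below follow the two recurring patterns this PR uses to satisfy the unused-parameter checks: a parameter no caller needs is dropped from the signature entirely (as with findObjectPack losing its ctx), while a parameter that must stay because an interface or callback type fixes the signature is renamed to "_". A small illustrative sketch, with names made up rather than taken from restic:

package main

import (
    "context"
    "fmt"
)

// The interface fixes the signature, so the unused context becomes "_".
type pinger interface {
    Ping(ctx context.Context) string
}

type staticPinger struct{ reply string }

func (p staticPinger) Ping(_ context.Context) string {
    return p.reply
}

// A purely internal helper can instead drop the parameter it never used.
func formatReply(reply string) string {
    return "reply: " + reply
}

func main() {
    var p pinger = staticPinger{reply: "pong"}
    fmt.Println(formatReply(p.Ping(context.Background())))
}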
@@ -90,7 +90,11 @@ func writePowerShellCompletion(file string) error {
 return cmdRoot.GenPowerShellCompletionFile(file)
 }

-func runGenerate(cmd *cobra.Command, args []string) error {
+func runGenerate(_ *cobra.Command, args []string) error {
+if len(args) > 0 {
+return errors.Fatal("the generate command expects no arguments, only options - please see `restic help generate` for usage and flags")
+}
+
 if genOpts.ManDir != "" {
 err := writeManpages(genOpts.ManDir)
 if err != nil {
@@ -50,6 +50,10 @@ func init() {
 }

 func runInit(ctx context.Context, opts InitOptions, gopts GlobalOptions, args []string) error {
+if len(args) > 0 {
+return errors.Fatal("the init command expects no arguments, only options - please see `restic help init` for usage and flags")
+}
+
 var version uint
 if opts.RepositoryVersion == "latest" || opts.RepositoryVersion == "" {
 version = restic.MaxRepoVersion
@@ -67,10 +67,10 @@ func runRebuildIndex(ctx context.Context, opts RepairIndexOptions, gopts GlobalO
 return err
 }

-return rebuildIndex(ctx, opts, gopts, repo, restic.NewIDSet())
+return rebuildIndex(ctx, opts, gopts, repo)
 }

-func rebuildIndex(ctx context.Context, opts RepairIndexOptions, gopts GlobalOptions, repo *repository.Repository, ignorePacks restic.IDSet) error {
+func rebuildIndex(ctx context.Context, opts RepairIndexOptions, gopts GlobalOptions, repo *repository.Repository) error {
 var obsoleteIndexes restic.IDs
 packSizeFromList := make(map[restic.ID]int64)
 packSizeFromIndex := make(map[restic.ID]int64)
@@ -180,7 +180,7 @@ func runRestore(ctx context.Context, opts RestoreOptions, gopts GlobalOptions,
 progress = restoreui.NewProgress(restoreui.NewProgressPrinter(term), calculateProgressInterval(!gopts.Quiet, gopts.JSON))
 }

-res := restorer.NewRestorer(ctx, repo, sn, opts.Sparse, progress)
+res := restorer.NewRestorer(repo, sn, opts.Sparse, progress)

 totalErrors := 0
 res.Error = func(location string, err error) error {
@@ -49,7 +49,7 @@ Exit status is 0 if the command was successful, and non-zero if there was any er
 `,
 DisableAutoGenTag: true,
 RunE: func(cmd *cobra.Command, args []string) error {
-return runStats(cmd.Context(), globalOptions, args)
+return runStats(cmd.Context(), statsOptions, globalOptions, args)
 },
 }

@@ -70,8 +70,8 @@ func init() {
 initMultiSnapshotFilter(f, &statsOptions.SnapshotFilter, true)
 }

-func runStats(ctx context.Context, gopts GlobalOptions, args []string) error {
-err := verifyStatsInput(gopts, args)
+func runStats(ctx context.Context, opts StatsOptions, gopts GlobalOptions, args []string) error {
+err := verifyStatsInput(opts)
 if err != nil {
 return err
 }
@@ -111,8 +111,8 @@ func runStats(ctx context.Context, gopts GlobalOptions, args []string) error {
 SnapshotsCount: 0,
 }

-for sn := range FindFilteredSnapshots(ctx, snapshotLister, repo, &statsOptions.SnapshotFilter, args) {
-err = statsWalkSnapshot(ctx, sn, repo, stats)
+for sn := range FindFilteredSnapshots(ctx, snapshotLister, repo, &opts.SnapshotFilter, args) {
+err = statsWalkSnapshot(ctx, sn, repo, opts, stats)
 if err != nil {
 return fmt.Errorf("error walking snapshot: %v", err)
 }
@@ -122,7 +122,7 @@ func runStats(ctx context.Context, gopts GlobalOptions, args []string) error {
 return err
 }

-if statsOptions.countMode == countModeRawData {
+if opts.countMode == countModeRawData {
 // the blob handles have been collected, but not yet counted
 for blobHandle := range stats.blobs {
 pbs := repo.Index().Lookup(blobHandle)
@@ -156,7 +156,7 @@ func runStats(ctx context.Context, gopts GlobalOptions, args []string) error {
 return nil
 }

-Printf("Stats in %s mode:\n", statsOptions.countMode)
+Printf("Stats in %s mode:\n", opts.countMode)
 Printf(" Snapshots processed: %d\n", stats.SnapshotsCount)
 if stats.TotalBlobCount > 0 {
 Printf(" Total Blob Count: %d\n", stats.TotalBlobCount)
@@ -181,21 +181,21 @@ func runStats(ctx context.Context, gopts GlobalOptions, args []string) error {
 return nil
 }

-func statsWalkSnapshot(ctx context.Context, snapshot *restic.Snapshot, repo restic.Repository, stats *statsContainer) error {
+func statsWalkSnapshot(ctx context.Context, snapshot *restic.Snapshot, repo restic.Repository, opts StatsOptions, stats *statsContainer) error {
 if snapshot.Tree == nil {
 return fmt.Errorf("snapshot %s has nil tree", snapshot.ID().Str())
 }

 stats.SnapshotsCount++

-if statsOptions.countMode == countModeRawData {
+if opts.countMode == countModeRawData {
 // count just the sizes of unique blobs; we don't need to walk the tree
 // ourselves in this case, since a nifty function does it for us
 return restic.FindUsedBlobs(ctx, repo, restic.IDs{*snapshot.Tree}, stats.blobs, nil)
 }

 uniqueInodes := make(map[uint64]struct{})
-err := walker.Walk(ctx, repo, *snapshot.Tree, restic.NewIDSet(), statsWalkTree(repo, stats, uniqueInodes))
+err := walker.Walk(ctx, repo, *snapshot.Tree, restic.NewIDSet(), statsWalkTree(repo, opts, stats, uniqueInodes))
 if err != nil {
 return fmt.Errorf("walking tree %s: %v", *snapshot.Tree, err)
 }
@@ -203,7 +203,7 @@ func statsWalkSnapshot(ctx context.Context, snapshot *restic.Snapshot, repo rest
 return nil
 }

-func statsWalkTree(repo restic.Repository, stats *statsContainer, uniqueInodes map[uint64]struct{}) walker.WalkFunc {
+func statsWalkTree(repo restic.Repository, opts StatsOptions, stats *statsContainer, uniqueInodes map[uint64]struct{}) walker.WalkFunc {
 return func(parentTreeID restic.ID, npath string, node *restic.Node, nodeErr error) (bool, error) {
 if nodeErr != nil {
 return true, nodeErr
@@ -212,19 +212,19 @@ func statsWalkTree(repo restic.Repository, stats *statsContainer, uniqueInodes m
 return true, nil
 }

-if statsOptions.countMode == countModeUniqueFilesByContents || statsOptions.countMode == countModeBlobsPerFile {
+if opts.countMode == countModeUniqueFilesByContents || opts.countMode == countModeBlobsPerFile {
 // only count this file if we haven't visited it before
 fid := makeFileIDByContents(node)
 if _, ok := stats.uniqueFiles[fid]; !ok {
 // mark the file as visited
 stats.uniqueFiles[fid] = struct{}{}

-if statsOptions.countMode == countModeUniqueFilesByContents {
+if opts.countMode == countModeUniqueFilesByContents {
 // simply count the size of each unique file (unique by contents only)
 stats.TotalSize += node.Size
 stats.TotalFileCount++
 }
-if statsOptions.countMode == countModeBlobsPerFile {
+if opts.countMode == countModeBlobsPerFile {
 // count the size of each unique blob reference, which is
 // by unique file (unique by contents and file path)
 for _, blobID := range node.Content {
@@ -254,7 +254,7 @@ func statsWalkTree(repo restic.Repository, stats *statsContainer, uniqueInodes m
 }
 }

-if statsOptions.countMode == countModeRestoreSize {
+if opts.countMode == countModeRestoreSize {
 // as this is a file in the snapshot, we can simply count its
 // size without worrying about uniqueness, since duplicate files
 // will still be restored
@@ -284,15 +284,15 @@ func makeFileIDByContents(node *restic.Node) fileID {
 return sha256.Sum256(bb)
 }

-func verifyStatsInput(gopts GlobalOptions, args []string) error {
+func verifyStatsInput(opts StatsOptions) error {
 // require a recognized counting mode
-switch statsOptions.countMode {
+switch opts.countMode {
 case countModeRestoreSize:
 case countModeUniqueFilesByContents:
 case countModeBlobsPerFile:
 case countModeRawData:
 default:
-return fmt.Errorf("unknown counting mode: %s (use the -h flag to get a list of supported modes)", statsOptions.countMode)
+return fmt.Errorf("unknown counting mode: %s (use the -h flag to get a list of supported modes)", opts.countMode)
 }

 return nil
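The cmd_stats hunks above all make the same change: the helpers stop reading the package-level statsOptions variable and instead receive a StatsOptions value explicitly. A simplified, self-contained sketch of the resulting shape (the real count-mode constants such as countModeRawData are replaced with plain strings here):

package main

import "fmt"

type StatsOptions struct {
    countMode string
}

// verifyStatsInput now validates the options it is given instead of reading a global.
func verifyStatsInput(opts StatsOptions) error {
    switch opts.countMode {
    case "restore-size", "files-by-contents", "blobs-per-file", "raw-data":
        return nil
    default:
        return fmt.Errorf("unknown counting mode: %s", opts.countMode)
    }
}

func main() {
    fmt.Println(verifyStatsInput(StatsOptions{countMode: "raw-data"}))
    fmt.Println(verifyStatsInput(StatsOptions{countMode: "bogus"}))
}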
@@ -6,7 +6,6 @@ import (
 "path/filepath"
 "testing"

-"github.com/restic/restic/internal/test"
 rtest "github.com/restic/restic/internal/test"
 )

@@ -31,7 +30,7 @@ func Test_PrintFunctionsRespectsGlobalStdout(t *testing.T) {
 }

 func TestReadRepo(t *testing.T) {
-tempDir := test.TempDir(t)
+tempDir := rtest.TempDir(t)

 // test --repo option
 var opts GlobalOptions
@@ -70,28 +70,28 @@ func TestRestoreFailsWhenUsingInvalidPatterns(t *testing.T) {
 var err error

 // Test --exclude
-err = testRunRestoreAssumeFailure(t, "latest", RestoreOptions{Exclude: []string{"*[._]log[.-][0-9]", "!*[._]log[.-][0-9]"}}, env.gopts)
+err = testRunRestoreAssumeFailure("latest", RestoreOptions{Exclude: []string{"*[._]log[.-][0-9]", "!*[._]log[.-][0-9]"}}, env.gopts)

 rtest.Equals(t, `Fatal: --exclude: invalid pattern(s) provided:
 *[._]log[.-][0-9]
 !*[._]log[.-][0-9]`, err.Error())

 // Test --iexclude
-err = testRunRestoreAssumeFailure(t, "latest", RestoreOptions{InsensitiveExclude: []string{"*[._]log[.-][0-9]", "!*[._]log[.-][0-9]"}}, env.gopts)
+err = testRunRestoreAssumeFailure("latest", RestoreOptions{InsensitiveExclude: []string{"*[._]log[.-][0-9]", "!*[._]log[.-][0-9]"}}, env.gopts)

 rtest.Equals(t, `Fatal: --iexclude: invalid pattern(s) provided:
 *[._]log[.-][0-9]
 !*[._]log[.-][0-9]`, err.Error())

 // Test --include
-err = testRunRestoreAssumeFailure(t, "latest", RestoreOptions{Include: []string{"*[._]log[.-][0-9]", "!*[._]log[.-][0-9]"}}, env.gopts)
+err = testRunRestoreAssumeFailure("latest", RestoreOptions{Include: []string{"*[._]log[.-][0-9]", "!*[._]log[.-][0-9]"}}, env.gopts)

 rtest.Equals(t, `Fatal: --include: invalid pattern(s) provided:
 *[._]log[.-][0-9]
 !*[._]log[.-][0-9]`, err.Error())

 // Test --iinclude
-err = testRunRestoreAssumeFailure(t, "latest", RestoreOptions{InsensitiveInclude: []string{"*[._]log[.-][0-9]", "!*[._]log[.-][0-9]"}}, env.gopts)
+err = testRunRestoreAssumeFailure("latest", RestoreOptions{InsensitiveInclude: []string{"*[._]log[.-][0-9]", "!*[._]log[.-][0-9]"}}, env.gopts)

 rtest.Equals(t, `Fatal: --iinclude: invalid pattern(s) provided:
 *[._]log[.-][0-9]
@@ -64,7 +64,7 @@ func testRunMount(t testing.TB, gopts GlobalOptions, dir string, wg *sync.WaitGr
 rtest.OK(t, runMount(context.TODO(), opts, gopts, []string{dir}))
 }

-func testRunUmount(t testing.TB, gopts GlobalOptions, dir string) {
+func testRunUmount(t testing.TB, dir string) {
 var err error
 for i := 0; i < mountWait; i++ {
 if err = umount(dir); err == nil {
@@ -95,7 +95,7 @@ func checkSnapshots(t testing.TB, global GlobalOptions, repo *repository.Reposit
 go testRunMount(t, global, mountpoint, &wg)
 waitForMount(t, mountpoint)
 defer wg.Wait()
-defer testRunUmount(t, global, mountpoint)
+defer testRunUmount(t, mountpoint)

 if !snapshotsDirExists(t, mountpoint) {
 t.Fatal(`virtual directory "snapshots" doesn't exist`)
@@ -141,7 +141,7 @@ func testRunRestoreIncludes(t testing.TB, gopts GlobalOptions, dir string, snaps
 rtest.OK(t, runRestore(context.TODO(), opts, gopts, nil, []string{snapshotID.String()}))
 }

-func testRunRestoreAssumeFailure(t testing.TB, snapshotID string, opts RestoreOptions, gopts GlobalOptions) error {
+func testRunRestoreAssumeFailure(snapshotID string, opts RestoreOptions, gopts GlobalOptions) error {
 err := runRestore(context.TODO(), opts, gopts, nil, []string{snapshotID})

 return err
@@ -1181,7 +1181,7 @@ type emptySaveBackend struct {
 restic.Backend
 }

-func (b *emptySaveBackend) Save(ctx context.Context, h restic.Handle, rd restic.RewindReader) error {
+func (b *emptySaveBackend) Save(ctx context.Context, h restic.Handle, _ restic.RewindReader) error {
 return b.Backend.Save(ctx, h, restic.NewByteReader([]byte{}, nil))
 }

@@ -1601,7 +1601,7 @@ type appendOnlyBackend struct {
 }

 // called via repo.Backend().Remove()
-func (b *appendOnlyBackend) Remove(ctx context.Context, h restic.Handle) error {
+func (b *appendOnlyBackend) Remove(_ context.Context, h restic.Handle) error {
 return errors.Errorf("Failed to remove %v", h)
 }

@@ -2202,7 +2202,7 @@ type writeToOnly struct {
 rd io.Reader
 }

-func (r *writeToOnly) Read(p []byte) (n int, err error) {
+func (r *writeToOnly) Read(_ []byte) (n int, err error) {
 return 0, fmt.Errorf("should have called WriteTo instead")
 }

@@ -111,7 +111,7 @@ retryLoop:
 globalLocks.Lock()
 globalLocks.locks[lock] = lockInfo
 go refreshLocks(ctx, lock, lockInfo, refreshChan)
-go monitorLockRefresh(ctx, lock, lockInfo, refreshChan)
+go monitorLockRefresh(ctx, lockInfo, refreshChan)
 globalLocks.Unlock()

 return lock, ctx, err
@@ -170,7 +170,7 @@ func refreshLocks(ctx context.Context, lock *restic.Lock, lockInfo *lockContext,
 }
 }

-func monitorLockRefresh(ctx context.Context, lock *restic.Lock, lockInfo *lockContext, refreshed <-chan struct{}) {
+func monitorLockRefresh(ctx context.Context, lockInfo *lockContext, refreshed <-chan struct{}) {
 // time.Now() might use a monotonic timer which is paused during standby
 // convert to unix time to ensure we compare real time values
 lastRefresh := time.Now().UnixNano()
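The comment kept in the hunk above explains why monitorLockRefresh compares UnixNano values: a time.Time reading may carry a monotonic component that does not advance while the machine is suspended, so wall-clock values are used for the refresh deadline. A rough sketch of that idea only; this is not restic's actual implementation, and the five-minute threshold is made up:

package main

import (
    "fmt"
    "time"
)

func main() {
    // store wall-clock nanoseconds instead of a time.Time with a monotonic reading
    lastRefresh := time.Now().UnixNano()
    maxAge := (5 * time.Minute).Nanoseconds()

    // later, e.g. inside a monitoring loop:
    if time.Now().UnixNano()-lastRefresh > maxAge {
        fmt.Println("lock refresh is overdue")
    } else {
        fmt.Println("lock was refreshed recently")
    }
}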
@@ -11,7 +11,6 @@ import (
 "github.com/restic/restic/internal/repository"
 "github.com/restic/restic/internal/restic"
 "github.com/restic/restic/internal/test"
-rtest "github.com/restic/restic/internal/test"
 )

 func openTestRepo(t *testing.T, wrapper backendWrapper) (*repository.Repository, func(), *testEnvironment) {
@@ -22,14 +21,14 @@ func openTestRepo(t *testing.T, wrapper backendWrapper) (*repository.Repository,
 testRunInit(t, env.gopts)

 repo, err := OpenRepository(context.TODO(), env.gopts)
-rtest.OK(t, err)
+test.OK(t, err)
 return repo, cleanup, env
 }

 func checkedLockRepo(ctx context.Context, t *testing.T, repo restic.Repository, env *testEnvironment) (*restic.Lock, context.Context) {
 lock, wrappedCtx, err := lockRepo(ctx, repo, env.gopts.RetryLock, env.gopts.JSON)
-rtest.OK(t, err)
-rtest.OK(t, wrappedCtx.Err())
+test.OK(t, err)
+test.OK(t, wrappedCtx.Err())
 if lock.Stale() {
 t.Fatal("lock returned stale lock")
 }
@@ -69,7 +68,7 @@ func TestLockUnlockAll(t *testing.T) {

 lock, wrappedCtx := checkedLockRepo(context.Background(), t, repo, env)
 _, err := unlockAll(0)
-rtest.OK(t, err)
+test.OK(t, err)
 if wrappedCtx.Err() == nil {
 t.Fatal("canceled parent context did not cancel context")
 }
@@ -82,10 +81,10 @@ func TestLockConflict(t *testing.T) {
 repo, cleanup, env := openTestRepo(t, nil)
 defer cleanup()
 repo2, err := OpenRepository(context.TODO(), env.gopts)
-rtest.OK(t, err)
+test.OK(t, err)

 lock, _, err := lockRepoExclusive(context.Background(), repo, env.gopts.RetryLock, env.gopts.JSON)
-rtest.OK(t, err)
+test.OK(t, err)
 defer unlockRepo(lock)
 _, _, err = lockRepo(context.Background(), repo2, env.gopts.RetryLock, env.gopts.JSON)
 if err == nil {
@@ -63,11 +63,7 @@ directories in an encrypted repository stored on different backends.

 // run the debug functions for all subcommands (if build tag "debug" is
 // enabled)
-if err := runDebug(); err != nil {
-return err
-}
-
-return nil
+return runDebug()
 },
 }

@@ -419,7 +419,7 @@ type blobCountingRepo struct {
 }

 func (repo *blobCountingRepo) SaveBlob(ctx context.Context, t restic.BlobType, buf []byte, id restic.ID, storeDuplicate bool) (restic.ID, bool, int, error) {
-id, exists, size, err := repo.Repository.SaveBlob(ctx, t, buf, id, false)
+id, exists, size, err := repo.Repository.SaveBlob(ctx, t, buf, id, storeDuplicate)
 if exists {
 return id, exists, size, err
 }
@@ -1845,27 +1845,27 @@ type noCancelBackend struct {
 restic.Backend
 }

-func (c *noCancelBackend) Remove(ctx context.Context, h restic.Handle) error {
+func (c *noCancelBackend) Remove(_ context.Context, h restic.Handle) error {
 return c.Backend.Remove(context.Background(), h)
 }

-func (c *noCancelBackend) Save(ctx context.Context, h restic.Handle, rd restic.RewindReader) error {
+func (c *noCancelBackend) Save(_ context.Context, h restic.Handle, rd restic.RewindReader) error {
 return c.Backend.Save(context.Background(), h, rd)
 }

-func (c *noCancelBackend) Load(ctx context.Context, h restic.Handle, length int, offset int64, fn func(rd io.Reader) error) error {
+func (c *noCancelBackend) Load(_ context.Context, h restic.Handle, length int, offset int64, fn func(rd io.Reader) error) error {
 return c.Backend.Load(context.Background(), h, length, offset, fn)
 }

-func (c *noCancelBackend) Stat(ctx context.Context, h restic.Handle) (restic.FileInfo, error) {
+func (c *noCancelBackend) Stat(_ context.Context, h restic.Handle) (restic.FileInfo, error) {
 return c.Backend.Stat(context.Background(), h)
 }

-func (c *noCancelBackend) List(ctx context.Context, t restic.FileType, fn func(restic.FileInfo) error) error {
+func (c *noCancelBackend) List(_ context.Context, t restic.FileType, fn func(restic.FileInfo) error) error {
 return c.Backend.List(context.Background(), t, fn)
 }

-func (c *noCancelBackend) Delete(ctx context.Context) error {
+func (c *noCancelBackend) Delete(_ context.Context) error {
 return c.Backend.Delete(context.Background())
 }

@@ -2166,7 +2166,7 @@ func TestMetadataChanged(t *testing.T) {
 }

 // modify the mode by wrapping it in a new struct, uses the consts defined above
-fs.OverrideLstat["testfile"] = wrapFileInfo(t, fi)
+fs.OverrideLstat["testfile"] = wrapFileInfo(fi)

 // set the override values in the 'want' node which
 want.Mode = 0400
@@ -6,7 +6,6 @@ package archiver
 import (
 "os"
 "syscall"
-"testing"
 )

 type wrappedFileInfo struct {
@@ -24,7 +23,7 @@ func (fi wrappedFileInfo) Mode() os.FileMode {
 }

 // wrapFileInfo returns a new os.FileInfo with the mode, owner, and group fields changed.
-func wrapFileInfo(t testing.TB, fi os.FileInfo) os.FileInfo {
+func wrapFileInfo(fi os.FileInfo) os.FileInfo {
 // get the underlying stat_t and modify the values
 stat := fi.Sys().(*syscall.Stat_t)
 stat.Mode = mockFileInfoMode
@@ -5,7 +5,6 @@ package archiver

 import (
 "os"
-"testing"
 )

 type wrappedFileInfo struct {
@@ -18,7 +17,7 @@ func (fi wrappedFileInfo) Mode() os.FileMode {
 }

 // wrapFileInfo returns a new os.FileInfo with the mode, owner, and group fields changed.
-func wrapFileInfo(t testing.TB, fi os.FileInfo) os.FileInfo {
+func wrapFileInfo(fi os.FileInfo) os.FileInfo {
 // wrap the os.FileInfo and return the modified mode, uid and gid are ignored on Windows
 res := wrappedFileInfo{
 FileInfo: fi,
@@ -22,7 +22,7 @@ type saveFail struct {
 failAt int32
 }

-func (b *saveFail) SaveBlob(ctx context.Context, t restic.BlobType, buf []byte, id restic.ID, storeDuplicates bool) (restic.ID, bool, int, error) {
+func (b *saveFail) SaveBlob(_ context.Context, _ restic.BlobType, _ []byte, id restic.ID, _ bool) (restic.ID, bool, int, error) {
 val := atomic.AddInt32(&b.cnt, 1)
 if val == b.failAt {
 return restic.ID{}, false, 0, errTest
@@ -18,7 +18,7 @@ import (
 func createTestFiles(t testing.TB, num int) (files []string) {
 tempdir := test.TempDir(t)

-for i := 0; i < 15; i++ {
+for i := 0; i < num; i++ {
 filename := fmt.Sprintf("testfile-%d", i)
 err := os.WriteFile(filepath.Join(tempdir, filename), []byte(filename), 0600)
 if err != nil {
@@ -12,7 +12,7 @@ import (
 "golang.org/x/sync/errgroup"
 )

-func treeSaveHelper(ctx context.Context, t restic.BlobType, buf *Buffer, cb func(res SaveBlobResponse)) {
+func treeSaveHelper(_ context.Context, _ restic.BlobType, buf *Buffer, cb func(res SaveBlobResponse)) {
 cb(SaveBlobResponse{
 id: restic.NewRandomID(),
 known: false,
@@ -51,7 +51,7 @@ func open(cfg Config, rt http.RoundTripper) (*Backend, error) {
 url := fmt.Sprintf("https://%s.blob.core.windows.net/%s", cfg.AccountName, cfg.Container)
 opts := &azContainer.ClientOptions{
 ClientOptions: azcore.ClientOptions{
-Transport: http.DefaultClient,
+Transport: &http.Client{Transport: rt},
 },
 }

@@ -108,7 +108,7 @@ func open(cfg Config, rt http.RoundTripper) (*Backend, error) {
 }

 // Open opens the Azure backend at specified container.
-func Open(ctx context.Context, cfg Config, rt http.RoundTripper) (*Backend, error) {
+func Open(_ context.Context, cfg Config, rt http.RoundTripper) (*Backend, error) {
 return open(cfg, rt)
 }

@@ -306,10 +306,7 @@ func (be *b2Backend) List(ctx context.Context, t restic.FileType, fn func(restic
 return err
 }
 }
-if err := iter.Err(); err != nil {
-return err
-}
-return nil
+return iter.Err()
 }

 // Delete removes all restic keys in the bucket. It will not remove the bucket itself.
@@ -28,7 +28,7 @@ func New(be restic.Backend) *Backend {
 }

 // Save adds new Data to the backend.
-func (be *Backend) Save(ctx context.Context, h restic.Handle, rd restic.RewindReader) error {
+func (be *Backend) Save(_ context.Context, h restic.Handle, _ restic.RewindReader) error {
 if err := h.Valid(); err != nil {
 return err
 }
@@ -38,7 +38,7 @@ func (be *Backend) Save(ctx context.Context, h restic.Handle, rd restic.RewindRe
 }

 // Remove deletes a file from the backend.
-func (be *Backend) Remove(ctx context.Context, h restic.Handle) error {
+func (be *Backend) Remove(_ context.Context, _ restic.Handle) error {
 return nil
 }

@@ -52,7 +52,7 @@ func (be *Backend) Location() string {
 }

 // Delete removes all data in the backend.
-func (be *Backend) Delete(ctx context.Context) error {
+func (be *Backend) Delete(_ context.Context) error {
 return nil
 }

@@ -37,7 +37,7 @@ type LocalFilesystem struct {
 }

 // ReadDir returns all entries of a directory.
-func (l *LocalFilesystem) ReadDir(ctx context.Context, dir string) ([]os.FileInfo, error) {
+func (l *LocalFilesystem) ReadDir(_ context.Context, dir string) ([]os.FileInfo, error) {
 f, err := fs.Open(dir)
 if err != nil {
 return nil, err
@@ -105,7 +105,7 @@ func (b *Local) IsNotExist(err error) bool {
 }

 // Save stores data in the backend at the handle.
-func (b *Local) Save(ctx context.Context, h restic.Handle, rd restic.RewindReader) (err error) {
+func (b *Local) Save(_ context.Context, h restic.Handle, rd restic.RewindReader) (err error) {
 finalname := b.Filename(h)
 dir := filepath.Dir(finalname)

@@ -200,7 +200,7 @@ func (b *Local) Load(ctx context.Context, h restic.Handle, length int, offset in
 return backend.DefaultLoad(ctx, h, length, offset, b.openReader, fn)
 }

-func (b *Local) openReader(ctx context.Context, h restic.Handle, length int, offset int64) (io.ReadCloser, error) {
+func (b *Local) openReader(_ context.Context, h restic.Handle, length int, offset int64) (io.ReadCloser, error) {
 f, err := fs.Open(b.Filename(h))
 if err != nil {
 return nil, err
@@ -222,7 +222,7 @@ func (b *Local) openReader(ctx context.Context, h restic.Handle, length int, off
 }

 // Stat returns information about a blob.
-func (b *Local) Stat(ctx context.Context, h restic.Handle) (restic.FileInfo, error) {
+func (b *Local) Stat(_ context.Context, h restic.Handle) (restic.FileInfo, error) {
 fi, err := fs.Stat(b.Filename(h))
 if err != nil {
 return restic.FileInfo{}, errors.WithStack(err)
@@ -232,7 +232,7 @@ func (b *Local) Stat(ctx context.Context, h restic.Handle) (restic.FileInfo, err
 }

 // Remove removes the blob with the given name and type.
-func (b *Local) Remove(ctx context.Context, h restic.Handle) error {
+func (b *Local) Remove(_ context.Context, h restic.Handle) error {
 fn := b.Filename(h)

 // reset read-only flag
@@ -339,7 +339,7 @@ func visitFiles(ctx context.Context, dir string, fn func(restic.FileInfo) error,
 }

 // Delete removes the repository and all files.
-func (b *Local) Delete(ctx context.Context) error {
+func (b *Local) Delete(_ context.Context) error {
 return fs.RemoveAll(b.Path)
 }

@@ -327,7 +327,7 @@ func (b *Backend) List(ctx context.Context, t restic.FileType, fn func(restic.Fi
 }

 if resp.Header.Get("Content-Type") == ContentTypeV2 {
-return b.listv2(ctx, t, resp, fn)
+return b.listv2(ctx, resp, fn)
 }

 return b.listv1(ctx, t, resp, fn)
@@ -370,7 +370,7 @@ func (b *Backend) listv1(ctx context.Context, t restic.FileType, resp *http.Resp

 // listv2 uses the REST protocol v2, where a list HTTP request (e.g. `GET
 // /data/`) returns the names and sizes of all files.
-func (b *Backend) listv2(ctx context.Context, t restic.FileType, resp *http.Response, fn func(restic.FileInfo) error) error {
+func (b *Backend) listv2(ctx context.Context, resp *http.Response, fn func(restic.FileInfo) error) error {
 debug.Log("parsing API v2 response")
 dec := json.NewDecoder(resp.Body)

@@ -93,12 +93,12 @@ func TestListAPI(t *testing.T) {
 // stat file in data/, use the first two bytes in the name
 // of the file as the size :)
 filename := req.URL.Path[6:]
-len, err := strconv.ParseInt(filename[:4], 16, 64)
+length, err := strconv.ParseInt(filename[:4], 16, 64)
 if err != nil {
 t.Fatal(err)
 }

-res.Header().Set("Content-Length", fmt.Sprintf("%d", len))
+res.Header().Set("Content-Length", fmt.Sprintf("%d", length))
 res.WriteHeader(http.StatusOK)
 return
 }
@@ -67,7 +67,7 @@ func runRESTServer(ctx context.Context, t testing.TB, dir string) (*url.URL, fun
 return url, cleanup
 }

-func newTestSuite(ctx context.Context, t testing.TB, url *url.URL, minimalData bool) *test.Suite {
+func newTestSuite(_ context.Context, t testing.TB, url *url.URL, minimalData bool) *test.Suite {
 tr, err := backend.Transport(backend.TransportOptions{})
 if err != nil {
 t.Fatalf("cannot create transport for tests: %v", err)
@@ -3,6 +3,6 @@ package retry
 import "testing"

 // TestFastRetries reduces the initial retry delay to 1 millisecond
-func TestFastRetries(t testing.TB) {
+func TestFastRetries(_ testing.TB) {
 fastRetries = true
 }
@@ -187,7 +187,7 @@ func (r *SFTP) Join(p ...string) string {
 }

 // ReadDir returns the entries for a directory.
-func (r *SFTP) ReadDir(ctx context.Context, dir string) ([]os.FileInfo, error) {
+func (r *SFTP) ReadDir(_ context.Context, dir string) ([]os.FileInfo, error) {
 fi, err := r.c.ReadDir(dir)

 // sftp client does not specify dir name on error, so add it here
@@ -296,7 +296,7 @@ func tempSuffix() string {
 }

 // Save stores data in the backend at the handle.
-func (r *SFTP) Save(ctx context.Context, h restic.Handle, rd restic.RewindReader) error {
+func (r *SFTP) Save(_ context.Context, h restic.Handle, rd restic.RewindReader) error {
 if err := r.clientError(); err != nil {
 return err
 }
@@ -400,7 +400,7 @@ func (r *SFTP) Load(ctx context.Context, h restic.Handle, length int, offset int
 return backend.DefaultLoad(ctx, h, length, offset, r.openReader, fn)
 }

-func (r *SFTP) openReader(ctx context.Context, h restic.Handle, length int, offset int64) (io.ReadCloser, error) {
+func (r *SFTP) openReader(_ context.Context, h restic.Handle, length int, offset int64) (io.ReadCloser, error) {
 f, err := r.c.Open(r.Filename(h))
 if err != nil {
 return nil, err
@@ -424,7 +424,7 @@ func (r *SFTP) openReader(ctx context.Context, h restic.Handle, length int, offs
 }

 // Stat returns information about a blob.
-func (r *SFTP) Stat(ctx context.Context, h restic.Handle) (restic.FileInfo, error) {
+func (r *SFTP) Stat(_ context.Context, h restic.Handle) (restic.FileInfo, error) {
 if err := r.clientError(); err != nil {
 return restic.FileInfo{}, err
 }
@@ -438,7 +438,7 @@ func (r *SFTP) Stat(ctx context.Context, h restic.Handle) (restic.FileInfo, erro
 }

 // Remove removes the content stored at name.
-func (r *SFTP) Remove(ctx context.Context, h restic.Handle) error {
+func (r *SFTP) Remove(_ context.Context, h restic.Handle) error {
 if err := r.clientError(); err != nil {
 return err
 }
@@ -124,7 +124,7 @@ func (s *Suite) TestLoad(t *testing.T) {
 b := s.open(t)
 defer s.close(t, b)

-err := testLoad(b, restic.Handle{Type: restic.PackFile, Name: "foobar"}, 0, 0)
+err := testLoad(b, restic.Handle{Type: restic.PackFile, Name: "foobar"})
 if err == nil {
 t.Fatalf("Load() did not return an error for non-existing blob")
 }
@@ -672,7 +672,7 @@ func store(t testing.TB, b restic.Backend, tpe restic.FileType, data []byte) res
 }

 // testLoad loads a blob (but discards its contents).
-func testLoad(b restic.Backend, h restic.Handle, length int, offset int64) error {
+func testLoad(b restic.Backend, h restic.Handle) error {
 return b.Load(context.TODO(), h, 0, 0, func(rd io.Reader) (ierr error) {
 _, ierr = io.Copy(io.Discard, rd)
 return ierr
@@ -773,7 +773,7 @@ func (s *Suite) TestBackend(t *testing.T) {
 test.Assert(t, b.IsNotExist(err), "IsNotExist() did not recognize Stat() error: %v", err)

 // try to read not existing blob
-err = testLoad(b, h, 0, 0)
+err = testLoad(b, h)
 test.Assert(t, err != nil, "blob could be read before creation")
 test.Assert(t, b.IsNotExist(err), "IsNotExist() did not recognize Load() error: %v", err)

@@ -154,7 +154,7 @@ type mockReader struct {
 closed bool
 }

-func (rd *mockReader) Read(p []byte) (n int, err error) {
+func (rd *mockReader) Read(_ []byte) (n int, err error) {
 return 0, nil
 }
 func (rd *mockReader) Close() error {
internal/cache/backend.go (vendored): 6 changes
@@ -133,7 +133,7 @@ func (b *Backend) cacheFile(ctx context.Context, h restic.Handle) error {
 }

 // loadFromCache will try to load the file from the cache.
-func (b *Backend) loadFromCache(ctx context.Context, h restic.Handle, length int, offset int64, consumer func(rd io.Reader) error) (bool, error) {
+func (b *Backend) loadFromCache(h restic.Handle, length int, offset int64, consumer func(rd io.Reader) error) (bool, error) {
 rd, err := b.Cache.load(h, length, offset)
 if err != nil {
 return false, err
@@ -160,7 +160,7 @@ func (b *Backend) Load(ctx context.Context, h restic.Handle, length int, offset
 }

 // try loading from cache without checking that the handle is actually cached
-inCache, err := b.loadFromCache(ctx, h, length, offset, consumer)
+inCache, err := b.loadFromCache(h, length, offset, consumer)
 if inCache {
 if err == nil {
 return nil
@@ -183,7 +183,7 @@ func (b *Backend) Load(ctx context.Context, h restic.Handle, length int, offset
 return err
 }

-inCache, err = b.loadFromCache(ctx, h, length, offset, consumer)
+inCache, err = b.loadFromCache(h, length, offset, consumer)
 if inCache {
 return err
 }
internal/cache/backend_test.go (vendored): 2 changes
@@ -118,7 +118,7 @@ type loadErrorBackend struct {
 loadError error
 }

-func (be loadErrorBackend) Load(ctx context.Context, h restic.Handle, length int, offset int64, fn func(rd io.Reader) error) error {
+func (be loadErrorBackend) Load(_ context.Context, _ restic.Handle, _ int, _ int64, _ func(rd io.Reader) error) error {
 time.Sleep(10 * time.Millisecond)
 return be.loadError
 }
internal/cache/file_test.go (vendored): 3 changes
@@ -14,7 +14,6 @@ import (
 "github.com/restic/restic/internal/fs"
 "github.com/restic/restic/internal/restic"
 "github.com/restic/restic/internal/test"
-rtest "github.com/restic/restic/internal/test"

 "golang.org/x/sync/errgroup"
 )
@@ -271,7 +270,7 @@ func TestFileSaveConcurrent(t *testing.T) {

 func TestFileSaveAfterDamage(t *testing.T) {
 c := TestNewCache(t)
-rtest.OK(t, fs.RemoveAll(c.path))
+test.OK(t, fs.RemoveAll(c.path))

 // save a few bytes of data in the cache
 data := test.Random(123456789, 42)
@@ -322,7 +322,7 @@ func (k *Key) Seal(dst, nonce, plaintext, additionalData []byte) []byte {
 //
 // Even if the function fails, the contents of dst, up to its capacity,
 // may be overwritten.
-func (k *Key) Open(dst, nonce, ciphertext, additionalData []byte) ([]byte, error) {
+func (k *Key) Open(dst, nonce, ciphertext, _ []byte) ([]byte, error) {
 if !k.Valid() {
 return nil, errors.New("invalid key")
 }
@@ -8,7 +8,7 @@ import (

 // TestLogToStderr configures debug to log to stderr if not the debug log is
 // not already configured and returns whether logging was enabled.
-func TestLogToStderr(t testing.TB) bool {
+func TestLogToStderr(_ testing.TB) bool {
 if opts.isEnabled {
 return false
 }
@@ -17,7 +17,7 @@ func TestLogToStderr(t testing.TB) bool {
 return true
 }

-func TestDisableLog(t testing.TB) {
+func TestDisableLog(_ testing.TB) {
 opts.logger = nil
 opts.isEnabled = false
 }
@@ -35,7 +35,7 @@ var _ FS = &Reader{}

 // VolumeName returns leading volume name, for the Reader file system it's
 // always the empty string.
-func (fs *Reader) VolumeName(path string) string {
+func (fs *Reader) VolumeName(_ string) string {
 return ""
 }

@@ -76,7 +76,7 @@ func (fs *Reader) fi() os.FileInfo {
 // (O_RDONLY etc.) and perm, (0666 etc.) if applicable. If successful,
 // methods on the returned File can be used for I/O.
 // If there is an error, it will be of type *os.PathError.
-func (fs *Reader) OpenFile(name string, flag int, perm os.FileMode) (f File, err error) {
+func (fs *Reader) OpenFile(name string, flag int, _ os.FileMode) (f File, err error) {
 if flag & ^(O_RDONLY|O_NOFOLLOW) != 0 {
 return nil, pathError("open", name,
 fmt.Errorf("invalid combination of flags 0x%x", flag))
@@ -149,7 +149,7 @@ func (fs *Reader) Separator() string {
 }

 // IsAbs reports whether the path is absolute. For the Reader, this is always the case.
-func (fs *Reader) IsAbs(p string) bool {
+func (fs *Reader) IsAbs(_ string) bool {
 return true
 }

@@ -236,11 +236,11 @@ func (f fakeFile) Fd() uintptr {
 return 0
 }

-func (f fakeFile) Readdirnames(n int) ([]string, error) {
+func (f fakeFile) Readdirnames(_ int) ([]string, error) {
 return nil, pathError("readdirnames", f.name, os.ErrInvalid)
 }

-func (f fakeFile) Readdir(n int) ([]os.FileInfo, error) {
+func (f fakeFile) Readdir(_ int) ([]os.FileInfo, error) {
 return nil, pathError("readdir", f.name, os.ErrInvalid)
 }

@@ -248,7 +248,7 @@ func (f fakeFile) Seek(int64, int) (int64, error) {
 return 0, pathError("seek", f.name, os.ErrInvalid)
 }

-func (f fakeFile) Read(p []byte) (int, error) {
+func (f fakeFile) Read(_ []byte) (int, error) {
 return 0, pathError("read", f.name, os.ErrInvalid)
 }

@@ -34,7 +34,7 @@ func HasSufficientPrivilegesForVSS() error {
 // NewVssSnapshot creates a new vss snapshot. If creating the snapshots doesn't
 // finish within the timeout an error is returned.
 func NewVssSnapshot(
-volume string, timeoutInSeconds uint, msgError ErrorHandler) (VssSnapshot, error) {
+_ string, _ uint, _ ErrorHandler) (VssSnapshot, error) {
 return VssSnapshot{}, errors.New("VSS snapshots are only supported on windows")
 }

@@ -9,6 +9,7 @@ import (
 "os"
 "path/filepath"
 "sync"
+"syscall"

 "github.com/anacrolix/fuse"
 "github.com/anacrolix/fuse/fs"
@@ -119,7 +120,7 @@ func (d *dir) open(ctx context.Context) error {
 return nil
 }

-func (d *dir) Attr(ctx context.Context, a *fuse.Attr) error {
+func (d *dir) Attr(_ context.Context, a *fuse.Attr) error {
 debug.Log("Attr()")
 a.Inode = d.inode
 a.Mode = os.ModeDir | d.node.Mode
@@ -202,7 +203,7 @@ func (d *dir) Lookup(ctx context.Context, name string) (fs.Node, error) {
 node, ok := d.items[name]
 if !ok {
 debug.Log(" Lookup(%v) -> not found", name)
-return nil, fuse.ENOENT
+return nil, syscall.ENOENT
 }
 inode := inodeFromNode(d.inode, node)
 switch node.Type {
@@ -216,11 +217,11 @@ func (d *dir) Lookup(ctx context.Context, name string) (fs.Node, error) {
 return newOther(d.root, inode, node)
 default:
 debug.Log(" node %v has unknown type %v", name, node.Type)
-return nil, fuse.ENOENT
+return nil, syscall.ENOENT
 }
 }

-func (d *dir) Listxattr(ctx context.Context, req *fuse.ListxattrRequest, resp *fuse.ListxattrResponse) error {
+func (d *dir) Listxattr(_ context.Context, req *fuse.ListxattrRequest, resp *fuse.ListxattrResponse) error {
 debug.Log("Listxattr(%v, %v)", d.node.Name, req.Size)
 for _, attr := range d.node.ExtendedAttributes {
 resp.Append(attr.Name)
@@ -228,7 +229,7 @@ func (d *dir) Listxattr(ctx context.Context, req *fuse.ListxattrRequest, resp *f
 return nil
 }

-func (d *dir) Getxattr(ctx context.Context, req *fuse.GetxattrRequest, resp *fuse.GetxattrResponse) error {
+func (d *dir) Getxattr(_ context.Context, req *fuse.GetxattrRequest, resp *fuse.GetxattrResponse) error {
 debug.Log("Getxattr(%v, %v, %v)", d.node.Name, req.Name, req.Size)
 attrval := d.node.GetExtendedAttribute(req.Name)
 if attrval != nil {
@@ -45,7 +45,7 @@ func newFile(root *Root, inode uint64, node *restic.Node) (fusefile *file, err e
 }, nil
 }

-func (f *file) Attr(ctx context.Context, a *fuse.Attr) error {
+func (f *file) Attr(_ context.Context, a *fuse.Attr) error {
 debug.Log("Attr(%v)", f.node.Name)
 a.Inode = f.inode
 a.Mode = f.node.Mode
@@ -66,7 +66,7 @@ func (f *file) Attr(ctx context.Context, a *fuse.Attr) error {

 }

-func (f *file) Open(ctx context.Context, req *fuse.OpenRequest, resp *fuse.OpenResponse) (fs.Handle, error) {
+func (f *file) Open(_ context.Context, _ *fuse.OpenRequest, _ *fuse.OpenResponse) (fs.Handle, error) {
 debug.Log("open file %v with %d blobs", f.node.Name, len(f.node.Content))

 var bytes uint64
@@ -166,7 +166,7 @@ func (f *openFile) Read(ctx context.Context, req *fuse.ReadRequest, resp *fuse.R
 return nil
 }

-func (f *file) Listxattr(ctx context.Context, req *fuse.ListxattrRequest, resp *fuse.ListxattrResponse) error {
+func (f *file) Listxattr(_ context.Context, req *fuse.ListxattrRequest, resp *fuse.ListxattrResponse) error {
 debug.Log("Listxattr(%v, %v)", f.node.Name, req.Size)
 for _, attr := range f.node.ExtendedAttributes {
 resp.Append(attr.Name)
@@ -174,7 +174,7 @@ func (f *file) Listxattr(ctx context.Context, req *fuse.ListxattrRequest, resp *
 return nil
 }

-func (f *file) Getxattr(ctx context.Context, req *fuse.GetxattrRequest, resp *fuse.GetxattrResponse) error {
+func (f *file) Getxattr(_ context.Context, req *fuse.GetxattrRequest, resp *fuse.GetxattrResponse) error {
 debug.Log("Getxattr(%v, %v, %v)", f.node.Name, req.Name, req.Size)
 attrval := f.node.GetExtendedAttribute(req.Name)
 if attrval != nil {
@@ -24,11 +24,11 @@ func newLink(root *Root, inode uint64, node *restic.Node) (*link, error) {
 return &link{root: root, inode: inode, node: node}, nil
 }

-func (l *link) Readlink(ctx context.Context, req *fuse.ReadlinkRequest) (string, error) {
+func (l *link) Readlink(_ context.Context, _ *fuse.ReadlinkRequest) (string, error) {
 return l.node.LinkTarget, nil
 }

-func (l *link) Attr(ctx context.Context, a *fuse.Attr) error {
+func (l *link) Attr(_ context.Context, a *fuse.Attr) error {
 a.Inode = l.inode
 a.Mode = l.node.Mode

@@ -20,11 +20,11 @@ func newOther(root *Root, inode uint64, node *restic.Node) (*other, error) {
 return &other{root: root, inode: inode, node: node}, nil
 }

-func (l *other) Readlink(ctx context.Context, req *fuse.ReadlinkRequest) (string, error) {
+func (l *other) Readlink(_ context.Context, _ *fuse.ReadlinkRequest) (string, error) {
 return l.node.LinkTarget, nil
 }

-func (l *other) Attr(ctx context.Context, a *fuse.Attr) error {
+func (l *other) Attr(_ context.Context, a *fuse.Attr) error {
 a.Inode = l.inode
 a.Mode = l.node.Mode

@@ -6,6 +6,7 @@ package fuse
 import (
 "context"
 "os"
+"syscall"

 "github.com/restic/restic/internal/debug"
 "github.com/restic/restic/internal/restic"
@@ -41,7 +42,7 @@ func NewSnapshotsDir(root *Root, inode, parentInode uint64, dirStruct *Snapshots
 }

 // Attr returns the attributes for any dir in the snapshots directory structure
-func (d *SnapshotsDir) Attr(ctx context.Context, attr *fuse.Attr) error {
+func (d *SnapshotsDir) Attr(_ context.Context, attr *fuse.Attr) error {
 attr.Inode = d.inode
 attr.Mode = os.ModeDir | 0555
 attr.Uid = d.root.uid
@@ -60,7 +61,7 @@ func (d *SnapshotsDir) ReadDirAll(ctx context.Context) ([]fuse.Dirent, error) {
 if err != nil {
 return nil, unwrapCtxCanceled(err)
 } else if meta == nil {
-return nil, fuse.ENOENT
+return nil, syscall.ENOENT
 }

 items := []fuse.Dirent{
@@ -99,7 +100,7 @@ func (d *SnapshotsDir) Lookup(ctx context.Context, name string) (fs.Node, error)
 if err != nil {
 return nil, unwrapCtxCanceled(err)
 } else if meta == nil {
-return nil, fuse.ENOENT
+return nil, syscall.ENOENT
 }

 entry := meta.names[name]
@@ -114,7 +115,7 @@ func (d *SnapshotsDir) Lookup(ctx context.Context, name string) (fs.Node, error)
 }
 }

-return nil, fuse.ENOENT
+return nil, syscall.ENOENT
 }

 // SnapshotLink
@@ -133,12 +133,12 @@ func newSnapshotLink(root *Root, inode uint64, target string, snapshot *restic.S
 }

 // Readlink
-func (l *snapshotLink) Readlink(ctx context.Context, req *fuse.ReadlinkRequest) (string, error) {
+func (l *snapshotLink) Readlink(_ context.Context, _ *fuse.ReadlinkRequest) (string, error) {
 return l.target, nil
 }

 // Attr
-func (l *snapshotLink) Attr(ctx context.Context, a *fuse.Attr) error {
+func (l *snapshotLink) Attr(_ context.Context, a *fuse.Attr) error {
 a.Inode = l.inode
 a.Mode = os.ModeSymlink | 0777
 a.Size = uint64(len(l.target))
@@ -331,7 +331,7 @@ var (
 func createFilledRepo(t testing.TB, snapshots int, dup float32, version uint) restic.Repository {
 repo := repository.TestRepositoryWithVersion(t, version)

-for i := 0; i < 3; i++ {
+for i := 0; i < snapshots; i++ {
 restic.TestCreateSnapshot(t, repo, snapshotTime.Add(time.Duration(i)*time.Second), depth, dup)
 }
 return repo
@@ -39,7 +39,7 @@ func toS3Backend(b restic.Backend) *s3.Backend {
 }

 // Check tests whether the migration can be applied.
-func (m *S3Layout) Check(ctx context.Context, repo restic.Repository) (bool, string, error) {
+func (m *S3Layout) Check(_ context.Context, repo restic.Repository) (bool, string, error) {
 be := toS3Backend(repo.Backend())
 if be == nil {
 debug.Log("backend is not s3")
@@ -44,7 +44,7 @@ func (*UpgradeRepoV2) Desc() string {
 return "upgrade a repository to version 2"
 }

-func (*UpgradeRepoV2) Check(ctx context.Context, repo restic.Repository) (bool, string, error) {
+func (*UpgradeRepoV2) Check(_ context.Context, repo restic.Repository) (bool, string, error) {
 isV1 := repo.Config().Version == 1
 reason := ""
 if !isV1 {
@@ -21,7 +21,6 @@ import (
 "github.com/restic/restic/internal/index"
 "github.com/restic/restic/internal/repository"
 "github.com/restic/restic/internal/restic"
-"github.com/restic/restic/internal/test"
 rtest "github.com/restic/restic/internal/test"
 "golang.org/x/sync/errgroup"
 )
@@ -322,7 +321,7 @@ func TestRepositoryLoadUnpackedRetryBroken(t *testing.T) {
 rtest.OK(t, err)
 repo, err := repository.New(&damageOnceBackend{Backend: be}, repository.Options{})
 rtest.OK(t, err)
-err = repo.SearchKey(context.TODO(), test.TestPassword, 10, "")
+err = repo.SearchKey(context.TODO(), rtest.TestPassword, 10, "")
 rtest.OK(t, err)

 rtest.OK(t, repo.LoadIndex(context.TODO()))
@@ -428,7 +427,7 @@ func testRepositoryIncrementalIndex(t *testing.T, version uint) {
 }

 // buildPackfileWithoutHeader returns a manually built pack file without a header.
-func buildPackfileWithoutHeader(t testing.TB, blobSizes []int, key *crypto.Key, compress bool) (blobs []restic.Blob, packfile []byte) {
+func buildPackfileWithoutHeader(blobSizes []int, key *crypto.Key, compress bool) (blobs []restic.Blob, packfile []byte) {
 opts := []zstd.EOption{
 // Set the compression level configured.
 zstd.WithEncoderLevel(zstd.SpeedDefault),
@@ -446,7 +445,7 @@ func buildPackfileWithoutHeader(t testing.TB, blobSizes []int, key *crypto.Key,

 var offset uint
 for i, size := range blobSizes {
-plaintext := test.Random(800+i, size)
+plaintext := rtest.Random(800+i, size)
 id := restic.Hash(plaintext)
 uncompressedLength := uint(0)
 if compress {
@@ -525,7 +524,7 @@ func testStreamPack(t *testing.T, version uint) {
 t.Fatal("test does not suport repository version", version)
 }

-packfileBlobs, packfile := buildPackfileWithoutHeader(t, blobSizes, &key, compress)
+packfileBlobs, packfile := buildPackfileWithoutHeader(blobSizes, &key, compress)

 loadCalls := 0
 load := func(ctx context.Context, h restic.Handle, length int, offset int64, fn func(rd io.Reader) error) error {
@@ -34,7 +34,7 @@ func TestUseLowSecurityKDFParameters(t logger) {
 }

 // TestBackend returns a fully configured in-memory backend.
-func TestBackend(t testing.TB) restic.Backend {
+func TestBackend(_ testing.TB) restic.Backend {
 return mem.New()
 }

@@ -12,7 +12,7 @@ type saver struct {
 fn func(restic.FileType, []byte) (restic.ID, error)
 }

-func (s saver) SaveUnpacked(ctx context.Context, t restic.FileType, buf []byte) (restic.ID, error) {
+func (s saver) SaveUnpacked(_ context.Context, t restic.FileType, buf []byte) (restic.ID, error) {
 return s.fn(t, buf)
 }

@@ -24,7 +24,7 @@ type loader struct {
 fn func(restic.FileType, restic.ID) ([]byte, error)
 }

-func (l loader) LoadUnpacked(ctx context.Context, t restic.FileType, id restic.ID) (data []byte, err error) {
+func (l loader) LoadUnpacked(_ context.Context, t restic.FileType, id restic.ID) (data []byte, err error) {
 return l.fn(t, id)
 }

@@ -166,7 +166,7 @@ func (r ForbiddenRepo) LoadBlob(context.Context, restic.BlobType, restic.ID, []b
 return nil, errors.New("should not be called")
 }

-func (r ForbiddenRepo) LookupBlobSize(id restic.ID, tpe restic.BlobType) (uint, bool) {
+func (r ForbiddenRepo) LookupBlobSize(_ restic.ID, _ restic.BlobType) (uint, bool) {
 return 0, false
 }

@ -605,11 +605,7 @@ func (node *Node) fillExtra(path string, fi os.FileInfo) error {
|
||||
return errors.Errorf("invalid node type %q", node.Type)
|
||||
}
|
||||
|
||||
if err := node.fillExtendedAttributes(path); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
return node.fillExtendedAttributes(path)
|
||||
}
|
||||
|
||||
func (node *Node) fillExtendedAttributes(path string) error {
|
||||
|
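
The fillExtra hunk above collapses an "if err := f(); err != nil { return err }; return nil" tail into a direct return of the call. A standalone sketch of the same simplification, with hypothetical function names (not restic code):

package main

import (
	"errors"
	"fmt"
)

func step() error { return errors.New("boom") }

// Before: check the error, return it, then return nil.
func runVerbose() error {
	if err := step(); err != nil {
		return err
	}
	return nil
}

// After: when the error is the last thing returned, just return the call.
func run() error {
	return step()
}

func main() {
	fmt.Println(runVerbose(), run())
}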

@ -143,10 +143,10 @@ func (f *SnapshotFilter) FindAll(ctx context.Context, be Lister, loader LoaderUn
if err == nil {
if ids.Has(*sn.ID()) {
continue
} else {
ids.Insert(*sn.ID())
s = sn.ID().String()
}

ids.Insert(*sn.ID())
s = sn.ID().String()
}
}
err = fn(s, sn, err)
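
The FindAll hunk above drops an else branch that became redundant once the if branch ends in continue. A minimal sketch of the same control-flow simplification, using hypothetical data (not restic code):

package main

import "fmt"

func main() {
	seen := map[int]bool{}
	for _, n := range []int{1, 2, 2, 3} {
		if seen[n] {
			continue
		}
		// no else needed: this point is only reached when the
		// branch above did not continue.
		seen[n] = true
		fmt.Println(n)
	}
}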

@ -136,7 +136,7 @@ func y(d time.Time, _ int) int {
}

// always returns a unique number for d.
func always(d time.Time, nr int) int {
func always(_ time.Time, nr int) int {
return nr
}

@ -209,7 +209,7 @@ func TestParseHandle(s string, t BlobType) BlobHandle {
}

// TestSetSnapshotID sets the snapshot's ID.
func TestSetSnapshotID(t testing.TB, sn *Snapshot, id ID) {
func TestSetSnapshotID(_ testing.TB, sn *Snapshot, id ID) {
sn.id = &id
}

@ -30,7 +30,7 @@ type Restorer struct {
var restorerAbortOnAllErrors = func(location string, err error) error { return err }

// NewRestorer creates a restorer preloaded with the content from the snapshot id.
func NewRestorer(ctx context.Context, repo restic.Repository, sn *restic.Snapshot, sparse bool,
func NewRestorer(repo restic.Repository, sn *restic.Snapshot, sparse bool,
progress *restoreui.Progress) *Restorer {
r := &Restorer{
repo: repo,

@ -325,7 +325,7 @@ func TestRestorer(t *testing.T) {
sn, id := saveSnapshot(t, repo, test.Snapshot)
t.Logf("snapshot saved as %v", id.Str())

res := NewRestorer(context.TODO(), repo, sn, false, nil)
res := NewRestorer(repo, sn, false, nil)

tempdir := rtest.TempDir(t)
// make sure we're creating a new subdir of the tempdir

@ -442,7 +442,7 @@ func TestRestorerRelative(t *testing.T) {
sn, id := saveSnapshot(t, repo, test.Snapshot)
t.Logf("snapshot saved as %v", id.Str())

res := NewRestorer(context.TODO(), repo, sn, false, nil)
res := NewRestorer(repo, sn, false, nil)

tempdir := rtest.TempDir(t)
cleanup := rtest.Chdir(t, tempdir)

@ -671,7 +671,7 @@ func TestRestorerTraverseTree(t *testing.T) {
repo := repository.TestRepository(t)
sn, _ := saveSnapshot(t, repo, test.Snapshot)

res := NewRestorer(context.TODO(), repo, sn, false, nil)
res := NewRestorer(repo, sn, false, nil)

res.SelectFilter = test.Select

@ -747,7 +747,7 @@ func TestRestorerConsistentTimestampsAndPermissions(t *testing.T) {
},
})

res := NewRestorer(context.TODO(), repo, sn, false, nil)
res := NewRestorer(repo, sn, false, nil)

res.SelectFilter = func(item string, dstpath string, node *restic.Node) (selectedForRestore bool, childMayBeSelected bool) {
switch filepath.ToSlash(item) {

@ -802,7 +802,7 @@ func TestVerifyCancel(t *testing.T) {
repo := repository.TestRepository(t)
sn, _ := saveSnapshot(t, repo, snapshot)

res := NewRestorer(context.TODO(), repo, sn, false, nil)
res := NewRestorer(repo, sn, false, nil)

tempdir := rtest.TempDir(t)
ctx, cancel := context.WithCancel(context.Background())

@ -844,7 +844,7 @@ func TestRestorerSparseFiles(t *testing.T) {
archiver.SnapshotOptions{})
rtest.OK(t, err)

res := NewRestorer(context.TODO(), repo, sn, true, nil)
res := NewRestorer(repo, sn, true, nil)

tempdir := rtest.TempDir(t)
ctx, cancel := context.WithCancel(context.Background())

@ -31,7 +31,7 @@ func TestRestorerRestoreEmptyHardlinkedFileds(t *testing.T) {
},
})

res := NewRestorer(context.TODO(), repo, sn, false, nil)
res := NewRestorer(repo, sn, false, nil)

res.SelectFilter = func(item string, dstpath string, node *restic.Node) (selectedForRestore bool, childMayBeSelected bool) {
return true, true

@ -73,9 +73,9 @@ type printerMock struct {
filesFinished, filesTotal, allBytesWritten, allBytesTotal uint64
}

func (p *printerMock) Update(filesFinished, filesTotal, allBytesWritten, allBytesTotal uint64, duration time.Duration) {
func (p *printerMock) Update(_, _, _, _ uint64, _ time.Duration) {
}
func (p *printerMock) Finish(filesFinished, filesTotal, allBytesWritten, allBytesTotal uint64, duration time.Duration) {
func (p *printerMock) Finish(filesFinished, filesTotal, allBytesWritten, allBytesTotal uint64, _ time.Duration) {
p.filesFinished = filesFinished
p.filesTotal = filesTotal
p.allBytesWritten = allBytesWritten

@ -99,7 +99,7 @@ func TestRestorerProgressBar(t *testing.T) {

mock := &printerMock{}
progress := restoreui.NewProgress(mock, 0)
res := NewRestorer(context.TODO(), repo, sn, false, progress)
res := NewRestorer(repo, sn, false, progress)
res.SelectFilter = func(item string, dstpath string, node *restic.Node) (selectedForRestore bool, childMayBeSelected bool) {
return true, true
}

@ -68,21 +68,21 @@ func extractToFile(buf []byte, filename, target string, printf func(string, ...i

// Write everything to a temp file
dir := filepath.Dir(target)
new, err := os.CreateTemp(dir, "restic")
newFile, err := os.CreateTemp(dir, "restic")
if err != nil {
return err
}

n, err := io.Copy(new, rd)
n, err := io.Copy(newFile, rd)
if err != nil {
_ = new.Close()
_ = os.Remove(new.Name())
_ = newFile.Close()
_ = os.Remove(newFile.Name())
return err
}
if err = new.Sync(); err != nil {
if err = newFile.Sync(); err != nil {
return err
}
if err = new.Close(); err != nil {
if err = newFile.Close(); err != nil {
return err
}

@ -98,7 +98,7 @@ func extractToFile(buf []byte, filename, target string, printf func(string, ...i
}

// Rename the temp file to the final location atomically.
if err := os.Rename(new.Name(), target); err != nil {
if err := os.Rename(newFile.Name(), target); err != nil {
return err
}
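
The extractToFile hunks above rename a variable called new, which shadows Go's predeclared new function, to newFile. A small sketch of why such shadowing is worth avoiding; the code below is illustrative only and not taken from restic:

package main

import "fmt"

func shadowed() {
	// Inside this function "new" is a local string, so the predeclared
	// new(T) cannot be used here; linters flag this kind of shadowing.
	new := "temp-file-name"
	fmt.Println(new)
}

func renamed() {
	// A descriptive name keeps the builtin available and reads better.
	newFile := "temp-file-name"
	p := new(int)
	*p = 1
	fmt.Println(newFile, *p)
}

func main() {
	shadowed()
	renamed()
}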

@ -4,7 +4,7 @@
package selfupdate

// Remove the target binary.
func removeResticBinary(dir, target string) error {
func removeResticBinary(_, _ string) error {
// removed on rename on this platform
return nil
}

@ -99,7 +99,7 @@ func (b *JSONProgress) Error(item string, err error) error {

// CompleteItem is the status callback function for the archiver when a
// file/dir has been saved successfully.
func (b *JSONProgress) CompleteItem(messageType, item string, previous, current *restic.Node, s archiver.ItemStats, d time.Duration) {
func (b *JSONProgress) CompleteItem(messageType, item string, s archiver.ItemStats, d time.Duration) {
if b.v < 2 {
return
}

@ -161,7 +161,7 @@ func (b *JSONProgress) CompleteItem(messageType, item string, previous, current
}

// ReportTotal sets the total stats up to now
func (b *JSONProgress) ReportTotal(item string, start time.Time, s archiver.ScanStats) {
func (b *JSONProgress) ReportTotal(start time.Time, s archiver.ScanStats) {
if b.v >= 2 {
b.print(verboseUpdate{
MessageType: "verbose_status",

@ -15,8 +15,8 @@ type ProgressPrinter interface {
Update(total, processed Counter, errors uint, currentFiles map[string]struct{}, start time.Time, secs uint64)
Error(item string, err error) error
ScannerError(item string, err error) error
CompleteItem(messageType string, item string, previous, current *restic.Node, s archiver.ItemStats, d time.Duration)
ReportTotal(item string, start time.Time, s archiver.ScanStats)
CompleteItem(messageType string, item string, s archiver.ItemStats, d time.Duration)
ReportTotal(start time.Time, s archiver.ScanStats)
Finish(snapshotID restic.ID, start time.Time, summary *Summary, dryRun bool)
Reset()

@ -144,19 +144,19 @@ func (p *Progress) CompleteItem(item string, previous, current *restic.Node, s a

switch {
case previous == nil:
p.printer.CompleteItem("dir new", item, previous, current, s, d)
p.printer.CompleteItem("dir new", item, s, d)
p.mu.Lock()
p.summary.Dirs.New++
p.mu.Unlock()

case previous.Equals(*current):
p.printer.CompleteItem("dir unchanged", item, previous, current, s, d)
p.printer.CompleteItem("dir unchanged", item, s, d)
p.mu.Lock()
p.summary.Dirs.Unchanged++
p.mu.Unlock()

default:
p.printer.CompleteItem("dir modified", item, previous, current, s, d)
p.printer.CompleteItem("dir modified", item, s, d)
p.mu.Lock()
p.summary.Dirs.Changed++
p.mu.Unlock()

@ -170,19 +170,19 @@ func (p *Progress) CompleteItem(item string, previous, current *restic.Node, s a

switch {
case previous == nil:
p.printer.CompleteItem("file new", item, previous, current, s, d)
p.printer.CompleteItem("file new", item, s, d)
p.mu.Lock()
p.summary.Files.New++
p.mu.Unlock()

case previous.Equals(*current):
p.printer.CompleteItem("file unchanged", item, previous, current, s, d)
p.printer.CompleteItem("file unchanged", item, s, d)
p.mu.Lock()
p.summary.Files.Unchanged++
p.mu.Unlock()

default:
p.printer.CompleteItem("file modified", item, previous, current, s, d)
p.printer.CompleteItem("file modified", item, s, d)
p.mu.Lock()
p.summary.Files.Changed++
p.mu.Unlock()

@ -200,7 +200,7 @@ func (p *Progress) ReportTotal(item string, s archiver.ScanStats) {

if item == "" {
p.scanFinished = true
p.printer.ReportTotal(item, p.start, s)
p.printer.ReportTotal(p.start, s)
}
}

@ -15,12 +15,12 @@ type mockPrinter struct {
id restic.ID
}

func (p *mockPrinter) Update(total, processed Counter, errors uint, currentFiles map[string]struct{}, start time.Time, secs uint64) {
func (p *mockPrinter) Update(_, _ Counter, _ uint, _ map[string]struct{}, _ time.Time, _ uint64) {
}
func (p *mockPrinter) Error(item string, err error) error { return err }
func (p *mockPrinter) ScannerError(item string, err error) error { return err }
func (p *mockPrinter) Error(_ string, err error) error { return err }
func (p *mockPrinter) ScannerError(_ string, err error) error { return err }

func (p *mockPrinter) CompleteItem(messageType string, item string, previous, current *restic.Node, s archiver.ItemStats, d time.Duration) {
func (p *mockPrinter) CompleteItem(messageType string, _ string, _ archiver.ItemStats, _ time.Duration) {
p.Lock()
defer p.Unlock()

@ -32,8 +32,8 @@ func (p *mockPrinter) CompleteItem(messageType string, item string, previous, cu
}
}

func (p *mockPrinter) ReportTotal(_ string, _ time.Time, _ archiver.ScanStats) {}
func (p *mockPrinter) Finish(id restic.ID, _ time.Time, summary *Summary, dryRun bool) {
func (p *mockPrinter) ReportTotal(_ time.Time, _ archiver.ScanStats) {}
func (p *mockPrinter) Finish(id restic.ID, _ time.Time, summary *Summary, _ bool) {
p.Lock()
defer p.Unlock()

@ -43,8 +43,8 @@ func (p *mockPrinter) Finish(id restic.ID, _ time.Time, summary *Summary, dryRun

func (p *mockPrinter) Reset() {}

func (p *mockPrinter) P(msg string, args ...interface{}) {}
func (p *mockPrinter) V(msg string, args ...interface{}) {}
func (p *mockPrinter) P(_ string, _ ...interface{}) {}
func (p *mockPrinter) V(_ string, _ ...interface{}) {}

func TestProgress(t *testing.T) {
t.Parallel()

@ -72,20 +72,20 @@ func (b *TextProgress) Update(total, processed Counter, errors uint, currentFile

// ScannerError is the error callback function for the scanner, it prints the
// error in verbose mode and returns nil.
func (b *TextProgress) ScannerError(item string, err error) error {
func (b *TextProgress) ScannerError(_ string, err error) error {
b.V("scan: %v\n", err)
return nil
}

// Error is the error callback function for the archiver, it prints the error and returns nil.
func (b *TextProgress) Error(item string, err error) error {
func (b *TextProgress) Error(_ string, err error) error {
b.E("error: %v\n", err)
return nil
}

// CompleteItem is the status callback function for the archiver when a
// file/dir has been saved successfully.
func (b *TextProgress) CompleteItem(messageType, item string, previous, current *restic.Node, s archiver.ItemStats, d time.Duration) {
func (b *TextProgress) CompleteItem(messageType, item string, s archiver.ItemStats, d time.Duration) {
item = termstatus.Quote(item)

switch messageType {

@ -111,7 +111,7 @@ func (b *TextProgress) CompleteItem(messageType, item string, previous, current
}

// ReportTotal sets the total stats up to now
func (b *TextProgress) ReportTotal(item string, start time.Time, s archiver.ScanStats) {
func (b *TextProgress) ReportTotal(start time.Time, s archiver.ScanStats) {
b.V("scan finished in %.3fs: %v files, %s",
time.Since(start).Seconds(),
s.Files, ui.FormatBytes(s.Bytes),

@ -126,7 +126,7 @@ func (b *TextProgress) Reset() {
}

// Finish prints the finishing messages.
func (b *TextProgress) Finish(snapshotID restic.ID, start time.Time, summary *Summary, dryRun bool) {
func (b *TextProgress) Finish(_ restic.ID, start time.Time, summary *Summary, dryRun bool) {
b.P("\n")
b.P("Files: %5d new, %5d changed, %5d unmodified\n", summary.Files.New, summary.Files.Changed, summary.Files.Unchanged)
b.P("Dirs: %5d new, %5d changed, %5d unmodified\n", summary.Dirs.New, summary.Dirs.Changed, summary.Dirs.Unchanged)

@ -59,7 +59,7 @@ func TestCounter(t *testing.T) {
t.Log("number of calls:", ncalls)
}

func TestCounterNil(t *testing.T) {
func TestCounterNil(_ *testing.T) {
// Shouldn't panic.
var c *progress.Counter
c.Add(1)

@ -26,7 +26,8 @@ func TestUpdater(t *testing.T) {
test.Assert(t, ncalls > 0, "no progress was reported")
}

func TestUpdaterStopTwice(t *testing.T) {
func TestUpdaterStopTwice(_ *testing.T) {
// must not panic
c := progress.NewUpdater(0, func(runtime time.Duration, final bool) {})
c.Done()
c.Done()

@ -25,7 +25,7 @@ const mockFinishDuration = 42 * time.Second
func (p *mockPrinter) Update(filesFinished, filesTotal, allBytesWritten, allBytesTotal uint64, duration time.Duration) {
p.trace = append(p.trace, printerTraceEntry{filesFinished, filesTotal, allBytesWritten, allBytesTotal, duration, false})
}
func (p *mockPrinter) Finish(filesFinished, filesTotal, allBytesWritten, allBytesTotal uint64, duration time.Duration) {
func (p *mockPrinter) Finish(filesFinished, filesTotal, allBytesWritten, allBytesTotal uint64, _ time.Duration) {
p.trace = append(p.trace, printerTraceEntry{filesFinished, filesTotal, allBytesWritten, allBytesTotal, mockFinishDuration, true})
}

@ -177,10 +177,10 @@ func (t *Table) Write(w io.Writer) error {

// write all the lines
for i, line := range lines {
print := func(w io.Writer, s string) error {
printer := func(w io.Writer, s string) error {
return t.PrintData(w, i, s)
}
err := printLine(w, print, t.CellSeparator, line, columnWidths)
err := printLine(w, printer, t.CellSeparator, line, columnWidths)
if err != nil {
return err
}

@ -74,8 +74,8 @@ func New(wr io.Writer, errWriter io.Writer, disableStatus bool) *Terminal {
// only use the fancy status code when we're running on a real terminal.
t.canUpdateStatus = true
t.fd = d.Fd()
t.clearCurrentLine = clearCurrentLine(wr, t.fd)
t.moveCursorUp = moveCursorUp(wr, t.fd)
t.clearCurrentLine = clearCurrentLine(t.fd)
t.moveCursorUp = moveCursorUp(t.fd)
}

return t

@ -15,7 +15,7 @@ const (

// posixClearCurrentLine removes all characters from the current line and resets the
// cursor position to the first column.
func posixClearCurrentLine(wr io.Writer, fd uintptr) {
func posixClearCurrentLine(wr io.Writer, _ uintptr) {
// clear current line
_, err := wr.Write([]byte(posixControlMoveCursorHome + posixControlClearLine))
if err != nil {

@ -25,7 +25,7 @@ func posixClearCurrentLine(wr io.Writer, fd uintptr) {
}

// posixMoveCursorUp moves the cursor to the line n lines above the current one.
func posixMoveCursorUp(wr io.Writer, fd uintptr, n int) {
func posixMoveCursorUp(wr io.Writer, _ uintptr, n int) {
data := []byte(posixControlMoveCursorHome)
data = append(data, bytes.Repeat([]byte(posixControlMoveCursorUp), n)...)
_, err := wr.Write(data)

@ -12,12 +12,12 @@ import (

// clearCurrentLine removes all characters from the current line and resets the
// cursor position to the first column.
func clearCurrentLine(wr io.Writer, fd uintptr) func(io.Writer, uintptr) {
func clearCurrentLine(_ uintptr) func(io.Writer, uintptr) {
return posixClearCurrentLine
}

// moveCursorUp moves the cursor to the line n lines above the current one.
func moveCursorUp(wr io.Writer, fd uintptr) func(io.Writer, uintptr, int) {
func moveCursorUp(_ uintptr) func(io.Writer, uintptr, int) {
return posixMoveCursorUp
}

@ -15,7 +15,7 @@ import (

// clearCurrentLine removes all characters from the current line and resets the
// cursor position to the first column.
func clearCurrentLine(wr io.Writer, fd uintptr) func(io.Writer, uintptr) {
func clearCurrentLine(fd uintptr) func(io.Writer, uintptr) {
// easy case, the terminal is cmd or psh, without redirection
if isWindowsTerminal(fd) {
return windowsClearCurrentLine

@ -26,7 +26,7 @@ func clearCurrentLine(wr io.Writer, fd uintptr) func(io.Writer, uintptr) {
}

// moveCursorUp moves the cursor to the line n lines above the current one.
func moveCursorUp(wr io.Writer, fd uintptr) func(io.Writer, uintptr, int) {
func moveCursorUp(fd uintptr) func(io.Writer, uintptr, int) {
// easy case, the terminal is cmd or psh, without redirection
if isWindowsTerminal(fd) {
return windowsMoveCursorUp

@ -45,7 +45,7 @@ var (

// windowsClearCurrentLine removes all characters from the current line and
// resets the cursor position to the first column.
func windowsClearCurrentLine(wr io.Writer, fd uintptr) {
func windowsClearCurrentLine(_ io.Writer, fd uintptr) {
var info windows.ConsoleScreenBufferInfo
windows.GetConsoleScreenBufferInfo(windows.Handle(fd), &info)

@ -61,7 +61,7 @@ func windowsClearCurrentLine(wr io.Writer, fd uintptr) {
}

// windowsMoveCursorUp moves the cursor to the line n lines above the current one.
func windowsMoveCursorUp(wr io.Writer, fd uintptr, n int) {
func windowsMoveCursorUp(_ io.Writer, fd uintptr, n int) {
var info windows.ConsoleScreenBufferInfo
windows.GetConsoleScreenBufferInfo(windows.Handle(fd), &info)

@ -15,7 +15,7 @@ type WritableTreeMap struct {
TreeMap
}

func (t WritableTreeMap) SaveBlob(ctx context.Context, tpe restic.BlobType, buf []byte, id restic.ID, storeDuplicate bool) (newID restic.ID, known bool, size int, err error) {
func (t WritableTreeMap) SaveBlob(_ context.Context, tpe restic.BlobType, buf []byte, id restic.ID, _ bool) (newID restic.ID, known bool, size int, err error) {
if tpe != restic.TreeBlob {
return restic.ID{}, false, 0, errors.New("can only save trees")
}

@ -76,7 +76,7 @@ func buildTreeMap(tree TestTree, m TreeMap) restic.ID {
// TreeMap returns the trees from the map on LoadTree.
type TreeMap map[restic.ID][]byte

func (t TreeMap) LoadBlob(ctx context.Context, tpe restic.BlobType, id restic.ID, buf []byte) ([]byte, error) {
func (t TreeMap) LoadBlob(_ context.Context, tpe restic.BlobType, id restic.ID, _ []byte) ([]byte, error) {
if tpe != restic.TreeBlob {
return nil, errors.New("can only load trees")
}