
Remove unused bits and pieces

Reported by https://github.com/dominikh/go-unused
Alexander Neumann 2016-09-21 20:22:32 +02:00
parent 0873821b98
commit 1dfd3b8aa3
11 changed files with 0 additions and 71 deletions
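
For context, go-unused statically analyzes a Go source tree and reports package-level identifiers (functions, types, constants, variables) that are never referenced, which is the kind of dead code removed below. A minimal, hypothetical sketch of such a finding (the package and helper name are invented for illustration, not taken from restic):

package example

// helper compiles but is never called from anywhere in the package or its
// tests, so a tool like go-unused reports it as unused and it can simply
// be deleted, just like the declarations in the hunks below.
func helper(xs []int) int {
	sum := 0
	for _, x := range xs {
		sum += x
	}
	return sum
}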

View File

@@ -4,7 +4,6 @@ import (
 	"bytes"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"math/rand"
 	"reflect"
 	"restic"
@@ -471,14 +470,6 @@ func store(t testing.TB, b restic.Backend, tpe restic.FileType, data []byte) {
 	test.OK(t, err)
 }
-func read(t testing.TB, rd io.Reader, expectedData []byte) {
-	buf, err := ioutil.ReadAll(rd)
-	test.OK(t, err)
-	if expectedData != nil {
-		test.Equals(t, expectedData, buf)
-	}
-}
 // TestBackend tests all functions of the backend.
 func TestBackend(t testing.TB) {
 	b := open(t)

View File

@@ -16,17 +16,6 @@ import (
 var checkerTestData = filepath.Join("testdata", "checker-test-repo.tar.gz")
-func list(repo restic.Repository, t restic.FileType) (IDs []string) {
-	done := make(chan struct{})
-	defer close(done)
-	for id := range repo.List(t, done) {
-		IDs = append(IDs, id.String())
-	}
-	return IDs
-}
 func collectErrors(f func(chan<- error, <-chan struct{})) (errs []error) {
 	done := make(chan struct{})
 	defer close(done)

View File

@@ -1,19 +0,0 @@
-package crypto
-
-import "sync"
-
-const defaultBufSize = 32 * 1024 // 32KiB
-
-var bufPool = sync.Pool{
-	New: func() interface{} {
-		return make([]byte, defaultBufSize)
-	},
-}
-
-func getBuffer() []byte {
-	return bufPool.Get().([]byte)
-}
-
-func freeBuffer(buf []byte) {
-	bufPool.Put(buf)
-}

View File

@@ -77,8 +77,6 @@ func New(repo restic.Repository, p *restic.Progress) (*Index, error) {
 	return idx, nil
 }
-const loadIndexParallelism = 20
 type packJSON struct {
 	ID    restic.ID  `json:"id"`
 	Blobs []blobJSON `json:"blobs"`

View File

@@ -10,7 +10,6 @@ import (
 var (
 	snapshotTime = time.Unix(1470492820, 207401672)
-	snapshots    = 3
 	depth        = 3
 )

View File

@@ -43,7 +43,6 @@ type Node struct {
 	tree *Tree
 	Path string `json:"-"`
-	err error
 }
 func (node Node) String() string {

View File

@@ -74,12 +74,6 @@ func readDirNames(dirname string) ([]string, error) {
 	return names, nil
 }
-func isDir(fi os.FileInfo) bool {
-	return fi.IsDir()
-}
-var errCancelled = errors.New("walk cancelled")
 // SelectFunc returns true for all items that should be included (files and
 // dirs). If false is returned, files are ignored and dirs are not even walked.
 type SelectFunc func(item string, fi os.FileInfo) bool

View File

@@ -14,10 +14,6 @@ import (
 	. "restic/test"
 )
-func isFile(fi os.FileInfo) bool {
-	return fi.Mode()&(os.ModeType|os.ModeCharDevice) == 0
-}
 type stats struct {
 	dirs, files int
 }

View File

@@ -346,8 +346,6 @@ type jsonIndex struct {
 	Packs []*packJSON `json:"packs"`
 }
-type jsonOldIndex []*packJSON
 // Encode writes the JSON serialization of the index to the writer w.
 func (idx *Index) Encode(w io.Writer) error {
 	debug.Log("Index.Encode", "encoding index")

View File

@@ -4,7 +4,6 @@ import (
 	"bytes"
 	"encoding/json"
 	"fmt"
-	"io"
 	"os"
 	"restic"
@@ -156,16 +155,6 @@ func (r *Repository) loadBlob(id restic.ID, t restic.BlobType, plaintextBuf []byte) (int, error) {
 	return 0, errors.Errorf("loading blob %v from %v packs failed", id.Str(), len(blobs))
 }
-// closeOrErr calls cl.Close() and sets err to the returned error value if
-// itself is not yet set.
-func closeOrErr(cl io.Closer, err *error) {
-	e := cl.Close()
-	if *err != nil {
-		return
-	}
-	*err = e
-}
 // LoadJSONUnpacked decrypts the data and afterwards calls json.Unmarshal on
 // the item.
 func (r *Repository) LoadJSONUnpacked(t restic.FileType, id restic.ID, item interface{}) (err error) {

View File

@@ -234,11 +234,6 @@ func TestRepositoryIncrementalIndex(t *testing.T) {
 	// save final index
 	OK(t, repo.SaveIndex())
-	type packEntry struct {
-		id      restic.ID
-		indexes []*repository.Index
-	}
 	packEntries := make(map[restic.ID]map[restic.ID]struct{})
 	for id := range repo.List(restic.IndexFile, nil) {