
Remove unused bits and pieces

Reported by https://github.com/dominikh/go-unused
Alexander Neumann 2016-09-21 20:22:32 +02:00
parent 0873821b98
commit 1dfd3b8aa3
11 changed files with 0 additions and 71 deletions
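For context: the unused checker referenced above reports package-level identifiers that are never referenced anywhere in the analyzed code, which is exactly what every hunk below deletes. A minimal illustration of that class of finding, as a hedged sketch with hypothetical names not taken from restic:

package example

import "fmt"

// greet is called from Hello below, so the checker does not report it.
func greet(name string) string {
	return "hello, " + name
}

// farewell has no callers anywhere in the package or its tests; this is
// the kind of dead code the checker flags and this commit removes.
func farewell(name string) string {
	return "goodbye, " + name
}

// Hello is the package's only entry point.
func Hello(name string) {
	fmt.Println(greet(name))
}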


@@ -4,7 +4,6 @@ import (
	"bytes"
	"fmt"
	"io"
	"io/ioutil"
	"math/rand"
	"reflect"
	"restic"
@@ -471,14 +470,6 @@ func store(t testing.TB, b restic.Backend, tpe restic.FileType, data []byte) {
	test.OK(t, err)
}

func read(t testing.TB, rd io.Reader, expectedData []byte) {
	buf, err := ioutil.ReadAll(rd)
	test.OK(t, err)
	if expectedData != nil {
		test.Equals(t, expectedData, buf)
	}
}

// TestBackend tests all functions of the backend.
func TestBackend(t testing.TB) {
	b := open(t)


@@ -16,17 +16,6 @@ import (
var checkerTestData = filepath.Join("testdata", "checker-test-repo.tar.gz")

func list(repo restic.Repository, t restic.FileType) (IDs []string) {
	done := make(chan struct{})
	defer close(done)
	for id := range repo.List(t, done) {
		IDs = append(IDs, id.String())
	}
	return IDs
}

func collectErrors(f func(chan<- error, <-chan struct{})) (errs []error) {
	done := make(chan struct{})
	defer close(done)


@@ -1,19 +0,0 @@
package crypto

import "sync"

const defaultBufSize = 32 * 1024 // 32KiB

var bufPool = sync.Pool{
	New: func() interface{} {
		return make([]byte, defaultBufSize)
	},
}

func getBuffer() []byte {
	return bufPool.Get().([]byte)
}

func freeBuffer(buf []byte) {
	bufPool.Put(buf)
}
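The pooled-buffer helpers above were never used anywhere in the crypto package, hence the whole file goes. For reference, a sync.Pool-backed pool of this shape would typically be called as in the following hedged sketch; encryptChunk is hypothetical, only getBuffer and freeBuffer come from the deleted file:

// encryptChunk borrows a scratch buffer from the pool and returns it when
// done, so repeated calls can reuse the same allocation.
func encryptChunk(plaintext []byte) []byte {
	buf := getBuffer()
	defer freeBuffer(buf)

	// Real code would use buf as scratch space here; this stub just copies
	// the input to show the borrow/return pattern.
	out := make([]byte, len(plaintext))
	copy(out, plaintext)
	return out
}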


@@ -77,8 +77,6 @@ func New(repo restic.Repository, p *restic.Progress) (*Index, error) {
	return idx, nil
}

const loadIndexParallelism = 20

type packJSON struct {
	ID    restic.ID  `json:"id"`
	Blobs []blobJSON `json:"blobs"`


@@ -10,7 +10,6 @@ import (
var (
	snapshotTime = time.Unix(1470492820, 207401672)
	snapshots    = 3
	depth        = 3
)


@@ -43,7 +43,6 @@ type Node struct {
	tree *Tree
	Path string `json:"-"`
	err  error
}

func (node Node) String() string {


@@ -74,12 +74,6 @@ func readDirNames(dirname string) ([]string, error) {
	return names, nil
}

func isDir(fi os.FileInfo) bool {
	return fi.IsDir()
}

var errCancelled = errors.New("walk cancelled")

// SelectFunc returns true for all items that should be included (files and
// dirs). If false is returned, files are ignored and dirs are not even walked.
type SelectFunc func(item string, fi os.FileInfo) bool


@@ -14,10 +14,6 @@ import (
	. "restic/test"
)

func isFile(fi os.FileInfo) bool {
	return fi.Mode()&(os.ModeType|os.ModeCharDevice) == 0
}

type stats struct {
	dirs, files int
}


@@ -346,8 +346,6 @@ type jsonIndex struct {
	Packs []*packJSON `json:"packs"`
}

type jsonOldIndex []*packJSON

// Encode writes the JSON serialization of the index to the writer w.
func (idx *Index) Encode(w io.Writer) error {
	debug.Log("Index.Encode", "encoding index")


@@ -4,7 +4,6 @@ import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"os"
	"restic"
@@ -156,16 +155,6 @@ func (r *Repository) loadBlob(id restic.ID, t restic.BlobType, plaintextBuf []by
	return 0, errors.Errorf("loading blob %v from %v packs failed", id.Str(), len(blobs))
}

// closeOrErr calls cl.Close() and sets err to the returned error value if
// itself is not yet set.
func closeOrErr(cl io.Closer, err *error) {
	e := cl.Close()
	if *err != nil {
		return
	}
	*err = e
}

// LoadJSONUnpacked decrypts the data and afterwards calls json.Unmarshal on
// the item.
func (r *Repository) LoadJSONUnpacked(t restic.FileType, id restic.ID, item interface{}) (err error) {
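The removed closeOrErr is the usual "close and keep the first error" helper, meant for a defer together with a named error return. A hedged sketch of how such a helper is typically called (loadConfig is hypothetical; os and encoding/json are already imported by this file per the first hunk above):

// loadConfig opens a file and decodes JSON from it. The deferred helper
// propagates the error from f.Close() unless err is already set.
func loadConfig(path string, v interface{}) (err error) {
	f, err := os.Open(path)
	if err != nil {
		return err
	}
	defer closeOrErr(f, &err)

	return json.NewDecoder(f).Decode(v)
}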


@@ -234,11 +234,6 @@ func TestRepositoryIncrementalIndex(t *testing.T) {
	// save final index
	OK(t, repo.SaveIndex())

	type packEntry struct {
		id      restic.ID
		indexes []*repository.Index
	}

	packEntries := make(map[restic.ID]map[restic.ID]struct{})

	for id := range repo.List(restic.IndexFile, nil) {