Mirror of https://github.com/octoleo/restic.git

Merge pull request #630 from restic/remove-unused

Remove unused bits and pieces

Commit eeec0d63c2
@@ -95,17 +95,6 @@ func formatDuration(d time.Duration) string {
	return formatSeconds(sec)
}

func printTree2(indent int, t *restic.Tree) {
	for _, node := range t.Nodes {
		if node.Tree() != nil {
			fmt.Printf("%s%s/\n", strings.Repeat(" ", indent), node.Name)
			printTree2(indent+1, node.Tree())
		} else {
			fmt.Printf("%s%s\n", strings.Repeat(" ", indent), node.Name)
		}
	}
}

func (cmd CmdBackup) Usage() string {
	return "DIR/FILE [DIR/FILE] [...]"
}
@@ -178,15 +178,6 @@ func configureRestic(t testing.TB, cache, repo string) GlobalOptions {
	}
}

func cleanupTempdir(t testing.TB, tempdir string) {
	if !TestCleanupTempDirs {
		t.Logf("leaving temporary directory %v used for test", tempdir)
		return
	}

	RemoveAll(t, tempdir)
}

// withTestEnvironment creates a test environment and calls f with it. After f has
// returned, the temporary directory is removed.
func withTestEnvironment(t testing.TB, f func(*testEnvironment, GlobalOptions)) {
@@ -219,13 +210,3 @@ func withTestEnvironment(t testing.TB, f func(*testEnvironment, GlobalOptions))

	RemoveAll(t, tempdir)
}

// removeFile resets the read-only flag and then deletes the file.
func removeFile(fn string) error {
	err := os.Chmod(fn, 0666)
	if err != nil {
		return err
	}

	return os.Remove(fn)
}
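The chmod-before-remove dance in removeFile matters mainly on Windows, where os.Remove fails for files whose read-only attribute is set. A hypothetical call site in a cleanup helper might look like this (the file name is made up for illustration):

	// Clear the read-only bit, then delete; removeFile does both steps.
	if err := removeFile(filepath.Join(tempdir, "read-only.snap")); err != nil {
		t.Errorf("removeFile: %v", err)
	}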
@@ -809,27 +809,6 @@ func TestRebuildIndexAlwaysFull(t *testing.T) {
	TestRebuildIndex(t)
}

var optimizeTests = []struct {
	testFilename string
	snapshots    restic.IDSet
}{
	{
		filepath.Join("..", "..", "restic", "checker", "testdata", "checker-test-repo.tar.gz"),
		restic.NewIDSet(restic.TestParseID("a13c11e582b77a693dd75ab4e3a3ba96538a056594a4b9076e4cacebe6e06d43")),
	},
	{
		filepath.Join("testdata", "old-index-repo.tar.gz"),
		nil,
	},
	{
		filepath.Join("testdata", "old-index-repo.tar.gz"),
		restic.NewIDSet(
			restic.TestParseID("f7d83db709977178c9d1a09e4009355e534cde1a135b8186b8b118a3fc4fcd41"),
			restic.TestParseID("51d249d28815200d59e4be7b3f21a157b864dc343353df9d8e498220c2499b02"),
		),
	},
}

func TestCheckRestoreNoLock(t *testing.T) {
	withTestEnvironment(t, func(env *testEnvironment, global GlobalOptions) {
		datafile := filepath.Join("testdata", "small-repo.tar.gz")
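optimizeTests is a standard Go table-driven test fixture: each entry pairs a repository tarball with the set of snapshot IDs expected to survive optimization. A consumer would presumably have iterated it along these lines (the loop body is a sketch, not the removed code):

	for _, test := range optimizeTests {
		// Hypothetical body: extract test.testFilename into a fresh
		// repository, run the optimize step, then assert that every
		// snapshot in test.snapshots still checks out.
		_ = test
	}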
@@ -4,7 +4,6 @@ import (
	"bytes"
	"fmt"
	"io"
	"io/ioutil"
	"math/rand"
	"reflect"
	"restic"
@@ -471,14 +470,6 @@ func store(t testing.TB, b restic.Backend, tpe restic.FileType, data []byte) {
	test.OK(t, err)
}

func read(t testing.TB, rd io.Reader, expectedData []byte) {
	buf, err := ioutil.ReadAll(rd)
	test.OK(t, err)
	if expectedData != nil {
		test.Equals(t, expectedData, buf)
	}
}

// TestBackend tests all functions of the backend.
func TestBackend(t testing.TB) {
	b := open(t)
@@ -46,6 +46,8 @@ func (t BlobType) String() string {
		return "data"
	case TreeBlob:
		return "tree"
	case InvalidBlob:
		return "invalid"
	}

	return fmt.Sprintf("<BlobType %d>", t)
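This is the one hunk in the commit that adds lines rather than removing them: the switch in BlobType.String gains an InvalidBlob case, so only genuinely unknown values fall through to the fmt.Sprintf fallback (e.g. BlobType(42) would render as "<BlobType 42>").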
@@ -649,11 +649,6 @@ func (c *Checker) UnusedBlobs() (blobs restic.IDs) {
	return blobs
}

// OrphanedPacks returns a slice of unused packs (only available after Packs() was run).
func (c *Checker) OrphanedPacks() restic.IDs {
	return c.orphanedPacks
}

// CountPacks returns the number of packs in the repository.
func (c *Checker) CountPacks() uint64 {
	return uint64(len(c.packs))
@@ -16,17 +16,6 @@ import (

var checkerTestData = filepath.Join("testdata", "checker-test-repo.tar.gz")

func list(repo restic.Repository, t restic.FileType) (IDs []string) {
	done := make(chan struct{})
	defer close(done)

	for id := range repo.List(t, done) {
		IDs = append(IDs, id.String())
	}

	return IDs
}

func collectErrors(f func(chan<- error, <-chan struct{})) (errs []error) {
	done := make(chan struct{})
	defer close(done)
@@ -21,11 +21,6 @@ type Config struct {
// is newly created with Init().
const RepoVersion = 1

// JSONUnpackedSaver saves unpacked JSON.
type JSONUnpackedSaver interface {
	SaveJSONUnpacked(FileType, interface{}) (ID, error)
}

// JSONUnpackedLoader loads unpacked JSON.
type JSONUnpackedLoader interface {
	LoadJSONUnpacked(FileType, ID, interface{}) error
@@ -1,19 +0,0 @@
package crypto

import "sync"

const defaultBufSize = 32 * 1024 // 32KiB

var bufPool = sync.Pool{
	New: func() interface{} {
		return make([]byte, defaultBufSize)
	},
}

func getBuffer() []byte {
	return bufPool.Get().([]byte)
}

func freeBuffer(buf []byte) {
	bufPool.Put(buf)
}
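The deleted file implemented a small free-list of 32 KiB buffers on top of sync.Pool, presumably to cut allocation churn in hot paths: Get hands out a recycled slice (or a fresh one via New), and Put returns it for reuse. A minimal self-contained sketch of the same pattern, independent of restic:

	package main

	import "sync"

	var pool = sync.Pool{
		New: func() interface{} { return make([]byte, 32*1024) },
	}

	func main() {
		buf := pool.Get().([]byte) // type-assert: Pool stores interface{}
		defer pool.Put(buf)        // recycle the slice once we're done
		_ = buf
	}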
@@ -26,10 +26,6 @@ const (
var (
	// ErrUnauthenticated is returned when ciphertext verification has failed.
	ErrUnauthenticated = errors.New("ciphertext verification failed")

	// ErrBufferTooSmall is returned when the destination slice is too small
	// for the ciphertext.
	ErrBufferTooSmall = errors.New("destination buffer too small")
)

// Key holds encryption and message authentication keys for a repository. It is stored
@@ -1,7 +1,6 @@
package restic

import (
	"bytes"
	"crypto/rand"
	"crypto/sha256"
	"encoding/hex"
@@ -95,11 +94,6 @@ func (id ID) EqualString(other string) (bool, error) {
	return id == id2, nil
}

// Compare compares this ID to another one, returning -1, 0, or 1.
func (id ID) Compare(other ID) int {
	return bytes.Compare(other[:], id[:])
}

// MarshalJSON returns the JSON encoding of id.
func (id ID) MarshalJSON() ([]byte, error) {
	return json.Marshal(id.String())
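Note the argument order in the removed Compare: bytes.Compare(other[:], id[:]) returns -1 when other sorts before id, the opposite of the usual receiver-first convention. Since the method was unused, the inversion could go unnoticed; the conventional orientation, for comparison (not the removed code), would be:

	func (id ID) Compare(other ID) int { return bytes.Compare(id[:], other[:]) }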
@@ -77,8 +77,6 @@ func New(repo restic.Repository, p *restic.Progress) (*Index, error) {
	return idx, nil
}

const loadIndexParallelism = 20

type packJSON struct {
	ID    restic.ID  `json:"id"`
	Blobs []blobJSON `json:"blobs"`
@@ -10,7 +10,6 @@ import (

var (
	snapshotTime = time.Unix(1470492820, 207401672)
	snapshots    = 3
	depth        = 3
)

@@ -40,10 +40,7 @@ type Node struct {

	Error string `json:"error,omitempty"`

	tree *Tree

	Path string `json:"-"`
	err  error
}

func (node Node) String() string {
@@ -59,11 +56,6 @@ func (node Node) String() string {
	return fmt.Sprintf("<Node(%s) %s>", node.Type, node.Name)
}

// Tree returns this node's tree object.
func (node Node) Tree() *Tree {
	return node.tree
}

// NodeFromFileInfo returns a new node from the given path and FileInfo.
func NodeFromFileInfo(path string, fi os.FileInfo) (*Node, error) {
	mask := os.ModePerm | os.ModeType | os.ModeSetuid | os.ModeSetgid | os.ModeSticky
@@ -74,12 +74,6 @@ func readDirNames(dirname string) ([]string, error) {
	return names, nil
}

func isDir(fi os.FileInfo) bool {
	return fi.IsDir()
}

var errCancelled = errors.New("walk cancelled")

// SelectFunc returns true for all items that should be included (files and
// dirs). If false is returned, files are ignored and dirs are not even walked.
type SelectFunc func(item string, fi os.FileInfo) bool
@@ -14,10 +14,6 @@ import (
	. "restic/test"
)

func isFile(fi os.FileInfo) bool {
	return fi.Mode()&(os.ModeType|os.ModeCharDevice) == 0
}

type stats struct {
	dirs, files int
}
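In the removed isFile, the os.ModeCharDevice term is redundant: os.ModeType already includes the char-device bit, so fi.Mode()&os.ModeType == 0 alone identifies a regular file, and fi.Mode().IsRegular() expresses the same test idiomatically.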
@@ -275,15 +275,6 @@ func (idx *Index) Count(t restic.BlobType) (n uint) {
	return
}

// Length returns the number of entries in the Index.
func (idx *Index) Length() uint {
	debug.Log("Index.Count", "counting blobs")
	idx.m.Lock()
	defer idx.m.Unlock()

	return uint(len(idx.pack))
}

type packJSON struct {
	ID    restic.ID  `json:"id"`
	Blobs []blobJSON `json:"blobs"`
@@ -346,8 +337,6 @@ type jsonIndex struct {
	Packs []*packJSON `json:"packs"`
}

type jsonOldIndex []*packJSON

// Encode writes the JSON serialization of the index to the writer w.
func (idx *Index) Encode(w io.Writer) error {
	debug.Log("Index.Encode", "encoding index")
@@ -552,28 +541,3 @@ func LoadIndexWithDecoder(repo restic.Repository, id restic.ID, fn func(io.Reade

	return idx, nil
}

// ConvertIndex loads the given index from the repo and converts it to the new
// format (if necessary). When the conversion is successful, the old index
// is removed. Returned is either the old id (if no conversion was needed) or
// the new id.
func ConvertIndex(repo *Repository, id restic.ID) (restic.ID, error) {
	debug.Log("ConvertIndex", "checking index %v", id.Str())

	idx, err := LoadIndexWithDecoder(repo, id, DecodeOldIndex)
	if err != nil {
		debug.Log("ConvertIndex", "LoadIndexWithDecoder(%v) returned error: %v", id.Str(), err)
		return id, err
	}

	buf := bytes.NewBuffer(nil)
	idx.supersedes = restic.IDs{id}

	err = idx.Encode(buf)
	if err != nil {
		debug.Log("ConvertIndex", "oldIdx.Encode() returned error: %v", err)
		return id, err
	}

	return repo.SaveUnpacked(restic.IndexFile, buf.Bytes())
}
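ConvertIndex decodes an index in the old on-disk format, records the old ID in the new index's supersedes list, and saves the re-encoded result. A plausible driver (a sketch, not code from this commit) would walk all index files in the repository:

	for id := range repo.List(restic.IndexFile, nil) {
		// Returns the same id when no conversion was needed.
		if _, err := ConvertIndex(repo, id); err != nil {
			return err
		}
	}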
@@ -4,7 +4,6 @@ import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"os"
	"restic"

@@ -43,13 +42,6 @@ func (r *Repository) Config() restic.Config {
	return r.cfg
}

// Find loads the list of all blobs of type t and searches for names which start
// with prefix. If none is found, nil and ErrNoIDPrefixFound is returned. If
// more than one is found, nil and ErrMultipleIDMatches is returned.
func (r *Repository) Find(t restic.FileType, prefix string) (string, error) {
	return restic.Find(r.be, t, prefix)
}

// PrefixLength returns the number of bytes required so that all prefixes of
// all IDs of type t are unique.
func (r *Repository) PrefixLength(t restic.FileType) (int, error) {
@@ -156,16 +148,6 @@ func (r *Repository) loadBlob(id restic.ID, t restic.BlobType, plaintextBuf []by
	return 0, errors.Errorf("loading blob %v from %v packs failed", id.Str(), len(blobs))
}

// closeOrErr calls cl.Close() and sets err to the returned error value if
// itself is not yet set.
func closeOrErr(cl io.Closer, err *error) {
	e := cl.Close()
	if *err != nil {
		return
	}
	*err = e
}

// LoadJSONUnpacked decrypts the data and afterwards calls json.Unmarshal on
// the item.
func (r *Repository) LoadJSONUnpacked(t restic.FileType, id restic.ID, item interface{}) (err error) {
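closeOrErr is the usual Go idiom for not losing a Close error on the deferred path while never overwriting an earlier, more specific one. A sketch of the call-site pattern (the function and file name are hypothetical):

	func loadFile(name string) (data []byte, err error) {
		f, err := os.Open(name)
		if err != nil {
			return nil, err
		}
		// The named return value err lets the deferred call set it
		// if Close fails and nothing else has failed first.
		defer closeOrErr(f, &err)

		return ioutil.ReadAll(f)
	}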
@@ -234,11 +234,6 @@ func TestRepositoryIncrementalIndex(t *testing.T) {
	// save final index
	OK(t, repo.SaveIndex())

	type packEntry struct {
		id      restic.ID
		indexes []*repository.Index
	}

	packEntries := make(map[restic.ID]map[restic.ID]struct{})

	for id := range repo.List(restic.IndexFile, nil) {
@@ -3,7 +3,6 @@ package test
import (
	"compress/bzip2"
	"compress/gzip"
	"crypto/rand"
	"fmt"
	"io"
	"io/ioutil"
@@ -90,56 +89,6 @@ func Random(seed, count int) []byte {
	return p
}

type rndReader struct {
	src *mrand.Rand
}

func (r *rndReader) Read(p []byte) (int, error) {
	for i := 0; i < len(p); i += 8 {
		val := r.src.Int63()
		var data = []byte{
			byte((val >> 0) & 0xff),
			byte((val >> 8) & 0xff),
			byte((val >> 16) & 0xff),
			byte((val >> 24) & 0xff),
			byte((val >> 32) & 0xff),
			byte((val >> 40) & 0xff),
			byte((val >> 48) & 0xff),
			byte((val >> 56) & 0xff),
		}

		for j := range data {
			cur := i + j
			if len(p) >= cur {
				break
			}
			p[cur] = data[j]
		}
	}

	return len(p), nil
}

// RandomReader returns a reader that returns deterministic pseudo-random data
// derived from the seed.
func RandomReader(seed int) io.Reader {
	return &rndReader{src: mrand.New(mrand.NewSource(int64(seed)))}
}

// RandomLimitReader returns a reader that returns size bytes of deterministic
// pseudo-random data derived from the seed.
func RandomLimitReader(seed, size int) io.Reader {
	return io.LimitReader(RandomReader(seed), int64(size))
}

// GenRandom returns a []byte filled with up to 1000 random bytes.
func GenRandom(t testing.TB) []byte {
	buf := make([]byte, mrand.Intn(1000))
	_, err := io.ReadFull(rand.Reader, buf)
	OK(t, err)
	return buf
}

// SetupTarTestFixture extracts the tarFile to outputDir.
func SetupTarTestFixture(t testing.TB, outputDir, tarFile string) {
	input, err := os.Open(tarFile)
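One quirk worth flagging in the removed rndReader.Read: the guard if len(p) >= cur appears inverted, so the inner copy loop breaks on its first iteration (cur starts at 0) and the reader never actually writes to p; an unused helper with a latent bug is exactly the kind of code this commit targets. The intended bounds check would read:

	for j := range data {
		cur := i + j
		if cur >= len(p) { // stop at the end of the caller's buffer
			break
		}
		p[cur] = data[j]
	}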
@@ -71,12 +71,6 @@ func (t Tree) binarySearch(name string) (int, *Node, error) {
	return pos, nil, errors.New("named node not found")
}

// Find returns a node with the given name.
func (t Tree) Find(name string) (*Node, error) {
	_, node, err := t.binarySearch(name)
	return node, err
}

// Subtrees returns a slice of all subtree IDs of the tree.
func (t Tree) Subtrees() (trees IDs) {
	for _, node := range t.Nodes {
@@ -94,11 +94,6 @@ func (p *Pool) runWorker(numWorker int) {
	}
}

// Cancel signals termination to all worker goroutines.
func (p *Pool) Cancel() {
	close(p.done)
}

// Wait waits for all worker goroutines to terminate, afterwards the output
// channel is closed.
func (p *Pool) Wait() {
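Cancel follows the classic done-channel pattern: closing p.done broadcasts to every worker goroutine at once, because a receive from a closed channel never blocks. A generic sketch of a worker loop built on it (not restic's implementation):

	func worker(done <-chan struct{}, jobs <-chan int) {
		for {
			select {
			case <-done:
				return // Cancel was called: done is closed
			case j, ok := <-jobs:
				if !ok {
					return // job channel drained and closed
				}
				_ = j // process the job
			}
		}
	}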