
Wrap errors #3

Alexander Neumann 2016-08-29 22:16:58 +02:00
parent 4a0f77650b
commit 9cf63c99cf
11 changed files with 42 additions and 35 deletions


@@ -168,7 +168,7 @@ func (m *MACKey) UnmarshalJSON(data []byte) error {
j := jsonMACKey{}
err := json.Unmarshal(data, &j)
if err != nil {
- return err
+ return errors.Wrap(err, "Unmarshal")
}
copy(m.K[:], j.K)
copy(m.R[:], j.R)
@@ -206,7 +206,7 @@ func (k *EncryptionKey) UnmarshalJSON(data []byte) error {
d := make([]byte, aesKeySize)
err := json.Unmarshal(data, &d)
if err != nil {
- return err
+ return errors.Wrap(err, "Unmarshal")
}
copy(k[:], d)

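These hunks replace a bare return err with errors.Wrap from github.com/pkg/errors, which prefixes a short message naming the failing call and records a stack trace at the wrap site while keeping the original error reachable. A minimal, self-contained sketch of the same pattern; the macKey type and the sample input are invented for illustration and are not restic's actual definitions:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/pkg/errors"
)

// macKey is a stand-in for restic's jsonMACKey; only the shape matters here.
type macKey struct {
	K []byte `json:"k"`
	R []byte `json:"r"`
}

func unmarshalMACKey(data []byte) (*macKey, error) {
	m := &macKey{}
	if err := json.Unmarshal(data, m); err != nil {
		// Same pattern as the hunk: annotate with the failing call's name.
		return nil, errors.Wrap(err, "Unmarshal")
	}
	return m, nil
}

func main() {
	_, err := unmarshalMACKey([]byte(`{"k": not-json`))
	fmt.Printf("%v\n", err)  // Unmarshal: invalid character ... (message plus cause)
	fmt.Printf("%+v\n", err) // additionally prints the stack recorded by Wrap
}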

@@ -37,7 +37,7 @@ func Calibrate(timeout time.Duration, memory int) (KDFParams, error) {
params, err := sscrypt.Calibrate(timeout, memory, defaultParams)
if err != nil {
- return DefaultKDFParams, err
+ return DefaultKDFParams, errors.Wrap(err, "scrypt.Calibrate")
}
return KDFParams{
@@ -64,7 +64,7 @@ func KDF(p KDFParams, salt []byte, password string) (*Key, error) {
}
if err := params.Check(); err != nil {
- return nil, err
+ return nil, errors.Wrap(err, "Check")
}
derKeys := &Key{}
@@ -72,7 +72,7 @@ func KDF(p KDFParams, salt []byte, password string) (*Key, error) {
keybytes := macKeySize + aesKeySize
scryptKeys, err := scrypt.Key([]byte(password), salt, p.N, p.R, p.P, keybytes)
if err != nil {
- return nil, errors.Errorf("error deriving keys from password: %v", err)
+ return nil, errors.Wrap(err, "scrypt.Key")
}
if len(scryptKeys) != keybytes {

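The last hunk here does more than add context: errors.Errorf("...: %v", err) formats the underlying error into a brand-new error value, while errors.Wrap keeps it reachable as the cause. A rough sketch of the difference, provoking a real scrypt error with an invalid cost parameter (the parameter values are arbitrary):

package main

import (
	"fmt"

	"github.com/pkg/errors"
	"golang.org/x/crypto/scrypt"
)

func main() {
	// N must be a power of two greater than 1; 3 forces scrypt.Key to fail.
	_, err := scrypt.Key([]byte("password"), []byte("salt"), 3, 8, 1, 64)

	flattened := errors.Errorf("error deriving keys from password: %v", err)
	wrapped := errors.Wrap(err, "scrypt.Key")

	fmt.Println(errors.Cause(flattened) == err) // false: the original error is gone
	fmt.Println(errors.Cause(wrapped) == err)   // true: still reachable for callers
}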

@@ -85,7 +85,7 @@ func match(patterns, strs []string) (matched bool, err error) {
for i := len(patterns) - 1; i >= 0; i-- {
ok, err := filepath.Match(patterns[i], strs[offset+i])
if err != nil {
- return false, err
+ return false, errors.Wrap(err, "Match")
}
if !ok {

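filepath.Match can only fail with the sentinel filepath.ErrBadPattern, and that sentinel survives the wrap as long as callers compare against errors.Cause(err) rather than the returned error itself. A small sketch with a trimmed-down match helper, not restic's full matcher:

package main

import (
	"fmt"
	"path/filepath"

	"github.com/pkg/errors"
)

func match(pattern, name string) (bool, error) {
	ok, err := filepath.Match(pattern, name)
	if err != nil {
		return false, errors.Wrap(err, "Match")
	}
	return ok, nil
}

func main() {
	_, err := match("[unterminated", "somefile")
	fmt.Println(err == filepath.ErrBadPattern)               // false: err is the wrapper
	fmt.Println(errors.Cause(err) == filepath.ErrBadPattern) // true
}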

@@ -107,7 +107,7 @@ func (p *Packer) Add(t BlobType, id backend.ID, data []byte) (int, error) {
p.bytes += uint(n)
p.blobs = append(p.blobs, c)
- return n, err
+ return n, errors.Wrap(err, "Write")
}
var entrySize = uint(binary.Size(BlobType(0)) + binary.Size(uint32(0)) + backend.IDSize)
@@ -142,7 +142,7 @@ func (p *Packer) Finalize() (uint, error) {
// append the header
n, err := p.wr.Write(encryptedHeader)
if err != nil {
- return 0, err
+ return 0, errors.Wrap(err, "Write")
}
hdrBytes := bytesHeader + crypto.Extension
@@ -155,7 +155,7 @@ func (p *Packer) Finalize() (uint, error) {
// write length
err = binary.Write(p.wr, binary.LittleEndian, uint32(uint(len(p.blobs))*entrySize+crypto.Extension))
if err != nil {
- return 0, err
+ return 0, errors.Wrap(err, "binary.Write")
}
bytesWritten += uint(binary.Size(uint32(0)))
@@ -187,7 +187,7 @@ func (p *Packer) writeHeader(wr io.Writer) (bytesWritten uint, err error) {
err := binary.Write(wr, binary.LittleEndian, entry)
if err != nil {
- return bytesWritten, err
+ return bytesWritten, errors.Wrap(err, "binary.Write")
}
bytesWritten += entrySize
@@ -237,7 +237,7 @@ func readHeaderLength(rd io.ReaderAt, size int64) (uint32, error) {
buf := make([]byte, binary.Size(uint32(0)))
n, err := rd.ReadAt(buf, off)
if err != nil {
- return 0, err
+ return 0, errors.Wrap(err, "ReadAt")
}
if n != len(buf) {
@@ -268,7 +268,7 @@ func readHeader(rd io.ReaderAt, size int64) ([]byte, error) {
buf := make([]byte, int(hl))
n, err := rd.ReadAt(buf, size-int64(hl)-int64(binary.Size(hl)))
if err != nil {
- return nil, err
+ return nil, errors.Wrap(err, "ReadAt")
}
if n != len(buf) {
@@ -301,7 +301,7 @@ func List(k *crypto.Key, rd io.ReaderAt, size int64) (entries []Blob, err error)
}
if err != nil {
- return nil, err
+ return nil, errors.Wrap(err, "binary.Read")
}
entry := Blob{

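With ReadAt errors wrapped in readHeaderLength and readHeader, io.EOF no longer arrives as a bare value, so callers that used to compare against io.EOF or io.ErrUnexpectedEOF have to unwrap first. A standalone sketch, with a bytes.Reader standing in for the pack file and a hypothetical readAt helper:

package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/pkg/errors"
)

func readAt(rd io.ReaderAt, buf []byte, off int64) (int, error) {
	n, err := rd.ReadAt(buf, off)
	if err != nil {
		return n, errors.Wrap(err, "ReadAt")
	}
	return n, nil
}

func main() {
	rd := bytes.NewReader([]byte("short"))
	buf := make([]byte, 16)
	_, err := readAt(rd, buf, 0) // fewer than 16 bytes available

	fmt.Println(err == io.EOF)               // false
	fmt.Println(errors.Cause(err) == io.EOF) // true
}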

@@ -63,12 +63,12 @@ func (e Dir) Result() chan<- Result { return e.result }
func readDirNames(dirname string) ([]string, error) {
f, err := fs.Open(dirname)
if err != nil {
- return nil, err
+ return nil, errors.Wrap(err, "Open")
}
names, err := f.Readdirnames(-1)
f.Close()
if err != nil {
- return nil, err
+ return nil, errors.Wrap(err, "Readdirnames")
}
sort.Strings(names)
return names, nil
@@ -94,6 +94,7 @@ func walk(basedir, dir string, selectFunc SelectFunc, done <-chan struct{}, jobs
info, err := fs.Lstat(dir)
if err != nil {
+ err = errors.Wrap(err, "Lstat")
debug.Log("pipe.walk", "error for %v: %v, res %p", dir, err, res)
select {
case jobs <- Dir{basedir: basedir, path: relpath, info: info, error: err, result: res}:
@@ -147,6 +148,7 @@ func walk(basedir, dir string, selectFunc SelectFunc, done <-chan struct{}, jobs
entries = append(entries, ch)
if statErr != nil {
+ statErr = errors.Wrap(statErr, "Lstat")
debug.Log("pipe.walk", "sending file job for %v, err %v, res %p", subpath, err, res)
select {
case jobs <- Entry{info: fi, error: statErr, basedir: basedir, path: filepath.Join(relpath, name), result: ch}:

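These two additions wrap the Lstat error before it travels down the pipeline inside the job struct, so downstream code that applies predicates such as os.IsNotExist needs to apply them to the cause. A sketch using plain os.Lstat as a stand-in for restic's fs wrapper and an invented path:

package main

import (
	"fmt"
	"os"

	"github.com/pkg/errors"
)

func lstat(path string) (os.FileInfo, error) {
	fi, err := os.Lstat(path)
	if err != nil {
		return nil, errors.Wrap(err, "Lstat")
	}
	return fi, nil
}

func main() {
	_, err := lstat("/does/not/exist/for/sure")

	fmt.Println(os.IsNotExist(err))               // false: err is the wrapper
	fmt.Println(os.IsNotExist(errors.Cause(err))) // true
}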

@@ -49,13 +49,13 @@ func CreateConfig() (Config, error) {
cfg.ChunkerPolynomial, err = chunker.RandomPolynomial()
if err != nil {
- return Config{}, err
+ return Config{}, errors.Wrap(err, "chunker.RandomPolynomial")
}
newID := make([]byte, repositoryIDSize)
_, err = io.ReadFull(rand.Reader, newID)
if err != nil {
- return Config{}, err
+ return Config{}, errors.Wrap(err, "io.ReadFull")
}
cfg.ID = hex.EncodeToString(newID)

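Callers of CreateConfig may well add their own context on top of these wraps; the messages then stack from the outside in, and errors.Cause still returns the innermost error. A rough sketch of that layering with invented function names and an invented sentinel:

package main

import (
	"fmt"

	"github.com/pkg/errors"
)

var errNoEntropy = errors.New("entropy source unavailable")

func createConfig() error {
	// Stands in for the wrapped io.ReadFull(rand.Reader, ...) call above.
	return errors.Wrap(errNoEntropy, "io.ReadFull")
}

func initRepository() error {
	if err := createConfig(); err != nil {
		return errors.Wrap(err, "CreateConfig")
	}
	return nil
}

func main() {
	err := initRepository()
	fmt.Println(err)                               // CreateConfig: io.ReadFull: entropy source unavailable
	fmt.Println(errors.Cause(err) == errNoEntropy) // true
}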

@@ -456,7 +456,7 @@ func (idx *Index) Dump(w io.Writer) error {
_, err = w.Write(append(buf, '\n'))
if err != nil {
- return err
+ return errors.Wrap(err, "Write")
}
debug.Log("Index.Dump", "done")
@@ -492,7 +492,7 @@ func DecodeIndex(rd io.Reader) (idx *Index, err error) {
err = ErrOldIndexFormat
}
- return nil, err
+ return nil, errors.Wrap(err, "Decode")
}
idx = NewIndex()
@@ -511,7 +511,7 @@ func DecodeIndex(rd io.Reader) (idx *Index, err error) {
idx.final = true
debug.Log("Index.DecodeIndex", "done")
- return idx, err
+ return idx, nil
}
// DecodeOldIndex loads and unserializes an index in the old format from rd.
@@ -523,7 +523,7 @@ func DecodeOldIndex(rd io.Reader) (idx *Index, err error) {
err = dec.Decode(&list)
if err != nil {
debug.Log("Index.DecodeOldIndex", "Error %#v", err)
- return nil, err
+ return nil, errors.Wrap(err, "Decode")
}
idx = NewIndex()
@@ -541,7 +541,7 @@ func DecodeOldIndex(rd io.Reader) (idx *Index, err error) {
idx.final = true
debug.Log("Index.DecodeOldIndex", "done")
- return idx, err
+ return idx, nil
}
// LoadIndexWithDecoder loads the index and decodes it with fn.

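A side effect of the Decode wrap above is that ErrOldIndexFormat now leaves DecodeIndex wrapped, so a caller comparing the result against that sentinel has to compare against errors.Cause(err). The same unwrapping exposes the concrete JSON error types; a small sketch with a simplified decodeIndex and deliberately malformed input:

package main

import (
	"encoding/json"
	"fmt"
	"io"
	"strings"

	"github.com/pkg/errors"
)

func decodeIndex(r io.Reader) (map[string]interface{}, error) {
	var idx map[string]interface{}
	if err := json.NewDecoder(r).Decode(&idx); err != nil {
		return nil, errors.Wrap(err, "Decode")
	}
	return idx, nil
}

func main() {
	_, err := decodeIndex(strings.NewReader("{ not valid json"))

	// The wrapper hides the concrete type ...
	_, direct := err.(*json.SyntaxError)
	// ... but the cause still carries it.
	_, viaCause := errors.Cause(err).(*json.SyntaxError)

	fmt.Println(direct, viaCause) // false true
}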

@@ -80,7 +80,7 @@ func OpenKey(s *Repository, name string, password string) (*Key, error) {
}
k.user, err = crypto.KDF(params, k.Salt, password)
if err != nil {
- return nil, err
+ return nil, errors.Wrap(err, "crypto.KDF")
}
// decrypt master keys
@@ -94,7 +94,7 @@ func OpenKey(s *Repository, name string, password string) (*Key, error) {
err = json.Unmarshal(buf, k.master)
if err != nil {
debug.Log("OpenKey", "Unmarshal() returned error %v", err)
- return nil, err
+ return nil, errors.Wrap(err, "Unmarshal")
}
k.name = name
@@ -151,7 +151,7 @@ func LoadKey(s *Repository, name string) (k *Key, err error) {
k = &Key{}
err = json.Unmarshal(data, k)
if err != nil {
- return nil, err
+ return nil, errors.Wrap(err, "Unmarshal")
}
return k, nil
@@ -163,7 +163,7 @@ func AddKey(s *Repository, password string, template *crypto.Key) (*Key, error)
if KDFParams == nil {
p, err := crypto.Calibrate(KDFTimeout, KDFMemory)
if err != nil {
- return nil, err
+ return nil, errors.Wrap(err, "Calibrate")
}
KDFParams = &p
@@ -212,7 +212,7 @@ func AddKey(s *Repository, password string, template *crypto.Key) (*Key, error)
// encrypt master keys (as json) with user key
buf, err := json.Marshal(newkey.master)
if err != nil {
- return nil, err
+ return nil, errors.Wrap(err, "Marshal")
}
newkey.Data, err = crypto.Encrypt(newkey.user, nil, buf)
@@ -220,7 +220,7 @@ func AddKey(s *Repository, password string, template *crypto.Key) (*Key, error)
// dump as json
buf, err = json.Marshal(newkey)
if err != nil {
- return nil, err
+ return nil, errors.Wrap(err, "Marshal")
}
// store in repository and return

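Each of these Wrap calls also records the call stack at the point of failure. github.com/pkg/errors does not export an interface for retrieving it, but its documentation suggests declaring one locally; a sketch of that, with a simplified loadKey and the locally declared stackTracer interface (neither is part of restic):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/pkg/errors"
)

// stackTracer is the locally declared interface the pkg/errors docs recommend;
// errors returned by Wrap satisfy it.
type stackTracer interface {
	StackTrace() errors.StackTrace
}

func loadKey(data []byte) (map[string]interface{}, error) {
	k := map[string]interface{}{}
	if err := json.Unmarshal(data, &k); err != nil {
		return nil, errors.Wrap(err, "Unmarshal")
	}
	return k, nil
}

func main() {
	_, err := loadKey([]byte("not json"))
	if st, ok := err.(stackTracer); ok {
		// Print just the innermost frame: function, file and line of the Wrap call.
		fmt.Printf("%+v\n", st.StackTrace()[0])
	}
}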

@@ -71,7 +71,7 @@ func (r *packerManager) findPacker(size uint) (packer *pack.Packer, err error) {
debug.Log("Repo.findPacker", "create new pack for %d bytes", size)
tmpfile, err := ioutil.TempFile("", "restic-temp-pack-")
if err != nil {
- return nil, err
+ return nil, errors.Wrap(err, "ioutil.TempFile")
}
return pack.NewPacker(r.key, tmpfile), nil
@@ -97,18 +97,21 @@ func (r *Repository) savePacker(p *pack.Packer) error {
tmpfile := p.Writer().(*os.File)
f, err := fs.Open(tmpfile.Name())
if err != nil {
- return err
+ return errors.Wrap(err, "Open")
}
data := make([]byte, n)
m, err := io.ReadFull(f, data)
+ if err != nil {
+ return errors.Wrap(err, "ReadFul")
+ }
if uint(m) != n {
return errors.Errorf("read wrong number of bytes from %v: want %v, got %v", tmpfile.Name(), n, m)
}
if err = f.Close(); err != nil {
- return err
+ return errors.Wrap(err, "Close")
}
id := backend.Hash(data)
@@ -124,7 +127,7 @@ func (r *Repository) savePacker(p *pack.Packer) error {
err = fs.Remove(tmpfile.Name())
if err != nil {
- return err
+ return errors.Wrap(err, "Remove")
}
// update blobs in the index

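The newly added check on io.ReadFull is worth a note: io.ReadFull already reports short reads as io.ErrUnexpectedEOF, so the wrapped error covers them and the explicit length comparison that follows acts as a belt-and-braces check. A sketch of that behaviour behind a wrap, with an invented readPackFile helper:

package main

import (
	"fmt"
	"io"
	"strings"

	"github.com/pkg/errors"
)

func readPackFile(r io.Reader, size int) ([]byte, error) {
	data := make([]byte, size)
	if _, err := io.ReadFull(r, data); err != nil {
		return nil, errors.Wrap(err, "ReadFull")
	}
	return data, nil
}

func main() {
	// Only 5 bytes available, 16 requested: ReadFull reports the short read itself.
	_, err := readPackFile(strings.NewReader("short"), 16)

	fmt.Println(err)                                      // ReadFull: unexpected EOF
	fmt.Println(errors.Cause(err) == io.ErrUnexpectedEOF) // true
}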

@@ -3,6 +3,8 @@ package repository
import (
"io"
"math/rand"
+
+ "github.com/pkg/errors"
)
// RandReader allows reading from a rand.Rand.
@@ -56,7 +58,7 @@ func (rd *RandReader) Read(p []byte) (int, error) {
n, err := rd.read(p[:l])
pos += n
if err != nil {
- return pos, err
+ return pos, errors.Wrap(err, "Read")
}
p = p[n:]
@@ -64,7 +66,7 @@ func (rd *RandReader) Read(p []byte) (int, error) {
rd.buf = rd.buf[:7]
n, err = rd.read(rd.buf)
if err != nil {
- return pos, err
+ return pos, errors.Wrap(err, "Read")
}
// copy the remaining bytes from the buffer to p

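One property of Wrap that these guarded checks do not rely on, but that is handy to know: errors.Wrap returns nil when the given error is nil, so wrapping at a return site cannot turn success into failure. A tiny sketch with invented helpers standing in for RandReader's internal read:

package main

import (
	"fmt"
	"math/rand"

	"github.com/pkg/errors"
)

// fillRandom never fails; it stands in for RandReader's internal read helper.
func fillRandom(rnd *rand.Rand, p []byte) (int, error) {
	for i := range p {
		p[i] = byte(rnd.Intn(256))
	}
	return len(p), nil
}

func read(rnd *rand.Rand, p []byte) (int, error) {
	n, err := fillRandom(rnd, p)
	// Wrap(nil, ...) is nil, so this does not turn success into an error.
	return n, errors.Wrap(err, "Read")
}

func main() {
	buf := make([]byte, 8)
	n, err := read(rand.New(rand.NewSource(23)), buf)
	fmt.Println(n, err) // 8 <nil>
}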

@@ -257,7 +257,7 @@ func (r *Repository) SaveJSONUnpacked(t backend.Type, item interface{}) (backend
debug.Log("Repo.SaveJSONUnpacked", "save new blob %v", t)
plaintext, err := json.Marshal(item)
if err != nil {
- return backend.ID{}, errors.Errorf("json.Encode: %v", err)
+ return backend.ID{}, errors.Wrap(err, "json.Marshal")
}
return r.SaveUnpacked(t, plaintext)
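A closing note on the pattern: every Wrap in this commit records its own stack trace, so chains that wrap at several layers store several copies. Newer versions of github.com/pkg/errors also provide errors.WithMessage, which adds context without recording another stack; a sketch contrasting the two, with invented helper names:

package main

import (
	"fmt"

	"github.com/pkg/errors"
)

func saveJSONUnpacked() error {
	// Innermost failure; both errors.New and errors.Wrap record a stack here.
	return errors.Wrap(errors.New("disk full"), "json.Marshal")
}

func saveConfig() error {
	if err := saveJSONUnpacked(); err != nil {
		// WithMessage extends the message chain without recording
		// yet another stack trace.
		return errors.WithMessage(err, "SaveJSONUnpacked")
	}
	return nil
}

func main() {
	err := saveConfig()
	fmt.Printf("%v\n", err)  // SaveJSONUnpacked: json.Marshal: disk full
	fmt.Printf("%+v\n", err) // stacks from New and Wrap only; none from WithMessage
}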