package s3

import (
	"bytes"
	"io"
	"strings"

	"restic"
	"restic/debug"
	"restic/errors"

	"github.com/minio/minio-go"
)
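
// connLimit is the maximum number of concurrent connections the backend uses
// for S3 operations; see createConnections below.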
const connLimit = 10

// s3 is a backend which stores the data on an S3 endpoint.
type s3 struct {
	client     *minio.Client
	connChan   chan struct{}
	bucketname string
	prefix     string
}

// Open opens the S3 backend at bucket and region. The bucket is created if it
// does not exist yet.
func Open(cfg Config) (restic.Backend, error) {
	debug.Log("open, config %#v", cfg)

	client, err := minio.New(cfg.Endpoint, cfg.KeyID, cfg.Secret, !cfg.UseHTTP)
	if err != nil {
		return nil, errors.Wrap(err, "minio.New")
	}

	be := &s3{client: client, bucketname: cfg.Bucket, prefix: cfg.Prefix}
	be.createConnections()

	ok, err := client.BucketExists(cfg.Bucket)
	if err != nil {
		debug.Log("BucketExists(%v) returned err %v", cfg.Bucket, err)
		return nil, errors.Wrap(err, "client.BucketExists")
	}

	if !ok {
		// create a new bucket with the default ACL in the default region
		err = client.MakeBucket(cfg.Bucket, "")
		if err != nil {
			return nil, errors.Wrap(err, "client.MakeBucket")
		}
	}

	return be, nil
}
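
// Usage sketch (illustrative only; the endpoint, credentials and bucket name
// below are placeholders, and Config is the configuration type defined
// elsewhere in this package):
//
//	be, err := Open(Config{
//		Endpoint: "s3.amazonaws.com",
//		KeyID:    "<key id>",
//		Secret:   "<secret>",
//		Bucket:   "restic-repo",
//	})
//	if err != nil {
//		// handle error
//	}
//	defer be.Close()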
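
// s3path builds the name of the object in the bucket for a file of the given
// type and name, prepending the configured prefix if there is one.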
func (be *s3) s3path(t restic.FileType, name string) string {
	var path string

	if be.prefix != "" {
		path = be.prefix + "/"
	}
	path += string(t)

	if t == restic.ConfigFile {
		return path
	}
	return path + "/" + name
}
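
// createConnections fills connChan with connLimit tokens. Load and Save take
// a token from the channel before talking to S3 and put it back when they are
// done, which limits the number of concurrent connections.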
func (be *s3) createConnections() {
	be.connChan = make(chan struct{}, connLimit)
	for i := 0; i < connLimit; i++ {
		be.connChan <- struct{}{}
	}
}

// Location returns this backend's location (the bucket name).
func (be *s3) Location() string {
	return be.bucketname
}

// Load returns the data stored in the backend for h at the given offset
// and saves it in p. Load has the same semantics as io.ReaderAt.
func (be s3) Load(h restic.Handle, p []byte, off int64) (n int, err error) {
	var obj *minio.Object

	debug.Log("%v, offset %v, len %v", h, off, len(p))
	path := be.s3path(h.Type, h.Name)
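
	// take a connection token; it is returned when the function exits.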
	<-be.connChan
	defer func() {
		be.connChan <- struct{}{}
	}()

	obj, err = be.client.GetObject(be.bucketname, path)
	if err != nil {
		debug.Log(" err %v", err)
		return 0, errors.Wrap(err, "client.GetObject")
	}

	// make sure that the object is closed properly.
	defer func() {
		e := obj.Close()
		if err == nil {
			err = errors.Wrap(e, "Close")
		}
	}()

	info, err := obj.Stat()
	if err != nil {
		return 0, errors.Wrap(err, "obj.Stat")
	}

	// handle negative offsets
	if off < 0 {
		// if the negative offset is larger than the object itself, read from
		// the beginning.
		if -off > info.Size {
			off = 0
		} else {
			// otherwise compute the offset from the end of the file.
			off = info.Size + off
		}
	}

	// return an error if the offset is beyond the end of the file
	if off > info.Size {
		return 0, errors.Wrap(io.EOF, "")
	}

	var nextError error

	// manually create an io.ErrUnexpectedEOF
	if off+int64(len(p)) > info.Size {
		newlen := info.Size - off
		p = p[:newlen]

		nextError = io.ErrUnexpectedEOF

		debug.Log(" capped buffer to %v bytes", len(p))
	}

	n, err = obj.ReadAt(p, off)
	if int64(n) == info.Size-off && errors.Cause(err) == io.EOF {
		err = nil
	}

	if err == nil {
		err = nextError
	}

	return n, err
}

// Save stores data in the backend at the handle.
func (be s3) Save(h restic.Handle, p []byte) (err error) {
	if err := h.Valid(); err != nil {
		return err
	}

	debug.Log("%v with %d bytes", h, len(p))

	path := be.s3path(h.Type, h.Name)

	// Check key does not already exist
	_, err = be.client.StatObject(be.bucketname, path)
	if err == nil {
		debug.Log("%v already exists", h)
		return errors.New("key already exists")
	}

	<-be.connChan
	defer func() {
		be.connChan <- struct{}{}
	}()

	debug.Log("PutObject(%v, %v, %v, %v)",
		be.bucketname, path, int64(len(p)), "binary/octet-stream")
	n, err := be.client.PutObject(be.bucketname, path, bytes.NewReader(p), "binary/octet-stream")
	debug.Log("%v -> %v bytes, err %#v", path, n, err)

	return errors.Wrap(err, "client.PutObject")
}

// Stat returns information about a blob.
func (be s3) Stat(h restic.Handle) (bi restic.FileInfo, err error) {
	debug.Log("%v", h)

	path := be.s3path(h.Type, h.Name)
	var obj *minio.Object

	obj, err = be.client.GetObject(be.bucketname, path)
	if err != nil {
		debug.Log("GetObject() err %v", err)
		return restic.FileInfo{}, errors.Wrap(err, "client.GetObject")
	}

	// make sure that the object is closed properly.
	defer func() {
		e := obj.Close()
		if err == nil {
			err = errors.Wrap(e, "Close")
		}
	}()

	fi, err := obj.Stat()
	if err != nil {
		debug.Log("Stat() err %v", err)
		return restic.FileInfo{}, errors.Wrap(err, "Stat")
	}

	return restic.FileInfo{Size: fi.Size}, nil
}

// Test returns true if a blob of the given type and name exists in the backend.
func (be *s3) Test(t restic.FileType, name string) (bool, error) {
	found := false
	path := be.s3path(t, name)
	_, err := be.client.StatObject(be.bucketname, path)
	if err == nil {
		found = true
	}

	// if StatObject returned an error, the object is treated as not found
	return found, nil
}

// Remove removes the blob with the given name and type.
func (be *s3) Remove(t restic.FileType, name string) error {
	path := be.s3path(t, name)
	err := be.client.RemoveObject(be.bucketname, path)
	debug.Log("%v %v -> err %v", t, name, err)
	return errors.Wrap(err, "client.RemoveObject")
}

// List returns a channel that yields all names of blobs of type t. A
// goroutine is started for this. If the channel done is closed, sending
// stops.
func (be *s3) List(t restic.FileType, done <-chan struct{}) <-chan string {
	debug.Log("listing %v", t)
	ch := make(chan string)

	prefix := be.s3path(t, "")

	listresp := be.client.ListObjects(be.bucketname, prefix, true, done)

	go func() {
		defer close(ch)
		for obj := range listresp {
			m := strings.TrimPrefix(obj.Key, prefix)
			if m == "" {
				continue
			}

			select {
			case ch <- m:
			case <-done:
				return
			}
		}
	}()

	return ch
}
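
// A consumption sketch (illustrative; removeKeys below follows the same
// pattern):
//
//	done := make(chan struct{})
//	defer close(done)
//	for name := range be.List(restic.SnapshotFile, done) {
//		// use name
//	}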

// removeKeys deletes all blobs of the given type from the backend.
func (be *s3) removeKeys(t restic.FileType) error {
	done := make(chan struct{})
	defer close(done)
	for key := range be.List(t, done) {
		err := be.Remove(t, key)
		if err != nil {
			return err
		}
	}

	return nil
}

// Delete removes all restic keys in the bucket. It will not remove the bucket itself.
func (be *s3) Delete() error {
	alltypes := []restic.FileType{
		restic.DataFile,
		restic.KeyFile,
		restic.LockFile,
		restic.SnapshotFile,
		restic.IndexFile}

	for _, t := range alltypes {
		err := be.removeKeys(t)
		if err != nil {
			return err
		}
	}

	return be.Remove(restic.ConfigFile, "")
}

// Close does nothing
func (be *s3) Close() error { return nil }