// restic/backend/s3/s3.go

package s3

import (
	"bytes"
	"errors"
	"fmt"
	"io"
	"strings"

	"github.com/minio/minio-go"

	"github.com/restic/restic/backend"
	"github.com/restic/restic/debug"
)

const (
	maxKeysInList = 1000
	connLimit     = 10       // maximum number of concurrent requests
	backendPrefix = "restic" // prefix for all object names in the bucket
)

// s3path returns the path within the bucket for a blob of type t with the
// given name. The config file is stored directly beneath the prefix.
func s3path(t backend.Type, name string) string {
	if t == backend.Config {
		return backendPrefix + "/" + string(t)
	}
	return backendPrefix + "/" + string(t) + "/" + name
}

type S3Backend struct {
	client     minio.CloudStorageClient
	connChan   chan struct{}
	bucketname string
}

// Open opens the S3 backend at the bucket and endpoint given in the config.
// The bucket is created if it does not exist yet.
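//
// A minimal sketch of a call, with placeholder endpoint and credentials
// (illustrative values only):
//
//	be, err := Open(Config{
//		Endpoint: "s3.amazonaws.com",
//		KeyID:    "<key id>",
//		Secret:   "<secret>",
//		Bucket:   "restic-repo",
//	})
//	if err != nil {
//		// handle error
//	}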
func Open(cfg Config) (backend.Backend, error) {
	debug.Log("s3.Open", "open, config %#v", cfg)

	client, err := minio.New(cfg.Endpoint, cfg.KeyID, cfg.Secret, cfg.UseHTTP)
	if err != nil {
		return nil, err
	}

	be := &S3Backend{client: client, bucketname: cfg.Bucket}
	be.createConnections()

	// create a new bucket with the default ACL in the default region
	err = client.MakeBucket(cfg.Bucket, "", "")
	if err != nil {
		e, ok := err.(minio.ErrorResponse)
		if ok && e.Code == "BucketAlreadyExists" {
			debug.Log("s3.Open", "ignoring error that bucket %q already exists", cfg.Bucket)
			err = nil
		}
	}
	if err != nil {
		return nil, err
	}

	return be, nil
}

// createConnections fills connChan with one token per allowed connection.
func (be *S3Backend) createConnections() {
	be.connChan = make(chan struct{}, connLimit)
	for i := 0; i < connLimit; i++ {
		be.connChan <- struct{}{}
	}
}

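// The buffered channel acts as a counting semaphore: a caller takes a token
// from connChan before talking to S3 and puts it back afterwards, so at most
// connLimit requests are in flight at any time. The pattern, as used in
// Finalize below:
//
//	<-be.connChan             // acquire a connection token
//	// ... perform the request ...
//	be.connChan <- struct{}{} // release the token
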
// Location returns this backend's location (the bucket name).
func (be *S3Backend) Location() string {
	return be.bucketname
}

// s3Blob collects data in an in-memory buffer until it is finalized and
// uploaded to the backend.
type s3Blob struct {
	b     *S3Backend
	buf   *bytes.Buffer
	final bool
}

func (bb *s3Blob) Write(p []byte) (int, error) {
	if bb.final {
		return 0, errors.New("blob already closed")
	}

	return bb.buf.Write(p)
}

func (bb *s3Blob) Read(p []byte) (int, error) {
	return bb.buf.Read(p)
}

func (bb *s3Blob) Close() error {
	bb.final = true
	bb.buf.Reset()
	return nil
}

func (bb *s3Blob) Size() uint {
	return uint(bb.buf.Len())
}

// Finalize uploads the buffered data under the given type and name. It
// refuses to overwrite a key that already exists.
func (bb *s3Blob) Finalize(t backend.Type, name string) error {
	if bb.final {
		return errors.New("already finalized")
	}

	bb.final = true

	path := s3path(t, name)

	// check that the key does not already exist
	_, err := bb.b.client.StatObject(bb.b.bucketname, path)
	if err == nil {
		return errors.New("key already exists")
	}

	expectedBytes := bb.buf.Len()

	<-bb.b.connChan
	n, err := bb.b.client.PutObject(bb.b.bucketname, path, bb.buf, int64(bb.buf.Len()), "binary/octet-stream")
	bb.b.connChan <- struct{}{}

	debug.Log("s3.Finalize", "finalized %v -> n %v, err %v", path, n, err)

	if err != nil {
		return err
	}

	if n != int64(expectedBytes) {
		return errors.New("could not store all bytes")
	}

	return nil
}

// Create creates a new Blob. The data is available only after Finalize()
// has been called on the returned Blob.
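//
// A minimal sketch of the write path, where data and id are illustrative:
//
//	blob, err := be.Create()
//	if err != nil {
//		// handle error
//	}
//	_, err = blob.Write(data)
//	err = blob.Finalize(backend.Data, id)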
func (be *S3Backend) Create() (backend.Blob, error) {
	blob := s3Blob{
		b:   be,
		buf: &bytes.Buffer{},
	}

	return &blob, nil
}

// Get returns a reader that yields the content stored under the given
// name. The reader should be closed after draining it.
func (be *S3Backend) Get(t backend.Type, name string) (io.ReadCloser, error) {
	path := s3path(t, name)
	rc, _, err := be.client.GetObject(be.bucketname, path)
	debug.Log("s3.Get", "%v %v -> err %v", t, name, err)
	return rc, err
}

// GetReader returns an io.ReadCloser for the Blob with the given name of
// type t at offset and length. If length is 0, the reader reads until EOF.
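//
// For example, GetReader(t, name, 50, 100) yields at most 100 bytes starting
// at byte 50 of the object; the requested range is clamped to the object's
// actual size.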
func (be *S3Backend) GetReader(t backend.Type, name string, offset, length uint) (io.ReadCloser, error) {
	debug.Log("s3.GetReader", "%v %v, offset %v len %v", t, name, offset, length)
	path := s3path(t, name)

	rd, stat, err := be.client.GetObjectPartial(be.bucketname, path)
	debug.Log("s3.GetReader", " stat %v, err %v", stat, err)
	if err != nil {
		return nil, err
	}

	// clamp the requested range to the size of the object
	l, o := int64(length), int64(offset)
	if l == 0 {
		l = stat.Size
	}

	if o > stat.Size {
		return nil, fmt.Errorf("offset beyond end of file (%v > %v)", o, stat.Size)
	}

	if o+l > stat.Size {
		l = stat.Size - o
	}

	debug.Log("s3.GetReader", "%v %v, o %v l %v", t, name, o, l)

	// ContinuousReader (defined elsewhere in this package) reads rd
	// sequentially, starting at byte o
	var r io.Reader
	r = &ContinuousReader{R: rd, Offset: o}
	if length > 0 {
		r = io.LimitReader(r, int64(length))
	}

	return backend.ReadCloser(r), nil
}

// Test returns true if a blob of the given type and name exists in the backend.
func (be *S3Backend) Test(t backend.Type, name string) (bool, error) {
	found := false
	path := s3path(t, name)
	_, err := be.client.StatObject(be.bucketname, path)
	if err == nil {
		found = true
	}

	// any error means the object was not found
	return found, nil
}

// Remove removes the blob with the given name and type.
func (be *S3Backend) Remove(t backend.Type, name string) error {
	path := s3path(t, name)
	err := be.client.RemoveObject(be.bucketname, path)
	debug.Log("s3.Remove", "%v %v -> err %v", t, name, err)
	return err
}

// List returns a channel that yields all names of blobs of type t. A
// goroutine is started for this. If the channel done is closed, sending
// stops.
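//
// A minimal consumption sketch (see removeKeys below for a real use):
//
//	done := make(chan struct{})
//	defer close(done)
//	for name := range be.List(backend.Snapshot, done) {
//		// process name
//	}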
func (be *S3Backend) List(t backend.Type, done <-chan struct{}) <-chan string {
	ch := make(chan string)

	prefix := s3path(t, "")

	listresp := be.client.ListObjects(be.bucketname, prefix, true, done)

	go func() {
		defer close(ch)
		for obj := range listresp {
			m := strings.TrimPrefix(obj.Key, prefix)
			if m == "" {
				continue
			}

			select {
			case ch <- m:
			case <-done:
				return
			}
		}
	}()

	return ch
}

// removeKeys removes all blobs of the given type t.
func (be *S3Backend) removeKeys(t backend.Type) error {
	done := make(chan struct{})
	defer close(done)
	for key := range be.List(t, done) {
		err := be.Remove(t, key)
		if err != nil {
			return err
		}
	}

	return nil
}

// Delete removes all restic keys in the bucket. It will not remove the bucket itself.
func (be *S3Backend) Delete() error {
	alltypes := []backend.Type{
		backend.Data,
		backend.Key,
		backend.Lock,
		backend.Snapshot,
		backend.Index,
	}

	for _, t := range alltypes {
		err := be.removeKeys(t)
		if err != nil {
			return err
		}
	}

	return be.Remove(backend.Config, "")
}

// Close does nothing.
func (be *S3Backend) Close() error { return nil }