2
2
mirror of https://github.com/octoleo/restic.git synced 2024-12-29 13:22:43 +00:00
restic/internal/backend/s3/s3.go

552 lines
14 KiB
Go
Raw Normal View History

2015-05-10 15:20:58 +00:00
package s3
import (
2017-06-03 15:39:57 +00:00
"context"
"fmt"
"hash"
2015-05-10 15:20:58 +00:00
"io"
2017-12-08 20:52:50 +00:00
"io/ioutil"
"net/http"
2017-05-13 21:55:22 +00:00
"os"
"path"
2015-05-10 15:20:58 +00:00
"strings"
"time"
2015-05-10 15:20:58 +00:00
2017-07-23 12:21:03 +00:00
"github.com/restic/restic/internal/backend"
"github.com/restic/restic/internal/backend/sema"
2020-12-17 11:47:53 +00:00
"github.com/restic/restic/internal/debug"
2017-07-23 12:21:03 +00:00
"github.com/restic/restic/internal/errors"
2017-07-24 15:42:25 +00:00
"github.com/restic/restic/internal/restic"
2020-12-17 11:47:53 +00:00
"github.com/cenkalti/backoff/v4"
"github.com/minio/minio-go/v7"
"github.com/minio/minio-go/v7/pkg/credentials"
2015-05-10 15:20:58 +00:00
)
2017-06-07 19:59:01 +00:00
// Backend stores data on an S3 endpoint.
type Backend struct {
	client *minio.Client // minio SDK client used for all S3 requests
	sem    sema.Semaphore // bounds the number of concurrent requests
	cfg    Config
	backend.Layout // maps restic handles to object names in the bucket
}

// make sure that *Backend implements backend.Backend
var _ restic.Backend = &Backend{}

// defaultLayout is the layout used when cfg.Layout is empty.
const defaultLayout = "default"
func open(ctx context.Context, cfg Config, rt http.RoundTripper) (*Backend, error) {
2016-09-27 20:35:08 +00:00
debug.Log("open, config %#v", cfg)
2015-05-10 15:20:58 +00:00
if cfg.MaxRetries > 0 {
minio.MaxRetry = int(cfg.MaxRetries)
}
// Chains all credential types, in the following order:
// - Static credentials provided by user
// - AWS env vars (i.e. AWS_ACCESS_KEY_ID)
// - Minio env vars (i.e. MINIO_ACCESS_KEY)
// - AWS creds file (i.e. AWS_SHARED_CREDENTIALS_FILE or ~/.aws/credentials)
// - Minio creds file (i.e. MINIO_SHARED_CREDENTIALS_FILE or ~/.mc/config.json)
// - IAM profile based credentials. (performs an HTTP
// call to a pre-defined endpoint, only valid inside
// configured ec2 instances)
creds := credentials.NewChainCredentials([]credentials.Provider{
2018-07-12 13:18:19 +00:00
&credentials.EnvAWS{},
&credentials.Static{
Value: credentials.Value{
AccessKeyID: cfg.KeyID,
SecretAccessKey: cfg.Secret.Unwrap(),
},
},
&credentials.EnvMinio{},
&credentials.FileAWSCredentials{},
&credentials.FileMinioClient{},
&credentials.IAM{
Client: &http.Client{
Transport: http.DefaultTransport,
},
},
})
c, err := creds.Get()
if err != nil {
return nil, errors.Wrap(err, "creds.Get")
}
if c.SignerType == credentials.SignatureAnonymous {
debug.Log("using anonymous access for %#v", cfg.Endpoint)
}
options := &minio.Options{
Creds: creds,
Secure: !cfg.UseHTTP,
Region: cfg.Region,
Transport: rt,
}
switch strings.ToLower(cfg.BucketLookup) {
case "", "auto":
options.BucketLookup = minio.BucketLookupAuto
case "dns":
options.BucketLookup = minio.BucketLookupDNS
case "path":
options.BucketLookup = minio.BucketLookupPath
default:
return nil, fmt.Errorf(`bad bucket-lookup style %q must be "auto", "path" or "dns"`, cfg.BucketLookup)
}
client, err := minio.New(cfg.Endpoint, options)
if err != nil {
return nil, errors.Wrap(err, "minio.New")
}
sem, err := sema.New(cfg.Connections)
2017-06-05 22:17:39 +00:00
if err != nil {
return nil, err
}
2017-06-07 19:59:01 +00:00
be := &Backend{
2017-06-15 14:41:09 +00:00
client: client,
sem: sem,
cfg: cfg,
}
2017-02-10 18:24:54 +00:00
l, err := backend.ParseLayout(ctx, be, cfg.Layout, defaultLayout, cfg.Prefix)
if err != nil {
return nil, err
}
be.Layout = l
2017-06-17 20:15:58 +00:00
return be, nil
}
// Open opens the S3 backend at bucket and region. The bucket must already
// exist; it is not created here (use Create for that).
func Open(ctx context.Context, cfg Config, rt http.RoundTripper) (restic.Backend, error) {
	return open(ctx, cfg, rt)
}
// Create opens the S3 backend at bucket and region and creates the bucket if
// it does not exist yet.
func Create(ctx context.Context, cfg Config, rt http.RoundTripper) (restic.Backend, error) {
	be, err := open(ctx, cfg, rt)
	if err != nil {
		return nil, errors.Wrap(err, "open")
	}
	found, err := be.client.BucketExists(ctx, cfg.Bucket)
	if err != nil && isAccessDenied(err) {
		// The credentials may lack permission to list/stat buckets. Assume
		// the bucket exists and let later operations fail if it does not.
		err = nil
		found = true
	}

	if err != nil {
		debug.Log("BucketExists(%v) returned err %v", cfg.Bucket, err)
		return nil, errors.Wrap(err, "client.BucketExists")
	}

	if !found {
		// create new bucket with default ACL in default region
		err = be.client.MakeBucket(ctx, cfg.Bucket, minio.MakeBucketOptions{})
		if err != nil {
			return nil, errors.Wrap(err, "client.MakeBucket")
		}
	}

	return be, nil
}
// isAccessDenied reports whether err is an S3 "Access Denied" error response.
func isAccessDenied(err error) bool {
	debug.Log("isAccessDenied(%T, %#v)", err, err)

	var response minio.ErrorResponse
	if !errors.As(err, &response) {
		return false
	}
	return response.Code == "Access Denied"
}
// IsNotExist returns true if the error is caused by a not existing file.
2017-06-07 19:59:01 +00:00
func (be *Backend) IsNotExist(err error) bool {
debug.Log("IsNotExist(%T, %#v)", err, err)
if errors.Is(err, os.ErrNotExist) {
2017-06-16 08:54:46 +00:00
return true
}
var e minio.ErrorResponse
return errors.As(err, &e) && e.Code == "NoSuchKey"
}
// Join combines path components with slashes.
2017-06-07 19:59:01 +00:00
func (be *Backend) Join(p ...string) string {
return path.Join(p...)
}
// fileInfo implements os.FileInfo for a single S3 listing entry.
type fileInfo struct {
	name    string      // base name of the object
	size    int64       // size in bytes
	mode    os.FileMode // synthesized file mode bits
	modTime time.Time   // last modification time reported by S3
	isDir   bool        // true for common-prefix ("directory") entries
}

// Name returns the base name of the file.
func (fi *fileInfo) Name() string { return fi.name }

// Size returns the length in bytes for regular files; system-dependent for others.
func (fi *fileInfo) Size() int64 { return fi.size }

// Mode returns the file mode bits.
func (fi *fileInfo) Mode() os.FileMode { return fi.mode }

// ModTime returns the modification time.
func (fi *fileInfo) ModTime() time.Time { return fi.modTime }

// IsDir reports whether the entry describes a directory.
func (fi *fileInfo) IsDir() bool { return fi.isDir }

// Sys returns the underlying data source (always nil here).
func (fi *fileInfo) Sys() interface{} { return nil }
// ReadDir returns the entries for a directory.
func (be *Backend) ReadDir(ctx context.Context, dir string) (list []os.FileInfo, err error) {
debug.Log("ReadDir(%v)", dir)
// make sure dir ends with a slash
if dir[len(dir)-1] != '/' {
dir += "/"
}
ctx, cancel := context.WithCancel(ctx)
defer cancel()
debug.Log("using ListObjectsV1(%v)", be.cfg.ListObjectsV1)
for obj := range be.client.ListObjects(ctx, be.cfg.Bucket, minio.ListObjectsOptions{
Prefix: dir,
Recursive: false,
UseV1: be.cfg.ListObjectsV1,
}) {
2018-06-01 20:15:23 +00:00
if obj.Err != nil {
return nil, err
}
if obj.Key == "" {
continue
}
name := strings.TrimPrefix(obj.Key, dir)
// Sometimes s3 returns an entry for the dir itself. Ignore it.
if name == "" {
continue
}
entry := &fileInfo{
name: name,
size: obj.Size,
modTime: obj.LastModified,
}
if name[len(name)-1] == '/' {
entry.isDir = true
entry.mode = os.ModeDir | 0755
entry.name = name[:len(name)-1]
} else {
entry.mode = 0644
}
list = append(list, entry)
}
return list, nil
}
2021-08-07 20:20:49 +00:00
// Connections returns the configured maximum number of concurrent
// operations on this backend.
func (be *Backend) Connections() uint {
	return be.cfg.Connections
}
// Location returns this backend's location (the bucket name joined with the
// configured prefix).
func (be *Backend) Location() string {
	return be.Join(be.cfg.Bucket, be.cfg.Prefix)
}
// Hasher may return a hash function for calculating a content hash for the
// backend. None is needed here: Save lets the minio library compute the
// content MD5 itself (see SendContentMd5).
func (be *Backend) Hasher() hash.Hash {
	return nil
}

// HasAtomicReplace returns whether Save() can atomically replace files.
func (be *Backend) HasAtomicReplace() bool {
	return true
}
// Path returns the path in the bucket that is used for this backend.
func (be *Backend) Path() string {
	return be.cfg.Prefix
}
2016-01-24 00:15:35 +00:00
// Save stores data in the backend at the handle.
func (be *Backend) Save(ctx context.Context, h restic.Handle, rd restic.RewindReader) error {
	debug.Log("Save %v", h)

	if err := h.Valid(); err != nil {
		// an invalid handle is a programmer error; do not retry
		return backoff.Permanent(err)
	}

	objName := be.Filename(h)

	// limit the number of concurrent requests for the whole upload
	be.sem.GetToken()
	defer be.sem.ReleaseToken()

	opts := minio.PutObjectOptions{StorageClass: be.cfg.StorageClass}
	opts.ContentType = "application/octet-stream"
	// the only option with the high-level api is to let the library handle the checksum computation
	opts.SendContentMd5 = true
	// only use multipart uploads for very large files
	opts.PartSize = 200 * 1024 * 1024

	debug.Log("PutObject(%v, %v, %v)", be.cfg.Bucket, objName, rd.Length())
	info, err := be.client.PutObject(ctx, be.cfg.Bucket, objName, ioutil.NopCloser(rd), int64(rd.Length()), opts)

	debug.Log("%v -> %v bytes, err %#v: %v", objName, info.Size, err, err)

	// sanity check: the upload must have written exactly the advertised length
	if err == nil && info.Size != rd.Length() {
		return errors.Errorf("wrote %d bytes instead of the expected %d bytes", info.Size, rd.Length())
	}

	return errors.Wrap(err, "client.PutObject")
}
// Load runs fn with a reader that yields the contents of the file at h at the
// given offset. The actual reading is delegated to openReader via the shared
// DefaultLoad helper.
func (be *Backend) Load(ctx context.Context, h restic.Handle, length int, offset int64, fn func(rd io.Reader) error) error {
	return backend.DefaultLoad(ctx, h, length, offset, be.openReader, fn)
}
// openReader returns a reader for length bytes of the object at h starting
// at offset; length == 0 means "until the end of the object". The returned
// ReadCloser owns a semaphore token and a derived context; both are released
// when the caller closes it.
func (be *Backend) openReader(ctx context.Context, h restic.Handle, length int, offset int64) (io.ReadCloser, error) {
	debug.Log("Load %v, length %v, offset %v from %v", h, length, offset, be.Filename(h))
	if err := h.Valid(); err != nil {
		// an invalid handle is a programmer error; do not retry
		return nil, backoff.Permanent(err)
	}

	if offset < 0 {
		return nil, errors.New("offset is negative")
	}

	if length < 0 {
		return nil, errors.Errorf("invalid length %d", length)
	}

	objName := be.Filename(h)
	opts := minio.GetObjectOptions{}

	var err error
	if length > 0 {
		debug.Log("range: %v-%v", offset, offset+int64(length)-1)
		err = opts.SetRange(offset, offset+int64(length)-1)
	} else if offset > 0 {
		debug.Log("range: %v-", offset)
		// an end of 0 requests everything from offset to the end of the object
		err = opts.SetRange(offset, 0)
	}

	if err != nil {
		return nil, errors.Wrap(err, "SetRange")
	}

	// token and context are transferred to the returned reader on success
	be.sem.GetToken()
	ctx, cancel := context.WithCancel(ctx)

	// use the low-level Core API so a Range header is sent instead of
	// downloading and discarding the skipped prefix
	coreClient := minio.Core{Client: be.client}
	rd, _, _, err := coreClient.GetObject(ctx, be.cfg.Bucket, objName, opts)
	if err != nil {
		// the caller never sees the reader, so release everything here
		cancel()
		be.sem.ReleaseToken()
		return nil, err
	}

	return be.sem.ReleaseTokenOnClose(rd, cancel), err
}
2016-01-23 22:27:58 +00:00
// Stat returns information about a blob.
func (be *Backend) Stat(ctx context.Context, h restic.Handle) (bi restic.FileInfo, err error) {
	debug.Log("%v", h)

	objName := be.Filename(h)
	var obj *minio.Object

	opts := minio.GetObjectOptions{}

	be.sem.GetToken()
	obj, err = be.client.GetObject(ctx, be.cfg.Bucket, objName, opts)
	if err != nil {
		debug.Log("GetObject() err %v", err)
		be.sem.ReleaseToken()
		return restic.FileInfo{}, errors.Wrap(err, "client.GetObject")
	}

	// make sure that the object is closed properly.
	defer func() {
		e := obj.Close()
		be.sem.ReleaseToken()
		// surface a Close error through the named return value, but never
		// overwrite an earlier error
		if err == nil {
			err = errors.Wrap(e, "Close")
		}
	}()

	fi, err := obj.Stat()
	if err != nil {
		debug.Log("Stat() err %v", err)
		return restic.FileInfo{}, errors.Wrap(err, "Stat")
	}

	return restic.FileInfo{Size: fi.Size, Name: h.Name}, nil
}
2015-05-10 15:20:58 +00:00
// Test returns true if a blob of the given type and name exists in the backend.
2017-06-07 19:59:01 +00:00
func (be *Backend) Test(ctx context.Context, h restic.Handle) (bool, error) {
2015-05-10 15:20:58 +00:00
found := false
2017-04-11 20:04:18 +00:00
objName := be.Filename(h)
be.sem.GetToken()
_, err := be.client.StatObject(ctx, be.cfg.Bucket, objName, minio.StatObjectOptions{})
be.sem.ReleaseToken()
if err == nil {
2015-05-10 15:20:58 +00:00
found = true
}
// If error, then not found
return found, nil
}
// Remove removes the blob with the given name and type.
2017-06-07 19:59:01 +00:00
func (be *Backend) Remove(ctx context.Context, h restic.Handle) error {
2017-04-11 20:04:18 +00:00
objName := be.Filename(h)
be.sem.GetToken()
err := be.client.RemoveObject(ctx, be.cfg.Bucket, objName, minio.RemoveObjectOptions{})
be.sem.ReleaseToken()
2017-04-17 17:18:47 +00:00
debug.Log("Remove(%v) at %v -> err %v", h, objName, err)
if be.IsNotExist(err) {
err = nil
}
2016-08-29 19:54:50 +00:00
return errors.Wrap(err, "client.RemoveObject")
2015-05-10 15:20:58 +00:00
}
// List runs fn for each file in the backend which has the type t. When an
// error occurs (or fn returns an error), List stops and returns it.
func (be *Backend) List(ctx context.Context, t restic.FileType, fn func(restic.FileInfo) error) error {
	debug.Log("listing %v", t)

	prefix, recursive := be.Basedir(t)

	// make sure prefix ends with a slash
	if !strings.HasSuffix(prefix, "/") {
		prefix += "/"
	}

	// stop the listing goroutine as soon as we return
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()

	debug.Log("using ListObjectsV1(%v)", be.cfg.ListObjectsV1)

	// NB: unfortunately we can't protect this with be.sem.GetToken() here.
	// Doing so would enable a deadlock situation (gh-1399), as ListObjects()
	// starts its own goroutine and returns results via a channel.
	listresp := be.client.ListObjects(ctx, be.cfg.Bucket, minio.ListObjectsOptions{
		Prefix:    prefix,
		Recursive: recursive,
		UseV1:     be.cfg.ListObjectsV1,
	})

	for obj := range listresp {
		if obj.Err != nil {
			return obj.Err
		}

		m := strings.TrimPrefix(obj.Key, prefix)
		// skip the entry for the prefix itself
		if m == "" {
			continue
		}

		fi := restic.FileInfo{
			Name: path.Base(m),
			Size: obj.Size,
		}

		// check for cancellation both before and after calling fn, so a
		// cancelled context stops the listing promptly
		if ctx.Err() != nil {
			return ctx.Err()
		}
		err := fn(fi)
		if err != nil {
			return err
		}

		if ctx.Err() != nil {
			return ctx.Err()
		}
	}

	return ctx.Err()
}
// removeKeys removes all files of type t from the backend.
func (be *Backend) removeKeys(ctx context.Context, t restic.FileType) error {
	// bug fix: the listing must use the requested type t; it previously
	// always listed restic.PackFile, so for every other type nothing was
	// ever enumerated (and thus nothing removed)
	return be.List(ctx, t, func(fi restic.FileInfo) error {
		return be.Remove(ctx, restic.Handle{Type: t, Name: fi.Name})
	})
}
// Delete removes all restic keys in the bucket. It will not remove the bucket itself.
2017-06-07 19:59:01 +00:00
func (be *Backend) Delete(ctx context.Context) error {
2016-08-31 20:39:36 +00:00
alltypes := []restic.FileType{
restic.PackFile,
2016-08-31 20:39:36 +00:00
restic.KeyFile,
restic.LockFile,
restic.SnapshotFile,
restic.IndexFile}
for _, t := range alltypes {
2017-06-03 15:39:57 +00:00
err := be.removeKeys(ctx, t)
if err != nil {
return nil
}
}
2017-06-03 15:39:57 +00:00
return be.Remove(ctx, restic.Handle{Type: restic.ConfigFile})
2015-05-10 15:20:58 +00:00
}
// Close does nothing: the minio client needs no explicit shutdown.
func (be *Backend) Close() error { return nil }
// Rename moves a file based on the new layout l: the object is copied to its
// new name and the old object is removed afterwards.
func (be *Backend) Rename(ctx context.Context, h restic.Handle, l backend.Layout) error {
	debug.Log("Rename %v to %v", h, l)
	oldname := be.Filename(h)
	newname := l.Filename(h)

	if oldname == newname {
		debug.Log(" %v is already renamed", newname)
		return nil
	}

	debug.Log(" %v -> %v", oldname, newname)

	src := minio.CopySrcOptions{
		Bucket: be.cfg.Bucket,
		Object: oldname,
	}

	dst := minio.CopyDestOptions{
		Bucket: be.cfg.Bucket,
		Object: newname,
	}

	_, err := be.client.CopyObject(ctx, dst, src)
	if err != nil && be.IsNotExist(err) {
		// the source object is gone, assume a previous Rename already succeeded
		debug.Log("copy failed: %v, seems to already have been renamed", err)
		return nil
	}

	if err != nil {
		debug.Log("copy failed: %v", err)
		return err
	}

	// the copy succeeded, delete the object under its old name
	return be.client.RemoveObject(ctx, be.cfg.Bucket, oldname, minio.RemoveObjectOptions{})
}