package s3

import (
"context"
"fmt"
"hash"
"io"
"net/http"
"os"
"path"
"strings"
"time"

"github.com/restic/restic/internal/backend"
"github.com/restic/restic/internal/backend/layout"
"github.com/restic/restic/internal/backend/location"
"github.com/restic/restic/internal/backend/util"
"github.com/restic/restic/internal/debug"
"github.com/restic/restic/internal/errors"

"github.com/minio/minio-go/v7"
"github.com/minio/minio-go/v7/pkg/credentials"
)
// Backend stores data on an S3 endpoint.
type Backend struct {
client *minio.Client
cfg Config
layout.Layout
}
// make sure that *Backend implements backend.Backend
var _ backend.Backend = &Backend{}
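// NewFactory returns a location.Factory for the "s3" scheme, wiring together
// ParseConfig, Create and Open.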
func NewFactory() location.Factory {
return location.NewHTTPBackendFactory("s3", ParseConfig, location.NoPassword, Create, Open)
}
const defaultLayout = "default"
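// open creates the minio client from cfg and determines the repository layout.
// Bucket creation is handled by Create, not here.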
func open(ctx context.Context, cfg Config, rt http.RoundTripper) (*Backend, error) {
debug.Log("open, config %#v", cfg)
if cfg.KeyID == "" && cfg.Secret.String() != "" {
return nil, errors.Fatalf("unable to open S3 backend: Key ID ($AWS_ACCESS_KEY_ID) is empty")
} else if cfg.KeyID != "" && cfg.Secret.String() == "" {
return nil, errors.Fatalf("unable to open S3 backend: Secret ($AWS_SECRET_ACCESS_KEY) is empty")
}
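// Note: minio.MaxRetry is a package-level variable in minio-go, so this
// setting affects every client created within this process.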
if cfg.MaxRetries > 0 {
minio.MaxRetry = int(cfg.MaxRetries)
}
creds, err := getCredentials(cfg)
if err != nil {
return nil, errors.Wrap(err, "s3.getCredentials")
}
options := &minio.Options{
Creds: creds,
Secure: !cfg.UseHTTP,
Region: cfg.Region,
Transport: rt,
}
switch strings.ToLower(cfg.BucketLookup) {
case "", "auto":
options.BucketLookup = minio.BucketLookupAuto
case "dns":
options.BucketLookup = minio.BucketLookupDNS
case "path":
options.BucketLookup = minio.BucketLookupPath
default:
return nil, fmt.Errorf(`bad bucket-lookup style %q, must be "auto", "path" or "dns"`, cfg.BucketLookup)
}
client, err := minio.New(cfg.Endpoint, options)
if err != nil {
return nil, errors.Wrap(err, "minio.New")
}
be := &Backend{
client: client,
cfg: cfg,
}
l, err := layout.ParseLayout(ctx, be, cfg.Layout, defaultLayout, cfg.Prefix)
if err != nil {
return nil, err
}
be.Layout = l
return be, nil
}
// getCredentials runs through the various credential types and returns the first one that works.
// Additionally, if the user has specified a role to assume, it assumes that role using the obtained credentials.
func getCredentials(cfg Config) (*credentials.Credentials, error) {
// Chains all credential types, in the following order:
// - AWS env vars (e.g. AWS_ACCESS_KEY_ID)
// - Static credentials provided by the user
// - Minio env vars (e.g. MINIO_ACCESS_KEY)
// - AWS creds file (AWS_SHARED_CREDENTIALS_FILE or ~/.aws/credentials)
// - Minio creds file (MINIO_SHARED_CREDENTIALS_FILE or ~/.mc/config.json)
// - IAM profile based credentials (performs an HTTP call to a
//   pre-defined endpoint, only valid inside configured EC2 instances)
creds := credentials.NewChainCredentials([]credentials.Provider{
&credentials.EnvAWS{},
&credentials.Static{
Value: credentials.Value{
AccessKeyID: cfg.KeyID,
SecretAccessKey: cfg.Secret.Unwrap(),
},
},
&credentials.EnvMinio{},
&credentials.FileAWSCredentials{},
&credentials.FileMinioClient{},
&credentials.IAM{
Client: &http.Client{
Transport: http.DefaultTransport,
},
},
})
c, err := creds.Get()
if err != nil {
return nil, errors.Wrap(err, "creds.Get")
}
if c.SignerType == credentials.SignatureAnonymous {
debug.Log("using anonymous access for %#v", cfg.Endpoint)
}
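// Optionally assume an IAM role. All parameters come from the
// RESTIC_AWS_ASSUME_ROLE_* environment variables; the credentials obtained
// above are used as the source credentials for the STS call.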
roleArn := os.Getenv("RESTIC_AWS_ASSUME_ROLE_ARN")
if roleArn != "" {
// use the region provided by the configuration by default
awsRegion := cfg.Region
// allow the region to be overridden if for some reason it is required
if os.Getenv("RESTIC_AWS_ASSUME_ROLE_REGION") != "" {
awsRegion = os.Getenv("RESTIC_AWS_ASSUME_ROLE_REGION")
}
sessionName := os.Getenv("RESTIC_AWS_ASSUME_ROLE_SESSION_NAME")
externalID := os.Getenv("RESTIC_AWS_ASSUME_ROLE_EXTERNAL_ID")
policy := os.Getenv("RESTIC_AWS_ASSUME_ROLE_POLICY")
stsEndpoint := os.Getenv("RESTIC_AWS_ASSUME_ROLE_STS_ENDPOINT")
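// If no explicit STS endpoint is configured, derive a regional one
// (using the separate amazonaws.com.cn domain for China regions) or
// fall back to the global endpoint.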
if stsEndpoint == "" {
if awsRegion != "" {
if strings.HasPrefix(awsRegion, "cn-") {
stsEndpoint = "https://sts." + awsRegion + ".amazonaws.com.cn"
} else {
stsEndpoint = "https://sts." + awsRegion + ".amazonaws.com"
}
} else {
stsEndpoint = "https://sts.amazonaws.com"
}
}
opts := credentials.STSAssumeRoleOptions{
RoleARN: roleArn,
AccessKey: c.AccessKeyID,
SecretKey: c.SecretAccessKey,
SessionToken: c.SessionToken,
RoleSessionName: sessionName,
ExternalID: externalID,
Policy: policy,
Location: awsRegion,
}
creds, err = credentials.NewSTSAssumeRole(stsEndpoint, opts)
if err != nil {
return nil, errors.Wrap(err, "creds.AssumeRole")
}
}
return creds, nil
}
// Open opens the S3 backend at bucket and region.
func Open(ctx context.Context, cfg Config, rt http.RoundTripper) (backend.Backend, error) {
return open(ctx, cfg, rt)
}
// Create opens the S3 backend at bucket and region and creates the bucket if
// it does not exist yet.
func Create(ctx context.Context, cfg Config, rt http.RoundTripper) (backend.Backend, error) {
be, err := open(ctx, cfg, rt)
if err != nil {
return nil, errors.Wrap(err, "open")
}
found, err := be.client.BucketExists(ctx, cfg.Bucket)
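// Treat "Access Denied" as "bucket exists": the credentials may only grant
// object-level permissions, in which case later operations will surface any
// real problem.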
if err != nil && isAccessDenied(err) {
err = nil
found = true
}
if err != nil {
debug.Log("BucketExists(%v) returned err %v", cfg.Bucket, err)
return nil, errors.Wrap(err, "client.BucketExists")
}
if !found {
// create new bucket with default ACL in default region
err = be.client.MakeBucket(ctx, cfg.Bucket, minio.MakeBucketOptions{})
if err != nil {
return nil, errors.Wrap(err, "client.MakeBucket")
}
}
return be, nil
}
// isAccessDenied returns true if the error is caused by Access Denied.
func isAccessDenied(err error) bool {
debug.Log("isAccessDenied(%T, %#v)", err, err)
var e minio.ErrorResponse
return errors.As(err, &e) && e.Code == "AccessDenied"
}
// IsNotExist returns true if the error indicates that the file does not exist.
func (be *Backend) IsNotExist(err error) bool {
var e minio.ErrorResponse
return errors.As(err, &e) && e.Code == "NoSuchKey"
}
// Join combines path components with slashes.
func (be *Backend) Join(p ...string) string {
return path.Join(p...)
}
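// fileInfo implements os.FileInfo for the entries returned by ReadDir.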
type fileInfo struct {
name string
size int64
mode os.FileMode
modTime time.Time
isDir bool
}
func (fi *fileInfo) Name() string { return fi.name } // base name of the file
func (fi *fileInfo) Size() int64 { return fi.size } // length in bytes for regular files; system-dependent for others
func (fi *fileInfo) Mode() os.FileMode { return fi.mode } // file mode bits
func (fi *fileInfo) ModTime() time.Time { return fi.modTime } // modification time
func (fi *fileInfo) IsDir() bool { return fi.isDir } // abbreviation for Mode().IsDir()
func (fi *fileInfo) Sys() interface{} { return nil } // underlying data source (can return nil)
// ReadDir returns the entries for a directory.
func (be *Backend) ReadDir(ctx context.Context, dir string) (list []os.FileInfo, err error) {
debug.Log("ReadDir(%v)", dir)
// make sure dir ends with a slash
if dir[len(dir)-1] != '/' {
dir += "/"
}
ctx, cancel := context.WithCancel(ctx)
defer cancel()
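// Cancelling the context when ReadDir returns stops the ListObjects
// goroutine and lets it close its result channel.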
debug.Log("using ListObjectsV1(%v)", be.cfg.ListObjectsV1)
for obj := range be.client.ListObjects(ctx, be.cfg.Bucket, minio.ListObjectsOptions{
Prefix: dir,
Recursive: false,
UseV1: be.cfg.ListObjectsV1,
}) {
if obj.Err != nil {
return nil, obj.Err
}
if obj.Key == "" {
continue
}
name := strings.TrimPrefix(obj.Key, dir)
// Sometimes s3 returns an entry for the dir itself. Ignore it.
if name == "" {
continue
}
entry := &fileInfo{
name: name,
size: obj.Size,
modTime: obj.LastModified,
}
if name[len(name)-1] == '/' {
entry.isDir = true
entry.mode = os.ModeDir | 0755
entry.name = name[:len(name)-1]
} else {
entry.mode = 0644
}
list = append(list, entry)
}
return list, nil
}
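// Connections returns the number of configured parallel connections to the backend.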
func (be *Backend) Connections() uint {
return be.cfg.Connections
}
// Location returns this backend's location (the bucket name and prefix).
func (be *Backend) Location() string {
return be.Join(be.cfg.Bucket, be.cfg.Prefix)
}
// Hasher may return a hash function for calculating a content hash for the backend
func (be *Backend) Hasher() hash.Hash {
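// minio-go already computes a content MD5 for uploads (SendContentMd5 in
// Save), so no additional hasher is configured here.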
return nil
}
// HasAtomicReplace returns whether Save() can atomically replace files
func (be *Backend) HasAtomicReplace() bool {
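// An S3 PUT replaces the whole object, so overwriting an existing key is
// atomic: readers never observe a partially written object.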
return true
}
// Path returns the path in the bucket that is used for this backend.
func (be *Backend) Path() string {
return be.cfg.Prefix
}
// useStorageClass returns whether the file should be saved in the configured storage class.
// For archive storage classes, only data files are stored using that class; metadata
// must remain instantly accessible.
func (be *Backend) useStorageClass(h backend.Handle) bool {
notArchiveClass := be.cfg.StorageClass != "GLACIER" && be.cfg.StorageClass != "DEEP_ARCHIVE"
isDataFile := h.Type == backend.PackFile && !h.IsMetadata
return isDataFile || notArchiveClass
}
// Save stores data in the backend at the handle.
func (be *Backend) Save(ctx context.Context, h backend.Handle, rd backend.RewindReader) error {
objName := be.Filename(h)
opts := minio.PutObjectOptions{
ContentType: "application/octet-stream",
// the only option with the high-level api is to let the library handle the checksum computation
SendContentMd5: true,
// only use multipart uploads for very large files
PartSize: 200 * 1024 * 1024,
}
if be.useStorageClass(h) {
opts.StorageClass = be.cfg.StorageClass
}
info, err := be.client.PutObject(ctx, be.cfg.Bucket, objName, io.NopCloser(rd), int64(rd.Length()), opts)
// sanity check
if err == nil && info.Size != rd.Length() {
return errors.Errorf("wrote %d bytes instead of the expected %d bytes", info.Size, rd.Length())
}
return errors.Wrap(err, "client.PutObject")
}
// Load runs fn with a reader that yields the contents of the file at h at the
// given offset.
func (be *Backend) Load(ctx context.Context, h backend.Handle, length int, offset int64, fn func(rd io.Reader) error) error {
ctx, cancel := context.WithCancel(ctx)
defer cancel()
return util.DefaultLoad(ctx, h, length, offset, be.openReader, fn)
}
func (be *Backend) openReader(ctx context.Context, h backend.Handle, length int, offset int64) (io.ReadCloser, error) {
objName := be.Filename(h)
opts := minio.GetObjectOptions{}
var err error
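// Request only the needed byte range: with a positive length this is
// [offset, offset+length-1]; per minio-go's SetRange, a non-zero offset with
// end 0 requests everything from offset to the end of the object.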
if length > 0 {
err = opts.SetRange(offset, offset+int64(length)-1)
} else if offset > 0 {
err = opts.SetRange(offset, 0)
}
if err != nil {
return nil, errors.Wrap(err, "SetRange")
}
coreClient := minio.Core{Client: be.client}
rd, _, _, err := coreClient.GetObject(ctx, be.cfg.Bucket, objName, opts)
if err != nil {
return nil, err
}
return rd, err
}
// Stat returns information about a blob.
func (be *Backend) Stat(ctx context.Context, h backend.Handle) (bi backend.FileInfo, err error) {
objName := be.Filename(h)
var obj *minio.Object
opts := minio.GetObjectOptions{}
obj, err = be.client.GetObject(ctx, be.cfg.Bucket, objName, opts)
if err != nil {
return backend.FileInfo{}, errors.Wrap(err, "client.GetObject")
}
// make sure that the object is closed properly.
defer func() {
e := obj.Close()
if err == nil {
err = errors.Wrap(e, "Close")
}
}()
fi, err := obj.Stat()
if err != nil {
return backend.FileInfo{}, errors.Wrap(err, "Stat")
}
return backend.FileInfo{Size: fi.Size, Name: h.Name}, nil
}
// Remove removes the blob with the given name and type.
func (be *Backend) Remove(ctx context.Context, h backend.Handle) error {
objName := be.Filename(h)
err := be.client.RemoveObject(ctx, be.cfg.Bucket, objName, minio.RemoveObjectOptions{})
if be.IsNotExist(err) {
err = nil
}
return errors.Wrap(err, "client.RemoveObject")
}
// List runs fn for each file in the backend which has the type t. When an
// error occurs (or fn returns an error), List stops and returns it.
func (be *Backend) List(ctx context.Context, t backend.FileType, fn func(backend.FileInfo) error) error {
prefix, recursive := be.Basedir(t)
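// Basedir returns the prefix for this file type and whether its entries are
// spread over subdirectories, which requires a recursive listing.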
// make sure prefix ends with a slash
if !strings.HasSuffix(prefix, "/") {
prefix += "/"
}
ctx, cancel := context.WithCancel(ctx)
defer cancel()
debug.Log("using ListObjectsV1(%v)", be.cfg.ListObjectsV1)
// NB: unfortunately we can't protect this with be.sem.GetToken() here.
// Doing so would enable a deadlock situation (gh-1399), as ListObjects()
// starts its own goroutine and returns results via a channel.
listresp := be.client.ListObjects(ctx, be.cfg.Bucket, minio.ListObjectsOptions{
Prefix: prefix,
Recursive: recursive,
UseV1: be.cfg.ListObjectsV1,
})
for obj := range listresp {
if obj.Err != nil {
return obj.Err
}
m := strings.TrimPrefix(obj.Key, prefix)
if m == "" {
continue
}
fi := backend.FileInfo{
Name: path.Base(m),
Size: obj.Size,
}
if ctx.Err() != nil {
return ctx.Err()
}
err := fn(fi)
if err != nil {
return err
}
if ctx.Err() != nil {
return ctx.Err()
}
}
return ctx.Err()
}
// Delete removes all restic keys in the bucket. It will not remove the bucket itself.
func (be *Backend) Delete(ctx context.Context) error {
return util.DefaultDelete(ctx, be)
}
// Close does nothing
func (be *Backend) Close() error { return nil }
// Rename moves a file based on the new layout l.
func (be *Backend) Rename(ctx context.Context, h backend.Handle, l layout.Layout) error {
debug.Log("Rename %v to %v", h, l)
oldname := be.Filename(h)
newname := l.Filename(h)
if oldname == newname {
debug.Log(" %v is already renamed", newname)
return nil
}
debug.Log(" %v -> %v", oldname, newname)
src := minio.CopySrcOptions{
Bucket: be.cfg.Bucket,
Object: oldname,
}
dst := minio.CopyDestOptions{
Bucket: be.cfg.Bucket,
Object: newname,
}
_, err := be.client.CopyObject(ctx, dst, src)
if err != nil && be.IsNotExist(err) {
debug.Log("copy failed: %v, seems to already have been renamed", err)
return nil
}
if err != nil {
debug.Log("copy failed: %v", err)
return err
}
return be.client.RemoveObject(ctx, be.cfg.Bucket, oldname, minio.RemoveObjectOptions{})
}