// Package gs provides a restic backend for Google Cloud Storage.
package gs

import (
	"context"
	"crypto/md5"
	"hash"
	"io"
	"net/http"
	"os"
	"path"
	"strings"

	"cloud.google.com/go/storage"
	"github.com/pkg/errors"
	"github.com/restic/restic/internal/backend"
	"github.com/restic/restic/internal/backend/layout"
	"github.com/restic/restic/internal/backend/location"
	"github.com/restic/restic/internal/backend/util"
	"github.com/restic/restic/internal/debug"

	"golang.org/x/oauth2"
	"golang.org/x/oauth2/google"
	"google.golang.org/api/googleapi"
	"google.golang.org/api/iterator"
	"google.golang.org/api/option"
)

// Backend stores data in a GCS bucket.
//
// The service account used to access the bucket must have these permissions:
//  - storage.objects.create
//  - storage.objects.delete
//  - storage.objects.get
//  - storage.objects.list
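//
// For example, the predefined role "roles/storage.objectAdmin" includes all
// four object permissions.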
type Backend struct {
	gcsClient    *storage.Client
	projectID    string
	connections  uint
	bucketName   string
	region       string
	bucket       *storage.BucketHandle
	prefix       string
	listMaxItems int
	layout.Layout
}

// Ensure that *Backend implements backend.Backend.
var _ backend.Backend = &Backend{}
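
// NewFactory returns the location.Factory for the "gs" backend. A repository
// in this backend is addressed with restic's "gs:" location syntax; as an
// illustration (bucket name and prefix are placeholders):
//
//	restic -r gs:my-bucket:/restic-repo init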
func NewFactory() location.Factory {
	return location.NewHTTPBackendFactory("gs", ParseConfig, location.NoPassword, Create, Open)
}
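
// getStorageClient returns an authenticated Google Cloud Storage client. It
// uses the static token from the GOOGLE_ACCESS_TOKEN environment variable if
// that is set, and Google Application Default Credentials otherwise.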
func getStorageClient(rt http.RoundTripper) (*storage.Client, error) {
	// create a new HTTP client
	httpClient := &http.Client{
		Transport: rt,
	}

	// create a new context with the HTTP client stored at the oauth2.HTTPClient key
	ctx := context.WithValue(context.Background(), oauth2.HTTPClient, httpClient)

	var ts oauth2.TokenSource
	if token := os.Getenv("GOOGLE_ACCESS_TOKEN"); token != "" {
		ts = oauth2.StaticTokenSource(&oauth2.Token{
			AccessToken: token,
			TokenType:   "Bearer",
		})
	} else {
		var err error
		ts, err = google.DefaultTokenSource(ctx, storage.ScopeReadWrite)
		if err != nil {
			return nil, err
		}
	}

	oauthClient := oauth2.NewClient(ctx, ts)

	gcsClient, err := storage.NewClient(ctx, option.WithHTTPClient(oauthClient))
	if err != nil {
		return nil, err
	}

	return gcsClient, nil
}
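
// bucketExists reports whether the bucket already exists; it returns
// (false, nil) when the bucket is known not to exist.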
func (be *Backend) bucketExists(ctx context.Context, bucket *storage.BucketHandle) (bool, error) {
	_, err := bucket.Attrs(ctx)
	if err == storage.ErrBucketNotExist {
		return false, nil
	}
	return err == nil, err
}
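
// defaultListMaxItems is the default value for Backend.listMaxItems.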
const defaultListMaxItems = 1000
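
// open creates the backend from the given configuration. It initializes the
// storage client but does not contact the bucket itself.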
func open(cfg Config, rt http.RoundTripper) (*Backend, error) {
	debug.Log("open, config %#v", cfg)

	gcsClient, err := getStorageClient(rt)
	if err != nil {
		return nil, errors.Wrap(err, "getStorageClient")
	}

	be := &Backend{
		gcsClient:   gcsClient,
		projectID:   cfg.ProjectID,
		connections: cfg.Connections,
		bucketName:  cfg.Bucket,
		region:      cfg.Region,
		bucket:      gcsClient.Bucket(cfg.Bucket),
		prefix:      cfg.Prefix,
		Layout: &layout.DefaultLayout{
			Path: cfg.Prefix,
			Join: path.Join,
		},
		listMaxItems: defaultListMaxItems,
	}

	return be, nil
}

// Open opens the gs backend at the specified bucket.
func Open(_ context.Context, cfg Config, rt http.RoundTripper) (backend.Backend, error) {
	return open(cfg, rt)
}

// Create opens the gs backend at the specified bucket and attempts to create
// the bucket if it does not exist yet.
//
// The service account must have the "storage.buckets.create" permission to
// create a bucket that does not yet exist.
func Create(ctx context.Context, cfg Config, rt http.RoundTripper) (backend.Backend, error) {
	be, err := open(cfg, rt)
	if err != nil {
		return nil, errors.Wrap(err, "open")
	}

	// Try to determine if the bucket exists. If it does not, try to create it.
	exists, err := be.bucketExists(ctx, be.bucket)
	if err != nil {
		if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusForbidden {
			// The bucket might exist, but the client lacks the
			// storage.buckets.get permission to check that. Proceed and let
			// later operations fail if it really does not exist.
			return be, nil
		}
		return nil, errors.Wrap(err, "service.Buckets.Get")
	}

	if !exists {
		bucketAttrs := &storage.BucketAttrs{
			Location: cfg.Region,
		}
		// Bucket doesn't exist, try to create it.
		if err := be.bucket.Create(ctx, be.projectID, bucketAttrs); err != nil {
			// Always an error, as the bucket definitely doesn't exist.
			return nil, errors.Wrap(err, "service.Buckets.Insert")
		}
	}

	return be, nil
}

// SetListMaxItems sets the number of list items to load per request.
func (be *Backend) SetListMaxItems(i int) {
	be.listMaxItems = i
}

// IsNotExist returns true if the error is caused by a non-existing file.
func (be *Backend) IsNotExist(err error) bool {
	return errors.Is(err, storage.ErrObjectNotExist)
}

// Join combines path components with slashes.
func (be *Backend) Join(p ...string) string {
	return path.Join(p...)
}
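
// Connections returns the number of concurrent connections this backend is
// configured to use.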
func (be *Backend) Connections() uint {
	return be.connections
}

// Location returns this backend's location (the bucket name).
func (be *Backend) Location() string {
	return be.Join(be.bucketName, be.prefix)
}

// Hasher may return a hash function for calculating a content hash for the backend
func (be *Backend) Hasher() hash.Hash {
	return md5.New()
}

// HasAtomicReplace returns whether Save() can atomically replace files
func (be *Backend) HasAtomicReplace() bool {
	return true
}

// Path returns the path in the bucket that is used for this backend.
func (be *Backend) Path() string {
	return be.prefix
}

// Save stores data in the backend at the handle.
func (be *Backend) Save(ctx context.Context, h backend.Handle, rd backend.RewindReader) error {
	objName := be.Filename(h)

	// Set chunk size to zero to disable resumable uploads.
	//
	// With a non-zero chunk size (the default is
	// googleapi.DefaultUploadChunkSize, 8MB), Insert will buffer data from
	// rd in chunks of this size so it can upload these chunks in
	// individual requests.
	//
	// This chunking allows the library to automatically handle network
	// interruptions and re-upload only the last chunk rather than the full
	// file.
	//
	// Unfortunately, this buffering doesn't play nicely with
	// --limit-upload, which applies a rate limit to rd. This rate limit
	// ends up only limiting the read from rd into the buffer rather than
	// the network traffic itself. This results in poor network rate limit
	// behavior, where individual chunks are written to the network at full
	// bandwidth for several seconds, followed by several seconds of no
	// network traffic as the next chunk is read through the rate limiter.
	//
	// By disabling chunking, rd is passed further down the request stack,
	// where there is less (but some) buffering, which ultimately results
	// in better rate limiting behavior.
	//
	// restic typically writes small blobs (4MB-30MB), so the resumable
	// uploads are not providing significant benefit anyways.
	w := be.bucket.Object(objName).NewWriter(ctx)
	w.ChunkSize = 0
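	// Setting MD5 on the writer makes the server verify the uploaded content:
	// the object is rejected if its hash does not match rd.Hash().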
	w.MD5 = rd.Hash()
	wbytes, err := io.Copy(w, rd)
	cerr := w.Close()
	if err == nil {
		err = cerr
	}
	if err != nil {
		return errors.Wrap(err, "service.Objects.Insert")
	}

	// sanity check
	if wbytes != rd.Length() {
		return errors.Errorf("wrote %d bytes instead of the expected %d bytes", wbytes, rd.Length())
	}
	return nil
}

// Load runs fn with a reader that yields the contents of the file at h at the
// given offset.
func (be *Backend) Load(ctx context.Context, h backend.Handle, length int, offset int64, fn func(rd io.Reader) error) error {
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()

	return util.DefaultLoad(ctx, h, length, offset, be.openReader, fn)
}
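
// openReader returns a reader for the object at h that yields length bytes
// starting at offset; a length of zero means "read to the end".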
func (be *Backend) openReader(ctx context.Context, h backend.Handle, length int, offset int64) (io.ReadCloser, error) {
	if length == 0 {
		// negative length indicates read till end to GCS lib
		length = -1
	}

	objName := be.Filename(h)

	r, err := be.bucket.Object(objName).NewRangeReader(ctx, offset, int64(length))
	if err != nil {
		return nil, err
	}

	return r, err
}

// Stat returns information about a blob.
func (be *Backend) Stat(ctx context.Context, h backend.Handle) (bi backend.FileInfo, err error) {
	objName := be.Filename(h)

	attr, err := be.bucket.Object(objName).Attrs(ctx)
	if err != nil {
		return backend.FileInfo{}, errors.Wrap(err, "service.Objects.Get")
	}

	return backend.FileInfo{Size: attr.Size, Name: h.Name}, nil
}

// Remove removes the blob with the given name and type.
func (be *Backend) Remove(ctx context.Context, h backend.Handle) error {
	objName := be.Filename(h)

	err := be.bucket.Object(objName).Delete(ctx)
	if be.IsNotExist(err) {
		err = nil
	}

	return errors.Wrap(err, "client.RemoveObject")
}

// List runs fn for each file in the backend which has the type t. When an
// error occurs (or fn returns an error), List stops and returns it.
func (be *Backend) List(ctx context.Context, t backend.FileType, fn func(backend.FileInfo) error) error {
	prefix, _ := be.Basedir(t)

	// make sure prefix ends with a slash
	if !strings.HasSuffix(prefix, "/") {
		prefix += "/"
	}

	ctx, cancel := context.WithCancel(ctx)
	defer cancel()

	itr := be.bucket.Objects(ctx, &storage.Query{Prefix: prefix})

	for {
		attrs, err := itr.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			return err
		}

		m := strings.TrimPrefix(attrs.Name, prefix)
		if m == "" {
			continue
		}

		fi := backend.FileInfo{
			Name: path.Base(m),
			Size: int64(attrs.Size),
		}

		err = fn(fi)
		if err != nil {
			return err
		}

		if ctx.Err() != nil {
			return ctx.Err()
		}
	}

	return ctx.Err()
}

// Delete removes all restic keys in the bucket. It will not remove the bucket itself.
func (be *Backend) Delete(ctx context.Context) error {
	return util.DefaultDelete(ctx, be)
}

// Close does nothing.
func (be *Backend) Close() error { return nil }