// Package gs provides a restic backend for Google Cloud Storage.
package gs
import (
"context"
"fmt"
"io"
"io/ioutil"
"os"
"path"
"strings"

"github.com/pkg/errors"
"golang.org/x/oauth2/google"
"google.golang.org/api/googleapi"
storage "google.golang.org/api/storage/v1"

"github.com/restic/restic/internal/backend"
"github.com/restic/restic/internal/debug"
"github.com/restic/restic/internal/restic"
)
// Backend stores data in a GCS bucket.
//
// The service account used to access the bucket must have these permissions:
// * storage.objects.create
// * storage.objects.delete
// * storage.objects.get
// * storage.objects.list
type Backend struct {
service *storage.Service
projectID string
sem *backend.Semaphore
bucketName string
prefix string
listMaxItems int
backend.Layout
}
// Ensure that *Backend implements restic.Backend.
var _ restic.Backend = &Backend{}
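// getStorageService returns an authenticated GCS service built from the
// service account JSON key file at jsonKeyPath.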
func getStorageService(jsonKeyPath string) (*storage.Service, error) {
raw, err := ioutil.ReadFile(jsonKeyPath)
if err != nil {
return nil, errors.Wrap(err, "ReadFile")
}
conf, err := google.JWTConfigFromJSON(raw, storage.DevstorageReadWriteScope)
if err != nil {
return nil, err
}
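// conf.Client returns an *http.Client that transparently obtains and
// refreshes OAuth2 tokens for the service account on each request.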
client := conf.Client(context.TODO())
service, err := storage.New(client)
if err != nil {
return nil, err
}
return service, nil
}
const defaultListMaxItems = 1000
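// open creates the backend for cfg. It does not check that the bucket
// exists or is accessible.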
func open(cfg Config) (*Backend, error) {
debug.Log("open, config %#v", cfg)
service, err := getStorageService(cfg.JSONKeyPath)
if err != nil {
return nil, errors.Wrap(err, "getStorageService")
}
sem, err := backend.NewSemaphore(cfg.Connections)
if err != nil {
return nil, err
}
be := &Backend{
service: service,
projectID: cfg.ProjectID,
sem: sem,
bucketName: cfg.Bucket,
prefix: cfg.Prefix,
Layout: &backend.DefaultLayout{
Path: cfg.Prefix,
Join: path.Join,
},
listMaxItems: defaultListMaxItems,
}
return be, nil
}
// Open opens the gs backend at the specified bucket.
func Open(cfg Config) (restic.Backend, error) {
return open(cfg)
}
// Create opens the gs backend at the specified bucket and attempts to create
// the bucket if it does not exist yet.
//
// The service account must have the "storage.buckets.create" permission to
// create a bucket that does not yet exist.
func Create(cfg Config) (restic.Backend, error) {
be, err := open(cfg)
if err != nil {
return nil, errors.Wrap(err, "open")
}
// Try to determine if the bucket exists. If it does not, try to create it.
//
// A Get call has three typical error cases:
//
// * nil: Bucket exists and we have access to the metadata (returned).
//
// * 403: Bucket exists and we do not have access to the metadata. We
// don't have storage.buckets.get permission to the bucket, but we may
// still be able to access objects in the bucket.
//
// * 404: Bucket doesn't exist.
//
// Determining if the bucket is accessible is best-effort because the
// 403 case is ambiguous.
if _, err := be.service.Buckets.Get(be.bucketName).Do(); err != nil {
gerr, ok := err.(*googleapi.Error)
if !ok {
// Don't know what to do with this error.
return nil, errors.Wrap(err, "service.Buckets.Get")
}
switch gerr.Code {
case 403:
// Bucket exists, but we don't know if it is
// accessible. Optimistically assume it is; if not,
// future Backend calls will fail.
debug.Log("Unable to determine if bucket %s is accessible (err %v). Continuing as if it is.", be.bucketName, err)
case 404:
// Bucket doesn't exist, try to create it.
bucket := &storage.Bucket{
Name: be.bucketName,
}
if _, err := be.service.Buckets.Insert(be.projectID, bucket).Do(); err != nil {
// Always an error, as the bucket definitely
// doesn't exist.
return nil, errors.Wrap(err, "service.Buckets.Insert")
}
default:
// Don't know what to do with this error.
return nil, errors.Wrap(err, "service.Buckets.Get")
}
}
return be, nil
}
// SetListMaxItems sets the number of list items to load per request.
func (be *Backend) SetListMaxItems(i int) {
be.listMaxItems = i
}
// IsNotExist returns true if the error is caused by a non-existent file.
func (be *Backend) IsNotExist(err error) bool {
debug.Log("IsNotExist(%T, %#v)", err, err)
if os.IsNotExist(err) {
return true
}
if er, ok := err.(*googleapi.Error); ok {
if er.Code == 404 {
return true
}
}
return false
}
// Join combines path components with slashes.
func (be *Backend) Join(p ...string) string {
return path.Join(p...)
}
// Location returns this backend's location (the bucket name and prefix).
func (be *Backend) Location() string {
return be.Join(be.bucketName, be.prefix)
}
// Path returns the path in the bucket that is used for this backend.
func (be *Backend) Path() string {
return be.prefix
}
// Save stores data in the backend at the handle.
func (be *Backend) Save(ctx context.Context, h restic.Handle, rd io.Reader) (err error) {
if err := h.Valid(); err != nil {
return err
}
objName := be.Filename(h)
debug.Log("Save %v at %v", h, objName)
be.sem.GetToken()
// Check that an object with this name does not already exist.
if _, err := be.service.Objects.Get(be.bucketName, objName).Do(); err == nil {
debug.Log("%v already exists", h)
be.sem.ReleaseToken()
return errors.New("key already exists")
}
debug.Log("InsertObject(%v, %v)", be.bucketName, objName)
// Set chunk size to zero to disable resumable uploads.
//
// With a non-zero chunk size (the default is
// googleapi.DefaultUploadChunkSize, 8MB), Insert will buffer data from
// rd in chunks of this size so it can upload these chunks in
// individual requests.
//
// This chunking allows the library to automatically handle network
// interruptions and re-upload only the last chunk rather than the full
// file.
//
// Unfortunately, this buffering doesn't play nicely with
// --limit-upload, which applies a rate limit to rd. This rate limit
// ends up only limiting the read from rd into the buffer rather than
// the network traffic itself. This results in poor network rate limit
// behavior, where individual chunks are written to the network at full
// bandwidth for several seconds, followed by several seconds of no
// network traffic as the next chunk is read through the rate limiter.
//
// By disabling chunking, rd is passed further down the request stack,
// where there is less (but some) buffering, which ultimately results
// in better rate limiting behavior.
//
// restic typically writes small blobs (4MB-30MB), so the resumable
// uploads are not providing significant benefit anyways.
cs := googleapi.ChunkSize(0)
info, err := be.service.Objects.Insert(be.bucketName,
&storage.Object{
Name: objName,
}).Media(rd, cs).Do()
be.sem.ReleaseToken()
if err != nil {
debug.Log("%v: err %#v: %v", objName, err, err)
return errors.Wrap(err, "service.Objects.Insert")
}
debug.Log("%v -> %v bytes", objName, info.Size)
return nil
}
// wrapReader wraps an io.ReadCloser to run an additional function on Close.
type wrapReader struct {
io.ReadCloser
f func()
}
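// Close closes the wrapped ReadCloser, then runs the callback f.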
func (wr wrapReader) Close() error {
err := wr.ReadCloser.Close()
wr.f()
return err
}
// Load returns a reader that yields the contents of the file at h at the
// given offset. If length is positive, only a portion of the file is
// returned. The returned reader must be closed after use.
func (be *Backend) Load(ctx context.Context, h restic.Handle, length int, offset int64) (io.ReadCloser, error) {
debug.Log("Load %v, length %v, offset %v from %v", h, length, offset, be.Filename(h))
if err := h.Valid(); err != nil {
return nil, err
}
if offset < 0 {
return nil, errors.New("offset is negative")
}
if length < 0 {
return nil, errors.Errorf("invalid length %d", length)
}
objName := be.Filename(h)
be.sem.GetToken()
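// Build the HTTP Range header value. Range end offsets are inclusive,
// so a read of length bytes starting at offset ends at offset+length-1.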
var byteRange string
if length > 0 {
byteRange = fmt.Sprintf("bytes=%d-%d", offset, offset+int64(length-1))
} else {
byteRange = fmt.Sprintf("bytes=%d-", offset)
}
req := be.service.Objects.Get(be.bucketName, objName)
// https://cloud.google.com/storage/docs/json_api/v1/parameters#range
req.Header().Set("Range", byteRange)
res, err := req.Download()
if err != nil {
be.sem.ReleaseToken()
return nil, err
}
closeRd := wrapReader{
ReadCloser: res.Body,
f: func() {
debug.Log("Close()")
be.sem.ReleaseToken()
},
}
return closeRd, err
}
// Stat returns information about a blob.
func (be *Backend) Stat(ctx context.Context, h restic.Handle) (bi restic.FileInfo, err error) {
debug.Log("%v", h)
objName := be.Filename(h)
be.sem.GetToken()
obj, err := be.service.Objects.Get(be.bucketName, objName).Do()
be.sem.ReleaseToken()
if err != nil {
debug.Log("GetObject() err %v", err)
return restic.FileInfo{}, errors.Wrap(err, "service.Objects.Get")
}
return restic.FileInfo{Size: int64(obj.Size)}, nil
}
// Test returns true if a blob of the given type and name exists in the backend.
func (be *Backend) Test(ctx context.Context, h restic.Handle) (bool, error) {
found := false
objName := be.Filename(h)
be.sem.GetToken()
_, err := be.service.Objects.Get(be.bucketName, objName).Do()
be.sem.ReleaseToken()
if err == nil {
found = true
}
// Any error, including a transient one, is treated as "not found"; the
// error itself is discarded.
return found, nil
}
// Remove removes the blob with the given name and type.
func (be *Backend) Remove(ctx context.Context, h restic.Handle) error {
objName := be.Filename(h)
be.sem.GetToken()
err := be.service.Objects.Delete(be.bucketName, objName).Do()
be.sem.ReleaseToken()
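// Deleting an object that is already gone (404) is treated as success.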
if er, ok := err.(*googleapi.Error); ok {
if er.Code == 404 {
err = nil
}
}
debug.Log("Remove(%v) at %v -> err %v", h, objName, err)
return errors.Wrap(err, "client.RemoveObject")
}
// List returns a channel that yields all names of blobs of type t. A
// goroutine is started for this. If ctx is cancelled, sending stops.
func (be *Backend) List(ctx context.Context, t restic.FileType) <-chan string {
debug.Log("listing %v", t)
ch := make(chan string)
prefix := be.Dirname(restic.Handle{Type: t})
// make sure prefix ends with a slash
if prefix[len(prefix)-1] != '/' {
prefix += "/"
}
go func() {
defer close(ch)
listReq := be.service.Objects.List(be.bucketName).Prefix(prefix).MaxResults(int64(be.listMaxItems))
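// Page through the listing; each response carries a NextPageToken that
// is empty on the final page.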
for {
be.sem.GetToken()
obj, err := listReq.Do()
be.sem.ReleaseToken()
if err != nil {
fmt.Fprintf(os.Stderr, "error listing %v: %v\n", prefix, err)
return
}
debug.Log("returned %v items", len(obj.Items))
for _, item := range obj.Items {
m := strings.TrimPrefix(item.Name, prefix)
if m == "" {
continue
}
select {
case ch <- path.Base(m):
case <-ctx.Done():
return
}
}
if obj.NextPageToken == "" {
break
}
listReq.PageToken(obj.NextPageToken)
}
}()
return ch
}
// removeKeys removes all keys of the given type from the backend.
func (be *Backend) removeKeys(ctx context.Context, t restic.FileType) error {
for key := range be.List(ctx, t) {
err := be.Remove(ctx, restic.Handle{Type: t, Name: key})
if err != nil {
return err
}
}
return nil
}
// Delete removes all restic keys in the bucket. It will not remove the bucket itself.
func (be *Backend) Delete(ctx context.Context) error {
alltypes := []restic.FileType{
restic.DataFile,
restic.KeyFile,
restic.LockFile,
restic.SnapshotFile,
restic.IndexFile}
for _, t := range alltypes {
err := be.removeKeys(ctx, t)
if err != nil {
return err
}
}
return be.Remove(ctx, restic.Handle{Type: restic.ConfigFile})
}
// Close does nothing.
func (be *Backend) Close() error { return nil }