package azure

import (
	"bytes"
	"context"
	"crypto/md5"
	"encoding/base64"
	"fmt"
	"hash"
	"io"
	"net/http"
	"path"
	"strings"

	"github.com/restic/restic/internal/backend/layout"
	"github.com/restic/restic/internal/backend/location"
	"github.com/restic/restic/internal/backend/util"
	"github.com/restic/restic/internal/debug"
	"github.com/restic/restic/internal/errors"
	"github.com/restic/restic/internal/restic"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob"
	azContainer "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container"
)

// Backend stores data on an Azure endpoint.
type Backend struct {
	cfg          Config
	container    *azContainer.Client
	connections  uint
	prefix       string
	listMaxItems int
	layout.Layout
}
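
// saveLargeSize is the size threshold below which Save uploads a blob as a
// single block; blobs of this size or larger are uploaded in multiple blocks.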
const saveLargeSize = 256 * 1024 * 1024
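
// defaultListMaxItems is the default number of blobs requested per page when
// listing the contents of the container.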
const defaultListMaxItems = 5000

// make sure that *Backend implements restic.Backend
var _ restic.Backend = &Backend{}
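
// NewFactory creates a location.Factory for the Azure backend.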
func NewFactory() location.Factory {
	return location.NewHTTPBackendFactory("azure", ParseConfig, location.NoPassword, Create, Open)
}

func open(cfg Config, rt http.RoundTripper) (*Backend, error) {
	debug.Log("open, config %#v", cfg)
	var client *azContainer.Client
	var err error

	var endpointSuffix string
	if cfg.EndpointSuffix != "" {
		endpointSuffix = cfg.EndpointSuffix
	} else {
		endpointSuffix = "core.windows.net"
	}
	url := fmt.Sprintf("https://%s.blob.%s/%s", cfg.AccountName, endpointSuffix, cfg.Container)
	opts := &azContainer.ClientOptions{
		ClientOptions: azcore.ClientOptions{
			Transport: &http.Client{Transport: rt},
		},
	}

	if cfg.AccountKey.String() != "" {
		// We have an account key: authenticate with a shared key credential.
		debug.Log(" - using account key")
		cred, err := azblob.NewSharedKeyCredential(cfg.AccountName, cfg.AccountKey.Unwrap())
		if err != nil {
			return nil, errors.Wrap(err, "NewSharedKeyCredential")
		}

		client, err = azContainer.NewClientWithSharedKeyCredential(url, cred, opts)
		if err != nil {
			return nil, errors.Wrap(err, "NewClientWithSharedKeyCredential")
		}
	} else if cfg.AccountSAS.String() != "" {
		// Get the client using the SAS token for authentication. This is
		// more involved than the account key path because the SDK wants the
		// full account URL rather than just the account name; as the SDK
		// does, we assume the default Azure endpoint.
		// https://github.com/Azure/azure-storage-blob-go/issues/130
		debug.Log(" - using sas token")
		sas := cfg.AccountSAS.Unwrap()

		// strip the '?' query prefix, if present
		if sas[0] == '?' {
			sas = sas[1:]
		}

		urlWithSAS := fmt.Sprintf("%s?%s", url, sas)

		client, err = azContainer.NewClientWithNoCredential(urlWithSAS, opts)
		if err != nil {
			return nil, errors.Wrap(err, "NewClientWithNoCredential")
		}
	} else {
		debug.Log(" - using DefaultAzureCredential")
		cred, err := azidentity.NewDefaultAzureCredential(nil)
		if err != nil {
			return nil, errors.Wrap(err, "NewDefaultAzureCredential")
		}

		client, err = azContainer.NewClient(url, cred, opts)
		if err != nil {
			return nil, errors.Wrap(err, "NewClient")
		}
	}

	be := &Backend{
		container:   client,
		cfg:         cfg,
		connections: cfg.Connections,
		Layout: &layout.DefaultLayout{
			Path: cfg.Prefix,
			Join: path.Join,
		},
		listMaxItems: defaultListMaxItems,
	}

	return be, nil
}

// Open opens the Azure backend at the specified container.
func Open(_ context.Context, cfg Config, rt http.RoundTripper) (*Backend, error) {
	return open(cfg, rt)
}

// Create opens the Azure backend at the specified container and creates the
// container if it does not exist yet.
func Create(ctx context.Context, cfg Config, rt http.RoundTripper) (*Backend, error) {
	be, err := open(cfg, rt)
	if err != nil {
		return nil, errors.Wrap(err, "open")
	}

	_, err = be.container.GetProperties(ctx, &azContainer.GetPropertiesOptions{})

	if err != nil && bloberror.HasCode(err, bloberror.ContainerNotFound) {
		_, err = be.container.Create(ctx, &azContainer.CreateOptions{})
		if err != nil {
			return nil, errors.Wrap(err, "container.Create")
		}
	} else if err != nil {
		return be, err
	}

	return be, nil
}

// SetListMaxItems sets the number of list items to load per request.
func (be *Backend) SetListMaxItems(i int) {
	be.listMaxItems = i
}

// IsNotExist returns true if the error is caused by a non-existing file.
func (be *Backend) IsNotExist(err error) bool {
	return bloberror.HasCode(err, bloberror.BlobNotFound)
}

// Join combines path components with slashes.
func (be *Backend) Join(p ...string) string {
	return path.Join(p...)
}
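
// Connections returns the number of concurrent connections configured for
// this backend.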
func (be *Backend) Connections() uint {
	return be.connections
}

// Location returns this backend's location (the account name and prefix).
func (be *Backend) Location() string {
	return be.Join(be.cfg.AccountName, be.cfg.Prefix)
}

// Hasher may return a hash function for calculating a content hash for the backend.
func (be *Backend) Hasher() hash.Hash {
	return md5.New()
}

// HasAtomicReplace returns whether Save() can atomically replace files.
func (be *Backend) HasAtomicReplace() bool {
	return true
}

// Path returns the path within the container that is used for this backend.
func (be *Backend) Path() string {
	return be.prefix
}

// Save stores data in the backend at the handle.
func (be *Backend) Save(ctx context.Context, h restic.Handle, rd restic.RewindReader) error {
	objName := be.Filename(h)

	debug.Log("InsertObject(%v, %v)", be.cfg.AccountName, objName)

	var err error
	if rd.Length() < saveLargeSize {
		// if it's smaller than 256 MiB, upload the file directly from the reader
		err = be.saveSmall(ctx, objName, rd)
	} else {
		// otherwise upload it in multiple staged blocks
		err = be.saveLarge(ctx, objName, rd)
	}

	return err
}
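
// saveSmall reads the blob data fully into memory, stages it as a single
// block and commits it.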
func (be *Backend) saveSmall(ctx context.Context, objName string, rd restic.RewindReader) error {
	blockBlobClient := be.container.NewBlockBlobClient(objName)

	// upload it as a new "block", use the base64 hash for the ID
	id := base64.StdEncoding.EncodeToString(rd.Hash())

	buf := make([]byte, rd.Length())
	_, err := io.ReadFull(rd, buf)
	if err != nil {
		return errors.Wrap(err, "ReadFull")
	}

	reader := bytes.NewReader(buf)
	_, err = blockBlobClient.StageBlock(ctx, id, streaming.NopCloser(reader), &blockblob.StageBlockOptions{
		TransactionalValidation: blob.TransferValidationTypeMD5(rd.Hash()),
	})
	if err != nil {
		return errors.Wrap(err, "StageBlock")
	}

	blocks := []string{id}
	_, err = blockBlobClient.CommitBlockList(ctx, blocks, &blockblob.CommitBlockListOptions{})
	return errors.Wrap(err, "CommitBlockList")
}
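
// saveLarge reads the data in chunks of up to 100 MiB, stages each chunk as a
// separate block verified by its MD5 hash, and finally commits the block list.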
func (be *Backend) saveLarge(ctx context.Context, objName string, rd restic.RewindReader) error {
	blockBlobClient := be.container.NewBlockBlobClient(objName)

	buf := make([]byte, 100*1024*1024)
	blocks := []string{}
	uploadedBytes := 0

	for {
		n, err := io.ReadFull(rd, buf)
		if err == io.ErrUnexpectedEOF {
			err = nil
		}

		if err == io.EOF {
			// end of file reached, no bytes have been read at all
			break
		}

		if err != nil {
			return errors.Wrap(err, "ReadFull")
		}

		buf = buf[:n]
		uploadedBytes += n

		// upload it as a new "block", use the base64 hash for the ID
		h := md5.Sum(buf)
		id := base64.StdEncoding.EncodeToString(h[:])

		reader := bytes.NewReader(buf)
		debug.Log("StageBlock %v with %d bytes", id, len(buf))
		_, err = blockBlobClient.StageBlock(ctx, id, streaming.NopCloser(reader), &blockblob.StageBlockOptions{
			TransactionalValidation: blob.TransferValidationTypeMD5(h[:]),
		})
		if err != nil {
			return errors.Wrap(err, "StageBlock")
		}

		blocks = append(blocks, id)
	}

	// sanity check
	if uploadedBytes != int(rd.Length()) {
		return errors.Errorf("wrote %d bytes instead of the expected %d bytes", uploadedBytes, rd.Length())
	}

	_, err := blockBlobClient.CommitBlockList(ctx, blocks, &blockblob.CommitBlockListOptions{})

	debug.Log("uploaded %d parts: %v", len(blocks), blocks)
	return errors.Wrap(err, "CommitBlockList")
}

// Load runs fn with a reader that yields the contents of the file at h at the
// given offset.
func (be *Backend) Load(ctx context.Context, h restic.Handle, length int, offset int64, fn func(rd io.Reader) error) error {
	return util.DefaultLoad(ctx, h, length, offset, be.openReader, fn)
}
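
// openReader downloads the requested byte range of the blob and returns the
// response body as a reader.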
func (be *Backend) openReader(ctx context.Context, h restic.Handle, length int, offset int64) (io.ReadCloser, error) {
	objName := be.Filename(h)
	blockBlobClient := be.container.NewBlobClient(objName)

	resp, err := blockBlobClient.DownloadStream(ctx, &blob.DownloadStreamOptions{
		Range: azblob.HTTPRange{
			Offset: offset,
			Count:  int64(length),
		},
	})
	if err != nil {
		return nil, err
	}

	return resp.Body, err
}

// Stat returns information about a blob.
func (be *Backend) Stat(ctx context.Context, h restic.Handle) (restic.FileInfo, error) {
	objName := be.Filename(h)
	blobClient := be.container.NewBlobClient(objName)

	props, err := blobClient.GetProperties(ctx, nil)
	if err != nil {
		return restic.FileInfo{}, errors.Wrap(err, "blob.GetProperties")
	}

	fi := restic.FileInfo{
		Size: *props.ContentLength,
		Name: h.Name,
	}
	return fi, nil
}

// Remove removes the blob with the given name and type.
func (be *Backend) Remove(ctx context.Context, h restic.Handle) error {
	objName := be.Filename(h)
	blob := be.container.NewBlobClient(objName)

	_, err := blob.Delete(ctx, &azblob.DeleteBlobOptions{})

	if be.IsNotExist(err) {
		return nil
	}

	return errors.Wrap(err, "Delete")
}

// List runs fn for each file in the backend which has the type t. When an
// error occurs (or fn returns an error), List stops and returns it.
func (be *Backend) List(ctx context.Context, t restic.FileType, fn func(restic.FileInfo) error) error {
	prefix, _ := be.Basedir(t)

	// make sure prefix ends with a slash
	if !strings.HasSuffix(prefix, "/") {
		prefix += "/"
	}

	max := int32(be.listMaxItems)

	opts := &azContainer.ListBlobsFlatOptions{
		MaxResults: &max,
		Prefix:     &prefix,
	}
	lister := be.container.NewListBlobsFlatPager(opts)

	for lister.More() {
		resp, err := lister.NextPage(ctx)
		if err != nil {
			return err
		}

		debug.Log("got %v objects", len(resp.Segment.BlobItems))

		for _, item := range resp.Segment.BlobItems {
			m := strings.TrimPrefix(*item.Name, prefix)
			if m == "" {
				continue
			}

			fi := restic.FileInfo{
				Name: path.Base(m),
				Size: *item.Properties.ContentLength,
			}

			if ctx.Err() != nil {
				return ctx.Err()
			}

			err := fn(fi)
			if err != nil {
				return err
			}

			if ctx.Err() != nil {
				return ctx.Err()
			}
		}
	}

	return ctx.Err()
}

// Delete removes all restic keys in the container. It will not remove the
// container itself.
func (be *Backend) Delete(ctx context.Context) error {
	return util.DefaultDelete(ctx, be)
}

// Close does nothing
func (be *Backend) Close() error { return nil }