diff --git a/changelog/unreleased/issue-4521 b/changelog/unreleased/issue-4521
new file mode 100644
index 000000000..709741d11
--- /dev/null
+++ b/changelog/unreleased/issue-4521
@@ -0,0 +1,21 @@
+Enhancement: Add config option to set Microsoft Blob Storage Access Tier
+
+The `azure.access-tier` option can be passed to restic (using `-o`) to
+specify the access tier for Microsoft Blob Storage objects created by restic.
+
+The access tier is passed as-is to Microsoft Blob Storage, so it needs to be
+understood by the API. The allowed values are `Hot`, `Cool`, or `Cold`.
+
+If unspecified, the default is inferred from the default configured on the
+storage account.
+
+You can mix access tiers in the same container, and the setting isn't
+stored in the restic repository, so be sure to specify it with each
+command that writes to Microsoft Blob Storage.
+
+There is no official `Archive` storage support in restic; use this option at
+your own risk. To restore any data, it is still necessary to manually warm up
+the required data in the `Archive` tier.
+
+https://github.com/restic/restic/issues/4521
+https://github.com/restic/restic/pull/5046
\ No newline at end of file
diff --git a/doc/030_preparing_a_new_repo.rst b/doc/030_preparing_a_new_repo.rst
index 0b35d1a1e..720bfc11d 100644
--- a/doc/030_preparing_a_new_repo.rst
+++ b/doc/030_preparing_a_new_repo.rst
@@ -568,6 +568,10 @@ The number of concurrent connections to the Azure Blob Storage service can be se
 ``-o azure.connections=10`` switch. By default, at most five parallel
 connections are established.
 
+The access tier of the blobs uploaded to the Azure Blob Storage service can be set with the
+``-o azure.access-tier=Cool`` switch. The allowed values are ``Hot``, ``Cool``, or ``Cold``.
+If unspecified, the default is inferred from the default configured on the storage account.
+
 Google Cloud Storage
 ********************
 
diff --git a/internal/backend/azure/azure.go b/internal/backend/azure/azure.go
index 8f5ee9f00..c1c049a94 100644
--- a/internal/backend/azure/azure.go
+++ b/internal/backend/azure/azure.go
@@ -37,6 +37,8 @@ type Backend struct {
 	prefix       string
 	listMaxItems int
 	layout.Layout
+
+	accessTier blob.AccessTier
 }
 
 const saveLargeSize = 256 * 1024 * 1024
@@ -124,17 +126,33 @@ func open(cfg Config, rt http.RoundTripper) (*Backend, error) {
 		}
 	}
 
+	var accessTier blob.AccessTier
+	// if the access tier is not supported, then we will not set the access tier; during the upload process,
+	// the value will be inferred from the default configured on the storage account.
+	for _, tier := range supportedAccessTiers() {
+		if strings.EqualFold(string(tier), cfg.AccessTier) {
+			accessTier = tier
+			debug.Log(" - using access tier %v", accessTier)
+			break
+		}
+	}
+
 	be := &Backend{
 		container:    client,
 		cfg:          cfg,
 		connections:  cfg.Connections,
 		Layout:       layout.NewDefaultLayout(cfg.Prefix, path.Join),
 		listMaxItems: defaultListMaxItems,
+		accessTier:   accessTier,
 	}
 
 	return be, nil
 }
 
+func supportedAccessTiers() []blob.AccessTier {
+	return []blob.AccessTier{blob.AccessTierHot, blob.AccessTierCool, blob.AccessTierCold, blob.AccessTierArchive}
+}
+
 // Open opens the Azure backend at specified container.
 func Open(_ context.Context, cfg Config, rt http.RoundTripper) (*Backend, error) {
 	return open(cfg, rt)
@@ -213,25 +231,39 @@ func (be *Backend) Path() string {
 	return be.prefix
 }
 
+// useAccessTier determines whether to apply the configured access tier to a given file.
+// For archive access tier, only data files are stored using that class; metadata
+// must remain instantly accessible.
+func (be *Backend) useAccessTier(h backend.Handle) bool {
+	notArchiveClass := !strings.EqualFold(be.cfg.AccessTier, "archive")
+	isDataFile := h.Type == backend.PackFile && !h.IsMetadata
+	return isDataFile || notArchiveClass
+}
+
 // Save stores data in the backend at the handle.
 func (be *Backend) Save(ctx context.Context, h backend.Handle, rd backend.RewindReader) error {
 	objName := be.Filename(h)
 
 	debug.Log("InsertObject(%v, %v)", be.cfg.AccountName, objName)
 
+	var accessTier blob.AccessTier
+	if be.useAccessTier(h) {
+		accessTier = be.accessTier
+	}
+
 	var err error
 	if rd.Length() < saveLargeSize {
 		// if it's smaller than 256 MiB, then just create the file directly from the reader
-		err = be.saveSmall(ctx, objName, rd)
+		err = be.saveSmall(ctx, objName, rd, accessTier)
 	} else {
 		// otherwise use the more complicated method
-		err = be.saveLarge(ctx, objName, rd)
+		err = be.saveLarge(ctx, objName, rd, accessTier)
 	}
 
 	return err
 }
 
-func (be *Backend) saveSmall(ctx context.Context, objName string, rd backend.RewindReader) error {
+func (be *Backend) saveSmall(ctx context.Context, objName string, rd backend.RewindReader, accessTier blob.AccessTier) error {
 	blockBlobClient := be.container.NewBlockBlobClient(objName)
 
 	// upload it as a new "block", use the base64 hash for the ID
@@ -252,11 +284,13 @@ func (be *Backend) saveSmall(ctx context.Context, objName string, rd backend.Rew
 	}
 
 	blocks := []string{id}
-	_, err = blockBlobClient.CommitBlockList(ctx, blocks, &blockblob.CommitBlockListOptions{})
+	_, err = blockBlobClient.CommitBlockList(ctx, blocks, &blockblob.CommitBlockListOptions{
+		Tier: &accessTier,
+	})
 	return errors.Wrap(err, "CommitBlockList")
 }
 
-func (be *Backend) saveLarge(ctx context.Context, objName string, rd backend.RewindReader) error {
+func (be *Backend) saveLarge(ctx context.Context, objName string, rd backend.RewindReader, accessTier blob.AccessTier) error {
 	blockBlobClient := be.container.NewBlockBlobClient(objName)
 
 	buf := make([]byte, 100*1024*1024)
@@ -303,7 +337,9 @@ func (be *Backend) saveLarge(ctx context.Context, objName string, rd backend.Rew
 		return errors.Errorf("wrote %d bytes instead of the expected %d bytes", uploadedBytes, rd.Length())
 	}
 
-	_, err := blockBlobClient.CommitBlockList(ctx, blocks, &blockblob.CommitBlockListOptions{})
+	_, err := blockBlobClient.CommitBlockList(ctx, blocks, &blockblob.CommitBlockListOptions{
+		Tier: &accessTier,
+	})
 
 	debug.Log("uploaded %d parts: %v", len(blocks), blocks)
 	return errors.Wrap(err, "CommitBlockList")
diff --git a/internal/backend/azure/config.go b/internal/backend/azure/config.go
index 7d69719ef..ee7ac51d8 100644
--- a/internal/backend/azure/config.go
+++ b/internal/backend/azure/config.go
@@ -22,7 +22,8 @@ type Config struct {
 	Container string
 	Prefix    string
 
-	Connections uint `option:"connections" help:"set a limit for the number of concurrent connections (default: 5)"`
+	Connections uint   `option:"connections" help:"set a limit for the number of concurrent connections (default: 5)"`
+	AccessTier  string `option:"access-tier" help:"set the access tier for the blob storage (default: inferred from the storage account defaults)"`
 }
 
 // NewConfig returns a new Config with the default values filled in.
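
For reference, a minimal usage sketch of the new option, following the command style already used in doc/030_preparing_a_new_repo.rst; the container name ``foo``, the backed-up path, and the credential placeholders are illustrative only and not part of this change:

    $ export AZURE_ACCOUNT_NAME=<account> AZURE_ACCOUNT_KEY=<key>
    $ restic -r azure:foo:/ -o azure.access-tier=Cool init
    $ restic -r azure:foo:/ -o azure.access-tier=Cool backup ~/work

Because the tier is not stored in the restic repository, the ``-o azure.access-tier=...`` option has to be repeated for every command that writes blobs to the container.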