
Merge pull request #4584 from elkemper/fix-stop-archiving-metadata

S3: Don't archive metadata files on S3 Glacier
Michael Eischer 2024-01-20 10:30:37 +00:00 committed by GitHub
commit 6696195f38
2 changed files with 31 additions and 6 deletions


@@ -0,0 +1,12 @@
Enhancement: Ignore s3.storage-class for metadata if archive tier is specified
There is no official cold storage support in restic; use this option at your
own risk.

Previously, restic stored all files on S3 using the specified `s3.storage-class`.
Now, restic will store metadata using a non-archive storage tier to avoid
problems when accessing a repository. To restore any data, it is still
necessary to manually warm up the required data beforehand.
https://github.com/restic/restic/issues/4583
https://github.com/restic/restic/pull/4584
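
For illustration, the storage class is selected through restic's extended options; a hypothetical invocation (the repository URL and the GLACIER value are only placeholders) might look like:

    restic -o s3.storage-class=GLACIER -r s3:s3.amazonaws.com/bucket/repo backup ~/data

With an archive class such as GLACIER, this change means only pack data is uploaded using that class, while metadata files keep the bucket's default storage class.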


@@ -325,16 +325,29 @@ func (be *Backend) Path() string {
	return be.cfg.Prefix
}

// useStorageClass returns whether the file should be saved in the provided storage class.
// For archive storage classes, only data files are stored using that class; metadata
// must remain instantly accessible.
func (be *Backend) useStorageClass(h backend.Handle) bool {
	notArchiveClass := be.cfg.StorageClass != "GLACIER" && be.cfg.StorageClass != "DEEP_ARCHIVE"
	isDataFile := h.Type == backend.PackFile && !h.IsMetadata
	return isDataFile || notArchiveClass
}
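
As a concrete illustration of this predicate, here is a minimal sketch of a table-driven test. It is not part of the PR; it assumes it sits in the s3 package alongside the backend and uses the usual restic import path for the backend package:

package s3

import (
	"testing"

	"github.com/restic/restic/internal/backend"
)

// TestUseStorageClassGlacier sketches the expected behaviour for an archive tier:
// only plain data pack files keep the GLACIER class, metadata does not.
func TestUseStorageClassGlacier(t *testing.T) {
	be := &Backend{cfg: Config{StorageClass: "GLACIER"}}

	tests := []struct {
		h    backend.Handle
		want bool
	}{
		// data pack files are stored using the archive storage class
		{backend.Handle{Type: backend.PackFile}, true},
		// tree packs are flagged as metadata and must stay instantly accessible
		{backend.Handle{Type: backend.PackFile, IsMetadata: true}, false},
		// index files (like all other metadata) fall back to the bucket default
		{backend.Handle{Type: backend.IndexFile}, false},
	}

	for _, test := range tests {
		if got := be.useStorageClass(test.h); got != test.want {
			t.Errorf("useStorageClass(%v) = %v, want %v", test.h, got, test.want)
		}
	}
}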
// Save stores data in the backend at the handle.
func (be *Backend) Save(ctx context.Context, h backend.Handle, rd backend.RewindReader) error {
	objName := be.Filename(h)

	opts := minio.PutObjectOptions{
		ContentType: "application/octet-stream",
		// the only option with the high-level api is to let the library handle the checksum computation
		SendContentMd5: true,
		// only use multipart uploads for very large files
		PartSize: 200 * 1024 * 1024,
	}
	if be.useStorageClass(h) {
		opts.StorageClass = be.cfg.StorageClass
	}

	info, err := be.client.PutObject(ctx, be.cfg.Bucket, objName, io.NopCloser(rd), int64(rd.Length()), opts)