s3: minor cleanups for archive storage class handling
commit a763a5c67d
parent 8ca58b487c
@@ -1,9 +1,12 @@
-Bugfix: Ignoring the s3.storage-class option for metadata when archive tier is specified
+Enhancement: Ignore s3.storage-class for metadata if archive tier is specified
 
-Restic now will save snapshot metadata to non-archive storage tier whatsoever,
-this will help avoid issues when data is being saved to archive storage class.
-It is not providing any support for cold storages in restic,
-only saving users from making backups unusable.
+There is no official cold storage support in restic, use this option at your
+own risk.
+
+Restic always stored all files on s3 using the specified `s3.storage-class`.
+Now, restic will store metadata using a non-archive storage tier to avoid
+problems when accessing a repository. To restore any data, it is still
+necessary to manually warm up the required data beforehand.
 
 https://github.com/restic/restic/issues/4583
-https://github.com/restic/restic/issues/3202
+https://github.com/restic/restic/pull/4584
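The changelog note above implies a manual warm-up step before any restore: pack files stored in GLACIER or DEEP_ARCHIVE must first be transitioned back to an instantly accessible tier. A minimal sketch of that step using minio-go (the same client library the backend uses) follows; the bucket name, the data/ prefix, and the use of RestoreObject/SetDays are illustrative assumptions, not part of this commit.

// Illustrative only: request a temporary S3 restore (warm-up) for archived
// pack files before running "restic restore". Assumes minio-go v7's
// RestoreObject API; bucket name and prefix are placeholders.
package main

import (
	"context"
	"log"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	client, err := minio.New("s3.amazonaws.com", &minio.Options{
		Creds:  credentials.NewEnvAWS(),
		Secure: true,
	})
	if err != nil {
		log.Fatal(err)
	}

	ctx := context.Background()
	// Data pack files live under the repository's data/ prefix; metadata is
	// kept on a non-archive tier by this change and needs no warm-up.
	for obj := range client.ListObjects(ctx, "my-restic-bucket", minio.ListObjectsOptions{
		Prefix:    "data/",
		Recursive: true,
	}) {
		if obj.Err != nil {
			log.Fatal(obj.Err)
		}
		req := minio.RestoreRequest{}
		req.SetDays(5) // keep the restored copies readable for a few days
		if err := client.RestoreObject(ctx, "my-restic-bucket", obj.Key, "", req); err != nil {
			log.Printf("warm up %v: %v", obj.Key, err)
		}
	}
}

Restoring from an archive tier is asynchronous; the objects only become readable once S3 reports the restore as complete.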
@@ -326,8 +326,10 @@ func (be *Backend) Path() string {
 }
 
 // useStorageClass returns whether file should be saved in the provided Storage Class
+// For archive storage classes, only data files are stored using that class; metadata
+// must remain instantly accessible.
 func (be *Backend) useStorageClass(h backend.Handle) bool {
-	var notArchiveClass bool = be.cfg.StorageClass != "GLACIER" && be.cfg.StorageClass != "DEEP_ARCHIVE"
+	notArchiveClass := be.cfg.StorageClass != "GLACIER" && be.cfg.StorageClass != "DEEP_ARCHIVE"
 	isDataFile := h.Type == backend.PackFile && !h.IsMetadata
 	return isDataFile || notArchiveClass
 }
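Under this rule, an archive storage class only ever applies to data pack files; tree packs and the other repository files (snapshots, index, config, keys, locks) keep a non-archive class so the repository stays readable. A small self-contained sketch of that decision, using simplified stand-ins for restic's backend types:

// Simplified stand-ins for restic's backend.Handle and file types, used only
// to illustrate which files would receive an archive storage class.
package main

import "fmt"

type FileType string

const (
	PackFile     FileType = "data"
	SnapshotFile FileType = "snapshot"
	IndexFile    FileType = "index"
)

type Handle struct {
	Type       FileType
	IsMetadata bool // true for tree (metadata) packs
}

// useArchiveClass mirrors useStorageClass when the configured class is
// GLACIER or DEEP_ARCHIVE: notArchiveClass is false, so only data packs qualify.
func useArchiveClass(h Handle) bool {
	return h.Type == PackFile && !h.IsMetadata
}

func main() {
	fmt.Println(useArchiveClass(Handle{Type: PackFile}))                   // true: data pack may be archived
	fmt.Println(useArchiveClass(Handle{Type: PackFile, IsMetadata: true})) // false: tree pack stays accessible
	fmt.Println(useArchiveClass(Handle{Type: SnapshotFile}))               // false: snapshot stays accessible
	fmt.Println(useArchiveClass(Handle{Type: IndexFile}))                  // false: index stays accessible
}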
@@ -336,15 +338,16 @@ func (be *Backend) useStorageClass(h backend.Handle) bool {
 func (be *Backend) Save(ctx context.Context, h backend.Handle, rd backend.RewindReader) error {
 	objName := be.Filename(h)
 
-	opts := minio.PutObjectOptions{ContentType: "application/octet-stream"}
+	opts := minio.PutObjectOptions{
+		ContentType: "application/octet-stream",
+		// the only option with the high-level api is to let the library handle the checksum computation
+		SendContentMd5: true,
+		// only use multipart uploads for very large files
+		PartSize: 200 * 1024 * 1024,
+	}
 	if be.useStorageClass(h) {
 		opts.StorageClass = be.cfg.StorageClass
 	}
-	// the only option with the high-level api is to let the library handle the checksum computation
-	opts.SendContentMd5 = true
-	// only use multipart uploads for very large files
-	opts.PartSize = 200 * 1024 * 1024
 
 	info, err := be.client.PutObject(ctx, be.cfg.Bucket, objName, io.NopCloser(rd), int64(rd.Length()), opts)
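For reference, a standalone sketch of the same minio-go upload path with the options the new struct literal sets up front; the endpoint, bucket, object key, and payload are placeholders, and the hard-coded GLACIER class stands in for be.cfg.StorageClass on a data pack file.

// Illustrative only: upload a blob with the options the backend now sets in
// one PutObjectOptions literal. Names and payload are placeholders.
package main

import (
	"bytes"
	"context"
	"log"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	client, err := minio.New("s3.amazonaws.com", &minio.Options{
		Creds:  credentials.NewEnvAWS(),
		Secure: true,
	})
	if err != nil {
		log.Fatal(err)
	}

	payload := bytes.NewReader([]byte("example pack file contents"))
	opts := minio.PutObjectOptions{
		ContentType:    "application/octet-stream",
		SendContentMd5: true,              // let the library compute the checksum
		PartSize:       200 * 1024 * 1024, // only multipart-upload very large files
	}
	// A data pack file may additionally get the archive class:
	opts.StorageClass = "GLACIER"

	info, err := client.PutObject(context.Background(), "my-restic-bucket",
		"data/00/example", payload, int64(payload.Len()), opts)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("uploaded %d bytes", info.Size)
}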