Merge pull request #883 from trbs/s3_perf_cache_stat

Cache size of last ReadAt on S3 for performance

Commit 0c2834edb7
@@ -7,6 +7,7 @@ import (
     "path"
     "restic"
     "strings"
+    "sync"
 
     "restic/backend"
     "restic/errors"
@@ -24,6 +25,8 @@ type s3 struct {
     connChan   chan struct{}
     bucketname string
     prefix     string
+    cacheMutex   sync.RWMutex
+    cacheObjSize map[string]int64
 }
 
 // Open opens the S3 backend at bucket and region. The bucket is created if it
@@ -36,7 +39,12 @@ func Open(cfg Config) (restic.Backend, error) {
         return nil, errors.Wrap(err, "minio.New")
     }
 
-    be := &s3{client: client, bucketname: cfg.Bucket, prefix: cfg.Prefix}
+    be := &s3{
+        client:       client,
+        bucketname:   cfg.Bucket,
+        prefix:       cfg.Prefix,
+        cacheObjSize: make(map[string]int64),
+    }
 
     tr := &http.Transport{MaxIdleConnsPerHost: connLimit}
     client.SetCustomTransport(tr)
@@ -139,6 +147,7 @@ func (be *s3) Load(h restic.Handle, length int, offset int64) (io.ReadCloser, error) {
     }
 
     var obj *minio.Object
+    var size int64
 
     objName := be.s3path(h)
 
@@ -186,20 +195,30 @@ func (be *s3) Load(h restic.Handle, length int, offset int64) (io.ReadCloser, error) {
     }()
 
     // otherwise use a buffer with ReadAt
-    info, err := obj.Stat()
-    if err != nil {
-        _ = obj.Close()
-        return nil, errors.Wrap(err, "obj.Stat")
-    }
+    be.cacheMutex.RLock()
+    size, cacheHit := be.cacheObjSize[objName]
+    be.cacheMutex.RUnlock()
+
+    if !cacheHit {
+        info, err := obj.Stat()
+        if err != nil {
+            _ = obj.Close()
+            return nil, errors.Wrap(err, "obj.Stat")
+        }
+        size = info.Size
+        be.cacheMutex.Lock()
+        be.cacheObjSize[objName] = size
+        be.cacheMutex.Unlock()
+    }
 
-    if offset > info.Size {
+    if offset > size {
         _ = obj.Close()
         return nil, errors.New("offset larger than file size")
     }
 
     l := int64(length)
-    if offset+l > info.Size {
-        l = info.Size - offset
+    if offset+l > size {
+        l = size - offset
     }
 
     buf := make([]byte, l)
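For reference, here is a minimal, self-contained sketch of the pattern this commit applies: an RWMutex-guarded map caches object sizes so that repeated loads of the same object can skip the Stat round trip. The sizeCache type and the stat callback below are illustrative stand-ins, not part of restic's backend API.

package main

import (
    "fmt"
    "sync"
)

// sizeCache remembers the size of objects that have already been stat'ed.
type sizeCache struct {
    mu    sync.RWMutex
    sizes map[string]int64
}

func newSizeCache() *sizeCache {
    return &sizeCache{sizes: make(map[string]int64)}
}

// size returns the cached size for key, calling stat (standing in for an
// expensive operation such as an S3 HEAD request) only on a cache miss and
// remembering the result for later calls.
func (c *sizeCache) size(key string, stat func(string) (int64, error)) (int64, error) {
    c.mu.RLock()
    cached, ok := c.sizes[key]
    c.mu.RUnlock()
    if ok {
        return cached, nil
    }

    size, err := stat(key)
    if err != nil {
        return 0, err
    }

    c.mu.Lock()
    c.sizes[key] = size
    c.mu.Unlock()
    return size, nil
}

func main() {
    statCalls := 0
    stat := func(key string) (int64, error) {
        statCalls++ // one network round trip per call in the real backend
        return 4096, nil
    }

    cache := newSizeCache()
    for i := 0; i < 3; i++ {
        size, err := cache.size("data/example-object", stat)
        if err != nil {
            panic(err)
        }
        fmt.Println("size:", size)
    }
    fmt.Println("stat calls:", statCalls) // prints 1: the later loads hit the cache
}

As in the diff above, reads take only the shared lock and the exclusive lock is held just long enough to store a freshly fetched size, so concurrent loads do not serialize on the cache.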