
s3: Use low-level API with a Range header for Load

    benchmark                                         old ns/op      new ns/op      delta
    BenchmarkBackendMinio/LoadFile-4                  9213315        11001787       +19.41%
    BenchmarkBackendMinio/LoadPartialFile-4           4176619        3479707        -16.69%
    BenchmarkBackendMinio/LoadPartialFileOffset-4     4391521        3139214        -28.52%
    BenchmarkBackendS3/LoadFile-4                     2886070905     2505907501     -13.17%
    BenchmarkBackendS3/LoadPartialFile-4              762702722      735694398      -3.54%
    BenchmarkBackendS3/LoadPartialFileOffset-4        789724328      1108989142     +40.43%

    benchmark                                         old MB/s     new MB/s     speedup
    BenchmarkBackendMinio/LoadFile-4                  1821.21      1525.15      0.84x
    BenchmarkBackendMinio/LoadPartialFile-4           1004.49      1205.67      1.20x
    BenchmarkBackendMinio/LoadPartialFileOffset-4     955.34       1336.45      1.40x
    BenchmarkBackendS3/LoadFile-4                     5.81         6.70         1.15x
    BenchmarkBackendS3/LoadPartialFile-4              5.50         5.70         1.04x
    BenchmarkBackendS3/LoadPartialFileOffset-4        5.31         3.78         0.71x

    benchmark                                         old allocs     new allocs     delta
    BenchmarkBackendMinio/LoadFile-4                  406            204            -49.75%
    BenchmarkBackendMinio/LoadPartialFile-4           225            206            -8.44%
    BenchmarkBackendMinio/LoadPartialFileOffset-4     227            207            -8.81%
    BenchmarkBackendS3/LoadFile-4                     600            388            -35.33%
    BenchmarkBackendS3/LoadPartialFile-4              416            302            -27.40%
    BenchmarkBackendS3/LoadPartialFileOffset-4        417            303            -27.34%

    benchmark                                         old bytes     new bytes     delta
    BenchmarkBackendMinio/LoadFile-4                  29475         13904         -52.83%
    BenchmarkBackendMinio/LoadPartialFile-4           4218838       13958         -99.67%
    BenchmarkBackendMinio/LoadPartialFileOffset-4     4219175       14332         -99.66%
    BenchmarkBackendS3/LoadFile-4                     114152        97424         -14.65%
    BenchmarkBackendS3/LoadPartialFile-4              4265416       56212         -98.68%
    BenchmarkBackendS3/LoadPartialFileOffset-4        4266520       56308         -98.68%
Alexander Neumann 2017-05-13 21:18:14 +02:00
parent be0e53c07b
commit 1e0e6ee573
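
The change below replaces the client-side Seek/ReadAt logic with a single GET request that carries an HTTP Range header, so the server returns only the requested bytes. As an illustration (example offsets, not values taken from the benchmarks above): for offset 4096 and length 1024 the new code sends "Range: bytes=4096-5119" (the end offset is inclusive), and for length 0 it sends the open-ended "Range: bytes=4096-", which reads to the end of the object.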

@@ -1,7 +1,7 @@
 package s3
 
 import (
-    "bytes"
+    "fmt"
     "io"
     "path"
     "restic"
@@ -139,96 +139,26 @@ func (be *s3) Load(h restic.Handle, length int, offset int64) (io.ReadCloser, error) {
         return nil, errors.Errorf("invalid length %d", length)
     }
 
-    var obj *minio.Object
-    var size int64
-
     objName := be.Filename(h)
 
     // get token for connection
     <-be.connChan
 
-    obj, err := be.client.GetObject(be.bucketname, objName)
-    if err != nil {
-        debug.Log(" err %v", err)
-        // return token
-        be.connChan <- struct{}{}
-        return nil, errors.Wrap(err, "client.GetObject")
+    byteRange := fmt.Sprintf("bytes=%d-", offset)
+    if length > 0 {
+        byteRange = fmt.Sprintf("bytes=%d-%d", offset, offset+int64(length)-1)
     }
 
-    // if we're going to read the whole object, just pass it on.
-    if length == 0 {
-        debug.Log("Load %v: pass on object", h)
-
-        _, err = obj.Seek(offset, 0)
-        if err != nil {
-            _ = obj.Close()
-            // return token
-            be.connChan <- struct{}{}
-            return nil, errors.Wrap(err, "obj.Seek")
-        }
-
-        rd := wrapReader{
-            ReadCloser: obj,
-            f: func() {
-                debug.Log("Close()")
-                // return token
-                be.connChan <- struct{}{}
-            },
-        }
-        return rd, nil
-    }
-
-    defer func() {
-        // return token
-        be.connChan <- struct{}{}
-    }()
-
-    // otherwise use a buffer with ReadAt
-    be.cacheMutex.RLock()
-    size, cacheHit := be.cacheObjSize[objName]
-    be.cacheMutex.RUnlock()
-
-    if !cacheHit {
-        info, err := obj.Stat()
-        if err != nil {
-            _ = obj.Close()
-            return nil, errors.Wrap(err, "obj.Stat")
-        }
-        size = info.Size
-        be.cacheMutex.Lock()
-        be.cacheObjSize[objName] = size
-        be.cacheMutex.Unlock()
-    }
-
-    if offset > size {
-        _ = obj.Close()
-        return nil, errors.New("offset larger than file size")
-    }
-
-    l := int64(length)
-    if offset+l > size {
-        l = size - offset
-    }
-
-    buf := make([]byte, l)
-    n, err := obj.ReadAt(buf, offset)
-    debug.Log("Load %v: use buffer with ReadAt: %v, %v", h, n, err)
-    if err == io.EOF {
-        debug.Log("Load %v: shorten buffer %v -> %v", h, len(buf), n)
-        buf = buf[:n]
-        err = nil
-    }
-
-    if err != nil {
-        _ = obj.Close()
-        return nil, errors.Wrap(err, "obj.ReadAt")
-    }
-
-    return backend.Closer{Reader: bytes.NewReader(buf)}, nil
+    headers := minio.NewGetReqHeaders()
+    headers.Add("Range", byteRange)
+    debug.Log("Load(%v) send range %v", h, byteRange)
+
+    coreClient := minio.Core{be.client}
+    rd, _, err := coreClient.GetObject(be.bucketname, objName, headers)
+
+    // return token
+    be.connChan <- struct{}{}
+
+    return rd, err
 }
 
 // Stat returns information about a blob.
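
For reference, a minimal standalone sketch of the same technique outside the restic backend, using only the minio-go calls visible in this diff. The endpoint, credentials, bucket and object name are placeholders, and the Core.GetObject signature shown matches the minio-go version vendored at the time; newer releases pass options instead of request headers.

    package main

    import (
        "fmt"
        "io"
        "log"
        "os"

        minio "github.com/minio/minio-go"
    )

    func main() {
        // Placeholder endpoint and credentials, not real values.
        client, err := minio.New("s3.amazonaws.com", "ACCESS_KEY", "SECRET_KEY", true)
        if err != nil {
            log.Fatal(err)
        }

        // Same header construction as in the patch: an open-ended range when
        // the rest of the object is wanted, otherwise an inclusive byte range.
        offset, length := int64(4096), 1024
        byteRange := fmt.Sprintf("bytes=%d-", offset)
        if length > 0 {
            byteRange = fmt.Sprintf("bytes=%d-%d", offset, offset+int64(length)-1)
        }

        headers := minio.NewGetReqHeaders()
        headers.Add("Range", byteRange)

        // The low-level Core client forwards the Range header unchanged, so the
        // server returns only the requested bytes and no client-side buffering
        // or seeking is needed.
        core := minio.Core{client}
        rd, _, err := core.GetObject("examplebucket", "data/examplekey", headers)
        if err != nil {
            log.Fatal(err)
        }
        defer rd.Close()

        if _, err := io.Copy(os.Stdout, rd); err != nil {
            log.Fatal(err)
        }
    }

Letting the server enforce the range is what removes the multi-megabyte client-side buffers visible in the old/new bytes columns of the benchmarks above.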