Smarter limit on size of pull block queue

Jakob Borg 2014-09-13 10:57:36 +02:00
parent c5243cd4d5
commit bf7a128142
3 changed files with 16 additions and 7 deletions

@@ -255,7 +255,7 @@ func restGetNeed(m *model.Model, w http.ResponseWriter, r *http.Request) {
 	var qs = r.URL.Query()
 	var repo = qs.Get("repo")
 
-	files := m.NeedFilesRepo(repo)
+	files := m.NeedFilesRepoLimited(repo, 100, 2500) // max 100 files or 2500 blocks
 
 	w.Header().Set("Content-Type", "application/json; charset=utf-8")
 	json.NewEncoder(w).Encode(files)
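For illustration, a minimal client-side sketch of what this cap means for the GUI. Assumptions not in the diff: that the handler above is mounted at /rest/need on a GUI listening at localhost:8080, and that the fileInfo struct below is a hypothetical stand-in for the fields of protocol.FileInfo a client cares about.

package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

// fileInfo is a hypothetical stand-in; only the name is decoded here.
type fileInfo struct {
	Name string
}

func main() {
	// Assumption: restGetNeed is served at /rest/need?repo=<id>.
	resp, err := http.Get("http://localhost:8080/rest/need?repo=default")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var files []fileInfo
	if err := json.NewDecoder(resp.Body).Decode(&files); err != nil {
		panic(err)
	}
	// After this commit the server truncates the answer, so len(files) <= 100.
	fmt.Printf("%d needed files reported\n", len(files))
}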

@@ -324,15 +324,19 @@ func (m *Model) NeedSize(repo string) (files int, bytes int64) {
 	return
 }
 
-// NeedFiles returns the list of currently needed files
-func (m *Model) NeedFilesRepo(repo string) []protocol.FileInfo {
+// NeedFilesRepoLimited returns the list of currently needed files, stopping
+// at maxFiles files or maxBlocks blocks. Limits <= 0 are ignored.
+func (m *Model) NeedFilesRepoLimited(repo string, maxFiles, maxBlocks int) []protocol.FileInfo {
 	m.rmut.RLock()
 	defer m.rmut.RUnlock()
+	nblocks := 0
 	if rf, ok := m.repoFiles[repo]; ok {
-		fs := make([]protocol.FileInfo, 0, indexBatchSize)
+		fs := make([]protocol.FileInfo, 0, maxFiles)
 		rf.WithNeed(protocol.LocalNodeID, func(f protocol.FileIntf) bool {
-			fs = append(fs, f.(protocol.FileInfo))
-			return len(fs) < indexBatchSize
+			fi := f.(protocol.FileInfo)
+			fs = append(fs, fi)
+			nblocks += len(fi.Blocks)
+			return (maxFiles <= 0 || len(fs) < maxFiles) && (maxBlocks <= 0 || nblocks < maxBlocks)
 		})
 		return fs
 	}
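The stopping predicate is the heart of the change. Here is a self-contained sketch of the same logic, with a stand-in type instead of protocol.FileInfo; all names below are illustrative, not from the codebase.

package main

import "fmt"

// fakeFile stands in for protocol.FileInfo; only the block count matters here.
type fakeFile struct {
	name   string
	blocks int
}

// needLimited mirrors the loop in NeedFilesRepoLimited: collect files until
// either limit is reached, where a limit <= 0 disables that axis.
func needLimited(all []fakeFile, maxFiles, maxBlocks int) []fakeFile {
	fs := make([]fakeFile, 0, len(all))
	nblocks := 0
	for _, f := range all {
		fs = append(fs, f)
		nblocks += f.blocks
		if !((maxFiles <= 0 || len(fs) < maxFiles) && (maxBlocks <= 0 || nblocks < maxBlocks)) {
			break
		}
	}
	return fs
}

func main() {
	files := []fakeFile{{"a", 10}, {"b", 20}, {"c", 2000}, {"d", 1}}
	fmt.Println(len(needLimited(files, 2, 0)))  // 2: file limit hit
	fmt.Println(len(needLimited(files, 0, 25))) // 2: block limit crossed at "b", which is still included
	fmt.Println(len(needLimited(files, 0, 0)))  // 4: both limits disabled
}

Note that, like the real callback, this appends before testing the limits, so the file that crosses a limit is still returned; the limits are soft caps, not hard cutoffs.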

@@ -61,6 +61,11 @@ type openFile struct {
 
 type activityMap map[protocol.NodeID]int
 
+// Queue about this many blocks each puller iteration. More blocks means
+// longer iterations and better efficiency; fewer blocks reduce memory
+// consumption. 1000 blocks ~= 1000 * 128 KiB ~= 125 MiB of data.
+const pullIterationBlocks = 1000
+
 func (m activityMap) leastBusyNode(availability []protocol.NodeID, isValid func(protocol.NodeID) bool) protocol.NodeID {
 	var low int = 2<<30 - 1
 	var selected protocol.NodeID
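A quick check of the sizing arithmetic in that comment, assuming the 128 KiB standard block size it refers to:

package main

import "fmt"

func main() {
	const blockSize = 128 << 10 // 128 KiB per block, as assumed in the comment
	const pullIterationBlocks = 1000

	bytes := pullIterationBlocks * blockSize
	fmt.Printf("%d blocks * 128 KiB = %d MiB\n", pullIterationBlocks, bytes>>20)
	// Output: 1000 blocks * 128 KiB = 125 MiB
}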
@@ -702,7 +707,7 @@ func (p *puller) queueNeededBlocks(prevVer uint64) (uint64, int) {
 	queued := 0
 	files := make([]protocol.FileInfo, 0, indexBatchSize)
 
-	for _, f := range p.model.NeedFilesRepo(p.repoCfg.ID) {
+	for _, f := range p.model.NeedFilesRepoLimited(p.repoCfg.ID, indexBatchSize, pullIterationBlocks) {
 		if _, ok := p.openFiles[f.Name]; ok {
 			continue
 		}
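As the comment on pullIterationBlocks suggests, queueNeededBlocks runs once per puller iteration, so files beyond the cap are not dropped; whatever is left over is simply queued on a later iteration. The limit bounds per-iteration memory without changing what eventually gets pulled.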