Use fewer hasher routines when there are many folders.

Jakob Borg 2015-04-29 20:46:32 +02:00
parent a9c31652b6
commit 756c5a2604
3 changed files with 28 additions and 7 deletions

View File

@@ -17,6 +17,7 @@ import (
 	"net"
 	"os"
 	"path/filepath"
+	"runtime"
 	"strings"
 	stdsync "sync"
 	"time"
@@ -1211,7 +1212,7 @@ nextSub:
 		CurrentFiler:  cFiler{m, folder},
 		IgnorePerms:   folderCfg.IgnorePerms,
 		AutoNormalize: folderCfg.AutoNormalize,
-		Hashers:       folderCfg.Hashers,
+		Hashers:       m.numHashers(folder),
 		ShortID:       m.shortID,
 	}
@@ -1321,6 +1322,27 @@ nextSub:
 	return nil
 }
 
+// numHashers returns the number of hasher routines to use for a given folder,
+// taking into account configuration and available CPU cores.
+func (m *Model) numHashers(folder string) int {
+	m.fmut.Lock()
+	folderCfg := m.folderCfgs[folder]
+	numFolders := len(m.folderCfgs)
+	m.fmut.Unlock()
+
+	if folderCfg.Hashers > 0 {
+		// Specific value set in the config, use that.
+		return folderCfg.Hashers
+	}
+
+	if perFolder := runtime.GOMAXPROCS(-1) / numFolders; perFolder > 0 {
+		// We have CPUs to spare, divide them per folder.
+		return perFolder
+	}
+
+	return 1
+}
+
 // clusterConfig returns a ClusterConfigMessage that is correct for the given peer device
 func (m *Model) clusterConfig(device protocol.DeviceID) protocol.ClusterConfigMessage {
 	cm := protocol.ClusterConfigMessage{
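
The heuristic is easy to see in isolation. Below is a minimal standalone sketch, assuming the same division rule as numHashers above; the numHashersFor helper and the sample folder counts are illustrative, not part of the commit:

package main

import (
	"fmt"
	"runtime"
)

// numHashersFor mirrors the heuristic above: an explicit per-folder
// setting wins; otherwise the available cores are divided evenly
// across folders, with a floor of one hasher per folder.
func numHashersFor(configured, numFolders int) int {
	if configured > 0 {
		return configured
	}
	if perFolder := runtime.GOMAXPROCS(-1) / numFolders; perFolder > 0 {
		return perFolder
	}
	return 1
}

func main() {
	// With GOMAXPROCS=8 this prints 4, 1 and 1: two folders get four
	// hashers each; with eight or twenty folders, each gets one.
	for _, n := range []int{2, 8, 20} {
		fmt.Printf("%d folders: %d hashers each\n", n, numHashersFor(0, n))
	}
}

The integer division is what caps the total: with more folders than cores, perFolder is zero and the floor of one hasher per folder applies, instead of every folder spawning NumCPU routines as before.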

View File

@@ -89,14 +89,9 @@ func (w *Walker) Walk() (chan protocol.FileInfo, error) {
 		return nil, err
 	}
 
-	workers := w.Hashers
-	if workers < 1 {
-		workers = runtime.NumCPU()
-	}
-
 	files := make(chan protocol.FileInfo)
 	hashedFiles := make(chan protocol.FileInfo)
-	newParallelHasher(w.Dir, w.BlockSize, workers, hashedFiles, files)
+	newParallelHasher(w.Dir, w.BlockSize, w.Hashers, hashedFiles, files)
 
 	go func() {
 		hashFiles := w.walkAndHashFiles(files)
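
With the runtime.NumCPU() fallback removed from Walk, a Walker constructed directly must now be given a positive Hashers count, which is why the tests in the next file set Hashers: 2. newParallelHasher itself is not shown in this diff; the sketch below is one plausible shape, assuming it fans a fixed number of hashing goroutines out over the input channel. All names here except the worker count are hypothetical:

package main

import (
	"fmt"
	"sync"
)

// file stands in for protocol.FileInfo; illustration only.
type file struct{ name string }

// parallelHasherSketch starts `workers` goroutines that consume files
// from in, hash them, and emit them on out, closing out once every
// worker has finished. With workers == 0 nothing ever drains in and
// the pipeline stalls, which is why a positive count now matters.
func parallelHasherSketch(workers int, in <-chan file, out chan<- file) {
	var wg sync.WaitGroup
	wg.Add(workers)
	for i := 0; i < workers; i++ {
		go func() {
			defer wg.Done()
			for f := range in {
				// ... compute the block hashes for f here ...
				out <- f
			}
		}()
	}
	go func() {
		wg.Wait()
		close(out)
	}()
}

func main() {
	in := make(chan file)
	out := make(chan file)
	parallelHasherSketch(2, in, out)
	go func() {
		in <- file{name: "a"}
		in <- file{name: "b"}
		close(in)
	}()
	for f := range out {
		fmt.Println(f.name)
	}
}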

View File

@@ -63,6 +63,7 @@ func TestWalkSub(t *testing.T) {
 		Subs:      []string{"dir2"},
 		BlockSize: 128 * 1024,
 		Matcher:   ignores,
+		Hashers:   2,
 	}
 	fchan, err := w.Walk()
 	var files []protocol.FileInfo
@@ -99,6 +100,7 @@ func TestWalk(t *testing.T) {
 		Dir:       "testdata",
 		BlockSize: 128 * 1024,
 		Matcher:   ignores,
+		Hashers:   2,
 	}
 
 	fchan, err := w.Walk()
@@ -122,6 +124,7 @@ func TestWalkError(t *testing.T) {
 	w := Walker{
 		Dir:       "testdata-missing",
 		BlockSize: 128 * 1024,
+		Hashers:   2,
 	}
 
 	_, err := w.Walk()
@@ -280,6 +283,7 @@ func walkDir(dir string) ([]protocol.FileInfo, error) {
 		Dir:           dir,
 		BlockSize:     128 * 1024,
 		AutoNormalize: true,
+		Hashers:       2,
 	}
 
 	fchan, err := w.Walk()