lib/scanner: Speed up weak hash

The rolling version of adler32 is just a wrapper around the standard
hash/adler32 when used in a non-rolling fashion, but it's inefficient as
it allocates a new hash instance for every Write(). This uses the
default version instead in the block hasher, and adds a test to verify
that the results are the same as they were before. It reduces
allocations by 88% and increases speed by about 5%.

	benchmark               old ns/op     new ns/op     delta
	BenchmarkHashFile-8     64434698      61303647      -4.86%

	benchmark               old MB/s     new MB/s     speedup
	BenchmarkHashFile-8     276.65       290.78       1.05x

	benchmark               old allocs     new allocs     delta
	BenchmarkHashFile-8     1238           150            -87.88%

	benchmark               old bytes     new bytes     delta
	BenchmarkHashFile-8     17877363      49292         -99.72%
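
The weak hash measured here is plain Adler-32 over each block, so the
standard library can produce it either as a one-shot checksum or through a
single reusable digest. A minimal sketch of both entry points (illustrative
only, not part of the commit; the "contents" input matches the test data
further down):

	package main

	import (
		"fmt"
		"hash/adler32"
	)

	func main() {
		block := []byte("contents")

		// One-shot checksum: no digest state survives the call.
		fmt.Printf("0x%08x\n", adler32.Checksum(block)) // 0x0f3a036f

		// Streaming form: a single digest absorbs any number of Writes,
		// which is what makes it cheap inside a block-hashing loop.
		h := adler32.New()
		h.Write(block)
		fmt.Printf("0x%08x\n", h.Sum32()) // also 0x0f3a036f
	}
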
Jakob Borg 2017-01-18 10:33:17 +01:00
parent f36f00e87b
commit 9b1c592fb7
3 changed files with 27 additions and 10 deletions

lib/scanner/blocks.go

@@ -9,9 +9,9 @@ package scanner
 import (
 	"bytes"
 	"fmt"
+	"hash/adler32"
 	"io"
 
-	"github.com/chmduquesne/rollinghash/adler32"
 	"github.com/syncthing/syncthing/lib/protocol"
 	"github.com/syncthing/syncthing/lib/sha256"
 )
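
Only the import changes in blocks.go: both packages are imported under the
name adler32, and both New() constructors return a writer with Write and
Sum32, so the surrounding hashing code can stay untouched. As a rough
illustration of feeding the strong and weak hash in one pass, here is a
sketch under assumptions, not the scanner's real code: hashBlock, the
io.MultiWriter wiring and the use of crypto/sha256 instead of lib/sha256
are all made up for the example.

	package main

	import (
		"crypto/sha256"
		"fmt"
		"hash/adler32"
		"io"
		"strings"
	)

	// hashBlock reads up to blockSize bytes from r and returns the block's
	// strong (SHA-256) and weak (Adler-32) hashes plus the byte count.
	func hashBlock(r io.Reader, blockSize int64) (strong []byte, weak uint32, n int64, err error) {
		sh := sha256.New()
		wh := adler32.New() // one digest per block, reused for every Write

		n, err = io.CopyN(io.MultiWriter(sh, wh), r, blockSize)
		if err == io.EOF {
			err = nil // a short final block is expected, not an error
		}
		return sh.Sum(nil), wh.Sum32(), n, err
	}

	func main() {
		strong, weak, n, err := hashBlock(strings.NewReader("contents"), 1024)
		if err != nil {
			panic(err)
		}
		fmt.Printf("%d bytes  sha256=%x  weak=0x%08x\n", n, strong, weak)
	}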

lib/scanner/blocks_test.go

@@ -18,38 +18,51 @@ var blocksTestData = []struct {
 	data      []byte
 	blocksize int
 	hash      []string
+	weakhash  []uint32
 }{
 	{[]byte(""), 1024, []string{
-		"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"}},
+		"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"},
+		[]uint32{0},
+	},
 	{[]byte("contents"), 1024, []string{
-		"d1b2a59fbea7e20077af9f91b27e95e865061b270be03ff539ab3b73587882e8"}},
+		"d1b2a59fbea7e20077af9f91b27e95e865061b270be03ff539ab3b73587882e8"},
+		[]uint32{0x0f3a036f},
+	},
 	{[]byte("contents"), 9, []string{
-		"d1b2a59fbea7e20077af9f91b27e95e865061b270be03ff539ab3b73587882e8"}},
+		"d1b2a59fbea7e20077af9f91b27e95e865061b270be03ff539ab3b73587882e8"},
+		[]uint32{0x0f3a036f},
+	},
 	{[]byte("contents"), 8, []string{
-		"d1b2a59fbea7e20077af9f91b27e95e865061b270be03ff539ab3b73587882e8"}},
+		"d1b2a59fbea7e20077af9f91b27e95e865061b270be03ff539ab3b73587882e8"},
+		[]uint32{0x0f3a036f},
+	},
 	{[]byte("contents"), 7, []string{
 		"ed7002b439e9ac845f22357d822bac1444730fbdb6016d3ec9432297b9ec9f73",
 		"043a718774c572bd8a25adbeb1bfcd5c0256ae11cecf9f9c3f925d0e52beaf89"},
+		[]uint32{0x0bcb02fc, 0x00740074},
 	},
 	{[]byte("contents"), 3, []string{
 		"1143da2bc54c495c4be31d3868785d39ffdfd56df5668f0645d8f14d47647952",
 		"e4432baa90819aaef51d2a7f8e148bf7e679610f3173752fabb4dcb2d0f418d3",
 		"44ad63f60af0f6db6fdde6d5186ef78176367df261fa06be3079b6c80c8adba4"},
+		[]uint32{0x02780141, 0x02970148, 0x015d00e8},
 	},
 	{[]byte("conconts"), 3, []string{
 		"1143da2bc54c495c4be31d3868785d39ffdfd56df5668f0645d8f14d47647952",
 		"1143da2bc54c495c4be31d3868785d39ffdfd56df5668f0645d8f14d47647952",
 		"44ad63f60af0f6db6fdde6d5186ef78176367df261fa06be3079b6c80c8adba4"},
+		[]uint32{0x02780141, 0x02780141, 0x015d00e8},
 	},
 	{[]byte("contenten"), 3, []string{
 		"1143da2bc54c495c4be31d3868785d39ffdfd56df5668f0645d8f14d47647952",
 		"e4432baa90819aaef51d2a7f8e148bf7e679610f3173752fabb4dcb2d0f418d3",
 		"e4432baa90819aaef51d2a7f8e148bf7e679610f3173752fabb4dcb2d0f418d3"},
+		[]uint32{0x02780141, 0x02970148, 0x02970148},
 	},
 }
 
 func TestBlocks(t *testing.T) {
-	for _, test := range blocksTestData {
+	for testNo, test := range blocksTestData {
 		buf := bytes.NewBuffer(test.data)
 		blocks, err := Blocks(buf, test.blocksize, -1, nil)
@@ -58,12 +71,12 @@ func TestBlocks(t *testing.T) {
 		}
 		if l := len(blocks); l != len(test.hash) {
-			t.Fatalf("Incorrect number of blocks %d != %d", l, len(test.hash))
+			t.Fatalf("%d: Incorrect number of blocks %d != %d", testNo, l, len(test.hash))
 		} else {
 			i := 0
 			for off := int64(0); off < int64(len(test.data)); off += int64(test.blocksize) {
 				if blocks[i].Offset != off {
-					t.Errorf("Incorrect offset for block %d: %d != %d", i, blocks[i].Offset, off)
+					t.Errorf("%d/%d: Incorrect offset %d != %d", testNo, i, blocks[i].Offset, off)
 				}
 				bs := test.blocksize
@@ -71,10 +84,13 @@ func TestBlocks(t *testing.T) {
 					bs = rem
 				}
 				if int(blocks[i].Size) != bs {
-					t.Errorf("Incorrect length for block %d: %d != %d", i, blocks[i].Size, bs)
+					t.Errorf("%d/%d: Incorrect length %d != %d", testNo, i, blocks[i].Size, bs)
 				}
 				if h := fmt.Sprintf("%x", blocks[i].Hash); h != test.hash[i] {
-					t.Errorf("Incorrect block hash %q != %q", h, test.hash[i])
+					t.Errorf("%d/%d: Incorrect block hash %q != %q", testNo, i, h, test.hash[i])
 				}
+				if h := blocks[i].WeakHash; h != test.weakhash[i] {
+					t.Errorf("%d/%d: Incorrect block weakhash 0x%08x != 0x%08x", testNo, i, h, test.weakhash[i])
+				}
 				i++
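
The new []uint32 expectations are plain Adler-32 values and can be
reproduced with the standard library alone. A quick verification sketch,
not part of the commit:

	package main

	import (
		"fmt"
		"hash/adler32"
	)

	func main() {
		// "contents" cut into 3-byte blocks, as in the blocksize-3 test case.
		for _, block := range []string{"con", "ten", "ts"} {
			fmt.Printf("%q -> 0x%08x\n", block, adler32.Checksum([]byte(block)))
		}
		// Prints 0x02780141, 0x02970148 and 0x015d00e8, matching the
		// []uint32 column added to blocksTestData above.
	}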

lib/scanner/walk_test.go

@@ -428,6 +428,7 @@ func BenchmarkHashFile(b *testing.B) {
 		}
 	}
 	b.SetBytes(testdataSize)
+	b.ReportAllocs()
 }
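
b.ReportAllocs() is what makes the allocs/op and bytes/op columns show up
in the output above without running go test with -benchmem. A minimal
benchmark that could live in any _test.go file and uses the same calls
(illustrative only; the package name, BenchmarkWeakHash and the 128 KiB
block size are assumptions, not part of the commit):

	package weakhash

	import (
		"hash/adler32"
		"testing"
	)

	// BenchmarkWeakHash measures a one-shot Adler-32 over a single 128 KiB block.
	func BenchmarkWeakHash(b *testing.B) {
		block := make([]byte, 128<<10)
		b.SetBytes(int64(len(block)))
		b.ReportAllocs() // report allocs/op and bytes/op alongside ns/op
		for i := 0; i < b.N; i++ {
			adler32.Checksum(block)
		}
	}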