// Copyright (C) 2014 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.

package scanner

import (
	"context"
	"errors"

	"github.com/syncthing/syncthing/lib/fs"
	"github.com/syncthing/syncthing/lib/protocol"
	"github.com/syncthing/syncthing/lib/sync"
)

// HashFile hashes the file and returns a list of blocks representing the file.
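//
// A minimal usage sketch; the folder ID, filesystem root, block size and the
// nil progress counter below are illustrative assumptions rather than values
// taken from this package:
//
//	filesys := fs.NewFilesystem(fs.FilesystemTypeBasic, "/data/folder")
//	blocks, err := HashFile(context.Background(), "default", filesys,
//		"some/file.txt", protocol.MinBlockSize, nil, true)
//	if err != nil {
//		// The file was unreadable or changed while it was being hashed.
//	}
//	_ = blocks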
func HashFile(ctx context.Context, folderID string, fs fs.Filesystem, path string, blockSize int, counter Counter, useWeakHashes bool) ([]protocol.BlockInfo, error) {
	fd, err := fs.Open(path)
	if err != nil {
		l.Debugln("open:", err)
		return nil, err
	}
	defer fd.Close()

	// Get the size and modtime of the file before we start hashing it.
	fi, err := fd.Stat()
	if err != nil {
		l.Debugln("stat before:", err)
		return nil, err
	}
	size := fi.Size()
	modTime := fi.ModTime()

	// Hash the file. This may take a while for large files.
	blocks, err := Blocks(ctx, fd, blockSize, size, counter, useWeakHashes)
	if err != nil {
		l.Debugln("blocks:", err)
		return nil, err
	}

	metricHashedBytes.WithLabelValues(folderID).Add(float64(size))

	// Recheck the size and modtime again. If they differ, the file changed
	// while we were reading it and our hash results are invalid.
	fi, err = fd.Stat()
	if err != nil {
		l.Debugln("stat after:", err)
		return nil, err
	}
	if size != fi.Size() || !modTime.Equal(fi.ModTime()) {
		return nil, errors.New("file changed during hashing")
	}

	return blocks, nil
}

// The parallel hasher reads FileInfo structures from the inbox, hashes the
// file to populate the Blocks element, and sends the result to the outbox. A
// number of workers run in parallel. The outbox is closed once the inbox is
// closed and all items have been handled.
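//
// A rough wiring sketch (ctx, folderID and filesys are assumed to be provided
// by the caller; the nil counter and the worker count are arbitrary):
//
//	inbox := make(chan protocol.FileInfo)
//	outbox := make(chan ScanResult)
//	done := make(chan struct{})
//	newParallelHasher(ctx, folderID, filesys, 4, outbox, inbox, nil, done)
//	// The walker feeds inbox and closes it when finished; hashed results
//	// are read from outbox until it is closed.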
type parallelHasher struct {
	folderID string
	fs       fs.Filesystem
	outbox   chan<- ScanResult
	inbox    <-chan protocol.FileInfo
	counter  Counter
	done     chan<- struct{}
	wg       sync.WaitGroup
}

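// newParallelHasher starts the given number of hashing workers plus a
// goroutine that closes the outbox (and the done channel, if one was given)
// once all workers have finished. It returns immediately.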
func newParallelHasher(ctx context.Context, folderID string, fs fs.Filesystem, workers int, outbox chan<- ScanResult, inbox <-chan protocol.FileInfo, counter Counter, done chan<- struct{}) {
	ph := &parallelHasher{
		folderID: folderID,
		fs:       fs,
		outbox:   outbox,
		inbox:    inbox,
		counter:  counter,
		done:     done,
		wg:       sync.NewWaitGroup(),
	}

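	// Start the hashing workers, then a single goroutine that closes the
	// output channels once they are all done.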
	ph.wg.Add(workers)
	for i := 0; i < workers; i++ {
		go ph.hashFiles(ctx)
	}

	go ph.closeWhenDone()
}

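// hashFiles is the per-worker loop: it takes files from the inbox, hashes
// them, fills in the block list, block hash and size, and sends the result
// to the outbox. It returns when the inbox is closed or the context is
// cancelled.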
func (ph *parallelHasher) hashFiles(ctx context.Context) {
	defer ph.wg.Done()

	for {
		select {
		case f, ok := <-ph.inbox:
			if !ok {
				return
			}

			l.Debugln("started hashing:", f)

			if f.IsDirectory() || f.IsDeleted() {
				panic("Bug. Asked to hash a directory or a deleted file.")
			}

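			// Weak hashing is unconditionally enabled here; the final
			// argument to HashFile is the useWeakHashes flag.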
			blocks, err := HashFile(ctx, ph.folderID, ph.fs, f.Name, f.BlockSize(), ph.counter, true)
			if err != nil {
				handleError(ctx, "hashing", f.Name, err, ph.outbox)
				continue
			}

			f.Blocks = blocks
			f.BlocksHash = protocol.BlocksHash(blocks)

			// The size we saw when initially deciding to hash the file
			// might not have been the size it actually had when we hashed
			// it. Update the size from the block list.
			f.Size = 0
			for _, b := range blocks {
				f.Size += int64(b.Size)
			}

l.Debugln("completed hashing:", f)
|
2015-11-13 14:00:32 +00:00
|
|
|
select {
|
2018-11-07 10:04:41 +00:00
|
|
|
case ph.outbox <- ScanResult{File: f}:
|
2017-04-26 00:15:23 +00:00
|
|
|
case <-ctx.Done():
|
2015-11-13 14:00:32 +00:00
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2017-04-26 00:15:23 +00:00
|
|
|
case <-ctx.Done():
|
2015-11-13 14:00:32 +00:00
|
|
|
return
|
|
|
|
}
|
2014-07-30 18:10:46 +00:00
|
|
|
}
|
|
|
|
}
|
2017-04-01 09:04:11 +00:00
|
|
|
|
|
|
|
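// closeWhenDone waits for all hashing workers to exit, drains any remaining
// items from the inbox, and then closes the done and outbox channels.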
func (ph *parallelHasher) closeWhenDone() {
	ph.wg.Wait()

	// In case the hasher aborted on context, wait for the filesystem
	// walking/progress routine to finish.
	for range ph.inbox {
	}

	if ph.done != nil {
		close(ph.done)
	}
	close(ph.outbox)
}