// Copyright (C) 2014 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.

package db

import (
	"encoding/binary"
	"fmt"

	"github.com/syncthing/syncthing/lib/osutil"
	"github.com/syncthing/syncthing/lib/protocol"

	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/util"
)

var blockFinder *BlockFinder

const maxBatchSize = 1000
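
// Writes below are accumulated into leveldb batches; whenever a batch grows
// past maxBatchSize operations it is flushed and reset, bounding the memory
// held by a single Add, Update, Discard or Drop call.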

type BlockMap struct {
	db     *Instance
	folder uint32
}

func NewBlockMap(db *Instance, folder uint32) *BlockMap {
	return &BlockMap{
		db:     db,
		folder: folder,
	}
}

// Add files to the block map, ignoring any deleted or invalid files.
func (m *BlockMap) Add(files []protocol.FileInfo) error {
	batch := new(leveldb.Batch)
	buf := make([]byte, 4)
	var key []byte
	for _, file := range files {
		if batch.Len() > maxBatchSize {
			if err := m.db.Write(batch, nil); err != nil {
				return err
			}
			batch.Reset()
		}

		if file.IsDirectory() || file.IsDeleted() || file.IsInvalid() {
			continue
		}

		for i, block := range file.Blocks {
			binary.BigEndian.PutUint32(buf, uint32(i))
			key = m.blockKeyInto(key, block.Hash, file.Name)
			batch.Put(key, buf)
		}
	}
	return m.db.Write(batch, nil)
}
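
// Example (illustrative only; blockMap and scannedFiles are hypothetical
// names): after scanning a folder, its current files can be registered in a
// single call:
//
//	if err := blockMap.Add(scannedFiles); err != nil {
//		// handle the database error
//	}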

// Update block map state, removing any deleted or invalid files.
func (m *BlockMap) Update(files []protocol.FileInfo) error {
	batch := new(leveldb.Batch)
	buf := make([]byte, 4)
	var key []byte
	for _, file := range files {
		if batch.Len() > maxBatchSize {
			if err := m.db.Write(batch, nil); err != nil {
				return err
			}
			batch.Reset()
		}

		if file.IsDirectory() {
			continue
		}

		if file.IsDeleted() || file.IsInvalid() {
			for _, block := range file.Blocks {
				key = m.blockKeyInto(key, block.Hash, file.Name)
				batch.Delete(key)
			}
			continue
		}

		for i, block := range file.Blocks {
			binary.BigEndian.PutUint32(buf, uint32(i))
			key = m.blockKeyInto(key, block.Hash, file.Name)
			batch.Put(key, buf)
		}
	}
	return m.db.Write(batch, nil)
}

// Discard block map state, removing the given files.
func (m *BlockMap) Discard(files []protocol.FileInfo) error {
	batch := new(leveldb.Batch)
	var key []byte
	for _, file := range files {
		if batch.Len() > maxBatchSize {
			if err := m.db.Write(batch, nil); err != nil {
				return err
			}
			batch.Reset()
		}

		for _, block := range file.Blocks {
			key = m.blockKeyInto(key, block.Hash, file.Name)
			batch.Delete(key)
		}
	}
	return m.db.Write(batch, nil)
}

// Drop block map, removing all entries related to this block map from the db.
func (m *BlockMap) Drop() error {
	batch := new(leveldb.Batch)
	iter := m.db.NewIterator(util.BytesPrefix(m.blockKeyInto(nil, nil, "")[:keyPrefixLen+keyFolderLen]), nil)
	defer iter.Release()
	for iter.Next() {
		if batch.Len() > maxBatchSize {
			if err := m.db.Write(batch, nil); err != nil {
				return err
			}
			batch.Reset()
		}

		batch.Delete(iter.Key())
	}
	if iter.Error() != nil {
		return iter.Error()
	}
	return m.db.Write(batch, nil)
}

func (m *BlockMap) blockKeyInto(o, hash []byte, file string) []byte {
	return blockKeyInto(o, hash, m.folder, file)
}

type BlockFinder struct {
	db *Instance
}

func NewBlockFinder(db *Instance) *BlockFinder {
	if blockFinder != nil {
		return blockFinder
	}

	f := &BlockFinder{
		db: db,
	}

	return f
}

func (f *BlockFinder) String() string {
	return fmt.Sprintf("BlockFinder@%p", f)
}

// Iterate takes an iterator function which is called for every block matching
// the given hash in the given folders, until the function returns true. The
// iterator function should return true when it is satisfied with a block,
// which stops the iteration, or false to keep looking. Iterate returns
// whether a satisfying block was eventually found.
func (f *BlockFinder) Iterate(folders []string, hash []byte, iterFn func(string, string, int32) bool) bool {
	var key []byte
	for _, folder := range folders {
		folderID := f.db.folderIdx.ID([]byte(folder))
		key = blockKeyInto(key, hash, folderID, "")
		iter := f.db.NewIterator(util.BytesPrefix(key), nil)
		defer iter.Release()

		for iter.Next() && iter.Error() == nil {
			file := blockKeyName(iter.Key())
			index := int32(binary.BigEndian.Uint32(iter.Value()))
			if iterFn(folder, osutil.NativeFilename(file), index) {
				return true
			}
		}
	}
	return false
}
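
// Example (illustrative only; finder and tryReuse are hypothetical names): a
// caller looking to reuse an existing copy of a block might do
//
//	found := finder.Iterate(folders, hash, func(folder, file string, index int32) bool {
//		// index is the block's position within file; returning true stops
//		// the iteration once a usable copy has been found.
//		return tryReuse(folder, file, index)
//	})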

// Fix repairs incorrect blockmap entries, removing the old entry and
// replacing it with a new entry for the given block.
func (f *BlockFinder) Fix(folder, file string, index int32, oldHash, newHash []byte) error {
	buf := make([]byte, 4)
	binary.BigEndian.PutUint32(buf, uint32(index))

	folderID := f.db.folderIdx.ID([]byte(folder))
	batch := new(leveldb.Batch)
	batch.Delete(blockKeyInto(nil, oldHash, folderID, file))
	batch.Put(blockKeyInto(nil, newHash, folderID, file), buf)
	return f.db.Write(batch, nil)
}
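
// Example (illustrative only): if block 3 of "photos/a.jpg" turns out to hash
// to newHash rather than the recorded oldHash, the stale entry is rewritten
// with
//
//	err := finder.Fix(folder, "photos/a.jpg", 3, oldHash, newHash)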

// blockKeyInto returns a byte slice encoding the following information,
// reusing o's backing storage when its capacity is sufficient:
//
//	keyTypeBlock (1 byte)
//	folder (4 bytes)
//	block hash (32 bytes)
//	file name (variable size)
func blockKeyInto(o, hash []byte, folder uint32, file string) []byte {
	reqLen := keyPrefixLen + keyFolderLen + keyHashLen + len(file)
	if cap(o) < reqLen {
		o = make([]byte, reqLen)
	} else {
		o = o[:reqLen]
	}
	o[0] = KeyTypeBlock
	binary.BigEndian.PutUint32(o[keyPrefixLen:], folder)
	copy(o[keyPrefixLen+keyFolderLen:], hash)
	copy(o[keyPrefixLen+keyFolderLen+keyHashLen:], []byte(file))
	return o
}
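
// For example (illustrative; assuming keyPrefixLen is 1, keyFolderLen is 4
// and keyHashLen is 32, matching the layout described above), the key for
// block hash H of file "foo/bar" in folder 7 is laid out as
//
//	[KeyTypeBlock][0 0 0 7][H, 32 bytes]["foo/bar"]
//
// so a prefix scan over the first 37 bytes finds every file in the folder
// that contains the block.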

// blockKeyName returns the file name from the block key.
func blockKeyName(data []byte) string {
	if len(data) < keyPrefixLen+keyFolderLen+keyHashLen+1 {
		panic("Incorrect key length")
	}
	if data[0] != KeyTypeBlock {
		panic("Incorrect key type")
	}

	file := string(data[keyPrefixLen+keyFolderLen+keyHashLen:])
	return file
}