syncthing/lib/db/structs.pb.go

// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: lib/db/structs.proto
package db
import (
	fmt "fmt"
	_ "github.com/gogo/protobuf/gogoproto"
	proto "github.com/gogo/protobuf/proto"
	github_com_gogo_protobuf_types "github.com/gogo/protobuf/types"
	github_com_syncthing_syncthing_lib_protocol "github.com/syncthing/syncthing/lib/protocol"
	protocol "github.com/syncthing/syncthing/lib/protocol"
	_ "github.com/syncthing/syncthing/proto/ext"
	_ "google.golang.org/protobuf/types/known/timestamppb"
	io "io"
	math "math"
	math_bits "math/bits"
	time "time"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
var _ = time.Kitchen
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
type FileVersion struct {
	Version protocol.Vector `protobuf:"bytes,1,opt,name=version,proto3" json:"version" xml:"version"`
	Deleted bool `protobuf:"varint,2,opt,name=deleted,proto3" json:"deleted" xml:"deleted"`
	Devices [][]byte `protobuf:"bytes,3,rep,name=devices,proto3" json:"devices" xml:"device"`
	InvalidDevices [][]byte `protobuf:"bytes,4,rep,name=invalid_devices,json=invalidDevices,proto3" json:"invalidDevices" xml:"invalidDevice"`
}
func (m *FileVersion) Reset() { *m = FileVersion{} }
func (m *FileVersion) String() string { return proto.CompactTextString(m) }
func (*FileVersion) ProtoMessage() {}
func (*FileVersion) Descriptor() ([]byte, []int) {
	return fileDescriptor_5465d80e8cba02e3, []int{0}
}
func (m *FileVersion) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *FileVersion) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_FileVersion.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalToSizedBuffer(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (m *FileVersion) XXX_Merge(src proto.Message) {
	xxx_messageInfo_FileVersion.Merge(m, src)
}
func (m *FileVersion) XXX_Size() int {
	return m.ProtoSize()
}
func (m *FileVersion) XXX_DiscardUnknown() {
	xxx_messageInfo_FileVersion.DiscardUnknown(m)
}
var xxx_messageInfo_FileVersion proto.InternalMessageInfo
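// Illustrative sketch, not generated code: round-tripping a FileVersion
// through the gogo marshalling helpers imported above. The field values and
// the function name are made up for the example.
func exampleFileVersionRoundTrip() (FileVersion, error) {
	fv := FileVersion{
		Deleted: true,
		Devices: [][]byte{{0x01, 0x02, 0x03, 0x04}},
	}
	buf, err := proto.Marshal(&fv)
	if err != nil {
		return FileVersion{}, err
	}
	var out FileVersion
	err = proto.Unmarshal(buf, &out)
	return out, err
}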
type VersionList struct {
	RawVersions []FileVersion `protobuf:"bytes,1,rep,name=versions,proto3" json:"versions" xml:"version"`
}
func (m *VersionList) Reset() { *m = VersionList{} }
func (*VersionList) ProtoMessage() {}
func (*VersionList) Descriptor() ([]byte, []int) {
	return fileDescriptor_5465d80e8cba02e3, []int{1}
}
func (m *VersionList) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *VersionList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_VersionList.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalToSizedBuffer(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (m *VersionList) XXX_Merge(src proto.Message) {
	xxx_messageInfo_VersionList.Merge(m, src)
}
func (m *VersionList) XXX_Size() int {
	return m.ProtoSize()
}
func (m *VersionList) XXX_DiscardUnknown() {
	xxx_messageInfo_VersionList.DiscardUnknown(m)
}
var xxx_messageInfo_VersionList proto.InternalMessageInfo
// Must be the same as FileInfo but without the blocks field
type FileInfoTruncated struct {
	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name" xml:"name"`
	Size int64 `protobuf:"varint,3,opt,name=size,proto3" json:"size" xml:"size"`
	ModifiedS int64 `protobuf:"varint,5,opt,name=modified_s,json=modifiedS,proto3" json:"modifiedS" xml:"modifiedS"`
	ModifiedBy github_com_syncthing_syncthing_lib_protocol.ShortID `protobuf:"varint,12,opt,name=modified_by,json=modifiedBy,proto3,customtype=github.com/syncthing/syncthing/lib/protocol.ShortID" json:"modifiedBy" xml:"modifiedBy"`
	Version protocol.Vector `protobuf:"bytes,9,opt,name=version,proto3" json:"version" xml:"version"`
	Sequence int64 `protobuf:"varint,10,opt,name=sequence,proto3" json:"sequence" xml:"sequence"`
	// repeated BlockInfo Blocks = 16
	SymlinkTarget string `protobuf:"bytes,17,opt,name=symlink_target,json=symlinkTarget,proto3" json:"symlinkTarget" xml:"symlinkTarget"`
	BlocksHash []byte `protobuf:"bytes,18,opt,name=blocks_hash,json=blocksHash,proto3" json:"blocksHash" xml:"blocksHash"`
	Encrypted []byte `protobuf:"bytes,19,opt,name=encrypted,proto3" json:"encrypted" xml:"encrypted"`
	Type protocol.FileInfoType `protobuf:"varint,2,opt,name=type,proto3,enum=protocol.FileInfoType" json:"type" xml:"type"`
	Permissions uint32 `protobuf:"varint,4,opt,name=permissions,proto3" json:"permissions" xml:"permissions"`
	ModifiedNs int `protobuf:"varint,11,opt,name=modified_ns,json=modifiedNs,proto3,casttype=int" json:"modifiedNs" xml:"modifiedNs"`
	RawBlockSize int `protobuf:"varint,13,opt,name=block_size,json=blockSize,proto3,casttype=int" json:"blockSize" xml:"blockSize"`
	Platform protocol.PlatformData `protobuf:"bytes,14,opt,name=platform,proto3" json:"platform" xml:"platform"`
	// see bep.proto
	LocalFlags uint32 `protobuf:"varint,1000,opt,name=local_flags,json=localFlags,proto3" json:"localFlags" xml:"localFlags"`
	VersionHash []byte `protobuf:"bytes,1001,opt,name=version_hash,json=versionHash,proto3" json:"versionHash" xml:"versionHash"`
	InodeChangeNs int64 `protobuf:"varint,1002,opt,name=inode_change_ns,json=inodeChangeNs,proto3" json:"inodeChangeNs" xml:"inodeChangeNs"`
	Deleted bool `protobuf:"varint,6,opt,name=deleted,proto3" json:"deleted" xml:"deleted"`
	RawInvalid bool `protobuf:"varint,7,opt,name=invalid,proto3" json:"invalid" xml:"invalid"`
	NoPermissions bool `protobuf:"varint,8,opt,name=no_permissions,json=noPermissions,proto3" json:"noPermissions" xml:"noPermissions"`
}
func (m *FileInfoTruncated) Reset() { *m = FileInfoTruncated{} }
func (*FileInfoTruncated) ProtoMessage() {}
func (*FileInfoTruncated) Descriptor() ([]byte, []int) {
	return fileDescriptor_5465d80e8cba02e3, []int{2}
}
func (m *FileInfoTruncated) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *FileInfoTruncated) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_FileInfoTruncated.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalToSizedBuffer(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (m *FileInfoTruncated) XXX_Merge(src proto.Message) {
	xxx_messageInfo_FileInfoTruncated.Merge(m, src)
}
func (m *FileInfoTruncated) XXX_Size() int {
	return m.ProtoSize()
}
func (m *FileInfoTruncated) XXX_DiscardUnknown() {
	xxx_messageInfo_FileInfoTruncated.DiscardUnknown(m)
}
var xxx_messageInfo_FileInfoTruncated proto.InternalMessageInfo
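// Illustrative sketch, not generated code: FileInfoTruncated mirrors FileInfo
// but omits the blocks field (field number 16), so unmarshalling raw FileInfo
// bytes into it yields the file metadata while the block list is skipped like
// any unknown field. The helper name is hypothetical.
func truncatedFromFileInfoBytes(fileInfoBytes []byte) (FileInfoTruncated, error) {
	var t FileInfoTruncated
	err := t.Unmarshal(fileInfoBytes)
	return t, err
}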
// BlockList is the structure used to store block lists
type BlockList struct {
	Blocks []protocol.BlockInfo `protobuf:"bytes,1,rep,name=blocks,proto3" json:"blocks" xml:"block"`
}
func (m *BlockList) Reset() { *m = BlockList{} }
func (m *BlockList) String() string { return proto.CompactTextString(m) }
func (*BlockList) ProtoMessage() {}
func (*BlockList) Descriptor() ([]byte, []int) {
	return fileDescriptor_5465d80e8cba02e3, []int{3}
}
func (m *BlockList) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *BlockList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_BlockList.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalToSizedBuffer(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (m *BlockList) XXX_Merge(src proto.Message) {
	xxx_messageInfo_BlockList.Merge(m, src)
}
func (m *BlockList) XXX_Size() int {
	return m.ProtoSize()
}
func (m *BlockList) XXX_DiscardUnknown() {
	xxx_messageInfo_BlockList.DiscardUnknown(m)
}
var xxx_messageInfo_BlockList proto.InternalMessageInfo
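// Illustrative sketch, not generated code: in the block-list deduplication
// scheme, a marshalled BlockList is stored keyed by the SHA-256 of its
// serialized form. A hypothetical key helper could look like this; it assumes
// "crypto/sha256" is added to the imports above.
func blockListKey(bl *BlockList) ([]byte, error) {
	buf, err := bl.Marshal()
	if err != nil {
		return nil, err
	}
	sum := sha256.Sum256(buf)
	return sum[:], nil
}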
// IndirectionHashesOnly is used to only unmarshal the indirection hashes
// from a FileInfo
type IndirectionHashesOnly struct {
	BlocksHash []byte `protobuf:"bytes,18,opt,name=blocks_hash,json=blocksHash,proto3" json:"blocksHash" xml:"blocksHash"`
	VersionHash []byte `protobuf:"bytes,1001,opt,name=version_hash,json=versionHash,proto3" json:"versionHash" xml:"versionHash"`
}
func (m *IndirectionHashesOnly) Reset() { *m = IndirectionHashesOnly{} }
func (m *IndirectionHashesOnly) String() string { return proto.CompactTextString(m) }
func (*IndirectionHashesOnly) ProtoMessage() {}
func (*IndirectionHashesOnly) Descriptor() ([]byte, []int) {
	return fileDescriptor_5465d80e8cba02e3, []int{4}
}
func (m *IndirectionHashesOnly) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *IndirectionHashesOnly) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_IndirectionHashesOnly.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalToSizedBuffer(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (m *IndirectionHashesOnly) XXX_Merge(src proto.Message) {
	xxx_messageInfo_IndirectionHashesOnly.Merge(m, src)
}
func (m *IndirectionHashesOnly) XXX_Size() int {
	return m.ProtoSize()
}
func (m *IndirectionHashesOnly) XXX_DiscardUnknown() {
	xxx_messageInfo_IndirectionHashesOnly.DiscardUnknown(m)
}
var xxx_messageInfo_IndirectionHashesOnly proto.InternalMessageInfo
// For each folder and device we keep one of these to track the current
// counts and sequence. We also keep one for the global state of the folder.
type Counts struct {
Files int `protobuf:"varint,1,opt,name=files,proto3,casttype=int" json:"files" xml:"files"`
Directories int `protobuf:"varint,2,opt,name=directories,proto3,casttype=int" json:"directories" xml:"directories"`
Symlinks int `protobuf:"varint,3,opt,name=symlinks,proto3,casttype=int" json:"symlinks" xml:"symlinks"`
Deleted int `protobuf:"varint,4,opt,name=deleted,proto3,casttype=int" json:"deleted" xml:"deleted"`
Bytes int64 `protobuf:"varint,5,opt,name=bytes,proto3" json:"bytes" xml:"bytes"`
Sequence int64 `protobuf:"varint,6,opt,name=sequence,proto3" json:"sequence" xml:"sequence"`
DeviceID []byte `protobuf:"bytes,17,opt,name=device_id,json=deviceId,proto3" json:"deviceId" xml:"deviceId"`
LocalFlags uint32 `protobuf:"varint,18,opt,name=local_flags,json=localFlags,proto3" json:"localFlags" xml:"localFlags"`
}
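// A minimal usage sketch (not part of the generated API; the values and the
// someDeviceID variable are hypothetical): callers typically fill in a Counts
// value and serialize it with the generated Marshal method defined below.
//
//	c := Counts{
//		Files:    12,
//		Bytes:    34567,
//		Sequence: 89,
//		DeviceID: someDeviceID, // raw device ID bytes, hypothetical
//	}
//	buf, err := c.Marshal() // protobuf wire-format bytes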
func (m *Counts) Reset() { *m = Counts{} }
func (*Counts) ProtoMessage() {}
func (*Counts) Descriptor() ([]byte, []int) {
return fileDescriptor_5465d80e8cba02e3, []int{5}
}
func (m *Counts) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *Counts) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_Counts.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *Counts) XXX_Merge(src proto.Message) {
xxx_messageInfo_Counts.Merge(m, src)
}
func (m *Counts) XXX_Size() int {
return m.ProtoSize()
}
func (m *Counts) XXX_DiscardUnknown() {
xxx_messageInfo_Counts.DiscardUnknown(m)
}
var xxx_messageInfo_Counts proto.InternalMessageInfo
type CountsSet struct {
Counts []Counts `protobuf:"bytes,1,rep,name=counts,proto3" json:"counts" xml:"count"`
Created int64 `protobuf:"varint,2,opt,name=created,proto3" json:"created" xml:"created"`
}
func (m *CountsSet) Reset() { *m = CountsSet{} }
func (m *CountsSet) String() string { return proto.CompactTextString(m) }
func (*CountsSet) ProtoMessage() {}
func (*CountsSet) Descriptor() ([]byte, []int) {
return fileDescriptor_5465d80e8cba02e3, []int{6}
}
func (m *CountsSet) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *CountsSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_CountsSet.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *CountsSet) XXX_Merge(src proto.Message) {
xxx_messageInfo_CountsSet.Merge(m, src)
}
func (m *CountsSet) XXX_Size() int {
return m.ProtoSize()
}
func (m *CountsSet) XXX_DiscardUnknown() {
xxx_messageInfo_CountsSet.DiscardUnknown(m)
}
var xxx_messageInfo_CountsSet proto.InternalMessageInfo
type FileVersionDeprecated struct {
Version protocol.Vector `protobuf:"bytes,1,opt,name=version,proto3" json:"version" xml:"version"`
Device []byte `protobuf:"bytes,2,opt,name=device,proto3" json:"device" xml:"device"`
Invalid bool `protobuf:"varint,3,opt,name=invalid,proto3" json:"invalid" xml:"invalid"`
Deleted bool `protobuf:"varint,4,opt,name=deleted,proto3" json:"deleted" xml:"deleted"`
}
func (m *FileVersionDeprecated) Reset() { *m = FileVersionDeprecated{} }
func (m *FileVersionDeprecated) String() string { return proto.CompactTextString(m) }
func (*FileVersionDeprecated) ProtoMessage() {}
func (*FileVersionDeprecated) Descriptor() ([]byte, []int) {
return fileDescriptor_5465d80e8cba02e3, []int{7}
}
func (m *FileVersionDeprecated) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *FileVersionDeprecated) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_FileVersionDeprecated.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *FileVersionDeprecated) XXX_Merge(src proto.Message) {
xxx_messageInfo_FileVersionDeprecated.Merge(m, src)
}
func (m *FileVersionDeprecated) XXX_Size() int {
return m.ProtoSize()
}
func (m *FileVersionDeprecated) XXX_DiscardUnknown() {
xxx_messageInfo_FileVersionDeprecated.DiscardUnknown(m)
}
var xxx_messageInfo_FileVersionDeprecated proto.InternalMessageInfo
type VersionListDeprecated struct {
Versions []FileVersionDeprecated `protobuf:"bytes,1,rep,name=versions,proto3" json:"versions" xml:"version"`
}
func (m *VersionListDeprecated) Reset() { *m = VersionListDeprecated{} }
func (*VersionListDeprecated) ProtoMessage() {}
func (*VersionListDeprecated) Descriptor() ([]byte, []int) {
return fileDescriptor_5465d80e8cba02e3, []int{8}
}
func (m *VersionListDeprecated) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *VersionListDeprecated) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_VersionListDeprecated.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *VersionListDeprecated) XXX_Merge(src proto.Message) {
xxx_messageInfo_VersionListDeprecated.Merge(m, src)
}
func (m *VersionListDeprecated) XXX_Size() int {
return m.ProtoSize()
}
func (m *VersionListDeprecated) XXX_DiscardUnknown() {
xxx_messageInfo_VersionListDeprecated.DiscardUnknown(m)
}
var xxx_messageInfo_VersionListDeprecated proto.InternalMessageInfo
type ObservedFolder struct {
Time time.Time `protobuf:"bytes,1,opt,name=time,proto3,stdtime" json:"time" xml:"time"`
Label string `protobuf:"bytes,2,opt,name=label,proto3" json:"label" xml:"label"`
ReceiveEncrypted bool `protobuf:"varint,3,opt,name=receive_encrypted,json=receiveEncrypted,proto3" json:"receiveEncrypted" xml:"receiveEncrypted"`
RemoteEncrypted bool `protobuf:"varint,4,opt,name=remote_encrypted,json=remoteEncrypted,proto3" json:"remoteEncrypted" xml:"remoteEncrypted"`
}
func (m *ObservedFolder) Reset() { *m = ObservedFolder{} }
func (m *ObservedFolder) String() string { return proto.CompactTextString(m) }
func (*ObservedFolder) ProtoMessage() {}
func (*ObservedFolder) Descriptor() ([]byte, []int) {
return fileDescriptor_5465d80e8cba02e3, []int{9}
}
func (m *ObservedFolder) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *ObservedFolder) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_ObservedFolder.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *ObservedFolder) XXX_Merge(src proto.Message) {
xxx_messageInfo_ObservedFolder.Merge(m, src)
}
func (m *ObservedFolder) XXX_Size() int {
return m.ProtoSize()
}
func (m *ObservedFolder) XXX_DiscardUnknown() {
xxx_messageInfo_ObservedFolder.DiscardUnknown(m)
}
var xxx_messageInfo_ObservedFolder proto.InternalMessageInfo
type ObservedDevice struct {
Time time.Time `protobuf:"bytes,1,opt,name=time,proto3,stdtime" json:"time" xml:"time"`
Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name" xml:"name"`
Address string `protobuf:"bytes,3,opt,name=address,proto3" json:"address" xml:"address"`
}
func (m *ObservedDevice) Reset() { *m = ObservedDevice{} }
func (m *ObservedDevice) String() string { return proto.CompactTextString(m) }
func (*ObservedDevice) ProtoMessage() {}
func (*ObservedDevice) Descriptor() ([]byte, []int) {
return fileDescriptor_5465d80e8cba02e3, []int{10}
}
func (m *ObservedDevice) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *ObservedDevice) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_ObservedDevice.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *ObservedDevice) XXX_Merge(src proto.Message) {
xxx_messageInfo_ObservedDevice.Merge(m, src)
}
func (m *ObservedDevice) XXX_Size() int {
return m.ProtoSize()
}
func (m *ObservedDevice) XXX_DiscardUnknown() {
xxx_messageInfo_ObservedDevice.DiscardUnknown(m)
}
var xxx_messageInfo_ObservedDevice proto.InternalMessageInfo
func init() {
proto.RegisterType((*FileVersion)(nil), "db.FileVersion")
proto.RegisterType((*VersionList)(nil), "db.VersionList")
proto.RegisterType((*FileInfoTruncated)(nil), "db.FileInfoTruncated")
proto.RegisterType((*BlockList)(nil), "db.BlockList")
proto.RegisterType((*IndirectionHashesOnly)(nil), "db.IndirectionHashesOnly")
proto.RegisterType((*Counts)(nil), "db.Counts")
proto.RegisterType((*CountsSet)(nil), "db.CountsSet")
proto.RegisterType((*FileVersionDeprecated)(nil), "db.FileVersionDeprecated")
proto.RegisterType((*VersionListDeprecated)(nil), "db.VersionListDeprecated")
proto.RegisterType((*ObservedFolder)(nil), "db.ObservedFolder")
proto.RegisterType((*ObservedDevice)(nil), "db.ObservedDevice")
}
func init() { proto.RegisterFile("lib/db/structs.proto", fileDescriptor_5465d80e8cba02e3) }
var fileDescriptor_5465d80e8cba02e3 = []byte{
// 1543 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x57, 0xcd, 0x6f, 0xdb, 0x46,
0x16, 0x37, 0x2d, 0xd9, 0x92, 0x46, 0xf2, 0x17, 0xb3, 0x36, 0xb4, 0xde, 0x5d, 0x51, 0x3b, 0x71,
0x00, 0xed, 0x07, 0x64, 0xc0, 0x41, 0x8c, 0x45, 0x80, 0x6d, 0x10, 0xc6, 0x75, 0xe2, 0x20, 0x75,
0xd2, 0x71, 0x90, 0x14, 0xed, 0x41, 0xe0, 0xc7, 0x58, 0x26, 0x42, 0x91, 0x2a, 0x49, 0xdb, 0x51,
0x6e, 0xbd, 0x14, 0xe8, 0x2d, 0x08, 0x7a, 0x28, 0x8a, 0xa2, 0x08, 0x50, 0xa0, 0x7f, 0x42, 0xff,
0x82, 0xa2, 0xc8, 0xd1, 0xc7, 0xa2, 0x07, 0x16, 0xb1, 0x2f, 0xad, 0x8e, 0x3a, 0xf6, 0x54, 0xcc,
0x9b, 0xe1, 0x70, 0x64, 0x23, 0x45, 0x92, 0xfa, 0xc6, 0xf7, 0x7b, 0xbf, 0x79, 0x12, 0xdf, 0xfc,
0xde, 0x07, 0xd1, 0x5f, 0x7c, 0xcf, 0x5e, 0x75, 0xed, 0xd5, 0x38, 0x89, 0xf6, 0x9d, 0x24, 0x6e,
0xf7, 0xa3, 0x30, 0x09, 0xf5, 0x49, 0xd7, 0x5e, 0xbe, 0x18, 0xd1, 0x7e, 0x18, 0xaf, 0x02, 0x60,
0xef, 0xef, 0xae, 0x76, 0xc3, 0x6e, 0x08, 0x06, 0x3c, 0x71, 0xe2, 0xb2, 0xd1, 0x0d, 0xc3, 0xae,
0x4f, 0x73, 0x56, 0xe2, 0xf5, 0x68, 0x9c, 0x58, 0xbd, 0xbe, 0x20, 0x2c, 0xb1, 0xf8, 0xf0, 0xe8,
0x84, 0xfe, 0xaa, 0x4d, 0x33, 0xbc, 0x42, 0x1f, 0x27, 0xfc, 0x11, 0x7f, 0x3d, 0x89, 0xaa, 0x9b,
0x9e, 0x4f, 0x1f, 0xd0, 0x28, 0xf6, 0xc2, 0x40, 0xbf, 0x83, 0x4a, 0x07, 0xfc, 0xb1, 0xae, 0x35,
0xb5, 0x56, 0x75, 0x6d, 0xbe, 0x9d, 0x05, 0x68, 0x3f, 0xa0, 0x4e, 0x12, 0x46, 0x66, 0xf3, 0x45,
0x6a, 0x4c, 0x0c, 0x53, 0x23, 0x23, 0x8e, 0x52, 0x63, 0xe6, 0x71, 0xcf, 0xbf, 0x8a, 0x85, 0x8d,
0x49, 0xe6, 0xd1, 0xd7, 0x51, 0xc9, 0xa5, 0x3e, 0x4d, 0xa8, 0x5b, 0x9f, 0x6c, 0x6a, 0xad, 0xb2,
0xf9, 0x77, 0x76, 0x4e, 0x40, 0xf2, 0x9c, 0xb0, 0x31, 0xc9, 0x3c, 0xfa, 0x15, 0x76, 0xee, 0xc0,
0x73, 0x68, 0x5c, 0x2f, 0x34, 0x0b, 0xad, 0x9a, 0xf9, 0x37, 0x7e, 0x0e, 0xa0, 0x51, 0x6a, 0xd4,
0xc4, 0x39, 0x66, 0xc3, 0x31, 0x70, 0xe8, 0x04, 0xcd, 0x79, 0xc1, 0x81, 0xe5, 0x7b, 0x6e, 0x27,
0x3b, 0x5e, 0x84, 0xe3, 0xff, 0x1a, 0xa6, 0xc6, 0xac, 0x70, 0x6d, 0xc8, 0x28, 0x17, 0x20, 0xca,
0x18, 0x8c, 0xc9, 0x29, 0x1a, 0xfe, 0x44, 0x43, 0x55, 0x91, 0x9c, 0x3b, 0x5e, 0x9c, 0xe8, 0x3e,
0x2a, 0x8b, 0xb7, 0x8b, 0xeb, 0x5a, 0xb3, 0xd0, 0xaa, 0xae, 0xcd, 0xb5, 0x5d, 0xbb, 0xad, 0xe4,
0xd0, 0xbc, 0xc6, 0x12, 0x74, 0x9c, 0x1a, 0x55, 0x62, 0x1d, 0x0a, 0x2c, 0x1e, 0xa6, 0x86, 0x3c,
0x77, 0x26, 0x61, 0xcf, 0x8e, 0x56, 0x54, 0x2e, 0x91, 0xcc, 0xab, 0xc5, 0x2f, 0x9e, 0x1b, 0x13,
0xf8, 0x9b, 0x1a, 0x5a, 0x60, 0x3f, 0xb0, 0x15, 0xec, 0x86, 0xf7, 0xa3, 0xfd, 0xc0, 0xb1, 0x58,
0x92, 0xfe, 0x8d, 0x8a, 0x81, 0xd5, 0xa3, 0x70, 0x4f, 0x15, 0x73, 0x69, 0x98, 0x1a, 0x60, 0x8f,
0x52, 0x03, 0x41, 0x74, 0x66, 0x60, 0x02, 0x18, 0xe3, 0xc6, 0xde, 0x13, 0x5a, 0x2f, 0x34, 0xb5,
0x56, 0x81, 0x73, 0x99, 0x2d, 0xb9, 0xcc, 0xc0, 0x04, 0x30, 0xfd, 0x1a, 0x42, 0xbd, 0xd0, 0xf5,
0x76, 0x3d, 0xea, 0x76, 0xe2, 0xfa, 0x14, 0x9c, 0x68, 0x0e, 0x53, 0xa3, 0x92, 0xa1, 0x3b, 0xa3,
0xd4, 0x98, 0x83, 0x63, 0x12, 0xc1, 0x24, 0xf7, 0xea, 0xdf, 0x69, 0xa8, 0x2a, 0x23, 0xd8, 0x83,
0x7a, 0xad, 0xa9, 0xb5, 0x8a, 0xe6, 0xe7, 0x1a, 0x4b, 0xcb, 0x4f, 0xa9, 0x71, 0xb9, 0xeb, 0x25,
0x7b, 0xfb, 0x76, 0xdb, 0x09, 0x7b, 0xab, 0xf1, 0x20, 0x70, 0x92, 0x3d, 0x2f, 0xe8, 0x2a, 0x4f,
0xaa, 0x68, 0xdb, 0x3b, 0x7b, 0x61, 0x94, 0x6c, 0x6d, 0x0c, 0x53, 0x43, 0xfe, 0x29, 0x73, 0x30,
0x4a, 0x8d, 0xf9, 0xb1, 0xdf, 0x37, 0x07, 0xf8, 0xcb, 0xa3, 0x95, 0xb7, 0x09, 0x4c, 0x94, 0xb0,
0xaa, 0xf8, 0x2b, 0x7f, 0x5e, 0xfc, 0x57, 0x51, 0x39, 0xa6, 0x1f, 0xef, 0xd3, 0xc0, 0xa1, 0x75,
0x04, 0x59, 0x6c, 0x30, 0x15, 0x64, 0xd8, 0x28, 0x35, 0x66, 0x79, 0xee, 0x05, 0x80, 0x89, 0xf4,
0xe9, 0x77, 0xd1, 0x6c, 0x3c, 0xe8, 0xf9, 0x5e, 0xf0, 0xa8, 0x93, 0x58, 0x51, 0x97, 0x26, 0xf5,
0x05, 0xb8, 0xe5, 0xd6, 0x30, 0x35, 0x66, 0x84, 0xe7, 0x3e, 0x38, 0xa4, 0x8e, 0xc7, 0x50, 0x4c,
0xc6, 0x59, 0xfa, 0x0d, 0x54, 0xb5, 0xfd, 0xd0, 0x79, 0x14, 0x77, 0xf6, 0xac, 0x78, 0xaf, 0xae,
0x37, 0xb5, 0x56, 0xcd, 0xc4, 0x2c, 0xad, 0x1c, 0xbe, 0x65, 0xc5, 0x7b, 0x32, 0xad, 0x39, 0x84,
0x89, 0xe2, 0xd7, 0xdf, 0x41, 0x15, 0x1a, 0x38, 0xd1, 0xa0, 0xcf, 0x0a, 0xfa, 0x02, 0x84, 0x00,
0x61, 0x48, 0x50, 0x0a, 0x43, 0x22, 0x98, 0xe4, 0x5e, 0xdd, 0x44, 0xc5, 0x64, 0xd0, 0xa7, 0xd0,
0x0b, 0x66, 0xd7, 0x96, 0xf2, 0xe4, 0x4a, 0x71, 0x0f, 0xfa, 0x94, 0xab, 0x93, 0xf1, 0xa4, 0x3a,
0x99, 0x81, 0x09, 0x60, 0xfa, 0x26, 0xaa, 0xf6, 0x69, 0xd4, 0xf3, 0x62, 0x5e, 0x82, 0xc5, 0xa6,
0xd6, 0x9a, 0x31, 0x57, 0x86, 0xa9, 0xa1, 0xc2, 0xa3, 0xd4, 0x58, 0x80, 0x93, 0x0a, 0x86, 0x89,
0xca, 0xd0, 0x6f, 0x2b, 0x1a, 0x0d, 0xe2, 0x7a, 0xb5, 0xa9, 0xb5, 0xa6, 0xa0, 0x4f, 0x48, 0x41,
0x6c, 0xc7, 0x67, 0x74, 0xb6, 0x1d, 0xe3, 0xdf, 0x52, 0xa3, 0xe0, 0x05, 0x09, 0x51, 0x68, 0xfa,
0x2e, 0xe2, 0x59, 0xea, 0x40, 0x8d, 0xcd, 0x40, 0xa8, 0x9b, 0xc7, 0xa9, 0x51, 0x23, 0xd6, 0xa1,
0xc9, 0x1c, 0x3b, 0xde, 0x13, 0xca, 0x12, 0x65, 0x67, 0x86, 0x4c, 0x94, 0x44, 0xb2, 0xc0, 0xcf,
0x8e, 0x56, 0xc6, 0x8e, 0x91, 0xfc, 0x90, 0xfe, 0x00, 0x95, 0xfb, 0xbe, 0x95, 0xec, 0x86, 0x51,
0xaf, 0x3e, 0x0b, 0x02, 0x55, 0x72, 0x78, 0x4f, 0x78, 0x36, 0xac, 0xc4, 0x32, 0xb1, 0x90, 0xa9,
0xe4, 0x4b, 0xb5, 0x65, 0x00, 0x26, 0xd2, 0xa7, 0x6f, 0xa0, 0xaa, 0x1f, 0x3a, 0x96, 0xdf, 0xd9,
0xf5, 0xad, 0x6e, 0x5c, 0xff, 0xa5, 0x04, 0x49, 0x05, 0x75, 0x00, 0xbe, 0xc9, 0x60, 0x99, 0x8c,
0x1c, 0xc2, 0x44, 0xf1, 0xeb, 0xb7, 0x50, 0x4d, 0x48, 0x9f, 0x6b, 0xec, 0xd7, 0x12, 0x28, 0x04,
0xee, 0x46, 0x38, 0x84, 0xca, 0x16, 0xd4, 0x8a, 0xe1, 0x32, 0x53, 0x19, 0xfa, 0xfb, 0xac, 0x8f,
0x87, 0x2e, 0xed, 0x38, 0x7b, 0x56, 0xd0, 0xa5, 0xec, 0x7e, 0x86, 0x25, 0xa8, 0x20, 0xd0, 0x3f,
0xf8, 0x6e, 0x80, 0x6b, 0x5b, 0xed, 0xe3, 0x0a, 0x8a, 0xc9, 0x38, 0x4b, 0x9d, 0x44, 0xd3, 0x6f,
0x32, 0x89, 0x08, 0x2a, 0x89, 0x81, 0x50, 0x2f, 0xc1, 0xb9, 0xff, 0x1d, 0xa7, 0x06, 0x22, 0xd6,
0xe1, 0x16, 0x47, 0x59, 0x14, 0x41, 0x90, 0x51, 0x84, 0xcd, 0xda, 0xba, 0xc2, 0x24, 0x19, 0x8f,
0x15, 0x77, 0x10, 0x76, 0x54, 0x15, 0x97, 0x21, 0x34, 0xbc, 0x5c, 0x10, 0xde, 0x1b, 0xd3, 0x31,
0x7f, 0xb9, 0x31, 0x14, 0x93, 0x71, 0x96, 0x98, 0x12, 0x0f, 0x51, 0x05, 0x54, 0x03, 0x63, 0xea,
0x36, 0x9a, 0xe6, 0x85, 0x2b, 0x86, 0xd4, 0x85, 0x5c, 0x28, 0x40, 0x62, 0xd5, 0x66, 0xfe, 0x43,
0xa8, 0x44, 0x50, 0x47, 0xa9, 0x51, 0xcd, 0x45, 0x89, 0x89, 0x80, 0xf1, 0xb7, 0x1a, 0x5a, 0xdc,
0x0a, 0x5c, 0x2f, 0xa2, 0x4e, 0x22, 0xae, 0x88, 0xc6, 0x77, 0x03, 0x7f, 0x70, 0x3e, 0x5d, 0xe5,
0xdc, 0x74, 0x83, 0xbf, 0x2a, 0xa2, 0xe9, 0x1b, 0xe1, 0x7e, 0x90, 0xc4, 0xfa, 0x15, 0x34, 0xb5,
0xeb, 0xf9, 0x34, 0x86, 0xe9, 0x38, 0x65, 0x1a, 0xc3, 0xd4, 0xe0, 0x80, 0x7c, 0x49, 0xb0, 0x64,
0x39, 0x73, 0xa7, 0xfe, 0x1e, 0xaa, 0xf2, 0xf7, 0x0c, 0x23, 0x8f, 0xc6, 0xd0, 0xa8, 0xa6, 0xcc,
0xff, 0xb0, 0x7f, 0xa2, 0xc0, 0xf2, 0x9f, 0x28, 0x98, 0x0c, 0xa4, 0x12, 0xf5, 0xeb, 0xa8, 0x2c,
0xda, 0x70, 0x0c, 0xa3, 0x77, 0xca, 0xbc, 0x04, 0x23, 0x40, 0x60, 0xf9, 0x08, 0x10, 0x80, 0x8c,
0x22, 0x29, 0xfa, 0xff, 0x73, 0xe1, 0x16, 0x21, 0xc2, 0xc5, 0x3f, 0x12, 0x6e, 0x76, 0x5e, 0xea,
0xb7, 0x8d, 0xa6, 0xec, 0x41, 0x42, 0xb3, 0x39, 0x5e, 0x67, 0x79, 0x00, 0x20, 0xbf, 0x6c, 0x66,
0x61, 0xc2, 0xd1, 0xb1, 0xa1, 0x35, 0xfd, 0x86, 0x43, 0x6b, 0x07, 0x55, 0xf8, 0xda, 0xd5, 0xf1,
0x5c, 0x98, 0x57, 0x35, 0x73, 0xfd, 0x38, 0x35, 0xca, 0x7c, 0x95, 0x82, 0x21, 0x5e, 0xe6, 0x84,
0x2d, 0x57, 0x06, 0xca, 0x00, 0x56, 0x2d, 0x92, 0x49, 0x24, 0x8f, 0x49, 0x4c, 0xed, 0x4d, 0xfa,
0xdb, 0xb4, 0x26, 0x51, 0x20, 0x9f, 0x6a, 0xa8, 0xc2, 0xe5, 0xb1, 0x43, 0x13, 0xfd, 0x3a, 0x9a,
0x76, 0xc0, 0x10, 0x15, 0x82, 0xd8, 0x1a, 0xc7, 0xdd, 0x79, 0x61, 0x70, 0x86, 0xcc, 0x15, 0x98,
0x98, 0x08, 0x98, 0x35, 0x15, 0x27, 0xa2, 0x56, 0xb6, 0xde, 0x16, 0x78, 0x53, 0x11, 0x90, 0xbc,
0x1b, 0x61, 0x63, 0x92, 0x79, 0xf0, 0x67, 0x93, 0x68, 0x51, 0x59, 0x18, 0x37, 0x68, 0x3f, 0xa2,
0x7c, 0xa7, 0x3b, 0xdf, 0xf5, 0x7b, 0x0d, 0x4d, 0xf3, 0x3c, 0xc2, 0xdf, 0xab, 0x99, 0xcb, 0xec,
0x95, 0x38, 0x72, 0x66, 0x89, 0x16, 0x38, 0x7b, 0xa7, 0xac, 0xe1, 0x15, 0xf2, 0x46, 0xf9, 0xaa,
0x16, 0x97, 0x37, 0xb5, 0xf5, 0x71, 0x9d, 0xbe, 0x6e, 0x83, 0xc5, 0x87, 0x68, 0x51, 0x59, 0xaf,
0x95, 0x54, 0x7c, 0x70, 0x66, 0xd1, 0xfe, 0xeb, 0xa9, 0x45, 0x3b, 0x27, 0x9b, 0xff, 0xcc, 0xe6,
0xdd, 0x2b, 0x77, 0xec, 0x33, 0x4b, 0xf5, 0x0f, 0x93, 0x68, 0xf6, 0xae, 0x1d, 0xd3, 0xe8, 0x80,
0xba, 0x9b, 0xa1, 0xef, 0xd2, 0x48, 0xdf, 0x46, 0x45, 0xf6, 0x09, 0x25, 0x52, 0xbf, 0xdc, 0xe6,
0xdf, 0x57, 0xed, 0xec, 0xfb, 0xaa, 0x7d, 0x3f, 0xfb, 0xbe, 0x32, 0x1b, 0xe2, 0xf7, 0x80, 0x9f,
0xef, 0x29, 0x5e, 0x8f, 0xe2, 0xa7, 0x3f, 0x1b, 0x1a, 0x01, 0x9c, 0x15, 0x9f, 0x6f, 0xd9, 0xd4,
0x87, 0xf4, 0x57, 0x78, 0xf1, 0x01, 0x20, 0x05, 0x05, 0x16, 0x26, 0x1c, 0xd5, 0x3f, 0x42, 0x0b,
0x11, 0x75, 0xa8, 0x77, 0x40, 0x3b, 0xf9, 0x9e, 0xc5, 0x6f, 0xa1, 0x3d, 0x4c, 0x8d, 0x79, 0xe1,
0x7c, 0x57, 0x59, 0xb7, 0x96, 0x20, 0xcc, 0x69, 0x07, 0x26, 0x67, 0xb8, 0xfa, 0x43, 0x34, 0x1f,
0xd1, 0x5e, 0x98, 0xa8, 0xb1, 0xf9, 0x4d, 0xfd, 0x77, 0x98, 0x1a, 0x73, 0xdc, 0xa7, 0x86, 0x5e,
0x14, 0xa1, 0xc7, 0x70, 0x4c, 0x4e, 0x33, 0xf1, 0xf7, 0x5a, 0x9e, 0x48, 0x5e, 0xc0, 0xe7, 0x9e,
0xc8, 0xec, 0x53, 0x67, 0xf2, 0x35, 0x3e, 0x75, 0xd6, 0x51, 0xc9, 0x72, 0xdd, 0x88, 0xc6, 0xbc,
0xe5, 0x56, 0xb8, 0x10, 0x05, 0x24, 0x65, 0x21, 0x6c, 0x4c, 0x32, 0x8f, 0x79, 0xf3, 0xc5, 0xcb,
0xc6, 0xc4, 0xd1, 0xcb, 0xc6, 0xc4, 0x8b, 0xe3, 0x86, 0x76, 0x74, 0xdc, 0xd0, 0x9e, 0x9e, 0x34,
0x26, 0x9e, 0x9f, 0x34, 0xb4, 0xa3, 0x93, 0xc6, 0xc4, 0x8f, 0x27, 0x8d, 0x89, 0x0f, 0x2f, 0xbd,
0xc6, 0xf7, 0x85, 0x6b, 0xdb, 0xd3, 0xf0, 0x9a, 0x97, 0x7f, 0x0f, 0x00, 0x00, 0xff, 0xff, 0x52,
0x5f, 0x14, 0xfe, 0xde, 0x0f, 0x00, 0x00,
}
func (m *FileVersion) Marshal() (dAtA []byte, err error) {
size := m.ProtoSize()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *FileVersion) MarshalTo(dAtA []byte) (int, error) {
size := m.ProtoSize()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *FileVersion) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if len(m.InvalidDevices) > 0 {
for iNdEx := len(m.InvalidDevices) - 1; iNdEx >= 0; iNdEx-- {
i -= len(m.InvalidDevices[iNdEx])
copy(dAtA[i:], m.InvalidDevices[iNdEx])
i = encodeVarintStructs(dAtA, i, uint64(len(m.InvalidDevices[iNdEx])))
i--
dAtA[i] = 0x22
}
}
if len(m.Devices) > 0 {
for iNdEx := len(m.Devices) - 1; iNdEx >= 0; iNdEx-- {
i -= len(m.Devices[iNdEx])
copy(dAtA[i:], m.Devices[iNdEx])
i = encodeVarintStructs(dAtA, i, uint64(len(m.Devices[iNdEx])))
i--
dAtA[i] = 0x1a
}
}
if m.Deleted {
i--
if m.Deleted {
dAtA[i] = 1
} else {
dAtA[i] = 0
}
i--
dAtA[i] = 0x10
}
{
size, err := m.Version.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintStructs(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0xa
return len(dAtA) - i, nil
}
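// NOTE: MarshalToSizedBuffer fills dAtA from the end towards the start, so
// fields are written in descending field-number order and each field's tag
// byte ends up immediately before its payload in the finished buffer. A
// single-byte tag is (field_number << 3) | wire_type: 0x22 above is field 4
// (InvalidDevices) with wire type 2 (length-delimited), 0x1a is field 3
// (Devices), 0x10 is field 2 (Deleted, varint) and 0xa is field 1 (Version).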
func (m *VersionList) Marshal() (dAtA []byte, err error) {
size := m.ProtoSize()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *VersionList) MarshalTo(dAtA []byte) (int, error) {
size := m.ProtoSize()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *VersionList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if len(m.RawVersions) > 0 {
for iNdEx := len(m.RawVersions) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.RawVersions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintStructs(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0xa
}
}
return len(dAtA) - i, nil
}
func (m *FileInfoTruncated) Marshal() (dAtA []byte, err error) {
size := m.ProtoSize()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *FileInfoTruncated) MarshalTo(dAtA []byte) (int, error) {
size := m.ProtoSize()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *FileInfoTruncated) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if m.InodeChangeNs != 0 {
i = encodeVarintStructs(dAtA, i, uint64(m.InodeChangeNs))
i--
dAtA[i] = 0x3e
i--
dAtA[i] = 0xd0
}
if len(m.VersionHash) > 0 {
i -= len(m.VersionHash)
copy(dAtA[i:], m.VersionHash)
i = encodeVarintStructs(dAtA, i, uint64(len(m.VersionHash)))
i--
dAtA[i] = 0x3e
i--
dAtA[i] = 0xca
}
if m.LocalFlags != 0 {
i = encodeVarintStructs(dAtA, i, uint64(m.LocalFlags))
i--
dAtA[i] = 0x3e
i--
dAtA[i] = 0xc0
}
if len(m.Encrypted) > 0 {
i -= len(m.Encrypted)
copy(dAtA[i:], m.Encrypted)
i = encodeVarintStructs(dAtA, i, uint64(len(m.Encrypted)))
i--
dAtA[i] = 0x1
i--
dAtA[i] = 0x9a
}
if len(m.BlocksHash) > 0 {
i -= len(m.BlocksHash)
copy(dAtA[i:], m.BlocksHash)
i = encodeVarintStructs(dAtA, i, uint64(len(m.BlocksHash)))
i--
dAtA[i] = 0x1
i--
dAtA[i] = 0x92
}
if len(m.SymlinkTarget) > 0 {
i -= len(m.SymlinkTarget)
copy(dAtA[i:], m.SymlinkTarget)
i = encodeVarintStructs(dAtA, i, uint64(len(m.SymlinkTarget)))
i--
dAtA[i] = 0x1
i--
dAtA[i] = 0x8a
}
{
size, err := m.Platform.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintStructs(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x72
if m.RawBlockSize != 0 {
i = encodeVarintStructs(dAtA, i, uint64(m.RawBlockSize))
i--
dAtA[i] = 0x68
}
if m.ModifiedBy != 0 {
i = encodeVarintStructs(dAtA, i, uint64(m.ModifiedBy))
i--
dAtA[i] = 0x60
}
if m.ModifiedNs != 0 {
i = encodeVarintStructs(dAtA, i, uint64(m.ModifiedNs))
i--
dAtA[i] = 0x58
}
if m.Sequence != 0 {
i = encodeVarintStructs(dAtA, i, uint64(m.Sequence))
i--
dAtA[i] = 0x50
}
{
size, err := m.Version.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintStructs(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x4a
if m.NoPermissions {
i--
if m.NoPermissions {
dAtA[i] = 1
} else {
dAtA[i] = 0
}
i--
dAtA[i] = 0x40
}
if m.RawInvalid {
i--
if m.RawInvalid {
dAtA[i] = 1
} else {
dAtA[i] = 0
}
i--
dAtA[i] = 0x38
}
if m.Deleted {
i--
if m.Deleted {
dAtA[i] = 1
} else {
dAtA[i] = 0
}
i--
dAtA[i] = 0x30
}
if m.ModifiedS != 0 {
i = encodeVarintStructs(dAtA, i, uint64(m.ModifiedS))
i--
dAtA[i] = 0x28
}
if m.Permissions != 0 {
i = encodeVarintStructs(dAtA, i, uint64(m.Permissions))
i--
dAtA[i] = 0x20
}
if m.Size != 0 {
i = encodeVarintStructs(dAtA, i, uint64(m.Size))
i--
dAtA[i] = 0x18
}
if m.Type != 0 {
i = encodeVarintStructs(dAtA, i, uint64(m.Type))
i--
dAtA[i] = 0x10
}
if len(m.Name) > 0 {
i -= len(m.Name)
copy(dAtA[i:], m.Name)
i = encodeVarintStructs(dAtA, i, uint64(len(m.Name)))
i--
dAtA[i] = 0xa
}
return len(dAtA) - i, nil
}
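// Field numbers above 15 do not fit in a single-byte tag, so some fields
// above emit two tag bytes (written in reverse because the buffer is filled
// back to front). Read in wire order they decode as, for example,
// 0x92 0x01 = field 18, wire type 2 (BlocksHash), 0xca 0x3e = field 1001,
// wire type 2 (VersionHash) and 0xd0 0x3e = field 1002, wire type 0
// (InodeChangeNs).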
func (m *BlockList) Marshal() (dAtA []byte, err error) {
size := m.ProtoSize()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *BlockList) MarshalTo(dAtA []byte) (int, error) {
size := m.ProtoSize()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *BlockList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if len(m.Blocks) > 0 {
for iNdEx := len(m.Blocks) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.Blocks[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintStructs(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0xa
}
}
return len(dAtA) - i, nil
}
func (m *IndirectionHashesOnly) Marshal() (dAtA []byte, err error) {
size := m.ProtoSize()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *IndirectionHashesOnly) MarshalTo(dAtA []byte) (int, error) {
size := m.ProtoSize()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *IndirectionHashesOnly) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if len(m.VersionHash) > 0 {
i -= len(m.VersionHash)
copy(dAtA[i:], m.VersionHash)
i = encodeVarintStructs(dAtA, i, uint64(len(m.VersionHash)))
i--
dAtA[i] = 0x3e
i--
dAtA[i] = 0xca
}
if len(m.BlocksHash) > 0 {
i -= len(m.BlocksHash)
copy(dAtA[i:], m.BlocksHash)
i = encodeVarintStructs(dAtA, i, uint64(len(m.BlocksHash)))
i--
dAtA[i] = 0x1
i--
dAtA[i] = 0x92
}
return len(dAtA) - i, nil
}
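// Usage sketch (illustrative only, not part of the generated API surface):
// the Marshal/Unmarshal pairs below round-trip a value through the protobuf
// wire format, for example:
//
//	c := Counts{Files: 3, Bytes: 1024}
//	buf, err := c.Marshal()
//	if err != nil {
//		// handle marshalling error
//	}
//	var out Counts
//	err = out.Unmarshal(buf)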
func (m *Counts) Marshal() (dAtA []byte, err error) {
size := m.ProtoSize()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *Counts) MarshalTo(dAtA []byte) (int, error) {
size := m.ProtoSize()
return m.MarshalToSizedBuffer(dAtA[:size])
}
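// Note on the hard-coded tag bytes below (explanatory sketch): the buffer is
// filled from the end towards the start, and each tag is the varint encoding
// of (fieldNumber<<3 | wireType) emitted in reverse byte order. For example,
// DeviceID is field 17 with wire type 2 (length-delimited): 17<<3|2 == 138 ==
// 0x8a, whose varint form is 0x8a 0x01, so the 0x1 continuation byte is
// written first but ends up after the 0x8a in the finished buffer.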
func (m *Counts) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if m.LocalFlags != 0 {
i = encodeVarintStructs(dAtA, i, uint64(m.LocalFlags))
i--
dAtA[i] = 0x1
i--
dAtA[i] = 0x90
}
if len(m.DeviceID) > 0 {
i -= len(m.DeviceID)
copy(dAtA[i:], m.DeviceID)
i = encodeVarintStructs(dAtA, i, uint64(len(m.DeviceID)))
i--
dAtA[i] = 0x1
i--
dAtA[i] = 0x8a
}
if m.Sequence != 0 {
i = encodeVarintStructs(dAtA, i, uint64(m.Sequence))
i--
dAtA[i] = 0x30
}
if m.Bytes != 0 {
i = encodeVarintStructs(dAtA, i, uint64(m.Bytes))
i--
dAtA[i] = 0x28
}
if m.Deleted != 0 {
i = encodeVarintStructs(dAtA, i, uint64(m.Deleted))
i--
dAtA[i] = 0x20
}
if m.Symlinks != 0 {
i = encodeVarintStructs(dAtA, i, uint64(m.Symlinks))
i--
dAtA[i] = 0x18
}
if m.Directories != 0 {
i = encodeVarintStructs(dAtA, i, uint64(m.Directories))
i--
dAtA[i] = 0x10
}
if m.Files != 0 {
i = encodeVarintStructs(dAtA, i, uint64(m.Files))
i--
dAtA[i] = 0x8
}
return len(dAtA) - i, nil
}
func (m *CountsSet) Marshal() (dAtA []byte, err error) {
size := m.ProtoSize()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *CountsSet) MarshalTo(dAtA []byte) (int, error) {
size := m.ProtoSize()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *CountsSet) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if m.Created != 0 {
i = encodeVarintStructs(dAtA, i, uint64(m.Created))
i--
dAtA[i] = 0x10
}
if len(m.Counts) > 0 {
for iNdEx := len(m.Counts) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.Counts[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintStructs(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0xa
}
}
return len(dAtA) - i, nil
}
func (m *FileVersionDeprecated) Marshal() (dAtA []byte, err error) {
size := m.ProtoSize()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *FileVersionDeprecated) MarshalTo(dAtA []byte) (int, error) {
size := m.ProtoSize()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *FileVersionDeprecated) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if m.Deleted {
i--
if m.Deleted {
dAtA[i] = 1
} else {
dAtA[i] = 0
}
i--
dAtA[i] = 0x20
}
if m.Invalid {
i--
if m.Invalid {
dAtA[i] = 1
} else {
dAtA[i] = 0
}
i--
dAtA[i] = 0x18
}
if len(m.Device) > 0 {
i -= len(m.Device)
copy(dAtA[i:], m.Device)
i = encodeVarintStructs(dAtA, i, uint64(len(m.Device)))
i--
dAtA[i] = 0x12
}
{
size, err := m.Version.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintStructs(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0xa
return len(dAtA) - i, nil
}
func (m *VersionListDeprecated) Marshal() (dAtA []byte, err error) {
size := m.ProtoSize()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *VersionListDeprecated) MarshalTo(dAtA []byte) (int, error) {
size := m.ProtoSize()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *VersionListDeprecated) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if len(m.Versions) > 0 {
for iNdEx := len(m.Versions) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.Versions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintStructs(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0xa
}
}
return len(dAtA) - i, nil
}
func (m *ObservedFolder) Marshal() (dAtA []byte, err error) {
size := m.ProtoSize()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *ObservedFolder) MarshalTo(dAtA []byte) (int, error) {
size := m.ProtoSize()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *ObservedFolder) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if m.RemoteEncrypted {
i--
if m.RemoteEncrypted {
dAtA[i] = 1
} else {
dAtA[i] = 0
}
i--
dAtA[i] = 0x20
}
if m.ReceiveEncrypted {
i--
if m.ReceiveEncrypted {
dAtA[i] = 1
} else {
dAtA[i] = 0
}
i--
dAtA[i] = 0x18
}
if len(m.Label) > 0 {
i -= len(m.Label)
copy(dAtA[i:], m.Label)
i = encodeVarintStructs(dAtA, i, uint64(len(m.Label)))
i--
dAtA[i] = 0x12
}
n5, err5 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Time, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Time):])
if err5 != nil {
return 0, err5
}
i -= n5
i = encodeVarintStructs(dAtA, i, uint64(n5))
i--
dAtA[i] = 0xa
return len(dAtA) - i, nil
}
func (m *ObservedDevice) Marshal() (dAtA []byte, err error) {
size := m.ProtoSize()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *ObservedDevice) MarshalTo(dAtA []byte) (int, error) {
size := m.ProtoSize()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *ObservedDevice) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if len(m.Address) > 0 {
i -= len(m.Address)
copy(dAtA[i:], m.Address)
i = encodeVarintStructs(dAtA, i, uint64(len(m.Address)))
i--
dAtA[i] = 0x1a
}
if len(m.Name) > 0 {
i -= len(m.Name)
copy(dAtA[i:], m.Name)
i = encodeVarintStructs(dAtA, i, uint64(len(m.Name)))
i--
dAtA[i] = 0x12
}
n6, err6 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Time, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Time):])
if err6 != nil {
return 0, err6
}
i -= n6
i = encodeVarintStructs(dAtA, i, uint64(n6))
i--
dAtA[i] = 0xa
return len(dAtA) - i, nil
}
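// encodeVarintStructs writes v as a protobuf varint so that its last byte
// lands at dAtA[offset-1], matching the MarshalToSizedBuffer methods above
// that fill the buffer backwards. It returns the new start-of-data index.
// Worked example (illustrative only):
//
//	buf := make([]byte, 10)
//	base := encodeVarintStructs(buf, len(buf), 300)
//	// sovStructs(300) == 2, so base == 8 and buf[8:] == []byte{0xac, 0x02},
//	// the little-endian base-128 encoding of 300.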
func encodeVarintStructs(dAtA []byte, offset int, v uint64) int {
offset -= sovStructs(v)
base := offset
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
return base
}
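// Sizing note for the ProtoSize methods below (explanatory sketch): each
// present field contributes its tag size (1 byte for field numbers 1 through
// 15, 2 bytes for the larger field numbers used here), plus, for
// length-delimited fields, a varint length prefix and the payload itself.
// Booleans cost one tag byte plus one value byte, which is why fields such as
// Deleted add a flat 2.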
func (m *FileVersion) ProtoSize() (n int) {
if m == nil {
return 0
}
var l int
_ = l
l = m.Version.ProtoSize()
n += 1 + l + sovStructs(uint64(l))
if m.Deleted {
n += 2
}
if len(m.Devices) > 0 {
for _, b := range m.Devices {
l = len(b)
n += 1 + l + sovStructs(uint64(l))
}
}
if len(m.InvalidDevices) > 0 {
for _, b := range m.InvalidDevices {
l = len(b)
n += 1 + l + sovStructs(uint64(l))
}
}
return n
}
func (m *VersionList) ProtoSize() (n int) {
if m == nil {
return 0
}
var l int
_ = l
if len(m.RawVersions) > 0 {
for _, e := range m.RawVersions {
l = e.ProtoSize()
n += 1 + l + sovStructs(uint64(l))
}
}
return n
}
func (m *FileInfoTruncated) ProtoSize() (n int) {
if m == nil {
return 0
}
var l int
_ = l
l = len(m.Name)
if l > 0 {
n += 1 + l + sovStructs(uint64(l))
}
if m.Type != 0 {
n += 1 + sovStructs(uint64(m.Type))
}
if m.Size != 0 {
n += 1 + sovStructs(uint64(m.Size))
}
if m.Permissions != 0 {
n += 1 + sovStructs(uint64(m.Permissions))
}
if m.ModifiedS != 0 {
n += 1 + sovStructs(uint64(m.ModifiedS))
}
if m.Deleted {
n += 2
}
if m.RawInvalid {
n += 2
}
if m.NoPermissions {
n += 2
}
l = m.Version.ProtoSize()
n += 1 + l + sovStructs(uint64(l))
if m.Sequence != 0 {
n += 1 + sovStructs(uint64(m.Sequence))
}
if m.ModifiedNs != 0 {
n += 1 + sovStructs(uint64(m.ModifiedNs))
}
if m.ModifiedBy != 0 {
n += 1 + sovStructs(uint64(m.ModifiedBy))
}
if m.RawBlockSize != 0 {
n += 1 + sovStructs(uint64(m.RawBlockSize))
}
l = m.Platform.ProtoSize()
n += 1 + l + sovStructs(uint64(l))
l = len(m.SymlinkTarget)
if l > 0 {
n += 2 + l + sovStructs(uint64(l))
}
l = len(m.BlocksHash)
if l > 0 {
n += 2 + l + sovStructs(uint64(l))
}
l = len(m.Encrypted)
if l > 0 {
n += 2 + l + sovStructs(uint64(l))
}
if m.LocalFlags != 0 {
n += 2 + sovStructs(uint64(m.LocalFlags))
}
l = len(m.VersionHash)
if l > 0 {
n += 2 + l + sovStructs(uint64(l))
}
if m.InodeChangeNs != 0 {
n += 2 + sovStructs(uint64(m.InodeChangeNs))
}
return n
}
func (m *BlockList) ProtoSize() (n int) {
if m == nil {
return 0
}
var l int
_ = l
if len(m.Blocks) > 0 {
for _, e := range m.Blocks {
l = e.ProtoSize()
n += 1 + l + sovStructs(uint64(l))
}
}
return n
}
func (m *IndirectionHashesOnly) ProtoSize() (n int) {
if m == nil {
return 0
}
var l int
_ = l
l = len(m.BlocksHash)
if l > 0 {
n += 2 + l + sovStructs(uint64(l))
}
l = len(m.VersionHash)
if l > 0 {
n += 2 + l + sovStructs(uint64(l))
}
return n
}
func (m *Counts) ProtoSize() (n int) {
if m == nil {
return 0
}
var l int
_ = l
if m.Files != 0 {
n += 1 + sovStructs(uint64(m.Files))
}
if m.Directories != 0 {
n += 1 + sovStructs(uint64(m.Directories))
}
if m.Symlinks != 0 {
n += 1 + sovStructs(uint64(m.Symlinks))
}
if m.Deleted != 0 {
n += 1 + sovStructs(uint64(m.Deleted))
}
if m.Bytes != 0 {
n += 1 + sovStructs(uint64(m.Bytes))
}
if m.Sequence != 0 {
n += 1 + sovStructs(uint64(m.Sequence))
}
l = len(m.DeviceID)
if l > 0 {
n += 2 + l + sovStructs(uint64(l))
}
if m.LocalFlags != 0 {
n += 2 + sovStructs(uint64(m.LocalFlags))
}
return n
}
func (m *CountsSet) ProtoSize() (n int) {
if m == nil {
return 0
}
var l int
_ = l
if len(m.Counts) > 0 {
for _, e := range m.Counts {
l = e.ProtoSize()
n += 1 + l + sovStructs(uint64(l))
}
}
if m.Created != 0 {
n += 1 + sovStructs(uint64(m.Created))
}
return n
}
func (m *FileVersionDeprecated) ProtoSize() (n int) {
if m == nil {
return 0
}
var l int
_ = l
l = m.Version.ProtoSize()
n += 1 + l + sovStructs(uint64(l))
l = len(m.Device)
if l > 0 {
n += 1 + l + sovStructs(uint64(l))
}
if m.Invalid {
n += 2
}
if m.Deleted {
n += 2
}
return n
}
func (m *VersionListDeprecated) ProtoSize() (n int) {
if m == nil {
return 0
}
var l int
_ = l
if len(m.Versions) > 0 {
for _, e := range m.Versions {
l = e.ProtoSize()
n += 1 + l + sovStructs(uint64(l))
}
}
return n
}
func (m *ObservedFolder) ProtoSize() (n int) {
if m == nil {
return 0
}
var l int
_ = l
l = github_com_gogo_protobuf_types.SizeOfStdTime(m.Time)
n += 1 + l + sovStructs(uint64(l))
l = len(m.Label)
if l > 0 {
n += 1 + l + sovStructs(uint64(l))
}
if m.ReceiveEncrypted {
n += 2
}
if m.RemoteEncrypted {
n += 2
}
return n
}
func (m *ObservedDevice) ProtoSize() (n int) {
if m == nil {
return 0
}
var l int
_ = l
l = github_com_gogo_protobuf_types.SizeOfStdTime(m.Time)
n += 1 + l + sovStructs(uint64(l))
l = len(m.Name)
if l > 0 {
n += 1 + l + sovStructs(uint64(l))
}
l = len(m.Address)
if l > 0 {
n += 1 + l + sovStructs(uint64(l))
}
return n
}
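// sovStructs returns the number of bytes needed to encode x as a varint: one
// byte per started group of 7 significant bits, with a minimum of one byte
// (the x|1 guards against Len64(0) == 0). For example, sovStructs(300) == 2
// because 300 needs 9 bits.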
func sovStructs(x uint64) (n int) {
return (math_bits.Len64(x|1) + 6) / 7
}
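// sozStructs returns the varint-encoded size of x after zigzag mapping
// ((x<<1) ^ (x>>63) on the signed interpretation), which keeps small negative
// values small; this is the encoding protobuf uses for sint fields.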
func sozStructs(x uint64) (n int) {
return sovStructs(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
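// Decoding note for the Unmarshal methods below (explanatory sketch): each
// loop iteration reads a varint key, splits it into fieldNum (key>>3) and
// wireType (key&0x7), then decodes the field; unknown field numbers are
// skipped via skipStructs, with checks against negative or overflowing
// lengths before advancing iNdEx.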
func (m *FileVersion) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowStructs
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: FileVersion: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: FileVersion: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowStructs
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthStructs
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthStructs
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := m.Version.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 2:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Deleted", wireType)
}
var v int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowStructs
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
v |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
m.Deleted = bool(v != 0)
case 3:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Devices", wireType)
}
var byteLen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowStructs
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
byteLen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if byteLen < 0 {
return ErrInvalidLengthStructs
}
postIndex := iNdEx + byteLen
if postIndex < 0 {
return ErrInvalidLengthStructs
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Devices = append(m.Devices, make([]byte, postIndex-iNdEx))
copy(m.Devices[len(m.Devices)-1], dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 4:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field InvalidDevices", wireType)
}
var byteLen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowStructs
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
byteLen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if byteLen < 0 {
return ErrInvalidLengthStructs
}
postIndex := iNdEx + byteLen
if postIndex < 0 {
return ErrInvalidLengthStructs
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.InvalidDevices = append(m.InvalidDevices, make([]byte, postIndex-iNdEx))
copy(m.InvalidDevices[len(m.InvalidDevices)-1], dAtA[iNdEx:postIndex])
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipStructs(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthStructs
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *VersionList) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowStructs
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: VersionList: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: VersionList: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field RawVersions", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowStructs
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthStructs
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthStructs
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.RawVersions = append(m.RawVersions, FileVersion{})
if err := m.RawVersions[len(m.RawVersions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipStructs(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthStructs
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *FileInfoTruncated) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowStructs
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: FileInfoTruncated: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: FileInfoTruncated: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowStructs
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthStructs
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthStructs
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Name = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 2:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
}
m.Type = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowStructs
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.Type |= protocol.FileInfoType(b&0x7F) << shift
if b < 0x80 {
break
}
}
case 3:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Size", wireType)
}
m.Size = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowStructs
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.Size |= int64(b&0x7F) << shift
if b < 0x80 {
break
}
}
case 4:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Permissions", wireType)
}
m.Permissions = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowStructs
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.Permissions |= uint32(b&0x7F) << shift
if b < 0x80 {
break
}
}
case 5:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field ModifiedS", wireType)
}
m.ModifiedS = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowStructs
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.ModifiedS |= int64(b&0x7F) << shift
if b < 0x80 {
break
}
}
case 6:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Deleted", wireType)
}
var v int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowStructs
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
v |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
m.Deleted = bool(v != 0)
case 7:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field RawInvalid", wireType)
}
var v int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowStructs
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
v |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
m.RawInvalid = bool(v != 0)
case 8:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field NoPermissions", wireType)
}
var v int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowStructs
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
v |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
m.NoPermissions = bool(v != 0)
case 9:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowStructs
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthStructs
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthStructs
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := m.Version.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 10:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Sequence", wireType)
}
m.Sequence = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowStructs
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.Sequence |= int64(b&0x7F) << shift
if b < 0x80 {
break
}
}
case 11:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field ModifiedNs", wireType)
}
m.ModifiedNs = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowStructs
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.ModifiedNs |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
case 12:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field ModifiedBy", wireType)
}
m.ModifiedBy = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowStructs
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.ModifiedBy |= github_com_syncthing_syncthing_lib_protocol.ShortID(b&0x7F) << shift
if b < 0x80 {
break
}
}
case 13:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field RawBlockSize", wireType)
}
m.RawBlockSize = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowStructs
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.RawBlockSize |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
case 14:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Platform", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowStructs
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthStructs
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthStructs
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := m.Platform.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 17:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field SymlinkTarget", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowStructs
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthStructs
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthStructs
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.SymlinkTarget = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 18:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field BlocksHash", wireType)
}
var byteLen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowStructs
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
byteLen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if byteLen < 0 {
return ErrInvalidLengthStructs
}
postIndex := iNdEx + byteLen
if postIndex < 0 {
return ErrInvalidLengthStructs
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.BlocksHash = append(m.BlocksHash[:0], dAtA[iNdEx:postIndex]...)
if m.BlocksHash == nil {
m.BlocksHash = []byte{}
}
iNdEx = postIndex
case 19:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Encrypted", wireType)
}
var byteLen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowStructs
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
byteLen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if byteLen < 0 {
return ErrInvalidLengthStructs
}
postIndex := iNdEx + byteLen
if postIndex < 0 {
return ErrInvalidLengthStructs
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Encrypted = append(m.Encrypted[:0], dAtA[iNdEx:postIndex]...)
if m.Encrypted == nil {
m.Encrypted = []byte{}
}
iNdEx = postIndex
case 1000:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field LocalFlags", wireType)
}
m.LocalFlags = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowStructs
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.LocalFlags |= uint32(b&0x7F) << shift
if b < 0x80 {
break
}
}
case 1001:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field VersionHash", wireType)
}
var byteLen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowStructs
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
byteLen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if byteLen < 0 {
return ErrInvalidLengthStructs
}
postIndex := iNdEx + byteLen
if postIndex < 0 {
return ErrInvalidLengthStructs
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.VersionHash = append(m.VersionHash[:0], dAtA[iNdEx:postIndex]...)
if m.VersionHash == nil {
m.VersionHash = []byte{}
}
iNdEx = postIndex
case 1002:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field InodeChangeNs", wireType)
}
m.InodeChangeNs = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowStructs
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.InodeChangeNs |= int64(b&0x7F) << shift
if b < 0x80 {
break
}
}
default:
iNdEx = preIndex
skippy, err := skipStructs(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthStructs
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *BlockList) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowStructs
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: BlockList: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: BlockList: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Blocks", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowStructs
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthStructs
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthStructs
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Blocks = append(m.Blocks, protocol.BlockInfo{})
if err := m.Blocks[len(m.Blocks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipStructs(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthStructs
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
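// Unmarshal decodes an IndirectionHashesOnly message from protobuf wire
// format, reading only the BlocksHash (field 18) and VersionHash (field 1001)
// byte slices and skipping every other field.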
func (m *IndirectionHashesOnly) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowStructs
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: IndirectionHashesOnly: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: IndirectionHashesOnly: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 18:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field BlocksHash", wireType)
}
var byteLen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowStructs
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
byteLen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if byteLen < 0 {
return ErrInvalidLengthStructs
}
postIndex := iNdEx + byteLen
if postIndex < 0 {
return ErrInvalidLengthStructs
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.BlocksHash = append(m.BlocksHash[:0], dAtA[iNdEx:postIndex]...)
if m.BlocksHash == nil {
m.BlocksHash = []byte{}
}
iNdEx = postIndex
case 1001:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field VersionHash", wireType)
}
var byteLen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowStructs
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
byteLen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if byteLen < 0 {
return ErrInvalidLengthStructs
}
postIndex := iNdEx + byteLen
if postIndex < 0 {
return ErrInvalidLengthStructs
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.VersionHash = append(m.VersionHash[:0], dAtA[iNdEx:postIndex]...)
if m.VersionHash == nil {
m.VersionHash = []byte{}
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipStructs(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthStructs
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
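// Unmarshal decodes a Counts message (file, directory, symlink, deleted and
// byte counters, sequence number, device ID and local flags) from protobuf
// wire format, skipping unknown fields.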
func (m *Counts) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowStructs
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: Counts: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: Counts: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Files", wireType)
}
m.Files = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowStructs
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.Files |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
case 2:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Directories", wireType)
}
m.Directories = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowStructs
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.Directories |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
case 3:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Symlinks", wireType)
}
m.Symlinks = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowStructs
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.Symlinks |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
case 4:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Deleted", wireType)
}
m.Deleted = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowStructs
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.Deleted |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
case 5:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Bytes", wireType)
}
m.Bytes = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowStructs
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.Bytes |= int64(b&0x7F) << shift
if b < 0x80 {
break
}
}
case 6:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Sequence", wireType)
}
m.Sequence = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowStructs
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.Sequence |= int64(b&0x7F) << shift
if b < 0x80 {
break
}
}
case 17:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field DeviceID", wireType)
}
var byteLen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowStructs
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
byteLen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if byteLen < 0 {
return ErrInvalidLengthStructs
}
postIndex := iNdEx + byteLen
if postIndex < 0 {
return ErrInvalidLengthStructs
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.DeviceID = append(m.DeviceID[:0], dAtA[iNdEx:postIndex]...)
if m.DeviceID == nil {
m.DeviceID = []byte{}
}
iNdEx = postIndex
case 18:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field LocalFlags", wireType)
}
m.LocalFlags = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowStructs
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.LocalFlags |= uint32(b&0x7F) << shift
if b < 0x80 {
break
}
}
default:
iNdEx = preIndex
skippy, err := skipStructs(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthStructs
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *CountsSet) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowStructs
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: CountsSet: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: CountsSet: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Counts", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowStructs
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthStructs
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthStructs
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Counts = append(m.Counts, Counts{})
if err := m.Counts[len(m.Counts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 2:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Created", wireType)
}
m.Created = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowStructs
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.Created |= int64(b&0x7F) << shift
if b < 0x80 {
break
}
}
default:
iNdEx = preIndex
skippy, err := skipStructs(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthStructs
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *FileVersionDeprecated) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowStructs
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: FileVersionDeprecated: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: FileVersionDeprecated: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowStructs
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthStructs
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthStructs
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := m.Version.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Device", wireType)
}
var byteLen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowStructs
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
byteLen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if byteLen < 0 {
return ErrInvalidLengthStructs
}
postIndex := iNdEx + byteLen
if postIndex < 0 {
return ErrInvalidLengthStructs
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Device = append(m.Device[:0], dAtA[iNdEx:postIndex]...)
if m.Device == nil {
m.Device = []byte{}
}
iNdEx = postIndex
case 3:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Invalid", wireType)
}
var v int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowStructs
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
v |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
m.Invalid = bool(v != 0)
case 4:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Deleted", wireType)
}
var v int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowStructs
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
v |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
m.Deleted = bool(v != 0)
default:
iNdEx = preIndex
skippy, err := skipStructs(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthStructs
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *VersionListDeprecated) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowStructs
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: VersionListDeprecated: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: VersionListDeprecated: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Versions", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowStructs
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthStructs
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthStructs
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Versions = append(m.Versions, FileVersionDeprecated{})
if err := m.Versions[len(m.Versions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipStructs(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthStructs
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *ObservedFolder) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowStructs
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: ObservedFolder: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: ObservedFolder: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Time", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowStructs
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthStructs
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthStructs
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.Time, dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Label", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowStructs
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthStructs
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthStructs
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Label = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 3:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field ReceiveEncrypted", wireType)
}
var v int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowStructs
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
v |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
m.ReceiveEncrypted = bool(v != 0)
case 4:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field RemoteEncrypted", wireType)
}
var v int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowStructs
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
v |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
m.RemoteEncrypted = bool(v != 0)
default:
iNdEx = preIndex
skippy, err := skipStructs(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthStructs
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *ObservedDevice) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowStructs
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: ObservedDevice: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: ObservedDevice: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Time", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowStructs
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthStructs
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthStructs
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.Time, dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowStructs
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthStructs
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthStructs
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Name = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 3:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Address", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowStructs
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthStructs
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthStructs
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Address = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipStructs(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthStructs
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
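// skipStructs consumes a single field of unknown tag at the start of dAtA,
// following the wire type (varint, fixed64, length-delimited, group or
// fixed32) and tracking nested group depth, and returns the number of bytes
// skipped or an error if the input is malformed.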
func skipStructs(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
depth := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowStructs
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
wireType := int(wire & 0x7)
switch wireType {
case 0:
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowStructs
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
iNdEx++
if dAtA[iNdEx-1] < 0x80 {
break
}
}
case 1:
iNdEx += 8
case 2:
var length int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowStructs
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
length |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if length < 0 {
return 0, ErrInvalidLengthStructs
}
iNdEx += length
case 3:
depth++
case 4:
if depth == 0 {
return 0, ErrUnexpectedEndOfGroupStructs
}
depth--
case 5:
iNdEx += 4
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
if iNdEx < 0 {
return 0, ErrInvalidLengthStructs
}
if depth == 0 {
return iNdEx, nil
}
}
return 0, io.ErrUnexpectedEOF
}
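// Sentinel errors returned by the generated unmarshalling and skipping code
// when the input buffer is malformed.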
var (
ErrInvalidLengthStructs = fmt.Errorf("proto: negative length found during unmarshaling")
ErrIntOverflowStructs = fmt.Errorf("proto: integer overflow")
ErrUnexpectedEndOfGroupStructs = fmt.Errorf("proto: unexpected end of group")
)