// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: lib/db/structs.proto

package db

import (
	fmt "fmt"
	_ "github.com/gogo/protobuf/gogoproto"
	proto "github.com/gogo/protobuf/proto"
	github_com_gogo_protobuf_types "github.com/gogo/protobuf/types"
	_ "github.com/golang/protobuf/ptypes/timestamp"
	github_com_syncthing_syncthing_lib_protocol "github.com/syncthing/syncthing/lib/protocol"
	protocol "github.com/syncthing/syncthing/lib/protocol"
	_ "github.com/syncthing/syncthing/proto/ext"
	io "io"
	math "math"
	math_bits "math/bits"
	time "time"
)

// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
var _ = time.Kitchen

// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package

type FileVersion struct {
	Version        protocol.Vector `protobuf:"bytes,1,opt,name=version,proto3" json:"version" xml:"version"`
	Deleted        bool            `protobuf:"varint,2,opt,name=deleted,proto3" json:"deleted" xml:"deleted"`
	Devices        [][]byte        `protobuf:"bytes,3,rep,name=devices,proto3" json:"devices" xml:"device"`
	InvalidDevices [][]byte        `protobuf:"bytes,4,rep,name=invalid_devices,json=invalidDevices,proto3" json:"invalidDevices" xml:"invalidDevice"`
}
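
// Illustrative note (an assumption, not part of the generated code): a
// FileVersion pairs a version vector with the raw IDs of the devices that
// announce that version, split into valid and invalid devices. A hypothetical
// value might be built like:
//
//	fv := FileVersion{
//		Version: v,             // a protocol.Vector
//		Devices: [][]byte{id},  // raw device ID bytes
//	}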

func (m *FileVersion) Reset()         { *m = FileVersion{} }
func (m *FileVersion) String() string { return proto.CompactTextString(m) }
func (*FileVersion) ProtoMessage()    {}
func (*FileVersion) Descriptor() ([]byte, []int) {
	return fileDescriptor_5465d80e8cba02e3, []int{0}
}
func (m *FileVersion) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *FileVersion) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_FileVersion.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalToSizedBuffer(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (m *FileVersion) XXX_Merge(src proto.Message) {
	xxx_messageInfo_FileVersion.Merge(m, src)
}
func (m *FileVersion) XXX_Size() int {
	return m.ProtoSize()
}
func (m *FileVersion) XXX_DiscardUnknown() {
	xxx_messageInfo_FileVersion.DiscardUnknown(m)
}

var xxx_messageInfo_FileVersion proto.InternalMessageInfo

type VersionList struct {
	RawVersions []FileVersion `protobuf:"bytes,1,rep,name=versions,proto3" json:"versions" xml:"version"`
}

func (m *VersionList) Reset()      { *m = VersionList{} }
func (*VersionList) ProtoMessage() {}
func (*VersionList) Descriptor() ([]byte, []int) {
	return fileDescriptor_5465d80e8cba02e3, []int{1}
}
func (m *VersionList) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *VersionList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_VersionList.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalToSizedBuffer(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (m *VersionList) XXX_Merge(src proto.Message) {
	xxx_messageInfo_VersionList.Merge(m, src)
}
func (m *VersionList) XXX_Size() int {
	return m.ProtoSize()
}
func (m *VersionList) XXX_DiscardUnknown() {
	xxx_messageInfo_VersionList.DiscardUnknown(m)
}

var xxx_messageInfo_VersionList proto.InternalMessageInfo

// Must be the same as FileInfo but without the blocks field
type FileInfoTruncated struct {
	Name       string `protobuf:"bytes,1,opt,name=name,proto3" json:"name" xml:"name"`
	Size       int64  `protobuf:"varint,3,opt,name=size,proto3" json:"size" xml:"size"`
	ModifiedS  int64  `protobuf:"varint,5,opt,name=modified_s,json=modifiedS,proto3" json:"modifiedS" xml:"modifiedS"`
	ModifiedBy github_com_syncthing_syncthing_lib_protocol.ShortID `protobuf:"varint,12,opt,name=modified_by,json=modifiedBy,proto3,customtype=github.com/syncthing/syncthing/lib/protocol.ShortID" json:"modifiedBy" xml:"modifiedBy"`
	Version    protocol.Vector `protobuf:"bytes,9,opt,name=version,proto3" json:"version" xml:"version"`
	Sequence   int64           `protobuf:"varint,10,opt,name=sequence,proto3" json:"sequence" xml:"sequence"`
	// repeated BlockInfo Blocks = 16
	SymlinkTarget string                `protobuf:"bytes,17,opt,name=symlink_target,json=symlinkTarget,proto3" json:"symlinkTarget" xml:"symlinkTarget"`
	BlocksHash    []byte                `protobuf:"bytes,18,opt,name=blocks_hash,json=blocksHash,proto3" json:"blocksHash" xml:"blocksHash"`
	Encrypted     []byte                `protobuf:"bytes,19,opt,name=encrypted,proto3" json:"encrypted" xml:"encrypted"`
	Type          protocol.FileInfoType `protobuf:"varint,2,opt,name=type,proto3,enum=protocol.FileInfoType" json:"type" xml:"type"`
	Permissions   uint32                `protobuf:"varint,4,opt,name=permissions,proto3" json:"permissions" xml:"permissions"`
	ModifiedNs    int                   `protobuf:"varint,11,opt,name=modified_ns,json=modifiedNs,proto3,casttype=int" json:"modifiedNs" xml:"modifiedNs"`
	RawBlockSize  int                   `protobuf:"varint,13,opt,name=block_size,json=blockSize,proto3,casttype=int" json:"blockSize" xml:"blockSize"`
	// see bep.proto
	LocalFlags    uint32 `protobuf:"varint,1000,opt,name=local_flags,json=localFlags,proto3" json:"localFlags" xml:"localFlags"`
	VersionHash   []byte `protobuf:"bytes,1001,opt,name=version_hash,json=versionHash,proto3" json:"versionHash" xml:"versionHash"`
	Deleted       bool   `protobuf:"varint,6,opt,name=deleted,proto3" json:"deleted" xml:"deleted"`
	RawInvalid    bool   `protobuf:"varint,7,opt,name=invalid,proto3" json:"invalid" xml:"invalid"`
	NoPermissions bool   `protobuf:"varint,8,opt,name=no_permissions,json=noPermissions,proto3" json:"noPermissions" xml:"noPermissions"`
}

func (m *FileInfoTruncated) Reset()      { *m = FileInfoTruncated{} }
func (*FileInfoTruncated) ProtoMessage() {}
func (*FileInfoTruncated) Descriptor() ([]byte, []int) {
	return fileDescriptor_5465d80e8cba02e3, []int{2}
}
func (m *FileInfoTruncated) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *FileInfoTruncated) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_FileInfoTruncated.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalToSizedBuffer(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (m *FileInfoTruncated) XXX_Merge(src proto.Message) {
	xxx_messageInfo_FileInfoTruncated.Merge(m, src)
}
func (m *FileInfoTruncated) XXX_Size() int {
	return m.ProtoSize()
}
func (m *FileInfoTruncated) XXX_DiscardUnknown() {
	xxx_messageInfo_FileInfoTruncated.DiscardUnknown(m)
}
var xxx_messageInfo_FileInfoTruncated proto.InternalMessageInfo
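
// Illustrative usage sketch (assumed, not part of the generated code): since
// FileInfoTruncated mirrors the field numbers of protocol.FileInfo minus the
// blocks field, raw FileInfo bytes can be unmarshalled into it directly for a
// "truncated" get that skips decoding the potentially large block list:
//
//	var t FileInfoTruncated
//	if err := t.Unmarshal(fileInfoBytes); err != nil {
//		// handle decode error
//	}
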
// BlockList is the structure used to store block lists
type BlockList struct {
	Blocks []protocol.BlockInfo `protobuf:"bytes,1,rep,name=blocks,proto3" json:"blocks" xml:"block"`
}

func (m *BlockList) Reset()         { *m = BlockList{} }
func (m *BlockList) String() string { return proto.CompactTextString(m) }
func (*BlockList) ProtoMessage()    {}
func (*BlockList) Descriptor() ([]byte, []int) {
	return fileDescriptor_5465d80e8cba02e3, []int{3}
}
func (m *BlockList) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *BlockList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_BlockList.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalToSizedBuffer(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (m *BlockList) XXX_Merge(src proto.Message) {
	xxx_messageInfo_BlockList.Merge(m, src)
}
func (m *BlockList) XXX_Size() int {
	return m.ProtoSize()
}
func (m *BlockList) XXX_DiscardUnknown() {
	xxx_messageInfo_BlockList.DiscardUnknown(m)
}
var xxx_messageInfo_BlockList proto.InternalMessageInfo
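
// Illustrative sketch (assumed helper, not part of the generated code): block
// lists are stored indirected in the database, keyed by the SHA-256 of the
// marshalled BlockList, so identical lists shared by many FileInfos are kept
// only once; unused lists are removed by a periodic GC sweep. Deriving such a
// key could look like the following (assuming a crypto/sha256 import):
//
//	bs, err := proto.Marshal(bl) // bl is a *BlockList
//	if err != nil {
//		return err
//	}
//	key := sha256.Sum256(bs)
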
// IndirectionHashesOnly is used to only unmarshal the indirection hashes
// from a FileInfo
type IndirectionHashesOnly struct {
	BlocksHash  []byte `protobuf:"bytes,18,opt,name=blocks_hash,json=blocksHash,proto3" json:"blocksHash" xml:"blocksHash"`
	VersionHash []byte `protobuf:"bytes,1001,opt,name=version_hash,json=versionHash,proto3" json:"versionHash" xml:"versionHash"`
}
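
// Illustrative usage sketch (assumed, not part of the generated code): because
// IndirectionHashesOnly reuses the BlocksHash and VersionHash field numbers of
// FileInfo (18 and 1001), raw FileInfo bytes can be unmarshalled into it to
// recover just the indirection hashes while skipping the rest of the message:
//
//	var h IndirectionHashesOnly
//	if err := h.Unmarshal(fileInfoBytes); err != nil {
//		// handle decode error
//	}
//	blocksKey, versionKey := h.BlocksHash, h.VersionHash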
|
|
|
|
|
2020-05-13 12:28:42 +00:00
|
|
|
func (m *IndirectionHashesOnly) Reset() { *m = IndirectionHashesOnly{} }
|
|
|
|
func (m *IndirectionHashesOnly) String() string { return proto.CompactTextString(m) }
|
|
|
|
func (*IndirectionHashesOnly) ProtoMessage() {}
|
|
|
|
func (*IndirectionHashesOnly) Descriptor() ([]byte, []int) {
|
2020-10-02 06:07:05 +00:00
|
|
|
return fileDescriptor_5465d80e8cba02e3, []int{4}
|
lib/db: Deduplicate block lists in database (fixes #5898) (#6283)
* lib/db: Deduplicate block lists in database (fixes #5898)
This moves the block list in the database out from being just a field on
the FileInfo to being an object of its own. When putting a FileInfo we
marshal the block list separately and store it keyed by the sha256 of
the marshalled block list. When getting, if we are not doing a
"truncated" get, we do an extra read and unmarshal for the block list.
Old block lists are cleared out by a periodic GC sweep. The alternative
would be to use refcounting, but:
- There is a larger risk of getting that wrong and either dropping a
block list in error or keeping them around forever.
- It's tricky with our current database, as we don't have dirty reads.
This means that if we update two FileInfos with identical block lists in
the same transaction we can't just do read/modify/write for the ref
counters as we wouldn't see our own first update. See above about
tracking this and risks about getting it wrong.
GC uses a bloom filter for keys to avoid heavy RAM usage. GC can't run
concurrently with FileInfo updates so there is a new lock around those
operation at the lowlevel.
The end result is a much more compact database, especially for setups
with many peers where files get duplicated many times.
This is per-key-class stats for a large database I'm currently working
with, under the current schema:
```
0x00: 9138161 items, 870876 KB keys + 7397482 KB data, 95 B + 809 B avg, 1637651 B max
0x01: 185656 items, 10388 KB keys + 1790909 KB data, 55 B + 9646 B avg, 924525 B max
0x02: 916890 items, 84795 KB keys + 3667 KB data, 92 B + 4 B avg, 192 B max
0x03: 384 items, 27 KB keys + 5 KB data, 72 B + 15 B avg, 87 B max
0x04: 1109 items, 17 KB keys + 17 KB data, 15 B + 15 B avg, 69 B max
0x06: 383 items, 3 KB keys + 0 KB data, 9 B + 2 B avg, 18 B max
0x07: 510 items, 4 KB keys + 12 KB data, 9 B + 24 B avg, 41 B max
0x08: 1349 items, 12 KB keys + 10 KB data, 9 B + 8 B avg, 17 B max
0x09: 194 items, 0 KB keys + 123 KB data, 5 B + 634 B avg, 11484 B max
0x0a: 3 items, 0 KB keys + 0 KB data, 14 B + 7 B avg, 30 B max
0x0b: 181836 items, 2363 KB keys + 10694 KB data, 13 B + 58 B avg, 173 B max
Total 10426475 items, 968490 KB keys + 9202925 KB data.
```
Note 7.4 GB of data in class 00, total size 9.2 GB. After running the
migration we get this instead:
```
0x00: 9138161 items, 870876 KB keys + 2611392 KB data, 95 B + 285 B avg, 4788 B max
0x01: 185656 items, 10388 KB keys + 1790909 KB data, 55 B + 9646 B avg, 924525 B max
0x02: 916890 items, 84795 KB keys + 3667 KB data, 92 B + 4 B avg, 192 B max
0x03: 384 items, 27 KB keys + 5 KB data, 72 B + 15 B avg, 87 B max
0x04: 1109 items, 17 KB keys + 17 KB data, 15 B + 15 B avg, 69 B max
0x06: 383 items, 3 KB keys + 0 KB data, 9 B + 2 B avg, 18 B max
0x07: 510 items, 4 KB keys + 12 KB data, 9 B + 24 B avg, 41 B max
0x09: 194 items, 0 KB keys + 123 KB data, 5 B + 634 B avg, 11484 B max
0x0a: 3 items, 0 KB keys + 0 KB data, 14 B + 17 B avg, 51 B max
0x0b: 181836 items, 2363 KB keys + 10694 KB data, 13 B + 58 B avg, 173 B max
0x0d: 44282 items, 1461 KB keys + 61081 KB data, 33 B + 1379 B avg, 1637399 B max
Total 10469408 items, 969939 KB keys + 4477905 KB data.
```
Class 00 is now down to 2.6 GB, with just 61 MB added in class 0d.
There will be some additional reads in some cases which theoretically
hurts performance, but this will be more than compensated for by smaller
writes and better compaction.
On my own home setup which just has three devices and a handful of
folders the difference is smaller in absolute numbers of course, but
still less than half the old size:
```
0x00: 297122 items, 20894 KB keys + 306860 KB data, 70 B + 1032 B avg, 103237 B max
0x01: 115299 items, 7738 KB keys + 17542 KB data, 67 B + 152 B avg, 419 B max
0x02: 1430537 items, 121223 KB keys + 5722 KB data, 84 B + 4 B avg, 253 B max
...
Total 1947412 items, 151268 KB keys + 337485 KB data.
```
to:
```
0x00: 297122 items, 20894 KB keys + 37038 KB data, 70 B + 124 B avg, 520 B max
0x01: 115299 items, 7738 KB keys + 17542 KB data, 67 B + 152 B avg, 419 B max
0x02: 1430537 items, 121223 KB keys + 5722 KB data, 84 B + 4 B avg, 253 B max
...
0x0d: 18041 items, 595 KB keys + 71964 KB data, 33 B + 3988 B avg, 101109 B max
Total 1965447 items, 151863 KB keys + 139628 KB data.
```
* wip
* wip
* wip
* wip
2020-01-24 07:35:44 +00:00
|
|
|
}
|
2020-05-13 12:28:42 +00:00
|
|
|
func (m *IndirectionHashesOnly) XXX_Unmarshal(b []byte) error {
|
lib/db: Deduplicate block lists in database (fixes #5898) (#6283)
* lib/db: Deduplicate block lists in database (fixes #5898)
This moves the block list in the database out from being just a field on
the FileInfo to being an object of its own. When putting a FileInfo we
marshal the block list separately and store it keyed by the sha256 of
the marshalled block list. When getting, if we are not doing a
"truncated" get, we do an extra read and unmarshal for the block list.
Old block lists are cleared out by a periodic GC sweep. The alternative
would be to use refcounting, but:
- There is a larger risk of getting that wrong and either dropping a
block list in error or keeping them around forever.
- It's tricky with our current database, as we don't have dirty reads.
This means that if we update two FileInfos with identical block lists in
the same transaction we can't just do read/modify/write for the ref
counters as we wouldn't see our own first update. See above about
tracking this and risks about getting it wrong.
GC uses a bloom filter for keys to avoid heavy RAM usage. GC can't run
concurrently with FileInfo updates so there is a new lock around those
operation at the lowlevel.
The end result is a much more compact database, especially for setups
with many peers where files get duplicated many times.
This is per-key-class stats for a large database I'm currently working
with, under the current schema:
```
0x00: 9138161 items, 870876 KB keys + 7397482 KB data, 95 B + 809 B avg, 1637651 B max
0x01: 185656 items, 10388 KB keys + 1790909 KB data, 55 B + 9646 B avg, 924525 B max
0x02: 916890 items, 84795 KB keys + 3667 KB data, 92 B + 4 B avg, 192 B max
0x03: 384 items, 27 KB keys + 5 KB data, 72 B + 15 B avg, 87 B max
0x04: 1109 items, 17 KB keys + 17 KB data, 15 B + 15 B avg, 69 B max
0x06: 383 items, 3 KB keys + 0 KB data, 9 B + 2 B avg, 18 B max
0x07: 510 items, 4 KB keys + 12 KB data, 9 B + 24 B avg, 41 B max
0x08: 1349 items, 12 KB keys + 10 KB data, 9 B + 8 B avg, 17 B max
0x09: 194 items, 0 KB keys + 123 KB data, 5 B + 634 B avg, 11484 B max
0x0a: 3 items, 0 KB keys + 0 KB data, 14 B + 7 B avg, 30 B max
0x0b: 181836 items, 2363 KB keys + 10694 KB data, 13 B + 58 B avg, 173 B max
Total 10426475 items, 968490 KB keys + 9202925 KB data.
```
Note 7.4 GB of data in class 00, total size 9.2 GB. After running the
migration we get this instead:
```
0x00: 9138161 items, 870876 KB keys + 2611392 KB data, 95 B + 285 B avg, 4788 B max
0x01: 185656 items, 10388 KB keys + 1790909 KB data, 55 B + 9646 B avg, 924525 B max
0x02: 916890 items, 84795 KB keys + 3667 KB data, 92 B + 4 B avg, 192 B max
0x03: 384 items, 27 KB keys + 5 KB data, 72 B + 15 B avg, 87 B max
0x04: 1109 items, 17 KB keys + 17 KB data, 15 B + 15 B avg, 69 B max
0x06: 383 items, 3 KB keys + 0 KB data, 9 B + 2 B avg, 18 B max
0x07: 510 items, 4 KB keys + 12 KB data, 9 B + 24 B avg, 41 B max
0x09: 194 items, 0 KB keys + 123 KB data, 5 B + 634 B avg, 11484 B max
0x0a: 3 items, 0 KB keys + 0 KB data, 14 B + 17 B avg, 51 B max
0x0b: 181836 items, 2363 KB keys + 10694 KB data, 13 B + 58 B avg, 173 B max
0x0d: 44282 items, 1461 KB keys + 61081 KB data, 33 B + 1379 B avg, 1637399 B max
Total 10469408 items, 969939 KB keys + 4477905 KB data.
```
Class 00 is now down to 2.6 GB, with just 61 MB added in class 0d.
There will be some additional reads in some cases which theoretically
hurts performance, but this will be more than compensated for by smaller
writes and better compaction.
On my own home setup which just has three devices and a handful of
folders the difference is smaller in absolute numbers of course, but
still less than half the old size:
```
0x00: 297122 items, 20894 KB keys + 306860 KB data, 70 B + 1032 B avg, 103237 B max
0x01: 115299 items, 7738 KB keys + 17542 KB data, 67 B + 152 B avg, 419 B max
0x02: 1430537 items, 121223 KB keys + 5722 KB data, 84 B + 4 B avg, 253 B max
...
Total 1947412 items, 151268 KB keys + 337485 KB data.
```
to:
```
0x00: 297122 items, 20894 KB keys + 37038 KB data, 70 B + 124 B avg, 520 B max
0x01: 115299 items, 7738 KB keys + 17542 KB data, 67 B + 152 B avg, 419 B max
0x02: 1430537 items, 121223 KB keys + 5722 KB data, 84 B + 4 B avg, 253 B max
...
0x0d: 18041 items, 595 KB keys + 71964 KB data, 33 B + 3988 B avg, 101109 B max
Total 1965447 items, 151863 KB keys + 139628 KB data.
```
* wip
* wip
* wip
* wip
2020-01-24 07:35:44 +00:00
|
|
|
return m.Unmarshal(b)
|
|
|
|
}
|
2020-05-13 12:28:42 +00:00
|
|
|
func (m *IndirectionHashesOnly) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
lib/db: Deduplicate block lists in database (fixes #5898) (#6283)
* lib/db: Deduplicate block lists in database (fixes #5898)
This moves the block list in the database out from being just a field on
the FileInfo to being an object of its own. When putting a FileInfo we
marshal the block list separately and store it keyed by the sha256 of
the marshalled block list. When getting, if we are not doing a
"truncated" get, we do an extra read and unmarshal for the block list.
Old block lists are cleared out by a periodic GC sweep. The alternative
would be to use refcounting, but:
- There is a larger risk of getting that wrong and either dropping a
block list in error or keeping them around forever.
- It's tricky with our current database, as we don't have dirty reads.
This means that if we update two FileInfos with identical block lists in
the same transaction we can't just do read/modify/write for the ref
counters as we wouldn't see our own first update. See above about
tracking this and risks about getting it wrong.
GC uses a bloom filter for keys to avoid heavy RAM usage. GC can't run
concurrently with FileInfo updates so there is a new lock around those
operation at the lowlevel.
The end result is a much more compact database, especially for setups
with many peers where files get duplicated many times.
This is per-key-class stats for a large database I'm currently working
with, under the current schema:
```
0x00: 9138161 items, 870876 KB keys + 7397482 KB data, 95 B + 809 B avg, 1637651 B max
0x01: 185656 items, 10388 KB keys + 1790909 KB data, 55 B + 9646 B avg, 924525 B max
0x02: 916890 items, 84795 KB keys + 3667 KB data, 92 B + 4 B avg, 192 B max
0x03: 384 items, 27 KB keys + 5 KB data, 72 B + 15 B avg, 87 B max
0x04: 1109 items, 17 KB keys + 17 KB data, 15 B + 15 B avg, 69 B max
0x06: 383 items, 3 KB keys + 0 KB data, 9 B + 2 B avg, 18 B max
0x07: 510 items, 4 KB keys + 12 KB data, 9 B + 24 B avg, 41 B max
0x08: 1349 items, 12 KB keys + 10 KB data, 9 B + 8 B avg, 17 B max
0x09: 194 items, 0 KB keys + 123 KB data, 5 B + 634 B avg, 11484 B max
0x0a: 3 items, 0 KB keys + 0 KB data, 14 B + 7 B avg, 30 B max
0x0b: 181836 items, 2363 KB keys + 10694 KB data, 13 B + 58 B avg, 173 B max
Total 10426475 items, 968490 KB keys + 9202925 KB data.
```
Note 7.4 GB of data in class 00, total size 9.2 GB. After running the
migration we get this instead:
```
0x00: 9138161 items, 870876 KB keys + 2611392 KB data, 95 B + 285 B avg, 4788 B max
0x01: 185656 items, 10388 KB keys + 1790909 KB data, 55 B + 9646 B avg, 924525 B max
0x02: 916890 items, 84795 KB keys + 3667 KB data, 92 B + 4 B avg, 192 B max
0x03: 384 items, 27 KB keys + 5 KB data, 72 B + 15 B avg, 87 B max
0x04: 1109 items, 17 KB keys + 17 KB data, 15 B + 15 B avg, 69 B max
0x06: 383 items, 3 KB keys + 0 KB data, 9 B + 2 B avg, 18 B max
0x07: 510 items, 4 KB keys + 12 KB data, 9 B + 24 B avg, 41 B max
0x09: 194 items, 0 KB keys + 123 KB data, 5 B + 634 B avg, 11484 B max
0x0a: 3 items, 0 KB keys + 0 KB data, 14 B + 17 B avg, 51 B max
0x0b: 181836 items, 2363 KB keys + 10694 KB data, 13 B + 58 B avg, 173 B max
0x0d: 44282 items, 1461 KB keys + 61081 KB data, 33 B + 1379 B avg, 1637399 B max
Total 10469408 items, 969939 KB keys + 4477905 KB data.
```
Class 00 is now down to 2.6 GB, with just 61 MB added in class 0d.
There will be some additional reads in some cases which theoretically
hurts performance, but this will be more than compensated for by smaller
writes and better compaction.
On my own home setup which just has three devices and a handful of
folders the difference is smaller in absolute numbers of course, but
still less than half the old size:
```
0x00: 297122 items, 20894 KB keys + 306860 KB data, 70 B + 1032 B avg, 103237 B max
0x01: 115299 items, 7738 KB keys + 17542 KB data, 67 B + 152 B avg, 419 B max
0x02: 1430537 items, 121223 KB keys + 5722 KB data, 84 B + 4 B avg, 253 B max
...
Total 1947412 items, 151268 KB keys + 337485 KB data.
```
to:
```
0x00: 297122 items, 20894 KB keys + 37038 KB data, 70 B + 124 B avg, 520 B max
0x01: 115299 items, 7738 KB keys + 17542 KB data, 67 B + 152 B avg, 419 B max
0x02: 1430537 items, 121223 KB keys + 5722 KB data, 84 B + 4 B avg, 253 B max
...
0x0d: 18041 items, 595 KB keys + 71964 KB data, 33 B + 3988 B avg, 101109 B max
Total 1965447 items, 151863 KB keys + 139628 KB data.
```
* wip
* wip
* wip
* wip
2020-01-24 07:35:44 +00:00
|
|
|
if deterministic {
|
2020-05-13 12:28:42 +00:00
|
|
|
return xxx_messageInfo_IndirectionHashesOnly.Marshal(b, m, deterministic)
|
lib/db: Deduplicate block lists in database (fixes #5898) (#6283)
* lib/db: Deduplicate block lists in database (fixes #5898)
This moves the block list in the database out from being just a field on
the FileInfo to being an object of its own. When putting a FileInfo we
marshal the block list separately and store it keyed by the sha256 of
the marshalled block list. When getting, if we are not doing a
"truncated" get, we do an extra read and unmarshal for the block list.
Old block lists are cleared out by a periodic GC sweep. The alternative
would be to use refcounting, but:
- There is a larger risk of getting that wrong and either dropping a
block list in error or keeping them around forever.
- It's tricky with our current database, as we don't have dirty reads.
This means that if we update two FileInfos with identical block lists in
the same transaction we can't just do read/modify/write for the ref
counters as we wouldn't see our own first update. See above about
tracking this and risks about getting it wrong.
GC uses a bloom filter for keys to avoid heavy RAM usage. GC can't run
concurrently with FileInfo updates so there is a new lock around those
operation at the lowlevel.
The end result is a much more compact database, especially for setups
with many peers where files get duplicated many times.
This is per-key-class stats for a large database I'm currently working
with, under the current schema:
```
0x00: 9138161 items, 870876 KB keys + 7397482 KB data, 95 B + 809 B avg, 1637651 B max
0x01: 185656 items, 10388 KB keys + 1790909 KB data, 55 B + 9646 B avg, 924525 B max
0x02: 916890 items, 84795 KB keys + 3667 KB data, 92 B + 4 B avg, 192 B max
0x03: 384 items, 27 KB keys + 5 KB data, 72 B + 15 B avg, 87 B max
0x04: 1109 items, 17 KB keys + 17 KB data, 15 B + 15 B avg, 69 B max
0x06: 383 items, 3 KB keys + 0 KB data, 9 B + 2 B avg, 18 B max
0x07: 510 items, 4 KB keys + 12 KB data, 9 B + 24 B avg, 41 B max
0x08: 1349 items, 12 KB keys + 10 KB data, 9 B + 8 B avg, 17 B max
0x09: 194 items, 0 KB keys + 123 KB data, 5 B + 634 B avg, 11484 B max
0x0a: 3 items, 0 KB keys + 0 KB data, 14 B + 7 B avg, 30 B max
0x0b: 181836 items, 2363 KB keys + 10694 KB data, 13 B + 58 B avg, 173 B max
Total 10426475 items, 968490 KB keys + 9202925 KB data.
```
Note 7.4 GB of data in class 00, total size 9.2 GB. After running the
migration we get this instead:
```
0x00: 9138161 items, 870876 KB keys + 2611392 KB data, 95 B + 285 B avg, 4788 B max
0x01: 185656 items, 10388 KB keys + 1790909 KB data, 55 B + 9646 B avg, 924525 B max
0x02: 916890 items, 84795 KB keys + 3667 KB data, 92 B + 4 B avg, 192 B max
0x03: 384 items, 27 KB keys + 5 KB data, 72 B + 15 B avg, 87 B max
0x04: 1109 items, 17 KB keys + 17 KB data, 15 B + 15 B avg, 69 B max
0x06: 383 items, 3 KB keys + 0 KB data, 9 B + 2 B avg, 18 B max
0x07: 510 items, 4 KB keys + 12 KB data, 9 B + 24 B avg, 41 B max
0x09: 194 items, 0 KB keys + 123 KB data, 5 B + 634 B avg, 11484 B max
0x0a: 3 items, 0 KB keys + 0 KB data, 14 B + 17 B avg, 51 B max
0x0b: 181836 items, 2363 KB keys + 10694 KB data, 13 B + 58 B avg, 173 B max
0x0d: 44282 items, 1461 KB keys + 61081 KB data, 33 B + 1379 B avg, 1637399 B max
Total 10469408 items, 969939 KB keys + 4477905 KB data.
```
Class 00 is now down to 2.6 GB, with just 61 MB added in class 0d.
There will be some additional reads in some cases which theoretically
hurts performance, but this will be more than compensated for by smaller
writes and better compaction.
On my own home setup which just has three devices and a handful of
folders the difference is smaller in absolute numbers of course, but
still less than half the old size:
```
0x00: 297122 items, 20894 KB keys + 306860 KB data, 70 B + 1032 B avg, 103237 B max
0x01: 115299 items, 7738 KB keys + 17542 KB data, 67 B + 152 B avg, 419 B max
0x02: 1430537 items, 121223 KB keys + 5722 KB data, 84 B + 4 B avg, 253 B max
...
Total 1947412 items, 151268 KB keys + 337485 KB data.
```
to:
```
0x00: 297122 items, 20894 KB keys + 37038 KB data, 70 B + 124 B avg, 520 B max
0x01: 115299 items, 7738 KB keys + 17542 KB data, 67 B + 152 B avg, 419 B max
0x02: 1430537 items, 121223 KB keys + 5722 KB data, 84 B + 4 B avg, 253 B max
...
0x0d: 18041 items, 595 KB keys + 71964 KB data, 33 B + 3988 B avg, 101109 B max
Total 1965447 items, 151863 KB keys + 139628 KB data.
```
* wip
* wip
* wip
* wip
2020-01-24 07:35:44 +00:00
|
|
|
} else {
|
|
|
|
b = b[:cap(b)]
|
|
|
|
n, err := m.MarshalToSizedBuffer(b)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
return b[:n], nil
|
|
|
|
}
|
|
|
|
}
|
2020-05-13 12:28:42 +00:00
|
|
|
func (m *IndirectionHashesOnly) XXX_Merge(src proto.Message) {
|
|
|
|
xxx_messageInfo_IndirectionHashesOnly.Merge(m, src)
|
lib/db: Deduplicate block lists in database (fixes #5898) (#6283)
* lib/db: Deduplicate block lists in database (fixes #5898)
This moves the block list in the database out from being just a field on
the FileInfo to being an object of its own. When putting a FileInfo we
marshal the block list separately and store it keyed by the sha256 of
the marshalled block list. When getting, if we are not doing a
"truncated" get, we do an extra read and unmarshal for the block list.
Old block lists are cleared out by a periodic GC sweep. The alternative
would be to use refcounting, but:
- There is a larger risk of getting that wrong and either dropping a
block list in error or keeping them around forever.
- It's tricky with our current database, as we don't have dirty reads.
This means that if we update two FileInfos with identical block lists in
the same transaction we can't just do read/modify/write for the ref
counters as we wouldn't see our own first update. See above about
tracking this and risks about getting it wrong.
GC uses a bloom filter for keys to avoid heavy RAM usage. GC can't run
concurrently with FileInfo updates so there is a new lock around those
operation at the lowlevel.
The end result is a much more compact database, especially for setups
with many peers where files get duplicated many times.
This is per-key-class stats for a large database I'm currently working
with, under the current schema:
```
0x00: 9138161 items, 870876 KB keys + 7397482 KB data, 95 B + 809 B avg, 1637651 B max
0x01: 185656 items, 10388 KB keys + 1790909 KB data, 55 B + 9646 B avg, 924525 B max
0x02: 916890 items, 84795 KB keys + 3667 KB data, 92 B + 4 B avg, 192 B max
0x03: 384 items, 27 KB keys + 5 KB data, 72 B + 15 B avg, 87 B max
0x04: 1109 items, 17 KB keys + 17 KB data, 15 B + 15 B avg, 69 B max
0x06: 383 items, 3 KB keys + 0 KB data, 9 B + 2 B avg, 18 B max
0x07: 510 items, 4 KB keys + 12 KB data, 9 B + 24 B avg, 41 B max
0x08: 1349 items, 12 KB keys + 10 KB data, 9 B + 8 B avg, 17 B max
0x09: 194 items, 0 KB keys + 123 KB data, 5 B + 634 B avg, 11484 B max
0x0a: 3 items, 0 KB keys + 0 KB data, 14 B + 7 B avg, 30 B max
0x0b: 181836 items, 2363 KB keys + 10694 KB data, 13 B + 58 B avg, 173 B max
Total 10426475 items, 968490 KB keys + 9202925 KB data.
```
Note 7.4 GB of data in class 00, total size 9.2 GB. After running the
migration we get this instead:
```
0x00: 9138161 items, 870876 KB keys + 2611392 KB data, 95 B + 285 B avg, 4788 B max
0x01: 185656 items, 10388 KB keys + 1790909 KB data, 55 B + 9646 B avg, 924525 B max
0x02: 916890 items, 84795 KB keys + 3667 KB data, 92 B + 4 B avg, 192 B max
0x03: 384 items, 27 KB keys + 5 KB data, 72 B + 15 B avg, 87 B max
0x04: 1109 items, 17 KB keys + 17 KB data, 15 B + 15 B avg, 69 B max
0x06: 383 items, 3 KB keys + 0 KB data, 9 B + 2 B avg, 18 B max
0x07: 510 items, 4 KB keys + 12 KB data, 9 B + 24 B avg, 41 B max
0x09: 194 items, 0 KB keys + 123 KB data, 5 B + 634 B avg, 11484 B max
0x0a: 3 items, 0 KB keys + 0 KB data, 14 B + 17 B avg, 51 B max
0x0b: 181836 items, 2363 KB keys + 10694 KB data, 13 B + 58 B avg, 173 B max
0x0d: 44282 items, 1461 KB keys + 61081 KB data, 33 B + 1379 B avg, 1637399 B max
Total 10469408 items, 969939 KB keys + 4477905 KB data.
```
Class 00 is now down to 2.6 GB, with just 61 MB added in class 0d.
There will be some additional reads in some cases which theoretically
hurts performance, but this will be more than compensated for by smaller
writes and better compaction.
On my own home setup which just has three devices and a handful of
folders the difference is smaller in absolute numbers of course, but
still less than half the old size:
```
0x00: 297122 items, 20894 KB keys + 306860 KB data, 70 B + 1032 B avg, 103237 B max
0x01: 115299 items, 7738 KB keys + 17542 KB data, 67 B + 152 B avg, 419 B max
0x02: 1430537 items, 121223 KB keys + 5722 KB data, 84 B + 4 B avg, 253 B max
...
Total 1947412 items, 151268 KB keys + 337485 KB data.
```
to:
```
0x00: 297122 items, 20894 KB keys + 37038 KB data, 70 B + 124 B avg, 520 B max
0x01: 115299 items, 7738 KB keys + 17542 KB data, 67 B + 152 B avg, 419 B max
0x02: 1430537 items, 121223 KB keys + 5722 KB data, 84 B + 4 B avg, 253 B max
...
0x0d: 18041 items, 595 KB keys + 71964 KB data, 33 B + 3988 B avg, 101109 B max
Total 1965447 items, 151863 KB keys + 139628 KB data.
```
* wip
* wip
* wip
* wip
2020-01-24 07:35:44 +00:00
|
|
|
}
|
2020-05-13 12:28:42 +00:00
|
|
|
func (m *IndirectionHashesOnly) XXX_Size() int {
|
lib/db: Deduplicate block lists in database (fixes #5898) (#6283)
* lib/db: Deduplicate block lists in database (fixes #5898)
This moves the block list in the database out from being just a field on
the FileInfo to being an object of its own. When putting a FileInfo we
marshal the block list separately and store it keyed by the sha256 of
the marshalled block list. When getting, if we are not doing a
"truncated" get, we do an extra read and unmarshal for the block list.
Old block lists are cleared out by a periodic GC sweep. The alternative
would be to use refcounting, but:
- There is a larger risk of getting that wrong and either dropping a
block list in error or keeping them around forever.
- It's tricky with our current database, as we don't have dirty reads.
This means that if we update two FileInfos with identical block lists in
the same transaction we can't just do read/modify/write for the ref
counters as we wouldn't see our own first update. See above about
tracking this and risks about getting it wrong.
GC uses a bloom filter for keys to avoid heavy RAM usage. GC can't run
concurrently with FileInfo updates so there is a new lock around those
operation at the lowlevel.
The end result is a much more compact database, especially for setups
with many peers where files get duplicated many times.
This is per-key-class stats for a large database I'm currently working
with, under the current schema:
```
0x00: 9138161 items, 870876 KB keys + 7397482 KB data, 95 B + 809 B avg, 1637651 B max
0x01: 185656 items, 10388 KB keys + 1790909 KB data, 55 B + 9646 B avg, 924525 B max
0x02: 916890 items, 84795 KB keys + 3667 KB data, 92 B + 4 B avg, 192 B max
0x03: 384 items, 27 KB keys + 5 KB data, 72 B + 15 B avg, 87 B max
0x04: 1109 items, 17 KB keys + 17 KB data, 15 B + 15 B avg, 69 B max
0x06: 383 items, 3 KB keys + 0 KB data, 9 B + 2 B avg, 18 B max
0x07: 510 items, 4 KB keys + 12 KB data, 9 B + 24 B avg, 41 B max
0x08: 1349 items, 12 KB keys + 10 KB data, 9 B + 8 B avg, 17 B max
0x09: 194 items, 0 KB keys + 123 KB data, 5 B + 634 B avg, 11484 B max
0x0a: 3 items, 0 KB keys + 0 KB data, 14 B + 7 B avg, 30 B max
0x0b: 181836 items, 2363 KB keys + 10694 KB data, 13 B + 58 B avg, 173 B max
Total 10426475 items, 968490 KB keys + 9202925 KB data.
```
Note 7.4 GB of data in class 00, total size 9.2 GB. After running the
migration we get this instead:
```
0x00: 9138161 items, 870876 KB keys + 2611392 KB data, 95 B + 285 B avg, 4788 B max
0x01: 185656 items, 10388 KB keys + 1790909 KB data, 55 B + 9646 B avg, 924525 B max
0x02: 916890 items, 84795 KB keys + 3667 KB data, 92 B + 4 B avg, 192 B max
0x03: 384 items, 27 KB keys + 5 KB data, 72 B + 15 B avg, 87 B max
0x04: 1109 items, 17 KB keys + 17 KB data, 15 B + 15 B avg, 69 B max
0x06: 383 items, 3 KB keys + 0 KB data, 9 B + 2 B avg, 18 B max
0x07: 510 items, 4 KB keys + 12 KB data, 9 B + 24 B avg, 41 B max
0x09: 194 items, 0 KB keys + 123 KB data, 5 B + 634 B avg, 11484 B max
0x0a: 3 items, 0 KB keys + 0 KB data, 14 B + 17 B avg, 51 B max
0x0b: 181836 items, 2363 KB keys + 10694 KB data, 13 B + 58 B avg, 173 B max
0x0d: 44282 items, 1461 KB keys + 61081 KB data, 33 B + 1379 B avg, 1637399 B max
Total 10469408 items, 969939 KB keys + 4477905 KB data.
```
Class 00 is now down to 2.6 GB, with just 61 MB added in class 0d.
There will be some additional reads in some cases which theoretically
hurts performance, but this will be more than compensated for by smaller
writes and better compaction.
On my own home setup which just has three devices and a handful of
folders the difference is smaller in absolute numbers of course, but
still less than half the old size:
```
0x00: 297122 items, 20894 KB keys + 306860 KB data, 70 B + 1032 B avg, 103237 B max
0x01: 115299 items, 7738 KB keys + 17542 KB data, 67 B + 152 B avg, 419 B max
0x02: 1430537 items, 121223 KB keys + 5722 KB data, 84 B + 4 B avg, 253 B max
...
Total 1947412 items, 151268 KB keys + 337485 KB data.
```
to:
```
0x00: 297122 items, 20894 KB keys + 37038 KB data, 70 B + 124 B avg, 520 B max
0x01: 115299 items, 7738 KB keys + 17542 KB data, 67 B + 152 B avg, 419 B max
0x02: 1430537 items, 121223 KB keys + 5722 KB data, 84 B + 4 B avg, 253 B max
...
0x0d: 18041 items, 595 KB keys + 71964 KB data, 33 B + 3988 B avg, 101109 B max
Total 1965447 items, 151863 KB keys + 139628 KB data.
```
* wip
* wip
* wip
* wip
2020-01-24 07:35:44 +00:00
|
|
|
return m.ProtoSize()
|
|
|
|
}
|
2020-05-13 12:28:42 +00:00
|
|
|
func (m *IndirectionHashesOnly) XXX_DiscardUnknown() {
|
|
|
|
xxx_messageInfo_IndirectionHashesOnly.DiscardUnknown(m)
|
lib/db: Deduplicate block lists in database (fixes #5898) (#6283)
2020-01-24 07:35:44 +00:00
|
|
|
}
|
|
|
|
|
2020-05-13 12:28:42 +00:00
|
|
|
var xxx_messageInfo_IndirectionHashesOnly proto.InternalMessageInfo
|
lib/db: Deduplicate block lists in database (fixes #5898) (#6283)
2020-01-24 07:35:44 +00:00
|
|
|
|
2017-12-14 09:51:17 +00:00
|
|
|
// For each folder and device we keep one of these to track the current
|
|
|
|
// counts and sequence. We also keep one for the global state of the folder.
|
|
|
|
type Counts struct {
|
2020-10-02 06:07:05 +00:00
|
|
|
Files int `protobuf:"varint,1,opt,name=files,proto3,casttype=int" json:"files" xml:"files"`
|
|
|
|
Directories int `protobuf:"varint,2,opt,name=directories,proto3,casttype=int" json:"directories" xml:"directories"`
|
|
|
|
Symlinks int `protobuf:"varint,3,opt,name=symlinks,proto3,casttype=int" json:"symlinks" xml:"symlinks"`
|
|
|
|
Deleted int `protobuf:"varint,4,opt,name=deleted,proto3,casttype=int" json:"deleted" xml:"deleted"`
|
|
|
|
Bytes int64 `protobuf:"varint,5,opt,name=bytes,proto3" json:"bytes" xml:"bytes"`
|
|
|
|
Sequence int64 `protobuf:"varint,6,opt,name=sequence,proto3" json:"sequence" xml:"sequence"`
|
|
|
|
DeviceID []byte `protobuf:"bytes,17,opt,name=device_id,json=deviceId,proto3" json:"deviceId" xml:"deviceId"`
|
|
|
|
LocalFlags uint32 `protobuf:"varint,18,opt,name=local_flags,json=localFlags,proto3" json:"localFlags" xml:"localFlags"`
|
2017-12-14 09:51:17 +00:00
|
|
|
}
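As a hedged illustration of the comment above Counts: there is one entry
per (folder, device) pair plus one for the folder's global state. The
keying and the "global" marker below are a sketch, not the real key
layout in lib/db.
```
package main

import "fmt"

// counts mirrors the fields of the generated Counts struct that matter
// for this sketch; the real type is the one defined above.
type counts struct {
	Files, Directories int
	Bytes, Sequence    int64
}

// One entry per (folder, device) pair, plus one per folder for the
// global state.
type countsKey struct{ folder, device string }

func main() {
	stats := map[countsKey]counts{
		{"default", "device-A"}: {Files: 10, Bytes: 4096, Sequence: 42},
		{"default", "device-B"}: {Files: 11, Bytes: 6144, Sequence: 57},
		{"default", "global"}:   {Files: 12, Bytes: 8192, Sequence: 57},
	}
	fmt.Println(stats[countsKey{"default", "global"}].Files) // 12
}
```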
|
|
|
|
|
2020-09-04 09:09:36 +00:00
|
|
|
func (m *Counts) Reset() { *m = Counts{} }
|
|
|
|
func (*Counts) ProtoMessage() {}
|
2019-01-14 10:53:36 +00:00
|
|
|
func (*Counts) Descriptor() ([]byte, []int) {
|
2020-10-02 06:07:05 +00:00
|
|
|
return fileDescriptor_5465d80e8cba02e3, []int{5}
|
2019-01-14 10:53:36 +00:00
|
|
|
}
|
|
|
|
func (m *Counts) XXX_Unmarshal(b []byte) error {
|
|
|
|
return m.Unmarshal(b)
|
|
|
|
}
|
|
|
|
func (m *Counts) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
|
|
|
if deterministic {
|
|
|
|
return xxx_messageInfo_Counts.Marshal(b, m, deterministic)
|
|
|
|
} else {
|
|
|
|
b = b[:cap(b)]
|
2019-09-04 06:33:29 +00:00
|
|
|
n, err := m.MarshalToSizedBuffer(b)
|
2019-01-14 10:53:36 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
return b[:n], nil
|
|
|
|
}
|
|
|
|
}
|
2019-09-04 06:33:29 +00:00
|
|
|
func (m *Counts) XXX_Merge(src proto.Message) {
|
|
|
|
xxx_messageInfo_Counts.Merge(m, src)
|
2019-01-14 10:53:36 +00:00
|
|
|
}
|
|
|
|
func (m *Counts) XXX_Size() int {
|
|
|
|
return m.ProtoSize()
|
|
|
|
}
|
|
|
|
func (m *Counts) XXX_DiscardUnknown() {
|
|
|
|
xxx_messageInfo_Counts.DiscardUnknown(m)
|
|
|
|
}
|
|
|
|
|
|
|
|
var xxx_messageInfo_Counts proto.InternalMessageInfo
|
2017-12-14 09:51:17 +00:00
|
|
|
|
|
|
|
type CountsSet struct {
|
2020-10-02 06:07:05 +00:00
|
|
|
Counts []Counts `protobuf:"bytes,1,rep,name=counts,proto3" json:"counts" xml:"count"`
|
|
|
|
Created int64 `protobuf:"varint,2,opt,name=created,proto3" json:"created" xml:"created"`
|
2017-12-14 09:51:17 +00:00
|
|
|
}
|
|
|
|
|
2019-01-14 10:53:36 +00:00
|
|
|
func (m *CountsSet) Reset() { *m = CountsSet{} }
|
|
|
|
func (m *CountsSet) String() string { return proto.CompactTextString(m) }
|
|
|
|
func (*CountsSet) ProtoMessage() {}
|
|
|
|
func (*CountsSet) Descriptor() ([]byte, []int) {
|
2020-10-02 06:07:05 +00:00
|
|
|
return fileDescriptor_5465d80e8cba02e3, []int{6}
|
2019-01-14 10:53:36 +00:00
|
|
|
}
|
|
|
|
func (m *CountsSet) XXX_Unmarshal(b []byte) error {
|
|
|
|
return m.Unmarshal(b)
|
|
|
|
}
|
|
|
|
func (m *CountsSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
|
|
|
if deterministic {
|
|
|
|
return xxx_messageInfo_CountsSet.Marshal(b, m, deterministic)
|
|
|
|
} else {
|
|
|
|
b = b[:cap(b)]
|
2019-09-04 06:33:29 +00:00
|
|
|
n, err := m.MarshalToSizedBuffer(b)
|
2019-01-14 10:53:36 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
return b[:n], nil
|
|
|
|
}
|
|
|
|
}
|
2019-09-04 06:33:29 +00:00
|
|
|
func (m *CountsSet) XXX_Merge(src proto.Message) {
|
|
|
|
xxx_messageInfo_CountsSet.Merge(m, src)
|
2019-01-14 10:53:36 +00:00
|
|
|
}
|
|
|
|
func (m *CountsSet) XXX_Size() int {
|
|
|
|
return m.ProtoSize()
|
|
|
|
}
|
|
|
|
func (m *CountsSet) XXX_DiscardUnknown() {
|
|
|
|
xxx_messageInfo_CountsSet.DiscardUnknown(m)
|
|
|
|
}
|
|
|
|
|
|
|
|
var xxx_messageInfo_CountsSet proto.InternalMessageInfo
|
2017-12-14 09:51:17 +00:00
|
|
|
|
2020-05-30 07:50:23 +00:00
|
|
|
type FileVersionDeprecated struct {
|
2020-10-02 06:07:05 +00:00
|
|
|
Version protocol.Vector `protobuf:"bytes,1,opt,name=version,proto3" json:"version" xml:"version"`
|
|
|
|
Device []byte `protobuf:"bytes,2,opt,name=device,proto3" json:"device" xml:"device"`
|
|
|
|
Invalid bool `protobuf:"varint,3,opt,name=invalid,proto3" json:"invalid" xml:"invalid"`
|
|
|
|
Deleted bool `protobuf:"varint,4,opt,name=deleted,proto3" json:"deleted" xml:"deleted"`
|
2020-05-30 07:50:23 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
func (m *FileVersionDeprecated) Reset() { *m = FileVersionDeprecated{} }
|
|
|
|
func (m *FileVersionDeprecated) String() string { return proto.CompactTextString(m) }
|
|
|
|
func (*FileVersionDeprecated) ProtoMessage() {}
|
|
|
|
func (*FileVersionDeprecated) Descriptor() ([]byte, []int) {
|
2020-10-02 06:07:05 +00:00
|
|
|
return fileDescriptor_5465d80e8cba02e3, []int{7}
|
2020-05-30 07:50:23 +00:00
|
|
|
}
|
|
|
|
func (m *FileVersionDeprecated) XXX_Unmarshal(b []byte) error {
|
|
|
|
return m.Unmarshal(b)
|
|
|
|
}
|
|
|
|
func (m *FileVersionDeprecated) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
|
|
|
if deterministic {
|
|
|
|
return xxx_messageInfo_FileVersionDeprecated.Marshal(b, m, deterministic)
|
|
|
|
} else {
|
|
|
|
b = b[:cap(b)]
|
|
|
|
n, err := m.MarshalToSizedBuffer(b)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
return b[:n], nil
|
|
|
|
}
|
|
|
|
}
|
|
|
|
func (m *FileVersionDeprecated) XXX_Merge(src proto.Message) {
|
|
|
|
xxx_messageInfo_FileVersionDeprecated.Merge(m, src)
|
|
|
|
}
|
|
|
|
func (m *FileVersionDeprecated) XXX_Size() int {
|
|
|
|
return m.ProtoSize()
|
|
|
|
}
|
|
|
|
func (m *FileVersionDeprecated) XXX_DiscardUnknown() {
|
|
|
|
xxx_messageInfo_FileVersionDeprecated.DiscardUnknown(m)
|
|
|
|
}
|
|
|
|
|
|
|
|
var xxx_messageInfo_FileVersionDeprecated proto.InternalMessageInfo
|
|
|
|
|
|
|
|
type VersionListDeprecated struct {
|
2020-10-02 06:07:05 +00:00
|
|
|
Versions []FileVersionDeprecated `protobuf:"bytes,1,rep,name=versions,proto3" json:"versions" xml:"version"`
|
2020-05-30 07:50:23 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
func (m *VersionListDeprecated) Reset() { *m = VersionListDeprecated{} }
|
|
|
|
func (*VersionListDeprecated) ProtoMessage() {}
|
|
|
|
func (*VersionListDeprecated) Descriptor() ([]byte, []int) {
|
2020-10-02 06:07:05 +00:00
|
|
|
return fileDescriptor_5465d80e8cba02e3, []int{8}
|
2020-05-30 07:50:23 +00:00
|
|
|
}
|
|
|
|
func (m *VersionListDeprecated) XXX_Unmarshal(b []byte) error {
|
|
|
|
return m.Unmarshal(b)
|
|
|
|
}
|
|
|
|
func (m *VersionListDeprecated) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
|
|
|
if deterministic {
|
|
|
|
return xxx_messageInfo_VersionListDeprecated.Marshal(b, m, deterministic)
|
|
|
|
} else {
|
|
|
|
b = b[:cap(b)]
|
|
|
|
n, err := m.MarshalToSizedBuffer(b)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
return b[:n], nil
|
|
|
|
}
|
|
|
|
}
|
|
|
|
func (m *VersionListDeprecated) XXX_Merge(src proto.Message) {
|
|
|
|
xxx_messageInfo_VersionListDeprecated.Merge(m, src)
|
|
|
|
}
|
|
|
|
func (m *VersionListDeprecated) XXX_Size() int {
|
|
|
|
return m.ProtoSize()
|
|
|
|
}
|
|
|
|
func (m *VersionListDeprecated) XXX_DiscardUnknown() {
|
|
|
|
xxx_messageInfo_VersionListDeprecated.DiscardUnknown(m)
|
|
|
|
}
|
|
|
|
|
|
|
|
var xxx_messageInfo_VersionListDeprecated proto.InternalMessageInfo
|
|
|
|
|
2020-12-17 18:54:31 +00:00
|
|
|
type ObservedFolder struct {
|
2021-02-12 21:51:29 +00:00
|
|
|
Time time.Time `protobuf:"bytes,1,opt,name=time,proto3,stdtime" json:"time" xml:"time"`
|
|
|
|
Label string `protobuf:"bytes,2,opt,name=label,proto3" json:"label" xml:"label"`
|
|
|
|
ReceiveEncrypted bool `protobuf:"varint,3,opt,name=receive_encrypted,json=receiveEncrypted,proto3" json:"receiveEncrypted" xml:"receiveEncrypted"`
|
2021-06-17 11:53:02 +00:00
|
|
|
RemoteEncrypted bool `protobuf:"varint,4,opt,name=remote_encrypted,json=remoteEncrypted,proto3" json:"remoteEncrypted" xml:"remoteEncrypted"`
|
2020-12-17 18:54:31 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
func (m *ObservedFolder) Reset() { *m = ObservedFolder{} }
|
|
|
|
func (m *ObservedFolder) String() string { return proto.CompactTextString(m) }
|
|
|
|
func (*ObservedFolder) ProtoMessage() {}
|
|
|
|
func (*ObservedFolder) Descriptor() ([]byte, []int) {
|
|
|
|
return fileDescriptor_5465d80e8cba02e3, []int{9}
|
|
|
|
}
|
|
|
|
func (m *ObservedFolder) XXX_Unmarshal(b []byte) error {
|
|
|
|
return m.Unmarshal(b)
|
|
|
|
}
|
|
|
|
func (m *ObservedFolder) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
|
|
|
if deterministic {
|
|
|
|
return xxx_messageInfo_ObservedFolder.Marshal(b, m, deterministic)
|
|
|
|
} else {
|
|
|
|
b = b[:cap(b)]
|
|
|
|
n, err := m.MarshalToSizedBuffer(b)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
return b[:n], nil
|
|
|
|
}
|
|
|
|
}
|
|
|
|
func (m *ObservedFolder) XXX_Merge(src proto.Message) {
|
|
|
|
xxx_messageInfo_ObservedFolder.Merge(m, src)
|
|
|
|
}
|
|
|
|
func (m *ObservedFolder) XXX_Size() int {
|
|
|
|
return m.ProtoSize()
|
|
|
|
}
|
|
|
|
func (m *ObservedFolder) XXX_DiscardUnknown() {
|
|
|
|
xxx_messageInfo_ObservedFolder.DiscardUnknown(m)
|
|
|
|
}
|
|
|
|
|
|
|
|
var xxx_messageInfo_ObservedFolder proto.InternalMessageInfo
|
|
|
|
|
|
|
|
type ObservedDevice struct {
|
|
|
|
Time time.Time `protobuf:"bytes,1,opt,name=time,proto3,stdtime" json:"time" xml:"time"`
|
|
|
|
Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name" xml:"name"`
|
|
|
|
Address string `protobuf:"bytes,3,opt,name=address,proto3" json:"address" xml:"address"`
|
|
|
|
}
|
|
|
|
|
|
|
|
func (m *ObservedDevice) Reset() { *m = ObservedDevice{} }
|
|
|
|
func (m *ObservedDevice) String() string { return proto.CompactTextString(m) }
|
|
|
|
func (*ObservedDevice) ProtoMessage() {}
|
|
|
|
func (*ObservedDevice) Descriptor() ([]byte, []int) {
|
|
|
|
return fileDescriptor_5465d80e8cba02e3, []int{10}
|
|
|
|
}
|
|
|
|
func (m *ObservedDevice) XXX_Unmarshal(b []byte) error {
|
|
|
|
return m.Unmarshal(b)
|
|
|
|
}
|
|
|
|
func (m *ObservedDevice) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
|
|
|
if deterministic {
|
|
|
|
return xxx_messageInfo_ObservedDevice.Marshal(b, m, deterministic)
|
|
|
|
} else {
|
|
|
|
b = b[:cap(b)]
|
|
|
|
n, err := m.MarshalToSizedBuffer(b)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
return b[:n], nil
|
|
|
|
}
|
|
|
|
}
|
|
|
|
func (m *ObservedDevice) XXX_Merge(src proto.Message) {
|
|
|
|
xxx_messageInfo_ObservedDevice.Merge(m, src)
|
|
|
|
}
|
|
|
|
func (m *ObservedDevice) XXX_Size() int {
|
|
|
|
return m.ProtoSize()
|
|
|
|
}
|
|
|
|
func (m *ObservedDevice) XXX_DiscardUnknown() {
|
|
|
|
xxx_messageInfo_ObservedDevice.DiscardUnknown(m)
|
|
|
|
}
|
|
|
|
|
|
|
|
var xxx_messageInfo_ObservedDevice proto.InternalMessageInfo
|
|
|
|
|
2016-07-04 10:40:29 +00:00
|
|
|
func init() {
|
|
|
|
proto.RegisterType((*FileVersion)(nil), "db.FileVersion")
|
|
|
|
proto.RegisterType((*VersionList)(nil), "db.VersionList")
|
|
|
|
proto.RegisterType((*FileInfoTruncated)(nil), "db.FileInfoTruncated")
|
lib/db: Deduplicate block lists in database (fixes #5898) (#6283)
2020-01-24 07:35:44 +00:00
|
|
|
proto.RegisterType((*BlockList)(nil), "db.BlockList")
|
2020-05-13 12:28:42 +00:00
|
|
|
proto.RegisterType((*IndirectionHashesOnly)(nil), "db.IndirectionHashesOnly")
|
2017-12-14 09:51:17 +00:00
|
|
|
proto.RegisterType((*Counts)(nil), "db.Counts")
|
|
|
|
proto.RegisterType((*CountsSet)(nil), "db.CountsSet")
|
2020-05-30 07:50:23 +00:00
|
|
|
proto.RegisterType((*FileVersionDeprecated)(nil), "db.FileVersionDeprecated")
|
|
|
|
proto.RegisterType((*VersionListDeprecated)(nil), "db.VersionListDeprecated")
|
2020-12-17 18:54:31 +00:00
|
|
|
proto.RegisterType((*ObservedFolder)(nil), "db.ObservedFolder")
|
|
|
|
proto.RegisterType((*ObservedDevice)(nil), "db.ObservedDevice")
|
2016-07-04 10:40:29 +00:00
|
|
|
}
|
2019-09-04 06:33:29 +00:00
|
|
|
|
2020-10-02 06:07:05 +00:00
|
|
|
func init() { proto.RegisterFile("lib/db/structs.proto", fileDescriptor_5465d80e8cba02e3) }
|
|
|
|
|
|
|
|
var fileDescriptor_5465d80e8cba02e3 = []byte{
|
2021-06-17 11:53:02 +00:00
|
|
|
// 1476 bytes of a gzipped FileDescriptorProto
|
|
|
|
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x57, 0x4d, 0x6f, 0xdb, 0x46,
|
|
|
|
0x13, 0x36, 0x2d, 0xf9, 0x43, 0x2b, 0xf9, 0x8b, 0x79, 0x6d, 0xe8, 0xf5, 0xfb, 0x56, 0xab, 0x6e,
|
|
|
|
0x1c, 0x40, 0xfd, 0x80, 0x0c, 0x38, 0x88, 0x51, 0x04, 0x68, 0x83, 0x30, 0xae, 0x13, 0x07, 0x69,
|
|
|
|
0x52, 0xac, 0x83, 0xa4, 0x68, 0x0f, 0x82, 0x48, 0xae, 0x65, 0x22, 0x14, 0xa9, 0x72, 0x69, 0x3b,
|
|
|
|
0xca, 0xad, 0x97, 0x02, 0xbd, 0x05, 0x41, 0x0f, 0x45, 0x51, 0x14, 0x39, 0xf5, 0x27, 0xf4, 0x17,
|
|
|
|
0x14, 0x45, 0x8e, 0x3e, 0x16, 0x3d, 0xb0, 0x88, 0x7d, 0x69, 0x75, 0xd4, 0xa9, 0xe8, 0xa9, 0xd8,
|
|
|
|
0xd9, 0xe5, 0x92, 0xb2, 0x91, 0x22, 0x49, 0x7d, 0xd3, 0x3c, 0xf3, 0xcc, 0x88, 0x9c, 0x7d, 0x66,
|
|
|
|
0x76, 0x88, 0xfe, 0xe3, 0x7b, 0xf6, 0xaa, 0x6b, 0xaf, 0xf2, 0x38, 0xda, 0x73, 0x62, 0xde, 0xec,
|
|
|
|
0x45, 0x61, 0x1c, 0x9a, 0xe3, 0xae, 0xbd, 0x7c, 0x3e, 0x62, 0xbd, 0x90, 0xaf, 0x02, 0x60, 0xef,
|
|
|
|
0xed, 0xac, 0x76, 0xc2, 0x4e, 0x08, 0x06, 0xfc, 0x92, 0xc4, 0x65, 0xdc, 0x09, 0xc3, 0x8e, 0xcf,
|
|
|
|
0x32, 0x56, 0xec, 0x75, 0x19, 0x8f, 0xdb, 0xdd, 0x9e, 0x22, 0x2c, 0x89, 0xfc, 0xf0, 0xd3, 0x09,
|
|
|
|
0xfd, 0x55, 0x9b, 0xa5, 0x78, 0x89, 0x3d, 0x8c, 0xe5, 0x4f, 0xf2, 0xfd, 0x38, 0x2a, 0x6f, 0x7a,
|
|
|
|
0x3e, 0xbb, 0xc7, 0x22, 0xee, 0x85, 0x81, 0x79, 0x0b, 0x4d, 0xed, 0xcb, 0x9f, 0x55, 0xa3, 0x6e,
|
|
|
|
0x34, 0xca, 0x6b, 0xf3, 0xcd, 0x34, 0x41, 0xf3, 0x1e, 0x73, 0xe2, 0x30, 0xb2, 0xea, 0xcf, 0x12,
|
|
|
|
0x3c, 0x36, 0x48, 0x70, 0x4a, 0x1c, 0x26, 0x78, 0xe6, 0x61, 0xd7, 0xbf, 0x4c, 0x94, 0x4d, 0x68,
|
|
|
|
0xea, 0x31, 0xd7, 0xd1, 0x94, 0xcb, 0x7c, 0x16, 0x33, 0xb7, 0x3a, 0x5e, 0x37, 0x1a, 0xd3, 0xd6,
|
|
|
|
0xff, 0x45, 0x9c, 0x82, 0x74, 0x9c, 0xb2, 0x09, 0x4d, 0x3d, 0xe6, 0x25, 0x11, 0xb7, 0xef, 0x39,
|
|
|
|
0x8c, 0x57, 0x0b, 0xf5, 0x42, 0xa3, 0x62, 0xfd, 0x4f, 0xc6, 0x01, 0x34, 0x4c, 0x70, 0x45, 0xc5,
|
|
|
|
0x09, 0x1b, 0xc2, 0xc0, 0x61, 0x52, 0x34, 0xe7, 0x05, 0xfb, 0x6d, 0xdf, 0x73, 0x5b, 0x69, 0x78,
|
|
|
|
0x11, 0xc2, 0xdf, 0x1a, 0x24, 0x78, 0x56, 0xb9, 0x36, 0x74, 0x96, 0x73, 0x90, 0x65, 0x04, 0x26,
|
|
|
|
0xf4, 0x04, 0x8d, 0x7c, 0x61, 0xa0, 0xb2, 0x2a, 0xce, 0x2d, 0x8f, 0xc7, 0xa6, 0x8f, 0xa6, 0xd5,
|
|
|
|
0xdb, 0xf1, 0xaa, 0x51, 0x2f, 0x34, 0xca, 0x6b, 0x73, 0x4d, 0xd7, 0x6e, 0xe6, 0x6a, 0x68, 0x5d,
|
|
|
|
0x11, 0x05, 0x3a, 0x4a, 0x70, 0x99, 0xb6, 0x0f, 0x14, 0xc6, 0x07, 0x09, 0xd6, 0x71, 0xa7, 0x0a,
|
|
|
|
0xf6, 0xe4, 0x70, 0x25, 0xcf, 0xa5, 0x9a, 0x79, 0xb9, 0xf8, 0xcd, 0x53, 0x3c, 0x46, 0xfe, 0x44,
|
|
|
|
0x68, 0x41, 0xfc, 0xc1, 0x56, 0xb0, 0x13, 0xde, 0x8d, 0xf6, 0x02, 0xa7, 0x2d, 0x8a, 0xf4, 0x36,
|
|
|
|
0x2a, 0x06, 0xed, 0x2e, 0x83, 0x73, 0x2a, 0x59, 0x4b, 0x83, 0x04, 0x83, 0x3d, 0x4c, 0x30, 0x82,
|
|
|
|
0xec, 0xc2, 0x20, 0x14, 0x30, 0xc1, 0xe5, 0xde, 0x23, 0x56, 0x2d, 0xd4, 0x8d, 0x46, 0x41, 0x72,
|
|
|
|
0x85, 0xad, 0xb9, 0xc2, 0x20, 0x14, 0x30, 0xf3, 0x0a, 0x42, 0xdd, 0xd0, 0xf5, 0x76, 0x3c, 0xe6,
|
|
|
|
0xb6, 0x78, 0x75, 0x02, 0x22, 0xea, 0x83, 0x04, 0x97, 0x52, 0x74, 0x7b, 0x98, 0xe0, 0x39, 0x08,
|
|
|
|
0xd3, 0x08, 0xa1, 0x99, 0xd7, 0xfc, 0xd1, 0x40, 0x65, 0x9d, 0xc1, 0xee, 0x57, 0x2b, 0x75, 0xa3,
|
|
|
|
0x51, 0xb4, 0xbe, 0x36, 0x44, 0x59, 0x7e, 0x4d, 0xf0, 0xc5, 0x8e, 0x17, 0xef, 0xee, 0xd9, 0x4d,
|
|
|
|
0x27, 0xec, 0xae, 0xf2, 0x7e, 0xe0, 0xc4, 0xbb, 0x5e, 0xd0, 0xc9, 0xfd, 0xca, 0x8b, 0xb6, 0xb9,
|
|
|
|
0xbd, 0x1b, 0x46, 0xf1, 0xd6, 0xc6, 0x20, 0xc1, 0xfa, 0xa1, 0xac, 0xfe, 0x30, 0xc1, 0xf3, 0x23,
|
|
|
|
0xff, 0x6f, 0xf5, 0xc9, 0xb7, 0x87, 0x2b, 0xaf, 0x93, 0x98, 0xe6, 0xd2, 0xe6, 0xc5, 0x5f, 0xfa,
|
|
|
|
0xf7, 0xe2, 0xbf, 0x8c, 0xa6, 0x39, 0xfb, 0x7c, 0x8f, 0x05, 0x0e, 0xab, 0x22, 0xa8, 0x62, 0x4d,
|
|
|
|
0xa8, 0x20, 0xc5, 0x86, 0x09, 0x9e, 0x95, 0xb5, 0x57, 0x00, 0xa1, 0xda, 0x67, 0xde, 0x41, 0xb3,
|
|
|
|
0xbc, 0xdf, 0xf5, 0xbd, 0xe0, 0x41, 0x2b, 0x6e, 0x47, 0x1d, 0x16, 0x57, 0x17, 0xe0, 0x94, 0x1b,
|
|
|
|
0x83, 0x04, 0xcf, 0x28, 0xcf, 0x5d, 0x70, 0x68, 0x1d, 0x8f, 0xa0, 0x84, 0x8e, 0xb2, 0xcc, 0x6b,
|
|
|
|
0xa8, 0x6c, 0xfb, 0xa1, 0xf3, 0x80, 0xb7, 0x76, 0xdb, 0x7c, 0xb7, 0x6a, 0xd6, 0x8d, 0x46, 0xc5,
|
|
|
|
0x22, 0xa2, 0xac, 0x12, 0xbe, 0xd1, 0xe6, 0xbb, 0xba, 0xac, 0x19, 0x44, 0x68, 0xce, 0x6f, 0x7e,
|
|
|
|
0x80, 0x4a, 0x2c, 0x70, 0xa2, 0x7e, 0x4f, 0x34, 0xf4, 0x39, 0x48, 0x01, 0xc2, 0xd0, 0xa0, 0x16,
|
|
|
|
0x86, 0x46, 0x08, 0xcd, 0xbc, 0xa6, 0x85, 0x8a, 0x71, 0xbf, 0xc7, 0x60, 0x16, 0xcc, 0xae, 0x2d,
|
|
|
|
0x65, 0xc5, 0xd5, 0xe2, 0xee, 0xf7, 0x98, 0x54, 0xa7, 0xe0, 0x69, 0x75, 0x0a, 0x83, 0x50, 0xc0,
|
|
|
|
0xcc, 0x4d, 0x54, 0xee, 0xb1, 0xa8, 0xeb, 0x71, 0xd9, 0x82, 0xc5, 0xba, 0xd1, 0x98, 0xb1, 0x56,
|
|
|
|
0x06, 0x09, 0xce, 0xc3, 0xc3, 0x04, 0x2f, 0x40, 0x64, 0x0e, 0x23, 0x34, 0xcf, 0x30, 0x6f, 0xe6,
|
|
|
|
0x34, 0x1a, 0xf0, 0x6a, 0xb9, 0x6e, 0x34, 0x26, 0x60, 0x4e, 0x68, 0x41, 0xdc, 0xe6, 0xa7, 0x74,
|
|
|
|
0x76, 0x9b, 0x93, 0xbf, 0x12, 0x5c, 0xf0, 0x82, 0x98, 0xe6, 0x68, 0xe6, 0x0e, 0x92, 0x55, 0x6a,
|
|
|
|
0x41, 0x8f, 0xcd, 0x40, 0xaa, 0xeb, 0x47, 0x09, 0xae, 0xd0, 0xf6, 0x81, 0x25, 0x1c, 0xdb, 0xde,
|
|
|
|
0x23, 0x26, 0x0a, 0x65, 0xa7, 0x86, 0x2e, 0x94, 0x46, 0xd2, 0xc4, 0x4f, 0x0e, 0x57, 0x46, 0xc2,
|
|
|
|
0x68, 0x16, 0x64, 0x6e, 0xa0, 0xb2, 0x1f, 0x3a, 0x6d, 0xbf, 0xb5, 0xe3, 0xb7, 0x3b, 0xbc, 0xfa,
|
|
|
|
0xfb, 0x14, 0xbc, 0x3c, 0x9c, 0x22, 0xe0, 0x9b, 0x02, 0xd6, 0x0f, 0x9d, 0x41, 0x84, 0xe6, 0xfc,
|
|
|
|
0xe6, 0x0d, 0x54, 0x51, 0x12, 0x95, 0x5a, 0xf8, 0x63, 0x0a, 0x4e, 0x12, 0x6a, 0xa8, 0x1c, 0x4a,
|
|
|
|
0x0d, 0x0b, 0x79, 0x65, 0x4b, 0x39, 0xe4, 0x19, 0xf9, 0xf1, 0x3e, 0xf9, 0x2a, 0xe3, 0x9d, 0xa2,
|
|
|
|
0x29, 0x35, 0x65, 0xab, 0x53, 0x10, 0xf7, 0xde, 0x51, 0x82, 0x11, 0x6d, 0x1f, 0x6c, 0x49, 0x54,
|
|
|
|
0x64, 0x51, 0x04, 0x9d, 0x45, 0xd9, 0x62, 0x56, 0xe6, 0x98, 0x34, 0xe5, 0x89, 0x8e, 0x09, 0xc2,
|
|
|
|
0x56, 0x5e, 0x1a, 0xd3, 0x90, 0x1a, 0x3a, 0x26, 0x08, 0x3f, 0x1e, 0x11, 0x87, 0xec, 0x98, 0x11,
|
|
|
|
0x94, 0xd0, 0x51, 0x96, 0x1a, 0xbd, 0xf7, 0x51, 0x09, 0x8e, 0x02, 0x66, 0xff, 0x4d, 0x34, 0x29,
|
|
|
|
0xbb, 0x41, 0x4d, 0xfe, 0x73, 0x99, 0x82, 0x81, 0x24, 0x24, 0x6c, 0xbd, 0xa1, 0x26, 0x84, 0xa2,
|
|
|
|
0x0e, 0x13, 0x5c, 0xce, 0x4e, 0x9a, 0x50, 0x05, 0x93, 0x1f, 0x0c, 0xb4, 0xb8, 0x15, 0xb8, 0x5e,
|
|
|
|
0xc4, 0x9c, 0x58, 0xd5, 0x93, 0xf1, 0x3b, 0x81, 0xdf, 0x3f, 0x9b, 0x56, 0x3d, 0xb3, 0x43, 0x26,
|
|
|
|
0xdf, 0x15, 0xd1, 0xe4, 0xb5, 0x70, 0x2f, 0x88, 0xb9, 0x79, 0x09, 0x4d, 0xec, 0x78, 0x3e, 0xe3,
|
|
|
|
0x70, 0xe5, 0x4c, 0x58, 0x78, 0x90, 0x60, 0x09, 0xe8, 0x97, 0x04, 0x4b, 0xf7, 0x88, 0x74, 0x9a,
|
|
|
|
0x1f, 0xa1, 0xb2, 0x7c, 0xcf, 0x30, 0xf2, 0x18, 0x87, 0xee, 0x9f, 0xb0, 0xde, 0x11, 0x4f, 0x92,
|
|
|
|
0x83, 0xf5, 0x93, 0xe4, 0x30, 0x9d, 0x28, 0x4f, 0x34, 0xaf, 0xa2, 0x69, 0x35, 0xdb, 0x38, 0xdc,
|
|
|
|
0x67, 0x13, 0xd6, 0x05, 0x98, 0xab, 0x0a, 0xcb, 0xe6, 0xaa, 0x02, 0x74, 0x16, 0x4d, 0x31, 0xdf,
|
|
|
|
0xcf, 0x84, 0x5b, 0x84, 0x0c, 0xe7, 0xff, 0x49, 0xb8, 0x69, 0xbc, 0xd6, 0x6f, 0x13, 0x4d, 0xd8,
|
|
|
|
0xfd, 0x98, 0xa5, 0x97, 0x63, 0x55, 0xd4, 0x01, 0x80, 0xec, 0xb0, 0x85, 0x45, 0xa8, 0x44, 0x47,
|
|
|
|
0x6e, 0x82, 0xc9, 0x57, 0xbc, 0x09, 0xb6, 0x51, 0x49, 0xee, 0x32, 0x2d, 0xcf, 0x85, 0x4b, 0xa0,
|
|
|
|
0x62, 0xad, 0x1f, 0x25, 0x78, 0x5a, 0xee, 0x27, 0x70, 0x33, 0x4e, 0x4b, 0xc2, 0x96, 0xab, 0x13,
|
|
|
|
0xa5, 0x80, 0xe8, 0x16, 0xcd, 0xa4, 0x9a, 0x27, 0x24, 0x96, 0x1f, 0x24, 0xe6, 0xeb, 0xcc, 0x11,
|
|
|
|
0xd5, 0x20, 0x5f, 0x1a, 0xa8, 0x24, 0xe5, 0xb1, 0xcd, 0x62, 0xf3, 0x2a, 0x9a, 0x74, 0xc0, 0x50,
|
|
|
|
0x1d, 0x82, 0xc4, 0x6e, 0x24, 0xdd, 0x59, 0x63, 0x48, 0x86, 0xae, 0x15, 0x98, 0x84, 0x2a, 0x58,
|
|
|
|
0x0c, 0x15, 0x27, 0x62, 0xed, 0x74, 0x67, 0x2c, 0xc8, 0xa1, 0xa2, 0x20, 0x7d, 0x36, 0xca, 0x26,
|
|
|
|
0x34, 0xf5, 0x90, 0xaf, 0xc6, 0xd1, 0x62, 0x6e, 0x0b, 0xdb, 0x60, 0xbd, 0x88, 0xc9, 0x45, 0xe9,
|
|
|
|
0x6c, 0x77, 0xda, 0x35, 0x34, 0x29, 0xeb, 0x08, 0x8f, 0x57, 0xb1, 0x96, 0xc5, 0x2b, 0x49, 0xe4,
|
|
|
|
0xd4, 0x66, 0xaa, 0x70, 0xf1, 0x4e, 0xe9, 0xc0, 0x2b, 0x64, 0x83, 0xf2, 0x45, 0x23, 0x2e, 0x1b,
|
|
|
|
0x6a, 0xeb, 0xa3, 0x3a, 0x7d, 0xd9, 0x01, 0x4b, 0x0e, 0xd0, 0x62, 0x6e, 0x67, 0xcd, 0x95, 0xe2,
|
|
|
|
0x93, 0x53, 0xdb, 0xeb, 0x7f, 0x4f, 0x6c, 0xaf, 0x19, 0xd9, 0x7a, 0x53, 0x15, 0xe5, 0xc5, 0x8b,
|
|
|
|
0xeb, 0xa9, 0x4d, 0xf5, 0xe7, 0x71, 0x34, 0x7b, 0xc7, 0xe6, 0x2c, 0xda, 0x67, 0xee, 0x66, 0xe8,
|
|
|
|
0xbb, 0x2c, 0x32, 0x6f, 0xa3, 0xa2, 0xf8, 0x2e, 0x51, 0xa5, 0x5f, 0x6e, 0xca, 0x8f, 0x96, 0x66,
|
|
|
|
0xfa, 0xd1, 0xd2, 0xbc, 0x9b, 0x7e, 0xb4, 0x58, 0x35, 0xf5, 0x7f, 0xc0, 0xcf, 0x2e, 0x7f, 0xaf,
|
|
|
|
0xcb, 0xc8, 0xe3, 0xdf, 0xb0, 0x41, 0x01, 0x17, 0xcd, 0xe7, 0xb7, 0x6d, 0xe6, 0x43, 0xf9, 0x4b,
|
|
|
|
0xb2, 0xf9, 0x00, 0xd0, 0x82, 0x02, 0x8b, 0x50, 0x89, 0x9a, 0x9f, 0xa1, 0x85, 0x88, 0x39, 0xcc,
|
|
|
|
0xdb, 0x67, 0xad, 0x6c, 0x79, 0x91, 0xa7, 0xd0, 0x1c, 0x24, 0x78, 0x5e, 0x39, 0x3f, 0xcc, 0xed,
|
|
|
|
0x30, 0x4b, 0x90, 0xe6, 0xa4, 0x83, 0xd0, 0x53, 0x5c, 0xf3, 0x3e, 0x9a, 0x8f, 0x58, 0x37, 0x8c,
|
|
|
|
0xf3, 0xb9, 0xe5, 0x49, 0xbd, 0x3b, 0x48, 0xf0, 0x9c, 0xf4, 0xe5, 0x53, 0x2f, 0xaa, 0xd4, 0x23,
|
|
|
|
0x38, 0xa1, 0x27, 0x99, 0xe4, 0x27, 0x23, 0x2b, 0xa4, 0x6c, 0xe0, 0x33, 0x2f, 0x64, 0xfa, 0xfd,
|
|
|
|
0x30, 0xfe, 0x12, 0xdf, 0x0f, 0xeb, 0x68, 0xaa, 0xed, 0xba, 0x11, 0xe3, 0x72, 0xe4, 0x96, 0xa4,
|
|
|
|
0x10, 0x15, 0xa4, 0x65, 0xa1, 0x6c, 0x42, 0x53, 0x8f, 0x75, 0xfd, 0xd9, 0xf3, 0xda, 0xd8, 0xe1,
|
|
|
|
0xf3, 0xda, 0xd8, 0xb3, 0xa3, 0x9a, 0x71, 0x78, 0x54, 0x33, 0x1e, 0x1f, 0xd7, 0xc6, 0x9e, 0x1e,
|
|
|
|
0xd7, 0x8c, 0xc3, 0xe3, 0xda, 0xd8, 0x2f, 0xc7, 0xb5, 0xb1, 0x4f, 0x2f, 0xbc, 0xc4, 0xd2, 0xee,
|
|
|
|
0xda, 0xf6, 0x24, 0xbc, 0xe6, 0xc5, 0xbf, 0x03, 0x00, 0x00, 0xff, 0xff, 0xb3, 0xa4, 0xea, 0xfc,
|
|
|
|
0x33, 0x0f, 0x00, 0x00,
|
2019-09-04 06:33:29 +00:00
|
|
|
}
|
|
|
|
|
2017-01-03 00:16:21 +00:00
|
|
|
func (m *FileVersion) Marshal() (dAtA []byte, err error) {
|
2016-07-04 10:40:29 +00:00
|
|
|
size := m.ProtoSize()
|
2017-01-03 00:16:21 +00:00
|
|
|
dAtA = make([]byte, size)
|
2019-09-04 06:33:29 +00:00
|
|
|
n, err := m.MarshalToSizedBuffer(dAtA[:size])
|
2016-07-04 10:40:29 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2017-01-03 00:16:21 +00:00
|
|
|
return dAtA[:n], nil
|
2016-07-04 10:40:29 +00:00
|
|
|
}
|
|
|
|
|
2017-01-03 00:16:21 +00:00
|
|
|
func (m *FileVersion) MarshalTo(dAtA []byte) (int, error) {
|
2019-09-04 06:33:29 +00:00
|
|
|
size := m.ProtoSize()
|
|
|
|
return m.MarshalToSizedBuffer(dAtA[:size])
|
|
|
|
}
|
|
|
|
|
|
|
|
func (m *FileVersion) MarshalToSizedBuffer(dAtA []byte) (int, error) {
|
|
|
|
i := len(dAtA)
|
2016-07-04 10:40:29 +00:00
|
|
|
_ = i
|
|
|
|
var l int
|
|
|
|
_ = l
|
2020-05-30 07:50:23 +00:00
|
|
|
if len(m.InvalidDevices) > 0 {
|
|
|
|
for iNdEx := len(m.InvalidDevices) - 1; iNdEx >= 0; iNdEx-- {
|
|
|
|
i -= len(m.InvalidDevices[iNdEx])
|
|
|
|
copy(dAtA[i:], m.InvalidDevices[iNdEx])
|
|
|
|
i = encodeVarintStructs(dAtA, i, uint64(len(m.InvalidDevices[iNdEx])))
|
|
|
|
i--
|
|
|
|
dAtA[i] = 0x22
|
2020-05-11 13:07:06 +00:00
|
|
|
}
|
|
|
|
}
|
2020-05-30 07:50:23 +00:00
|
|
|
if len(m.Devices) > 0 {
|
|
|
|
for iNdEx := len(m.Devices) - 1; iNdEx >= 0; iNdEx-- {
|
|
|
|
i -= len(m.Devices[iNdEx])
|
|
|
|
copy(dAtA[i:], m.Devices[iNdEx])
|
|
|
|
i = encodeVarintStructs(dAtA, i, uint64(len(m.Devices[iNdEx])))
|
|
|
|
i--
|
|
|
|
dAtA[i] = 0x1a
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if m.Deleted {
|
2019-09-04 06:33:29 +00:00
|
|
|
i--
|
2020-05-30 07:50:23 +00:00
|
|
|
if m.Deleted {
|
2017-11-11 19:18:17 +00:00
|
|
|
dAtA[i] = 1
|
|
|
|
} else {
|
|
|
|
dAtA[i] = 0
|
|
|
|
}
|
2019-09-04 06:33:29 +00:00
|
|
|
i--
|
2020-05-30 07:50:23 +00:00
|
|
|
dAtA[i] = 0x10
|
2017-11-11 19:18:17 +00:00
|
|
|
}
|
2019-09-04 06:33:29 +00:00
|
|
|
{
|
|
|
|
size, err := m.Version.MarshalToSizedBuffer(dAtA[:i])
|
|
|
|
if err != nil {
|
|
|
|
return 0, err
|
|
|
|
}
|
|
|
|
i -= size
|
|
|
|
i = encodeVarintStructs(dAtA, i, uint64(size))
|
|
|
|
}
|
|
|
|
i--
|
|
|
|
dAtA[i] = 0xa
|
|
|
|
return len(dAtA) - i, nil
|
2016-07-04 10:40:29 +00:00
|
|
|
}
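A note on the hard-coded bytes in these marshallers: each field is
prefixed by a protobuf tag, a varint holding (field_number << 3) |
wire_type, where wire type 0 is varint and wire type 2 is
length-delimited. The single-byte tags written in the function above
decode to exactly the fields being marshalled; a quick check, using
nothing beyond the standard library:
```
package main

import "fmt"

// tag computes a protobuf field tag value: (field_number << 3) | wire_type.
func tag(fieldNum, wireType uint) uint { return fieldNum<<3 | wireType }

func main() {
	fmt.Printf("0x%x\n", tag(1, 2)) // 0x0a: Version, length-delimited
	fmt.Printf("0x%x\n", tag(2, 0)) // 0x10: Deleted, varint
	fmt.Printf("0x%x\n", tag(3, 2)) // 0x1a: Devices, length-delimited
	fmt.Printf("0x%x\n", tag(4, 2)) // 0x22: InvalidDevices, length-delimited
}
```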
|
|
|
|
|
2017-01-03 00:16:21 +00:00
|
|
|
func (m *VersionList) Marshal() (dAtA []byte, err error) {
|
2016-07-04 10:40:29 +00:00
|
|
|
size := m.ProtoSize()
|
2017-01-03 00:16:21 +00:00
|
|
|
dAtA = make([]byte, size)
|
2019-09-04 06:33:29 +00:00
|
|
|
n, err := m.MarshalToSizedBuffer(dAtA[:size])
|
2016-07-04 10:40:29 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2017-01-03 00:16:21 +00:00
|
|
|
return dAtA[:n], nil
|
2016-07-04 10:40:29 +00:00
|
|
|
}
|
|
|
|
|
2017-01-03 00:16:21 +00:00
|
|
|
func (m *VersionList) MarshalTo(dAtA []byte) (int, error) {
|
2019-09-04 06:33:29 +00:00
|
|
|
size := m.ProtoSize()
|
|
|
|
return m.MarshalToSizedBuffer(dAtA[:size])
|
|
|
|
}
|
|
|
|
|
|
|
|
func (m *VersionList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
|
|
|
|
i := len(dAtA)
|
2016-07-04 10:40:29 +00:00
|
|
|
_ = i
|
|
|
|
var l int
|
|
|
|
_ = l
|
2020-05-30 07:50:23 +00:00
|
|
|
if len(m.RawVersions) > 0 {
|
|
|
|
for iNdEx := len(m.RawVersions) - 1; iNdEx >= 0; iNdEx-- {
|
2019-09-04 06:33:29 +00:00
|
|
|
{
|
2020-05-30 07:50:23 +00:00
|
|
|
size, err := m.RawVersions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
|
2019-09-04 06:33:29 +00:00
|
|
|
if err != nil {
|
|
|
|
return 0, err
|
|
|
|
}
|
|
|
|
i -= size
|
|
|
|
i = encodeVarintStructs(dAtA, i, uint64(size))
|
2016-07-04 10:40:29 +00:00
|
|
|
}
|
2019-09-04 06:33:29 +00:00
|
|
|
i--
|
|
|
|
dAtA[i] = 0xa
|
2016-07-04 10:40:29 +00:00
|
|
|
}
|
|
|
|
}
|
2019-09-04 06:33:29 +00:00
|
|
|
return len(dAtA) - i, nil
|
2016-07-04 10:40:29 +00:00
|
|
|
}
|
|
|
|
|
2017-01-03 00:16:21 +00:00
|
|
|
func (m *FileInfoTruncated) Marshal() (dAtA []byte, err error) {
|
2016-07-04 10:40:29 +00:00
|
|
|
size := m.ProtoSize()
|
2017-01-03 00:16:21 +00:00
|
|
|
dAtA = make([]byte, size)
|
2019-09-04 06:33:29 +00:00
|
|
|
n, err := m.MarshalToSizedBuffer(dAtA[:size])
|
2016-07-04 10:40:29 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2017-01-03 00:16:21 +00:00
|
|
|
return dAtA[:n], nil
|
2016-07-04 10:40:29 +00:00
|
|
|
}
|
|
|
|
|
2017-01-03 00:16:21 +00:00
|
|
|
func (m *FileInfoTruncated) MarshalTo(dAtA []byte) (int, error) {
|
2019-09-04 06:33:29 +00:00
|
|
|
size := m.ProtoSize()
|
|
|
|
return m.MarshalToSizedBuffer(dAtA[:size])
|
|
|
|
}
|
|
|
|
|
|
|
|
func (m *FileInfoTruncated) MarshalToSizedBuffer(dAtA []byte) (int, error) {
|
|
|
|
i := len(dAtA)
|
2016-07-04 10:40:29 +00:00
|
|
|
_ = i
|
|
|
|
var l int
|
|
|
|
_ = l
|
2020-05-13 12:28:42 +00:00
|
|
|
if len(m.VersionHash) > 0 {
|
|
|
|
i -= len(m.VersionHash)
|
|
|
|
copy(dAtA[i:], m.VersionHash)
|
|
|
|
i = encodeVarintStructs(dAtA, i, uint64(len(m.VersionHash)))
|
|
|
|
i--
|
|
|
|
dAtA[i] = 0x3e
|
|
|
|
i--
|
|
|
|
dAtA[i] = 0xca
|
|
|
|
}
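Larger field numbers need multi-byte varint tags, and since this
marshaller fills the buffer back to front, the two writes above land in
the stream in reverse order as 0xca 0x3e. That decodes, as below, to
field 1001 with the length-delimited wire type; the LocalFlags tag
0xc0 0x3e written next decodes to field 1000 (varint) the same way.
```
package main

import "fmt"

func main() {
	// Varint decoding of the two-byte tag 0xca 0x3e: low 7 bits of each
	// byte, least significant group first.
	v := uint64(0xca&0x7f) | uint64(0x3e)<<7
	fmt.Println(v>>3, v&7) // 1001 2 -> field 1001, wire type 2
}
```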
|
2019-09-04 06:33:29 +00:00
|
|
|
if m.LocalFlags != 0 {
|
|
|
|
i = encodeVarintStructs(dAtA, i, uint64(m.LocalFlags))
|
|
|
|
i--
|
|
|
|
dAtA[i] = 0x3e
|
|
|
|
i--
|
|
|
|
dAtA[i] = 0xc0
|
2016-07-04 10:40:29 +00:00
|
|
|
}
|
2020-11-09 14:33:32 +00:00
|
|
|
if len(m.Encrypted) > 0 {
|
|
|
|
i -= len(m.Encrypted)
|
|
|
|
copy(dAtA[i:], m.Encrypted)
|
|
|
|
i = encodeVarintStructs(dAtA, i, uint64(len(m.Encrypted)))
|
|
|
|
i--
|
|
|
|
dAtA[i] = 0x1
|
|
|
|
i--
|
|
|
|
dAtA[i] = 0x9a
|
|
|
|
}
|
lib/db: Deduplicate block lists in database (fixes #5898) (#6283)
2020-01-24 07:35:44 +00:00
|
|
|
if len(m.BlocksHash) > 0 {
|
|
|
|
i -= len(m.BlocksHash)
|
|
|
|
copy(dAtA[i:], m.BlocksHash)
|
|
|
|
i = encodeVarintStructs(dAtA, i, uint64(len(m.BlocksHash)))
|
|
|
|
i--
|
|
|
|
dAtA[i] = 0x1
|
|
|
|
i--
|
|
|
|
dAtA[i] = 0x92
|
|
|
|
}
|
2019-09-04 06:33:29 +00:00
|
|
|
if len(m.SymlinkTarget) > 0 {
|
|
|
|
i -= len(m.SymlinkTarget)
|
|
|
|
copy(dAtA[i:], m.SymlinkTarget)
|
|
|
|
i = encodeVarintStructs(dAtA, i, uint64(len(m.SymlinkTarget)))
|
|
|
|
i--
|
|
|
|
dAtA[i] = 0x1
|
|
|
|
i--
|
|
|
|
dAtA[i] = 0x8a
|
2016-07-04 10:40:29 +00:00
|
|
|
}
|
2019-09-04 06:33:29 +00:00
|
|
|
if m.RawBlockSize != 0 {
|
|
|
|
i = encodeVarintStructs(dAtA, i, uint64(m.RawBlockSize))
|
|
|
|
i--
|
|
|
|
dAtA[i] = 0x68
|
2016-07-04 10:40:29 +00:00
|
|
|
}
|
2019-09-04 06:33:29 +00:00
|
|
|
if m.ModifiedBy != 0 {
|
|
|
|
i = encodeVarintStructs(dAtA, i, uint64(m.ModifiedBy))
|
|
|
|
i--
|
|
|
|
dAtA[i] = 0x60
|
2016-07-04 10:40:29 +00:00
|
|
|
}
|
2019-09-04 06:33:29 +00:00
|
|
|
if m.ModifiedNs != 0 {
|
|
|
|
i = encodeVarintStructs(dAtA, i, uint64(m.ModifiedNs))
|
|
|
|
i--
|
|
|
|
dAtA[i] = 0x58
|
2016-07-04 10:40:29 +00:00
|
|
|
}
|
2019-09-04 06:33:29 +00:00
|
|
|
if m.Sequence != 0 {
|
|
|
|
i = encodeVarintStructs(dAtA, i, uint64(m.Sequence))
|
|
|
|
i--
|
|
|
|
dAtA[i] = 0x50
|
|
|
|
}
|
|
|
|
{
|
|
|
|
size, err := m.Version.MarshalToSizedBuffer(dAtA[:i])
|
|
|
|
if err != nil {
|
|
|
|
return 0, err
|
|
|
|
}
|
|
|
|
i -= size
|
|
|
|
i = encodeVarintStructs(dAtA, i, uint64(size))
|
|
|
|
}
|
|
|
|
i--
|
|
|
|
dAtA[i] = 0x4a
|
|
|
|
if m.NoPermissions {
|
|
|
|
i--
|
|
|
|
if m.NoPermissions {
|
2017-01-03 00:16:21 +00:00
|
|
|
dAtA[i] = 1
|
2016-07-04 10:40:29 +00:00
|
|
|
} else {
|
2017-01-03 00:16:21 +00:00
|
|
|
dAtA[i] = 0
|
2016-07-04 10:40:29 +00:00
|
|
|
}
|
2019-09-04 06:33:29 +00:00
|
|
|
i--
|
|
|
|
dAtA[i] = 0x40
|
2016-07-04 10:40:29 +00:00
|
|
|
}
|
2018-06-24 07:50:18 +00:00
|
|
|
if m.RawInvalid {
|
2019-09-04 06:33:29 +00:00
|
|
|
i--
|
2018-06-24 07:50:18 +00:00
|
|
|
if m.RawInvalid {
|
2017-01-03 00:16:21 +00:00
|
|
|
dAtA[i] = 1
|
2016-07-04 10:40:29 +00:00
|
|
|
} else {
|
2017-01-03 00:16:21 +00:00
|
|
|
dAtA[i] = 0
|
2016-07-04 10:40:29 +00:00
|
|
|
}
|
2019-09-04 06:33:29 +00:00
|
|
|
i--
|
|
|
|
dAtA[i] = 0x38
|
2016-07-04 10:40:29 +00:00
|
|
|
}
|
2019-09-04 06:33:29 +00:00
|
|
|
if m.Deleted {
|
|
|
|
i--
|
|
|
|
if m.Deleted {
|
2017-01-03 00:16:21 +00:00
|
|
|
dAtA[i] = 1
|
2016-07-04 10:40:29 +00:00
|
|
|
} else {
|
2017-01-03 00:16:21 +00:00
|
|
|
dAtA[i] = 0
|
2016-07-04 10:40:29 +00:00
|
|
|
}
|
2019-09-04 06:33:29 +00:00
|
|
|
i--
|
|
|
|
dAtA[i] = 0x30
|
2016-07-04 10:40:29 +00:00
|
|
|
}
|
2019-09-04 06:33:29 +00:00
|
|
|
if m.ModifiedS != 0 {
|
|
|
|
i = encodeVarintStructs(dAtA, i, uint64(m.ModifiedS))
|
|
|
|
i--
|
|
|
|
dAtA[i] = 0x28
|
2016-08-06 13:05:59 +00:00
|
|
|
}
|
2019-09-04 06:33:29 +00:00
|
|
|
if m.Permissions != 0 {
|
|
|
|
i = encodeVarintStructs(dAtA, i, uint64(m.Permissions))
|
|
|
|
i--
|
|
|
|
dAtA[i] = 0x20
|
2016-12-21 16:35:20 +00:00
|
|
|
}
|
2019-09-04 06:33:29 +00:00
|
|
|
if m.Size != 0 {
|
|
|
|
i = encodeVarintStructs(dAtA, i, uint64(m.Size))
|
|
|
|
i--
|
|
|
|
dAtA[i] = 0x18
|
2018-04-16 18:08:50 +00:00
|
|
|
}
|
2019-09-04 06:33:29 +00:00
|
|
|
if m.Type != 0 {
|
|
|
|
i = encodeVarintStructs(dAtA, i, uint64(m.Type))
|
|
|
|
i--
|
|
|
|
dAtA[i] = 0x10
|
2016-12-09 18:02:18 +00:00
|
|
|
}
|
2019-09-04 06:33:29 +00:00
|
|
|
if len(m.Name) > 0 {
|
|
|
|
i -= len(m.Name)
|
|
|
|
copy(dAtA[i:], m.Name)
|
|
|
|
i = encodeVarintStructs(dAtA, i, uint64(len(m.Name)))
|
|
|
|
i--
|
|
|
|
dAtA[i] = 0xa
|
2018-06-24 07:50:18 +00:00
|
|
|
}
|
2019-09-04 06:33:29 +00:00
|
|
|
return len(dAtA) - i, nil
|
2016-07-04 10:40:29 +00:00
|
|
|
}
|
|
|
|
|
lib/db: Deduplicate block lists in database (fixes #5898) (#6283)
2020-01-24 07:35:44 +00:00
|
|
|
func (m *BlockList) Marshal() (dAtA []byte, err error) {
|
|
|
|
size := m.ProtoSize()
|
|
|
|
dAtA = make([]byte, size)
|
|
|
|
n, err := m.MarshalToSizedBuffer(dAtA[:size])
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
return dAtA[:n], nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func (m *BlockList) MarshalTo(dAtA []byte) (int, error) {
|
|
|
|
size := m.ProtoSize()
|
|
|
|
return m.MarshalToSizedBuffer(dAtA[:size])
|
|
|
|
}
|
|
|
|
|
|
|
|
func (m *BlockList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
|
|
|
|
i := len(dAtA)
|
|
|
|
_ = i
|
|
|
|
var l int
|
|
|
|
_ = l
|
|
|
|
if len(m.Blocks) > 0 {
|
|
|
|
for iNdEx := len(m.Blocks) - 1; iNdEx >= 0; iNdEx-- {
|
|
|
|
{
|
|
|
|
size, err := m.Blocks[iNdEx].MarshalToSizedBuffer(dAtA[:i])
|
|
|
|
if err != nil {
|
|
|
|
return 0, err
|
|
|
|
}
|
|
|
|
i -= size
|
|
|
|
i = encodeVarintStructs(dAtA, i, uint64(size))
|
|
|
|
}
|
|
|
|
i--
|
|
|
|
dAtA[i] = 0xa
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return len(dAtA) - i, nil
|
|
|
|
}
|
|
|
|
|
2020-05-13 12:28:42 +00:00
|
|
|
func (m *IndirectionHashesOnly) Marshal() (dAtA []byte, err error) {
|
lib/db: Deduplicate block lists in database (fixes #5898) (#6283)
2020-01-24 07:35:44 +00:00
|
|
|
size := m.ProtoSize()
|
|
|
|
dAtA = make([]byte, size)
|
|
|
|
n, err := m.MarshalToSizedBuffer(dAtA[:size])
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
return dAtA[:n], nil
|
|
|
|
}

func (m *IndirectionHashesOnly) MarshalTo(dAtA []byte) (int, error) {
	size := m.ProtoSize()
	return m.MarshalToSizedBuffer(dAtA[:size])
}

func (m *IndirectionHashesOnly) MarshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	_ = i
	var l int
	_ = l
	if len(m.VersionHash) > 0 {
		i -= len(m.VersionHash)
		copy(dAtA[i:], m.VersionHash)
		i = encodeVarintStructs(dAtA, i, uint64(len(m.VersionHash)))
		i--
		dAtA[i] = 0x3e
		i--
		dAtA[i] = 0xca
	}
	if len(m.BlocksHash) > 0 {
		i -= len(m.BlocksHash)
		copy(dAtA[i:], m.BlocksHash)
		i = encodeVarintStructs(dAtA, i, uint64(len(m.BlocksHash)))
		i--
		dAtA[i] = 0x1
		i--
		dAtA[i] = 0x92
	}
	return len(dAtA) - i, nil
}

func (m *Counts) Marshal() (dAtA []byte, err error) {
	size := m.ProtoSize()
	dAtA = make([]byte, size)
	n, err := m.MarshalToSizedBuffer(dAtA[:size])
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

func (m *Counts) MarshalTo(dAtA []byte) (int, error) {
	size := m.ProtoSize()
	return m.MarshalToSizedBuffer(dAtA[:size])
}

func (m *Counts) MarshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	_ = i
	var l int
	_ = l
	if m.LocalFlags != 0 {
		i = encodeVarintStructs(dAtA, i, uint64(m.LocalFlags))
		i--
		dAtA[i] = 0x1
		i--
		dAtA[i] = 0x90
	}
	if len(m.DeviceID) > 0 {
		i -= len(m.DeviceID)
		copy(dAtA[i:], m.DeviceID)
		i = encodeVarintStructs(dAtA, i, uint64(len(m.DeviceID)))
		i--
		dAtA[i] = 0x1
		i--
		dAtA[i] = 0x8a
	}
	if m.Sequence != 0 {
		i = encodeVarintStructs(dAtA, i, uint64(m.Sequence))
		i--
		dAtA[i] = 0x30
	}
	if m.Bytes != 0 {
		i = encodeVarintStructs(dAtA, i, uint64(m.Bytes))
		i--
		dAtA[i] = 0x28
	}
	if m.Deleted != 0 {
		i = encodeVarintStructs(dAtA, i, uint64(m.Deleted))
		i--
		dAtA[i] = 0x20
	}
	if m.Symlinks != 0 {
		i = encodeVarintStructs(dAtA, i, uint64(m.Symlinks))
		i--
		dAtA[i] = 0x18
	}
	if m.Directories != 0 {
		i = encodeVarintStructs(dAtA, i, uint64(m.Directories))
		i--
		dAtA[i] = 0x10
	}
	if m.Files != 0 {
		i = encodeVarintStructs(dAtA, i, uint64(m.Files))
		i--
		dAtA[i] = 0x8
	}
	return len(dAtA) - i, nil
}
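
// Note on the two-byte tags above: a protobuf tag is the varint
// (fieldNumber<<3 | wireType), so field numbers 16 and up need more than one
// tag byte. Because this buffer is filled back to front, the tag bytes are
// written in reverse: for DeviceID the wire bytes 0x8a, 0x01 (varint 138 =
// 17<<3 | 2, i.e. field 17, length-delimited) are emitted as dAtA[i] = 0x1
// followed by dAtA[i] = 0x8a. Likewise 0x90, 0x01 (varint 144 = 18<<3 | 0)
// is field 18, the varint-encoded LocalFlags.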

func (m *CountsSet) Marshal() (dAtA []byte, err error) {
	size := m.ProtoSize()
	dAtA = make([]byte, size)
	n, err := m.MarshalToSizedBuffer(dAtA[:size])
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

func (m *CountsSet) MarshalTo(dAtA []byte) (int, error) {
	size := m.ProtoSize()
	return m.MarshalToSizedBuffer(dAtA[:size])
}

func (m *CountsSet) MarshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	_ = i
	var l int
	_ = l
	if m.Created != 0 {
		i = encodeVarintStructs(dAtA, i, uint64(m.Created))
		i--
		dAtA[i] = 0x10
	}
	if len(m.Counts) > 0 {
		for iNdEx := len(m.Counts) - 1; iNdEx >= 0; iNdEx-- {
			{
				size, err := m.Counts[iNdEx].MarshalToSizedBuffer(dAtA[:i])
				if err != nil {
					return 0, err
				}
				i -= size
				i = encodeVarintStructs(dAtA, i, uint64(size))
			}
			i--
			dAtA[i] = 0xa
		}
	}
	return len(dAtA) - i, nil
}

func (m *FileVersionDeprecated) Marshal() (dAtA []byte, err error) {
	size := m.ProtoSize()
	dAtA = make([]byte, size)
	n, err := m.MarshalToSizedBuffer(dAtA[:size])
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

func (m *FileVersionDeprecated) MarshalTo(dAtA []byte) (int, error) {
	size := m.ProtoSize()
	return m.MarshalToSizedBuffer(dAtA[:size])
}

func (m *FileVersionDeprecated) MarshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	_ = i
	var l int
	_ = l
	if m.Deleted {
		i--
		if m.Deleted {
			dAtA[i] = 1
		} else {
			dAtA[i] = 0
		}
		i--
		dAtA[i] = 0x20
	}
	if m.Invalid {
		i--
		if m.Invalid {
			dAtA[i] = 1
		} else {
			dAtA[i] = 0
		}
		i--
		dAtA[i] = 0x18
	}
	if len(m.Device) > 0 {
		i -= len(m.Device)
		copy(dAtA[i:], m.Device)
		i = encodeVarintStructs(dAtA, i, uint64(len(m.Device)))
		i--
		dAtA[i] = 0x12
	}
	{
		size, err := m.Version.MarshalToSizedBuffer(dAtA[:i])
		if err != nil {
			return 0, err
		}
		i -= size
		i = encodeVarintStructs(dAtA, i, uint64(size))
	}
	i--
	dAtA[i] = 0xa
	return len(dAtA) - i, nil
}
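
// Note on the bool fields above: protobuf has no dedicated bool wire type;
// a bool travels as a varint field holding 0 or 1, so a set flag costs
// exactly two bytes (a tag byte such as 0x20 = field 4, wire type 0, plus
// the value byte). That is why the ProtoSize methods below count n += 2 per
// true bool. The inner "if m.Deleted" check is redundant after the outer
// one but is what the generator emits; false bools are skipped entirely and
// occupy zero bytes on the wire.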

func (m *VersionListDeprecated) Marshal() (dAtA []byte, err error) {
	size := m.ProtoSize()
	dAtA = make([]byte, size)
	n, err := m.MarshalToSizedBuffer(dAtA[:size])
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

func (m *VersionListDeprecated) MarshalTo(dAtA []byte) (int, error) {
	size := m.ProtoSize()
	return m.MarshalToSizedBuffer(dAtA[:size])
}

func (m *VersionListDeprecated) MarshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	_ = i
	var l int
	_ = l
	if len(m.Versions) > 0 {
		for iNdEx := len(m.Versions) - 1; iNdEx >= 0; iNdEx-- {
			{
				size, err := m.Versions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
				if err != nil {
					return 0, err
				}
				i -= size
				i = encodeVarintStructs(dAtA, i, uint64(size))
			}
			i--
			dAtA[i] = 0xa
		}
	}
	return len(dAtA) - i, nil
}

func (m *ObservedFolder) Marshal() (dAtA []byte, err error) {
	size := m.ProtoSize()
	dAtA = make([]byte, size)
	n, err := m.MarshalToSizedBuffer(dAtA[:size])
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

func (m *ObservedFolder) MarshalTo(dAtA []byte) (int, error) {
	size := m.ProtoSize()
	return m.MarshalToSizedBuffer(dAtA[:size])
}

func (m *ObservedFolder) MarshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	_ = i
	var l int
	_ = l
	if m.RemoteEncrypted {
		i--
		if m.RemoteEncrypted {
			dAtA[i] = 1
		} else {
			dAtA[i] = 0
		}
		i--
		dAtA[i] = 0x20
	}
	if m.ReceiveEncrypted {
		i--
		if m.ReceiveEncrypted {
			dAtA[i] = 1
		} else {
			dAtA[i] = 0
		}
		i--
		dAtA[i] = 0x18
	}
	if len(m.Label) > 0 {
		i -= len(m.Label)
		copy(dAtA[i:], m.Label)
		i = encodeVarintStructs(dAtA, i, uint64(len(m.Label)))
		i--
		dAtA[i] = 0x12
	}
	n4, err4 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Time, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Time):])
	if err4 != nil {
		return 0, err4
	}
	i -= n4
	i = encodeVarintStructs(dAtA, i, uint64(n4))
	i--
	dAtA[i] = 0xa
	return len(dAtA) - i, nil
}
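
// Note on the timestamp above: StdTimeMarshalTo from gogo/protobuf/types
// serializes a native time.Time as a google.protobuf.Timestamp submessage
// (seconds plus nanos) directly into the tail of the buffer. The slice
// arithmetic reserves exactly SizeOfStdTime(m.Time) bytes ahead of the
// already-written data; the returned byte count n4 is then prefixed as the
// submessage length, followed by tag 0xa (field 1, wire type 2).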

func (m *ObservedDevice) Marshal() (dAtA []byte, err error) {
	size := m.ProtoSize()
	dAtA = make([]byte, size)
	n, err := m.MarshalToSizedBuffer(dAtA[:size])
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

func (m *ObservedDevice) MarshalTo(dAtA []byte) (int, error) {
	size := m.ProtoSize()
	return m.MarshalToSizedBuffer(dAtA[:size])
}

func (m *ObservedDevice) MarshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	_ = i
	var l int
	_ = l
	if len(m.Address) > 0 {
		i -= len(m.Address)
		copy(dAtA[i:], m.Address)
		i = encodeVarintStructs(dAtA, i, uint64(len(m.Address)))
		i--
		dAtA[i] = 0x1a
	}
	if len(m.Name) > 0 {
		i -= len(m.Name)
		copy(dAtA[i:], m.Name)
		i = encodeVarintStructs(dAtA, i, uint64(len(m.Name)))
		i--
		dAtA[i] = 0x12
	}
	n5, err5 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Time, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Time):])
	if err5 != nil {
		return 0, err5
	}
	i -= n5
	i = encodeVarintStructs(dAtA, i, uint64(n5))
	i--
	dAtA[i] = 0xa
	return len(dAtA) - i, nil
}

func encodeVarintStructs(dAtA []byte, offset int, v uint64) int {
	offset -= sovStructs(v)
	base := offset
	for v >= 1<<7 {
		dAtA[offset] = uint8(v&0x7f | 0x80)
		v >>= 7
		offset++
	}
	dAtA[offset] = uint8(v)
	return base
}
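
// Worked example for encodeVarintStructs: varints store 7 payload bits per
// byte, least significant group first, with the high bit set on every byte
// except the last. Encoding v = 300 (0b1_0010_1100):
//
//	300&0x7f | 0x80 = 0xac // low 7 bits, continuation bit set
//	300 >> 7        = 2    // remaining bits
//	2               = 0x02 // final byte, high bit clear
//
// so 300 becomes the two bytes 0xac, 0x02. The function first rewinds
// offset by sovStructs(v) so the encoded bytes end exactly where the
// previously written data begins.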

func (m *FileVersion) ProtoSize() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	l = m.Version.ProtoSize()
	n += 1 + l + sovStructs(uint64(l))
	if m.Deleted {
		n += 2
	}
	if len(m.Devices) > 0 {
		for _, b := range m.Devices {
			l = len(b)
			n += 1 + l + sovStructs(uint64(l))
		}
	}
	if len(m.InvalidDevices) > 0 {
		for _, b := range m.InvalidDevices {
			l = len(b)
			n += 1 + l + sovStructs(uint64(l))
		}
	}
	return n
}
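
// Note on the size arithmetic above: for a length-delimited field with a
// one-byte tag, the wire cost is 1 (tag) + sovStructs(l) (length varint) +
// l (payload), hence "n += 1 + l + sovStructs(uint64(l))". Fields numbered
// 16 or higher carry a two-byte tag, which is where the "n += 2 + ..."
// variants in the following methods come from, and a set bool is one tag
// byte plus one value byte, i.e. "n += 2".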

func (m *VersionList) ProtoSize() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	if len(m.RawVersions) > 0 {
		for _, e := range m.RawVersions {
			l = e.ProtoSize()
			n += 1 + l + sovStructs(uint64(l))
		}
	}
	return n
}

func (m *FileInfoTruncated) ProtoSize() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	l = len(m.Name)
	if l > 0 {
		n += 1 + l + sovStructs(uint64(l))
	}
	if m.Type != 0 {
		n += 1 + sovStructs(uint64(m.Type))
	}
	if m.Size != 0 {
		n += 1 + sovStructs(uint64(m.Size))
	}
	if m.Permissions != 0 {
		n += 1 + sovStructs(uint64(m.Permissions))
	}
	if m.ModifiedS != 0 {
		n += 1 + sovStructs(uint64(m.ModifiedS))
	}
	if m.Deleted {
		n += 2
	}
	if m.RawInvalid {
		n += 2
	}
	if m.NoPermissions {
		n += 2
	}
	l = m.Version.ProtoSize()
	n += 1 + l + sovStructs(uint64(l))
	if m.Sequence != 0 {
		n += 1 + sovStructs(uint64(m.Sequence))
	}
	if m.ModifiedNs != 0 {
		n += 1 + sovStructs(uint64(m.ModifiedNs))
	}
	if m.ModifiedBy != 0 {
		n += 1 + sovStructs(uint64(m.ModifiedBy))
	}
	if m.RawBlockSize != 0 {
		n += 1 + sovStructs(uint64(m.RawBlockSize))
	}
	l = len(m.SymlinkTarget)
	if l > 0 {
		n += 2 + l + sovStructs(uint64(l))
	}
	l = len(m.BlocksHash)
	if l > 0 {
		n += 2 + l + sovStructs(uint64(l))
	}
	l = len(m.Encrypted)
	if l > 0 {
		n += 2 + l + sovStructs(uint64(l))
	}
	if m.LocalFlags != 0 {
		n += 2 + sovStructs(uint64(m.LocalFlags))
	}
	l = len(m.VersionHash)
	if l > 0 {
		n += 2 + l + sovStructs(uint64(l))
	}
	return n
}

func (m *BlockList) ProtoSize() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	if len(m.Blocks) > 0 {
		for _, e := range m.Blocks {
			l = e.ProtoSize()
			n += 1 + l + sovStructs(uint64(l))
		}
	}
	return n
}

func (m *IndirectionHashesOnly) ProtoSize() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	l = len(m.BlocksHash)
	if l > 0 {
		n += 2 + l + sovStructs(uint64(l))
	}
	l = len(m.VersionHash)
	if l > 0 {
		n += 2 + l + sovStructs(uint64(l))
	}
	return n
}

func (m *Counts) ProtoSize() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	if m.Files != 0 {
		n += 1 + sovStructs(uint64(m.Files))
	}
	if m.Directories != 0 {
		n += 1 + sovStructs(uint64(m.Directories))
	}
	if m.Symlinks != 0 {
		n += 1 + sovStructs(uint64(m.Symlinks))
	}
	if m.Deleted != 0 {
		n += 1 + sovStructs(uint64(m.Deleted))
	}
	if m.Bytes != 0 {
		n += 1 + sovStructs(uint64(m.Bytes))
	}
	if m.Sequence != 0 {
		n += 1 + sovStructs(uint64(m.Sequence))
	}
	l = len(m.DeviceID)
	if l > 0 {
		n += 2 + l + sovStructs(uint64(l))
	}
	if m.LocalFlags != 0 {
		n += 2 + sovStructs(uint64(m.LocalFlags))
	}
	return n
}

func (m *CountsSet) ProtoSize() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	if len(m.Counts) > 0 {
		for _, e := range m.Counts {
			l = e.ProtoSize()
			n += 1 + l + sovStructs(uint64(l))
		}
	}
	if m.Created != 0 {
		n += 1 + sovStructs(uint64(m.Created))
	}
	return n
}

func (m *FileVersionDeprecated) ProtoSize() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	l = m.Version.ProtoSize()
	n += 1 + l + sovStructs(uint64(l))
	l = len(m.Device)
	if l > 0 {
		n += 1 + l + sovStructs(uint64(l))
	}
	if m.Invalid {
		n += 2
	}
	if m.Deleted {
		n += 2
	}
	return n
}

func (m *VersionListDeprecated) ProtoSize() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	if len(m.Versions) > 0 {
		for _, e := range m.Versions {
			l = e.ProtoSize()
			n += 1 + l + sovStructs(uint64(l))
		}
	}
	return n
}

func (m *ObservedFolder) ProtoSize() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	l = github_com_gogo_protobuf_types.SizeOfStdTime(m.Time)
	n += 1 + l + sovStructs(uint64(l))
	l = len(m.Label)
	if l > 0 {
		n += 1 + l + sovStructs(uint64(l))
	}
	if m.ReceiveEncrypted {
		n += 2
	}
	if m.RemoteEncrypted {
		n += 2
	}
	return n
}

func (m *ObservedDevice) ProtoSize() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	l = github_com_gogo_protobuf_types.SizeOfStdTime(m.Time)
	n += 1 + l + sovStructs(uint64(l))
	l = len(m.Name)
	if l > 0 {
		n += 1 + l + sovStructs(uint64(l))
	}
	l = len(m.Address)
	if l > 0 {
		n += 1 + l + sovStructs(uint64(l))
	}
	return n
}

func sovStructs(x uint64) (n int) {
	return (math_bits.Len64(x|1) + 6) / 7
}

func sozStructs(x uint64) (n int) {
	return sovStructs(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
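
// Note on the helpers above: sovStructs computes the encoded size of a
// varint as ceil(bits/7); the "x|1" makes Len64 return at least 1 so that
// zero still counts as one byte. sozStructs sizes a zigzag-encoded signed
// value: (x<<1) XOR the sign mask maps 0, -1, 1, -2, 2 to 0, 1, 2, 3, 4,
// keeping small negative numbers small on the wire. For example,
// sozStructs(uint64(int64(-1))) == sovStructs(1) == 1.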

func (m *FileVersion) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowStructs
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: FileVersion: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: FileVersion: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowStructs
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthStructs
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				return ErrInvalidLengthStructs
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if err := m.Version.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Deleted", wireType)
			}
			var v int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowStructs
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				v |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			m.Deleted = bool(v != 0)
		case 3:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Devices", wireType)
			}
			var byteLen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowStructs
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				byteLen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if byteLen < 0 {
				return ErrInvalidLengthStructs
			}
			postIndex := iNdEx + byteLen
			if postIndex < 0 {
				return ErrInvalidLengthStructs
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Devices = append(m.Devices, make([]byte, postIndex-iNdEx))
			copy(m.Devices[len(m.Devices)-1], dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		case 4:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field InvalidDevices", wireType)
			}
			var byteLen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowStructs
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				byteLen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if byteLen < 0 {
				return ErrInvalidLengthStructs
			}
			postIndex := iNdEx + byteLen
			if postIndex < 0 {
				return ErrInvalidLengthStructs
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.InvalidDevices = append(m.InvalidDevices, make([]byte, postIndex-iNdEx))
			copy(m.InvalidDevices[len(m.InvalidDevices)-1], dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		default:
			iNdEx = preIndex
			skippy, err := skipStructs(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if (skippy < 0) || (iNdEx+skippy) < 0 {
				return ErrInvalidLengthStructs
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
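
// Editor's note, not generated code: the hand-rolled loops above decode
// protobuf base-128 varints. Each byte contributes its low seven bits,
// least-significant group first, and a clear high bit (b < 0x80) marks the
// final byte. A minimal round-trip through the generated Marshal/Unmarshal
// pair might look like the sketch below; the field values are hypothetical.
//
//	fv := FileVersion{Deleted: true, Devices: [][]byte{{0x01, 0x02}}}
//	buf, err := fv.Marshal()
//	if err != nil {
//		// handle the encoding error
//	}
//	var out FileVersion
//	if err := out.Unmarshal(buf); err != nil {
//		// handle the decoding error
//	}
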
func (m *VersionList) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowStructs
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: VersionList: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: VersionList: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field RawVersions", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowStructs
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthStructs
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				return ErrInvalidLengthStructs
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.RawVersions = append(m.RawVersions, FileVersion{})
			if err := m.RawVersions[len(m.RawVersions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			iNdEx = preIndex
			skippy, err := skipStructs(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if (skippy < 0) || (iNdEx+skippy) < 0 {
				return ErrInvalidLengthStructs
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
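
// Editor's note, not generated code: RawVersions is a repeated message
// field, so each occurrence of tag 1 on the wire appends exactly one
// FileVersion above. Iterating the decoded list might look like this
// sketch; raw is a hypothetical stored record.
//
//	var vl VersionList
//	if err := vl.Unmarshal(raw); err != nil {
//		// handle the decoding error
//	}
//	for _, fv := range vl.RawVersions {
//		_ = fv.Deleted // inspect each version entry
//	}
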
func (m *FileInfoTruncated) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowStructs
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: FileInfoTruncated: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: FileInfoTruncated: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowStructs
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= uint64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthStructs
			}
			postIndex := iNdEx + intStringLen
			if postIndex < 0 {
				return ErrInvalidLengthStructs
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Name = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		case 2:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
			}
			m.Type = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowStructs
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.Type |= protocol.FileInfoType(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 3:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Size", wireType)
			}
			m.Size = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowStructs
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.Size |= int64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 4:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Permissions", wireType)
			}
			m.Permissions = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowStructs
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.Permissions |= uint32(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 5:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field ModifiedS", wireType)
			}
			m.ModifiedS = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowStructs
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.ModifiedS |= int64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 6:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Deleted", wireType)
			}
			var v int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowStructs
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				v |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			m.Deleted = bool(v != 0)
		case 7:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field RawInvalid", wireType)
			}
			var v int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowStructs
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				v |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			m.RawInvalid = bool(v != 0)
		case 8:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field NoPermissions", wireType)
			}
			var v int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowStructs
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				v |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			m.NoPermissions = bool(v != 0)
		case 9:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowStructs
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthStructs
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				return ErrInvalidLengthStructs
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if err := m.Version.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 10:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Sequence", wireType)
			}
			m.Sequence = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowStructs
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.Sequence |= int64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 11:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field ModifiedNs", wireType)
			}
			m.ModifiedNs = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowStructs
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.ModifiedNs |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 12:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field ModifiedBy", wireType)
			}
			m.ModifiedBy = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowStructs
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.ModifiedBy |= github_com_syncthing_syncthing_lib_protocol.ShortID(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 13:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field RawBlockSize", wireType)
			}
			m.RawBlockSize = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowStructs
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.RawBlockSize |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 17:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field SymlinkTarget", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowStructs
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= uint64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthStructs
			}
			postIndex := iNdEx + intStringLen
			if postIndex < 0 {
				return ErrInvalidLengthStructs
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.SymlinkTarget = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		case 18:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field BlocksHash", wireType)
			}
			var byteLen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowStructs
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				byteLen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if byteLen < 0 {
				return ErrInvalidLengthStructs
			}
			postIndex := iNdEx + byteLen
			if postIndex < 0 {
				return ErrInvalidLengthStructs
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.BlocksHash = append(m.BlocksHash[:0], dAtA[iNdEx:postIndex]...)
			if m.BlocksHash == nil {
				m.BlocksHash = []byte{}
			}
			iNdEx = postIndex
		case 19:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Encrypted", wireType)
			}
			var byteLen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowStructs
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				byteLen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if byteLen < 0 {
				return ErrInvalidLengthStructs
			}
			postIndex := iNdEx + byteLen
			if postIndex < 0 {
				return ErrInvalidLengthStructs
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Encrypted = append(m.Encrypted[:0], dAtA[iNdEx:postIndex]...)
			if m.Encrypted == nil {
				m.Encrypted = []byte{}
			}
			iNdEx = postIndex
		case 1000:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field LocalFlags", wireType)
			}
			m.LocalFlags = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowStructs
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.LocalFlags |= uint32(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 1001:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field VersionHash", wireType)
			}
			var byteLen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowStructs
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				byteLen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if byteLen < 0 {
				return ErrInvalidLengthStructs
			}
			postIndex := iNdEx + byteLen
			if postIndex < 0 {
				return ErrInvalidLengthStructs
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.VersionHash = append(m.VersionHash[:0], dAtA[iNdEx:postIndex]...)
			if m.VersionHash == nil {
				m.VersionHash = []byte{}
			}
			iNdEx = postIndex
		default:
			iNdEx = preIndex
			skippy, err := skipStructs(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if (skippy < 0) || (iNdEx+skippy) < 0 {
				return ErrInvalidLengthStructs
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
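
// Editor's note, not generated code: ModifiedS carries whole seconds and
// ModifiedNs the intra-second remainder, so a caller could plausibly
// reassemble the modification time as below (a sketch, assuming that split;
// fi is a hypothetical decoded FileInfoTruncated).
//
//	mtime := time.Unix(fi.ModifiedS, int64(fi.ModifiedNs))
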
func (m *BlockList) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowStructs
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: BlockList: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: BlockList: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Blocks", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowStructs
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthStructs
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				return ErrInvalidLengthStructs
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Blocks = append(m.Blocks, protocol.BlockInfo{})
			if err := m.Blocks[len(m.Blocks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			iNdEx = preIndex
			skippy, err := skipStructs(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if (skippy < 0) || (iNdEx+skippy) < 0 {
				return ErrInvalidLengthStructs
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
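
// Editor's sketch, not generated code: per the block-list deduplication
// scheme, a BlockList is stored keyed by the SHA-256 of its marshalled
// form. Deriving that key could look like this (requires crypto/sha256,
// which this file does not import; blocks is hypothetical).
//
//	bl := BlockList{Blocks: blocks} // blocks []protocol.BlockInfo
//	buf, err := bl.Marshal()
//	if err != nil {
//		// handle the encoding error
//	}
//	key := sha256.Sum256(buf)
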
|
2020-05-13 12:28:42 +00:00
|
|
|
func (m *IndirectionHashesOnly) Unmarshal(dAtA []byte) error {
|
lib/db: Deduplicate block lists in database (fixes #5898) (#6283)
* lib/db: Deduplicate block lists in database (fixes #5898)
This moves the block list in the database out from being just a field on
the FileInfo to being an object of its own. When putting a FileInfo we
marshal the block list separately and store it keyed by the sha256 of
the marshalled block list. When getting, if we are not doing a
"truncated" get, we do an extra read and unmarshal for the block list.
Old block lists are cleared out by a periodic GC sweep. The alternative
would be to use refcounting, but:
- There is a larger risk of getting that wrong and either dropping a
block list in error or keeping them around forever.
- It's tricky with our current database, as we don't have dirty reads.
This means that if we update two FileInfos with identical block lists in
the same transaction we can't just do read/modify/write for the ref
counters as we wouldn't see our own first update. See above about
tracking this and risks about getting it wrong.
GC uses a bloom filter for keys to avoid heavy RAM usage. GC can't run
concurrently with FileInfo updates so there is a new lock around those
operation at the lowlevel.
The end result is a much more compact database, especially for setups
with many peers where files get duplicated many times.
This is per-key-class stats for a large database I'm currently working
with, under the current schema:
```
0x00: 9138161 items, 870876 KB keys + 7397482 KB data, 95 B + 809 B avg, 1637651 B max
0x01: 185656 items, 10388 KB keys + 1790909 KB data, 55 B + 9646 B avg, 924525 B max
0x02: 916890 items, 84795 KB keys + 3667 KB data, 92 B + 4 B avg, 192 B max
0x03: 384 items, 27 KB keys + 5 KB data, 72 B + 15 B avg, 87 B max
0x04: 1109 items, 17 KB keys + 17 KB data, 15 B + 15 B avg, 69 B max
0x06: 383 items, 3 KB keys + 0 KB data, 9 B + 2 B avg, 18 B max
0x07: 510 items, 4 KB keys + 12 KB data, 9 B + 24 B avg, 41 B max
0x08: 1349 items, 12 KB keys + 10 KB data, 9 B + 8 B avg, 17 B max
0x09: 194 items, 0 KB keys + 123 KB data, 5 B + 634 B avg, 11484 B max
0x0a: 3 items, 0 KB keys + 0 KB data, 14 B + 7 B avg, 30 B max
0x0b: 181836 items, 2363 KB keys + 10694 KB data, 13 B + 58 B avg, 173 B max
Total 10426475 items, 968490 KB keys + 9202925 KB data.
```
Note 7.4 GB of data in class 00, total size 9.2 GB. After running the
migration we get this instead:
```
0x00: 9138161 items, 870876 KB keys + 2611392 KB data, 95 B + 285 B avg, 4788 B max
0x01: 185656 items, 10388 KB keys + 1790909 KB data, 55 B + 9646 B avg, 924525 B max
0x02: 916890 items, 84795 KB keys + 3667 KB data, 92 B + 4 B avg, 192 B max
0x03: 384 items, 27 KB keys + 5 KB data, 72 B + 15 B avg, 87 B max
0x04: 1109 items, 17 KB keys + 17 KB data, 15 B + 15 B avg, 69 B max
0x06: 383 items, 3 KB keys + 0 KB data, 9 B + 2 B avg, 18 B max
0x07: 510 items, 4 KB keys + 12 KB data, 9 B + 24 B avg, 41 B max
0x09: 194 items, 0 KB keys + 123 KB data, 5 B + 634 B avg, 11484 B max
0x0a: 3 items, 0 KB keys + 0 KB data, 14 B + 17 B avg, 51 B max
0x0b: 181836 items, 2363 KB keys + 10694 KB data, 13 B + 58 B avg, 173 B max
0x0d: 44282 items, 1461 KB keys + 61081 KB data, 33 B + 1379 B avg, 1637399 B max
Total 10469408 items, 969939 KB keys + 4477905 KB data.
```
Class 00 is now down to 2.6 GB, with just 61 MB added in class 0d.
There will be some additional reads in some cases which theoretically
hurts performance, but this will be more than compensated for by smaller
writes and better compaction.
On my own home setup which just has three devices and a handful of
folders the difference is smaller in absolute numbers of course, but
still less than half the old size:
```
0x00: 297122 items, 20894 KB keys + 306860 KB data, 70 B + 1032 B avg, 103237 B max
0x01: 115299 items, 7738 KB keys + 17542 KB data, 67 B + 152 B avg, 419 B max
0x02: 1430537 items, 121223 KB keys + 5722 KB data, 84 B + 4 B avg, 253 B max
...
Total 1947412 items, 151268 KB keys + 337485 KB data.
```
to:
```
0x00: 297122 items, 20894 KB keys + 37038 KB data, 70 B + 124 B avg, 520 B max
0x01: 115299 items, 7738 KB keys + 17542 KB data, 67 B + 152 B avg, 419 B max
0x02: 1430537 items, 121223 KB keys + 5722 KB data, 84 B + 4 B avg, 253 B max
...
0x0d: 18041 items, 595 KB keys + 71964 KB data, 33 B + 3988 B avg, 101109 B max
Total 1965447 items, 151863 KB keys + 139628 KB data.
```
* wip
* wip
* wip
* wip
2020-01-24 07:35:44 +00:00
|
|
|
l := len(dAtA)
|
|
|
|
iNdEx := 0
|
|
|
|
for iNdEx < l {
|
|
|
|
preIndex := iNdEx
|
|
|
|
var wire uint64
|
|
|
|
for shift := uint(0); ; shift += 7 {
|
|
|
|
if shift >= 64 {
|
|
|
|
return ErrIntOverflowStructs
|
|
|
|
}
|
|
|
|
if iNdEx >= l {
|
|
|
|
return io.ErrUnexpectedEOF
|
|
|
|
}
|
|
|
|
b := dAtA[iNdEx]
|
|
|
|
iNdEx++
|
|
|
|
wire |= uint64(b&0x7F) << shift
|
|
|
|
if b < 0x80 {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
|
|
|
fieldNum := int32(wire >> 3)
|
|
|
|
wireType := int(wire & 0x7)
|
|
|
|
if wireType == 4 {
|
2020-05-13 12:28:42 +00:00
|
|
|
return fmt.Errorf("proto: IndirectionHashesOnly: wiretype end group for non-group")
|
lib/db: Deduplicate block lists in database (fixes #5898) (#6283)
* lib/db: Deduplicate block lists in database (fixes #5898)
This moves the block list in the database out from being just a field on
the FileInfo to being an object of its own. When putting a FileInfo we
marshal the block list separately and store it keyed by the sha256 of
the marshalled block list. When getting, if we are not doing a
"truncated" get, we do an extra read and unmarshal for the block list.
Old block lists are cleared out by a periodic GC sweep. The alternative
would be to use refcounting, but:
- There is a larger risk of getting that wrong and either dropping a
block list in error or keeping them around forever.
- It's tricky with our current database, as we don't have dirty reads.
This means that if we update two FileInfos with identical block lists in
the same transaction we can't just do read/modify/write for the ref
counters as we wouldn't see our own first update. See above about
tracking this and risks about getting it wrong.
GC uses a bloom filter for keys to avoid heavy RAM usage. GC can't run
concurrently with FileInfo updates so there is a new lock around those
operation at the lowlevel.
The end result is a much more compact database, especially for setups
with many peers where files get duplicated many times.
This is per-key-class stats for a large database I'm currently working
with, under the current schema:
```
0x00: 9138161 items, 870876 KB keys + 7397482 KB data, 95 B + 809 B avg, 1637651 B max
0x01: 185656 items, 10388 KB keys + 1790909 KB data, 55 B + 9646 B avg, 924525 B max
0x02: 916890 items, 84795 KB keys + 3667 KB data, 92 B + 4 B avg, 192 B max
0x03: 384 items, 27 KB keys + 5 KB data, 72 B + 15 B avg, 87 B max
0x04: 1109 items, 17 KB keys + 17 KB data, 15 B + 15 B avg, 69 B max
0x06: 383 items, 3 KB keys + 0 KB data, 9 B + 2 B avg, 18 B max
0x07: 510 items, 4 KB keys + 12 KB data, 9 B + 24 B avg, 41 B max
0x08: 1349 items, 12 KB keys + 10 KB data, 9 B + 8 B avg, 17 B max
0x09: 194 items, 0 KB keys + 123 KB data, 5 B + 634 B avg, 11484 B max
0x0a: 3 items, 0 KB keys + 0 KB data, 14 B + 7 B avg, 30 B max
0x0b: 181836 items, 2363 KB keys + 10694 KB data, 13 B + 58 B avg, 173 B max
Total 10426475 items, 968490 KB keys + 9202925 KB data.
```
Note 7.4 GB of data in class 00, total size 9.2 GB. After running the
migration we get this instead:
```
0x00: 9138161 items, 870876 KB keys + 2611392 KB data, 95 B + 285 B avg, 4788 B max
0x01: 185656 items, 10388 KB keys + 1790909 KB data, 55 B + 9646 B avg, 924525 B max
0x02: 916890 items, 84795 KB keys + 3667 KB data, 92 B + 4 B avg, 192 B max
0x03: 384 items, 27 KB keys + 5 KB data, 72 B + 15 B avg, 87 B max
0x04: 1109 items, 17 KB keys + 17 KB data, 15 B + 15 B avg, 69 B max
0x06: 383 items, 3 KB keys + 0 KB data, 9 B + 2 B avg, 18 B max
0x07: 510 items, 4 KB keys + 12 KB data, 9 B + 24 B avg, 41 B max
0x09: 194 items, 0 KB keys + 123 KB data, 5 B + 634 B avg, 11484 B max
0x0a: 3 items, 0 KB keys + 0 KB data, 14 B + 17 B avg, 51 B max
0x0b: 181836 items, 2363 KB keys + 10694 KB data, 13 B + 58 B avg, 173 B max
0x0d: 44282 items, 1461 KB keys + 61081 KB data, 33 B + 1379 B avg, 1637399 B max
Total 10469408 items, 969939 KB keys + 4477905 KB data.
```
Class 00 is now down to 2.6 GB, with just 61 MB added in class 0d.
There will be some additional reads in some cases which theoretically
hurts performance, but this will be more than compensated for by smaller
writes and better compaction.
On my own home setup which just has three devices and a handful of
folders the difference is smaller in absolute numbers of course, but
still less than half the old size:
```
0x00: 297122 items, 20894 KB keys + 306860 KB data, 70 B + 1032 B avg, 103237 B max
0x01: 115299 items, 7738 KB keys + 17542 KB data, 67 B + 152 B avg, 419 B max
0x02: 1430537 items, 121223 KB keys + 5722 KB data, 84 B + 4 B avg, 253 B max
...
Total 1947412 items, 151268 KB keys + 337485 KB data.
```
to:
```
0x00: 297122 items, 20894 KB keys + 37038 KB data, 70 B + 124 B avg, 520 B max
0x01: 115299 items, 7738 KB keys + 17542 KB data, 67 B + 152 B avg, 419 B max
0x02: 1430537 items, 121223 KB keys + 5722 KB data, 84 B + 4 B avg, 253 B max
...
0x0d: 18041 items, 595 KB keys + 71964 KB data, 33 B + 3988 B avg, 101109 B max
Total 1965447 items, 151863 KB keys + 139628 KB data.
```
* wip
* wip
* wip
* wip
2020-01-24 07:35:44 +00:00
|
|
|
}
|
|
|
|
if fieldNum <= 0 {
|
2020-05-13 12:28:42 +00:00
|
|
|
return fmt.Errorf("proto: IndirectionHashesOnly: illegal tag %d (wire type %d)", fieldNum, wire)
|
lib/db: Deduplicate block lists in database (fixes #5898) (#6283)
* lib/db: Deduplicate block lists in database (fixes #5898)
This moves the block list in the database out from being just a field on
the FileInfo to being an object of its own. When putting a FileInfo we
marshal the block list separately and store it keyed by the sha256 of
the marshalled block list. When getting, if we are not doing a
"truncated" get, we do an extra read and unmarshal for the block list.
Old block lists are cleared out by a periodic GC sweep. The alternative
would be to use refcounting, but:
- There is a larger risk of getting that wrong and either dropping a
block list in error or keeping them around forever.
- It's tricky with our current database, as we don't have dirty reads.
This means that if we update two FileInfos with identical block lists in
the same transaction we can't just do read/modify/write for the ref
counters as we wouldn't see our own first update. See above about
tracking this and risks about getting it wrong.
GC uses a bloom filter for keys to avoid heavy RAM usage. GC can't run
concurrently with FileInfo updates so there is a new lock around those
operation at the lowlevel.
The end result is a much more compact database, especially for setups
with many peers where files get duplicated many times.
This is per-key-class stats for a large database I'm currently working
with, under the current schema:
```
0x00: 9138161 items, 870876 KB keys + 7397482 KB data, 95 B + 809 B avg, 1637651 B max
0x01: 185656 items, 10388 KB keys + 1790909 KB data, 55 B + 9646 B avg, 924525 B max
0x02: 916890 items, 84795 KB keys + 3667 KB data, 92 B + 4 B avg, 192 B max
0x03: 384 items, 27 KB keys + 5 KB data, 72 B + 15 B avg, 87 B max
0x04: 1109 items, 17 KB keys + 17 KB data, 15 B + 15 B avg, 69 B max
0x06: 383 items, 3 KB keys + 0 KB data, 9 B + 2 B avg, 18 B max
0x07: 510 items, 4 KB keys + 12 KB data, 9 B + 24 B avg, 41 B max
0x08: 1349 items, 12 KB keys + 10 KB data, 9 B + 8 B avg, 17 B max
0x09: 194 items, 0 KB keys + 123 KB data, 5 B + 634 B avg, 11484 B max
0x0a: 3 items, 0 KB keys + 0 KB data, 14 B + 7 B avg, 30 B max
0x0b: 181836 items, 2363 KB keys + 10694 KB data, 13 B + 58 B avg, 173 B max
Total 10426475 items, 968490 KB keys + 9202925 KB data.
```
Note 7.4 GB of data in class 00, total size 9.2 GB. After running the
migration we get this instead:
```
0x00: 9138161 items, 870876 KB keys + 2611392 KB data, 95 B + 285 B avg, 4788 B max
0x01: 185656 items, 10388 KB keys + 1790909 KB data, 55 B + 9646 B avg, 924525 B max
0x02: 916890 items, 84795 KB keys + 3667 KB data, 92 B + 4 B avg, 192 B max
0x03: 384 items, 27 KB keys + 5 KB data, 72 B + 15 B avg, 87 B max
0x04: 1109 items, 17 KB keys + 17 KB data, 15 B + 15 B avg, 69 B max
0x06: 383 items, 3 KB keys + 0 KB data, 9 B + 2 B avg, 18 B max
0x07: 510 items, 4 KB keys + 12 KB data, 9 B + 24 B avg, 41 B max
0x09: 194 items, 0 KB keys + 123 KB data, 5 B + 634 B avg, 11484 B max
0x0a: 3 items, 0 KB keys + 0 KB data, 14 B + 17 B avg, 51 B max
0x0b: 181836 items, 2363 KB keys + 10694 KB data, 13 B + 58 B avg, 173 B max
0x0d: 44282 items, 1461 KB keys + 61081 KB data, 33 B + 1379 B avg, 1637399 B max
Total 10469408 items, 969939 KB keys + 4477905 KB data.
```
Class 00 is now down to 2.6 GB, with just 61 MB added in class 0d.
There will be some additional reads in some cases which theoretically
hurts performance, but this will be more than compensated for by smaller
writes and better compaction.
On my own home setup which just has three devices and a handful of
folders the difference is smaller in absolute numbers of course, but
still less than half the old size:
```
0x00: 297122 items, 20894 KB keys + 306860 KB data, 70 B + 1032 B avg, 103237 B max
0x01: 115299 items, 7738 KB keys + 17542 KB data, 67 B + 152 B avg, 419 B max
0x02: 1430537 items, 121223 KB keys + 5722 KB data, 84 B + 4 B avg, 253 B max
...
Total 1947412 items, 151268 KB keys + 337485 KB data.
```
to:
```
0x00: 297122 items, 20894 KB keys + 37038 KB data, 70 B + 124 B avg, 520 B max
0x01: 115299 items, 7738 KB keys + 17542 KB data, 67 B + 152 B avg, 419 B max
0x02: 1430537 items, 121223 KB keys + 5722 KB data, 84 B + 4 B avg, 253 B max
...
0x0d: 18041 items, 595 KB keys + 71964 KB data, 33 B + 3988 B avg, 101109 B max
Total 1965447 items, 151863 KB keys + 139628 KB data.
```
* wip
* wip
* wip
* wip
2020-01-24 07:35:44 +00:00
|
|
|
}
|
|
|
|
switch fieldNum {
|
|
|
|
case 18:
|
|
|
|
if wireType != 2 {
|
|
|
|
return fmt.Errorf("proto: wrong wireType = %d for field BlocksHash", wireType)
|
|
|
|
}
|
|
|
|
var byteLen int
|
|
|
|
for shift := uint(0); ; shift += 7 {
|
|
|
|
if shift >= 64 {
|
|
|
|
return ErrIntOverflowStructs
|
|
|
|
}
|
|
|
|
if iNdEx >= l {
|
|
|
|
return io.ErrUnexpectedEOF
|
|
|
|
}
|
|
|
|
b := dAtA[iNdEx]
|
|
|
|
iNdEx++
|
|
|
|
byteLen |= int(b&0x7F) << shift
|
|
|
|
if b < 0x80 {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if byteLen < 0 {
|
|
|
|
return ErrInvalidLengthStructs
|
|
|
|
}
|
|
|
|
postIndex := iNdEx + byteLen
|
|
|
|
if postIndex < 0 {
|
|
|
|
return ErrInvalidLengthStructs
|
|
|
|
}
|
|
|
|
if postIndex > l {
|
|
|
|
return io.ErrUnexpectedEOF
|
|
|
|
}
|
|
|
|
m.BlocksHash = append(m.BlocksHash[:0], dAtA[iNdEx:postIndex]...)
|
|
|
|
if m.BlocksHash == nil {
|
|
|
|
m.BlocksHash = []byte{}
|
|
|
|
}
|
|
|
|
iNdEx = postIndex
|
2020-05-13 12:28:42 +00:00
|
|
|
case 1001:
|
|
|
|
if wireType != 2 {
|
|
|
|
return fmt.Errorf("proto: wrong wireType = %d for field VersionHash", wireType)
|
|
|
|
}
|
|
|
|
var byteLen int
|
|
|
|
for shift := uint(0); ; shift += 7 {
|
|
|
|
if shift >= 64 {
|
|
|
|
return ErrIntOverflowStructs
|
|
|
|
}
|
|
|
|
if iNdEx >= l {
|
|
|
|
return io.ErrUnexpectedEOF
|
|
|
|
}
|
|
|
|
b := dAtA[iNdEx]
|
|
|
|
iNdEx++
|
|
|
|
byteLen |= int(b&0x7F) << shift
|
|
|
|
if b < 0x80 {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if byteLen < 0 {
|
|
|
|
return ErrInvalidLengthStructs
|
|
|
|
}
|
|
|
|
postIndex := iNdEx + byteLen
|
|
|
|
if postIndex < 0 {
|
|
|
|
return ErrInvalidLengthStructs
|
|
|
|
}
|
|
|
|
if postIndex > l {
|
|
|
|
return io.ErrUnexpectedEOF
|
|
|
|
}
|
|
|
|
m.VersionHash = append(m.VersionHash[:0], dAtA[iNdEx:postIndex]...)
|
|
|
|
if m.VersionHash == nil {
|
|
|
|
m.VersionHash = []byte{}
|
|
|
|
}
|
|
|
|
iNdEx = postIndex
|
lib/db: Deduplicate block lists in database (fixes #5898) (#6283)
* lib/db: Deduplicate block lists in database (fixes #5898)
This moves the block list in the database out from being just a field on
the FileInfo to being an object of its own. When putting a FileInfo we
marshal the block list separately and store it keyed by the sha256 of
the marshalled block list. When getting, if we are not doing a
"truncated" get, we do an extra read and unmarshal for the block list.
Old block lists are cleared out by a periodic GC sweep. The alternative
would be to use refcounting, but:
- There is a larger risk of getting that wrong and either dropping a
block list in error or keeping them around forever.
- It's tricky with our current database, as we don't have dirty reads.
This means that if we update two FileInfos with identical block lists in
the same transaction we can't just do read/modify/write for the ref
counters as we wouldn't see our own first update. See above about
tracking this and risks about getting it wrong.
GC uses a bloom filter for keys to avoid heavy RAM usage. GC can't run
concurrently with FileInfo updates so there is a new lock around those
operation at the lowlevel.
The end result is a much more compact database, especially for setups
with many peers where files get duplicated many times.
This is per-key-class stats for a large database I'm currently working
with, under the current schema:
```
0x00: 9138161 items, 870876 KB keys + 7397482 KB data, 95 B + 809 B avg, 1637651 B max
0x01: 185656 items, 10388 KB keys + 1790909 KB data, 55 B + 9646 B avg, 924525 B max
0x02: 916890 items, 84795 KB keys + 3667 KB data, 92 B + 4 B avg, 192 B max
0x03: 384 items, 27 KB keys + 5 KB data, 72 B + 15 B avg, 87 B max
0x04: 1109 items, 17 KB keys + 17 KB data, 15 B + 15 B avg, 69 B max
0x06: 383 items, 3 KB keys + 0 KB data, 9 B + 2 B avg, 18 B max
0x07: 510 items, 4 KB keys + 12 KB data, 9 B + 24 B avg, 41 B max
0x08: 1349 items, 12 KB keys + 10 KB data, 9 B + 8 B avg, 17 B max
0x09: 194 items, 0 KB keys + 123 KB data, 5 B + 634 B avg, 11484 B max
0x0a: 3 items, 0 KB keys + 0 KB data, 14 B + 7 B avg, 30 B max
0x0b: 181836 items, 2363 KB keys + 10694 KB data, 13 B + 58 B avg, 173 B max
Total 10426475 items, 968490 KB keys + 9202925 KB data.
```
Note 7.4 GB of data in class 00, total size 9.2 GB. After running the
migration we get this instead:
```
0x00: 9138161 items, 870876 KB keys + 2611392 KB data, 95 B + 285 B avg, 4788 B max
0x01: 185656 items, 10388 KB keys + 1790909 KB data, 55 B + 9646 B avg, 924525 B max
0x02: 916890 items, 84795 KB keys + 3667 KB data, 92 B + 4 B avg, 192 B max
0x03: 384 items, 27 KB keys + 5 KB data, 72 B + 15 B avg, 87 B max
0x04: 1109 items, 17 KB keys + 17 KB data, 15 B + 15 B avg, 69 B max
0x06: 383 items, 3 KB keys + 0 KB data, 9 B + 2 B avg, 18 B max
0x07: 510 items, 4 KB keys + 12 KB data, 9 B + 24 B avg, 41 B max
0x09: 194 items, 0 KB keys + 123 KB data, 5 B + 634 B avg, 11484 B max
0x0a: 3 items, 0 KB keys + 0 KB data, 14 B + 17 B avg, 51 B max
0x0b: 181836 items, 2363 KB keys + 10694 KB data, 13 B + 58 B avg, 173 B max
0x0d: 44282 items, 1461 KB keys + 61081 KB data, 33 B + 1379 B avg, 1637399 B max
Total 10469408 items, 969939 KB keys + 4477905 KB data.
```
Class 00 is now down to 2.6 GB, with just 61 MB added in class 0d.
There will be some additional reads in some cases which theoretically
hurts performance, but this will be more than compensated for by smaller
writes and better compaction.
On my own home setup which just has three devices and a handful of
folders the difference is smaller in absolute numbers of course, but
still less than half the old size:
```
0x00: 297122 items, 20894 KB keys + 306860 KB data, 70 B + 1032 B avg, 103237 B max
0x01: 115299 items, 7738 KB keys + 17542 KB data, 67 B + 152 B avg, 419 B max
0x02: 1430537 items, 121223 KB keys + 5722 KB data, 84 B + 4 B avg, 253 B max
...
Total 1947412 items, 151268 KB keys + 337485 KB data.
```
to:
```
0x00: 297122 items, 20894 KB keys + 37038 KB data, 70 B + 124 B avg, 520 B max
0x01: 115299 items, 7738 KB keys + 17542 KB data, 67 B + 152 B avg, 419 B max
0x02: 1430537 items, 121223 KB keys + 5722 KB data, 84 B + 4 B avg, 253 B max
...
0x0d: 18041 items, 595 KB keys + 71964 KB data, 33 B + 3988 B avg, 101109 B max
Total 1965447 items, 151863 KB keys + 139628 KB data.
```
* wip
* wip
* wip
* wip
2020-01-24 07:35:44 +00:00
|
|
|
default:
|
|
|
|
iNdEx = preIndex
|
|
|
|
skippy, err := skipStructs(dAtA[iNdEx:])
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2021-05-19 11:30:20 +00:00
|
|
|
if (skippy < 0) || (iNdEx+skippy) < 0 {
|
lib/db: Deduplicate block lists in database (fixes #5898) (#6283)
* lib/db: Deduplicate block lists in database (fixes #5898)
This moves the block list in the database out from being just a field on
the FileInfo to being an object of its own. When putting a FileInfo we
marshal the block list separately and store it keyed by the sha256 of
the marshalled block list. When getting, if we are not doing a
"truncated" get, we do an extra read and unmarshal for the block list.
Old block lists are cleared out by a periodic GC sweep. The alternative
would be to use refcounting, but:
- There is a larger risk of getting that wrong and either dropping a
block list in error or keeping them around forever.
- It's tricky with our current database, as we don't have dirty reads.
This means that if we update two FileInfos with identical block lists in
the same transaction we can't just do read/modify/write for the ref
counters as we wouldn't see our own first update. See above about
tracking this and risks about getting it wrong.
GC uses a bloom filter for keys to avoid heavy RAM usage. GC can't run
concurrently with FileInfo updates so there is a new lock around those
operation at the lowlevel.
The end result is a much more compact database, especially for setups
with many peers where files get duplicated many times.
This is per-key-class stats for a large database I'm currently working
with, under the current schema:
```
0x00: 9138161 items, 870876 KB keys + 7397482 KB data, 95 B + 809 B avg, 1637651 B max
0x01: 185656 items, 10388 KB keys + 1790909 KB data, 55 B + 9646 B avg, 924525 B max
0x02: 916890 items, 84795 KB keys + 3667 KB data, 92 B + 4 B avg, 192 B max
0x03: 384 items, 27 KB keys + 5 KB data, 72 B + 15 B avg, 87 B max
0x04: 1109 items, 17 KB keys + 17 KB data, 15 B + 15 B avg, 69 B max
0x06: 383 items, 3 KB keys + 0 KB data, 9 B + 2 B avg, 18 B max
0x07: 510 items, 4 KB keys + 12 KB data, 9 B + 24 B avg, 41 B max
0x08: 1349 items, 12 KB keys + 10 KB data, 9 B + 8 B avg, 17 B max
0x09: 194 items, 0 KB keys + 123 KB data, 5 B + 634 B avg, 11484 B max
0x0a: 3 items, 0 KB keys + 0 KB data, 14 B + 7 B avg, 30 B max
0x0b: 181836 items, 2363 KB keys + 10694 KB data, 13 B + 58 B avg, 173 B max
Total 10426475 items, 968490 KB keys + 9202925 KB data.
```
Note 7.4 GB of data in class 00, total size 9.2 GB. After running the
migration we get this instead:
```
0x00: 9138161 items, 870876 KB keys + 2611392 KB data, 95 B + 285 B avg, 4788 B max
0x01: 185656 items, 10388 KB keys + 1790909 KB data, 55 B + 9646 B avg, 924525 B max
0x02: 916890 items, 84795 KB keys + 3667 KB data, 92 B + 4 B avg, 192 B max
0x03: 384 items, 27 KB keys + 5 KB data, 72 B + 15 B avg, 87 B max
0x04: 1109 items, 17 KB keys + 17 KB data, 15 B + 15 B avg, 69 B max
0x06: 383 items, 3 KB keys + 0 KB data, 9 B + 2 B avg, 18 B max
0x07: 510 items, 4 KB keys + 12 KB data, 9 B + 24 B avg, 41 B max
0x09: 194 items, 0 KB keys + 123 KB data, 5 B + 634 B avg, 11484 B max
0x0a: 3 items, 0 KB keys + 0 KB data, 14 B + 17 B avg, 51 B max
0x0b: 181836 items, 2363 KB keys + 10694 KB data, 13 B + 58 B avg, 173 B max
0x0d: 44282 items, 1461 KB keys + 61081 KB data, 33 B + 1379 B avg, 1637399 B max
Total 10469408 items, 969939 KB keys + 4477905 KB data.
```
Class 00 is now down to 2.6 GB, with just 61 MB added in class 0d.
There will be some additional reads in some cases, which theoretically
hurts performance, but this will be more than compensated for by smaller
writes and better compaction.
On my own home setup, which has just three devices and a handful of
folders, the difference is smaller in absolute numbers of course, but the
database is still less than half the old size:
```
0x00: 297122 items, 20894 KB keys + 306860 KB data, 70 B + 1032 B avg, 103237 B max
0x01: 115299 items, 7738 KB keys + 17542 KB data, 67 B + 152 B avg, 419 B max
0x02: 1430537 items, 121223 KB keys + 5722 KB data, 84 B + 4 B avg, 253 B max
...
Total 1947412 items, 151268 KB keys + 337485 KB data.
```
to:
```
0x00: 297122 items, 20894 KB keys + 37038 KB data, 70 B + 124 B avg, 520 B max
0x01: 115299 items, 7738 KB keys + 17542 KB data, 67 B + 152 B avg, 419 B max
0x02: 1430537 items, 121223 KB keys + 5722 KB data, 84 B + 4 B avg, 253 B max
...
0x0d: 18041 items, 595 KB keys + 71964 KB data, 33 B + 3988 B avg, 101109 B max
Total 1965447 items, 151863 KB keys + 139628 KB data.
```
* wip
* wip
* wip
* wip
2020-01-24 07:35:44 +00:00
|
|
|
return ErrInvalidLengthStructs
|
|
|
|
}
|
|
|
|
if (iNdEx + skippy) > l {
|
|
|
|
return io.ErrUnexpectedEOF
|
|
|
|
}
|
|
|
|
iNdEx += skippy
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if iNdEx > l {
|
|
|
|
return io.ErrUnexpectedEOF
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
2017-12-14 09:51:17 +00:00
|
|
|
func (m *Counts) Unmarshal(dAtA []byte) error {
|
|
|
|
l := len(dAtA)
|
|
|
|
iNdEx := 0
|
|
|
|
for iNdEx < l {
|
|
|
|
preIndex := iNdEx
|
|
|
|
var wire uint64
|
|
|
|
for shift := uint(0); ; shift += 7 {
|
|
|
|
if shift >= 64 {
|
|
|
|
return ErrIntOverflowStructs
|
|
|
|
}
|
|
|
|
if iNdEx >= l {
|
|
|
|
return io.ErrUnexpectedEOF
|
|
|
|
}
|
|
|
|
b := dAtA[iNdEx]
|
|
|
|
iNdEx++
|
2019-09-04 06:33:29 +00:00
|
|
|
wire |= uint64(b&0x7F) << shift
|
2017-12-14 09:51:17 +00:00
|
|
|
if b < 0x80 {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
|
|
|
fieldNum := int32(wire >> 3)
|
|
|
|
wireType := int(wire & 0x7)
|
|
|
|
if wireType == 4 {
|
|
|
|
return fmt.Errorf("proto: Counts: wiretype end group for non-group")
|
|
|
|
}
|
|
|
|
if fieldNum <= 0 {
|
|
|
|
return fmt.Errorf("proto: Counts: illegal tag %d (wire type %d)", fieldNum, wire)
|
|
|
|
}
|
|
|
|
switch fieldNum {
|
|
|
|
case 1:
|
|
|
|
if wireType != 0 {
|
|
|
|
return fmt.Errorf("proto: wrong wireType = %d for field Files", wireType)
|
|
|
|
}
|
|
|
|
m.Files = 0
|
|
|
|
for shift := uint(0); ; shift += 7 {
|
|
|
|
if shift >= 64 {
|
|
|
|
return ErrIntOverflowStructs
|
|
|
|
}
|
|
|
|
if iNdEx >= l {
|
|
|
|
return io.ErrUnexpectedEOF
|
|
|
|
}
|
|
|
|
b := dAtA[iNdEx]
|
|
|
|
iNdEx++
|
2020-10-02 06:07:05 +00:00
|
|
|
m.Files |= int(b&0x7F) << shift
|
2017-12-14 09:51:17 +00:00
|
|
|
if b < 0x80 {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
|
|
|
case 2:
|
|
|
|
if wireType != 0 {
|
|
|
|
return fmt.Errorf("proto: wrong wireType = %d for field Directories", wireType)
|
|
|
|
}
|
|
|
|
m.Directories = 0
|
|
|
|
for shift := uint(0); ; shift += 7 {
|
|
|
|
if shift >= 64 {
|
|
|
|
return ErrIntOverflowStructs
|
|
|
|
}
|
|
|
|
if iNdEx >= l {
|
|
|
|
return io.ErrUnexpectedEOF
|
|
|
|
}
|
|
|
|
b := dAtA[iNdEx]
|
|
|
|
iNdEx++
|
2020-10-02 06:07:05 +00:00
|
|
|
m.Directories |= int(b&0x7F) << shift
|
2017-12-14 09:51:17 +00:00
|
|
|
if b < 0x80 {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
|
|
|
case 3:
|
|
|
|
if wireType != 0 {
|
|
|
|
return fmt.Errorf("proto: wrong wireType = %d for field Symlinks", wireType)
|
|
|
|
}
|
|
|
|
m.Symlinks = 0
|
|
|
|
for shift := uint(0); ; shift += 7 {
|
|
|
|
if shift >= 64 {
|
|
|
|
return ErrIntOverflowStructs
|
|
|
|
}
|
|
|
|
if iNdEx >= l {
|
|
|
|
return io.ErrUnexpectedEOF
|
|
|
|
}
|
|
|
|
b := dAtA[iNdEx]
|
|
|
|
iNdEx++
|
2020-10-02 06:07:05 +00:00
|
|
|
m.Symlinks |= int(b&0x7F) << shift
|
2017-12-14 09:51:17 +00:00
|
|
|
if b < 0x80 {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
|
|
|
case 4:
|
|
|
|
if wireType != 0 {
|
|
|
|
return fmt.Errorf("proto: wrong wireType = %d for field Deleted", wireType)
|
|
|
|
}
|
|
|
|
m.Deleted = 0
|
|
|
|
for shift := uint(0); ; shift += 7 {
|
|
|
|
if shift >= 64 {
|
|
|
|
return ErrIntOverflowStructs
|
|
|
|
}
|
|
|
|
if iNdEx >= l {
|
|
|
|
return io.ErrUnexpectedEOF
|
|
|
|
}
|
|
|
|
b := dAtA[iNdEx]
|
|
|
|
iNdEx++
|
2020-10-02 06:07:05 +00:00
|
|
|
m.Deleted |= int(b&0x7F) << shift
|
2017-12-14 09:51:17 +00:00
|
|
|
if b < 0x80 {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
|
|
|
case 5:
|
|
|
|
if wireType != 0 {
|
|
|
|
return fmt.Errorf("proto: wrong wireType = %d for field Bytes", wireType)
|
|
|
|
}
|
|
|
|
m.Bytes = 0
|
|
|
|
for shift := uint(0); ; shift += 7 {
|
|
|
|
if shift >= 64 {
|
|
|
|
return ErrIntOverflowStructs
|
|
|
|
}
|
|
|
|
if iNdEx >= l {
|
|
|
|
return io.ErrUnexpectedEOF
|
|
|
|
}
|
|
|
|
b := dAtA[iNdEx]
|
|
|
|
iNdEx++
|
2019-09-04 06:33:29 +00:00
|
|
|
m.Bytes |= int64(b&0x7F) << shift
|
2017-12-14 09:51:17 +00:00
|
|
|
if b < 0x80 {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
|
|
|
case 6:
|
|
|
|
if wireType != 0 {
|
|
|
|
return fmt.Errorf("proto: wrong wireType = %d for field Sequence", wireType)
|
|
|
|
}
|
|
|
|
m.Sequence = 0
|
|
|
|
for shift := uint(0); ; shift += 7 {
|
|
|
|
if shift >= 64 {
|
|
|
|
return ErrIntOverflowStructs
|
|
|
|
}
|
|
|
|
if iNdEx >= l {
|
|
|
|
return io.ErrUnexpectedEOF
|
|
|
|
}
|
|
|
|
b := dAtA[iNdEx]
|
|
|
|
iNdEx++
|
2019-09-04 06:33:29 +00:00
|
|
|
m.Sequence |= int64(b&0x7F) << shift
|
2017-12-14 09:51:17 +00:00
|
|
|
if b < 0x80 {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
|
|
|
case 17:
|
|
|
|
if wireType != 2 {
|
|
|
|
return fmt.Errorf("proto: wrong wireType = %d for field DeviceID", wireType)
|
|
|
|
}
|
|
|
|
var byteLen int
|
|
|
|
for shift := uint(0); ; shift += 7 {
|
|
|
|
if shift >= 64 {
|
|
|
|
return ErrIntOverflowStructs
|
|
|
|
}
|
|
|
|
if iNdEx >= l {
|
|
|
|
return io.ErrUnexpectedEOF
|
|
|
|
}
|
|
|
|
b := dAtA[iNdEx]
|
|
|
|
iNdEx++
|
2019-09-04 06:33:29 +00:00
|
|
|
byteLen |= int(b&0x7F) << shift
|
2017-12-14 09:51:17 +00:00
|
|
|
if b < 0x80 {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if byteLen < 0 {
|
|
|
|
return ErrInvalidLengthStructs
|
|
|
|
}
|
|
|
|
postIndex := iNdEx + byteLen
|
2019-09-04 06:33:29 +00:00
|
|
|
if postIndex < 0 {
|
|
|
|
return ErrInvalidLengthStructs
|
|
|
|
}
|
2017-12-14 09:51:17 +00:00
|
|
|
if postIndex > l {
|
|
|
|
return io.ErrUnexpectedEOF
|
|
|
|
}
|
|
|
|
m.DeviceID = append(m.DeviceID[:0], dAtA[iNdEx:postIndex]...)
|
|
|
|
if m.DeviceID == nil {
|
|
|
|
m.DeviceID = []byte{}
|
|
|
|
}
|
|
|
|
iNdEx = postIndex
|
2018-07-12 08:15:57 +00:00
|
|
|
case 18:
|
|
|
|
if wireType != 0 {
|
|
|
|
return fmt.Errorf("proto: wrong wireType = %d for field LocalFlags", wireType)
|
|
|
|
}
|
|
|
|
m.LocalFlags = 0
|
|
|
|
for shift := uint(0); ; shift += 7 {
|
|
|
|
if shift >= 64 {
|
|
|
|
return ErrIntOverflowStructs
|
|
|
|
}
|
|
|
|
if iNdEx >= l {
|
|
|
|
return io.ErrUnexpectedEOF
|
|
|
|
}
|
|
|
|
b := dAtA[iNdEx]
|
|
|
|
iNdEx++
|
2019-09-04 06:33:29 +00:00
|
|
|
m.LocalFlags |= uint32(b&0x7F) << shift
|
2018-07-12 08:15:57 +00:00
|
|
|
if b < 0x80 {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
2017-12-14 09:51:17 +00:00
|
|
|
default:
|
|
|
|
iNdEx = preIndex
|
|
|
|
skippy, err := skipStructs(dAtA[iNdEx:])
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2021-05-19 11:30:20 +00:00
|
|
|
if (skippy < 0) || (iNdEx+skippy) < 0 {
|
2019-09-04 06:33:29 +00:00
|
|
|
return ErrInvalidLengthStructs
|
|
|
|
}
|
2017-12-14 09:51:17 +00:00
|
|
|
if (iNdEx + skippy) > l {
|
|
|
|
return io.ErrUnexpectedEOF
|
|
|
|
}
|
|
|
|
iNdEx += skippy
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if iNdEx > l {
|
|
|
|
return io.ErrUnexpectedEOF
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
func (m *CountsSet) Unmarshal(dAtA []byte) error {
|
|
|
|
l := len(dAtA)
|
|
|
|
iNdEx := 0
|
|
|
|
for iNdEx < l {
|
|
|
|
preIndex := iNdEx
|
|
|
|
var wire uint64
|
|
|
|
for shift := uint(0); ; shift += 7 {
|
|
|
|
if shift >= 64 {
|
|
|
|
return ErrIntOverflowStructs
|
|
|
|
}
|
|
|
|
if iNdEx >= l {
|
|
|
|
return io.ErrUnexpectedEOF
|
|
|
|
}
|
|
|
|
b := dAtA[iNdEx]
|
|
|
|
iNdEx++
|
2019-09-04 06:33:29 +00:00
|
|
|
wire |= uint64(b&0x7F) << shift
|
2017-12-14 09:51:17 +00:00
|
|
|
if b < 0x80 {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
|
|
|
fieldNum := int32(wire >> 3)
|
|
|
|
wireType := int(wire & 0x7)
|
|
|
|
if wireType == 4 {
|
|
|
|
return fmt.Errorf("proto: CountsSet: wiretype end group for non-group")
|
|
|
|
}
|
|
|
|
if fieldNum <= 0 {
|
|
|
|
return fmt.Errorf("proto: CountsSet: illegal tag %d (wire type %d)", fieldNum, wire)
|
|
|
|
}
|
|
|
|
switch fieldNum {
|
|
|
|
case 1:
|
|
|
|
if wireType != 2 {
|
|
|
|
return fmt.Errorf("proto: wrong wireType = %d for field Counts", wireType)
|
|
|
|
}
|
|
|
|
var msglen int
|
|
|
|
for shift := uint(0); ; shift += 7 {
|
|
|
|
if shift >= 64 {
|
|
|
|
return ErrIntOverflowStructs
|
|
|
|
}
|
|
|
|
if iNdEx >= l {
|
|
|
|
return io.ErrUnexpectedEOF
|
|
|
|
}
|
|
|
|
b := dAtA[iNdEx]
|
|
|
|
iNdEx++
|
2019-09-04 06:33:29 +00:00
|
|
|
msglen |= int(b&0x7F) << shift
|
2017-12-14 09:51:17 +00:00
|
|
|
if b < 0x80 {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if msglen < 0 {
|
|
|
|
return ErrInvalidLengthStructs
|
|
|
|
}
|
|
|
|
postIndex := iNdEx + msglen
|
2019-09-04 06:33:29 +00:00
|
|
|
if postIndex < 0 {
|
|
|
|
return ErrInvalidLengthStructs
|
|
|
|
}
|
2017-12-14 09:51:17 +00:00
|
|
|
if postIndex > l {
|
|
|
|
return io.ErrUnexpectedEOF
|
|
|
|
}
|
|
|
|
m.Counts = append(m.Counts, Counts{})
|
|
|
|
if err := m.Counts[len(m.Counts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
iNdEx = postIndex
|
|
|
|
case 2:
|
|
|
|
if wireType != 0 {
|
|
|
|
return fmt.Errorf("proto: wrong wireType = %d for field Created", wireType)
|
|
|
|
}
|
|
|
|
m.Created = 0
|
|
|
|
for shift := uint(0); ; shift += 7 {
|
|
|
|
if shift >= 64 {
|
|
|
|
return ErrIntOverflowStructs
|
|
|
|
}
|
|
|
|
if iNdEx >= l {
|
|
|
|
return io.ErrUnexpectedEOF
|
|
|
|
}
|
|
|
|
b := dAtA[iNdEx]
|
|
|
|
iNdEx++
|
2019-09-04 06:33:29 +00:00
|
|
|
m.Created |= int64(b&0x7F) << shift
|
2017-12-14 09:51:17 +00:00
|
|
|
if b < 0x80 {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
|
|
|
default:
|
|
|
|
iNdEx = preIndex
|
|
|
|
skippy, err := skipStructs(dAtA[iNdEx:])
|
2020-05-30 07:50:23 +00:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2021-05-19 11:30:20 +00:00
|
|
|
if (skippy < 0) || (iNdEx+skippy) < 0 {
|
2020-05-30 07:50:23 +00:00
|
|
|
return ErrInvalidLengthStructs
|
|
|
|
}
|
|
|
|
if (iNdEx + skippy) > l {
|
|
|
|
return io.ErrUnexpectedEOF
|
|
|
|
}
|
|
|
|
iNdEx += skippy
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if iNdEx > l {
|
|
|
|
return io.ErrUnexpectedEOF
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
func (m *FileVersionDeprecated) Unmarshal(dAtA []byte) error {
|
|
|
|
l := len(dAtA)
|
|
|
|
iNdEx := 0
|
|
|
|
for iNdEx < l {
|
|
|
|
preIndex := iNdEx
|
|
|
|
var wire uint64
|
|
|
|
for shift := uint(0); ; shift += 7 {
|
|
|
|
if shift >= 64 {
|
|
|
|
return ErrIntOverflowStructs
|
|
|
|
}
|
|
|
|
if iNdEx >= l {
|
|
|
|
return io.ErrUnexpectedEOF
|
|
|
|
}
|
|
|
|
b := dAtA[iNdEx]
|
|
|
|
iNdEx++
|
|
|
|
wire |= uint64(b&0x7F) << shift
|
|
|
|
if b < 0x80 {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
|
|
|
fieldNum := int32(wire >> 3)
|
|
|
|
wireType := int(wire & 0x7)
|
|
|
|
if wireType == 4 {
|
|
|
|
return fmt.Errorf("proto: FileVersionDeprecated: wiretype end group for non-group")
|
|
|
|
}
|
|
|
|
if fieldNum <= 0 {
|
|
|
|
return fmt.Errorf("proto: FileVersionDeprecated: illegal tag %d (wire type %d)", fieldNum, wire)
|
|
|
|
}
|
|
|
|
switch fieldNum {
|
|
|
|
case 1:
|
|
|
|
if wireType != 2 {
|
|
|
|
return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
|
|
|
|
}
|
|
|
|
var msglen int
|
|
|
|
for shift := uint(0); ; shift += 7 {
|
|
|
|
if shift >= 64 {
|
|
|
|
return ErrIntOverflowStructs
|
|
|
|
}
|
|
|
|
if iNdEx >= l {
|
|
|
|
return io.ErrUnexpectedEOF
|
|
|
|
}
|
|
|
|
b := dAtA[iNdEx]
|
|
|
|
iNdEx++
|
|
|
|
msglen |= int(b&0x7F) << shift
|
|
|
|
if b < 0x80 {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if msglen < 0 {
|
|
|
|
return ErrInvalidLengthStructs
|
|
|
|
}
|
|
|
|
postIndex := iNdEx + msglen
|
|
|
|
if postIndex < 0 {
|
|
|
|
return ErrInvalidLengthStructs
|
|
|
|
}
|
|
|
|
if postIndex > l {
|
|
|
|
return io.ErrUnexpectedEOF
|
|
|
|
}
|
|
|
|
if err := m.Version.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
iNdEx = postIndex
|
|
|
|
case 2:
|
|
|
|
if wireType != 2 {
|
|
|
|
return fmt.Errorf("proto: wrong wireType = %d for field Device", wireType)
|
|
|
|
}
|
|
|
|
var byteLen int
|
|
|
|
for shift := uint(0); ; shift += 7 {
|
|
|
|
if shift >= 64 {
|
|
|
|
return ErrIntOverflowStructs
|
|
|
|
}
|
|
|
|
if iNdEx >= l {
|
|
|
|
return io.ErrUnexpectedEOF
|
|
|
|
}
|
|
|
|
b := dAtA[iNdEx]
|
|
|
|
iNdEx++
|
|
|
|
byteLen |= int(b&0x7F) << shift
|
|
|
|
if b < 0x80 {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if byteLen < 0 {
|
|
|
|
return ErrInvalidLengthStructs
|
|
|
|
}
|
|
|
|
postIndex := iNdEx + byteLen
|
|
|
|
if postIndex < 0 {
|
|
|
|
return ErrInvalidLengthStructs
|
|
|
|
}
|
|
|
|
if postIndex > l {
|
|
|
|
return io.ErrUnexpectedEOF
|
|
|
|
}
|
|
|
|
m.Device = append(m.Device[:0], dAtA[iNdEx:postIndex]...)
|
|
|
|
if m.Device == nil {
|
|
|
|
m.Device = []byte{}
|
|
|
|
}
|
|
|
|
iNdEx = postIndex
|
|
|
|
case 3:
|
|
|
|
if wireType != 0 {
|
|
|
|
return fmt.Errorf("proto: wrong wireType = %d for field Invalid", wireType)
|
|
|
|
}
|
|
|
|
var v int
|
|
|
|
for shift := uint(0); ; shift += 7 {
|
|
|
|
if shift >= 64 {
|
|
|
|
return ErrIntOverflowStructs
|
|
|
|
}
|
|
|
|
if iNdEx >= l {
|
|
|
|
return io.ErrUnexpectedEOF
|
|
|
|
}
|
|
|
|
b := dAtA[iNdEx]
|
|
|
|
iNdEx++
|
|
|
|
v |= int(b&0x7F) << shift
|
|
|
|
if b < 0x80 {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
|
|
|
m.Invalid = bool(v != 0)
|
|
|
|
case 4:
|
|
|
|
if wireType != 0 {
|
|
|
|
return fmt.Errorf("proto: wrong wireType = %d for field Deleted", wireType)
|
|
|
|
}
|
|
|
|
var v int
|
|
|
|
for shift := uint(0); ; shift += 7 {
|
|
|
|
if shift >= 64 {
|
|
|
|
return ErrIntOverflowStructs
|
|
|
|
}
|
|
|
|
if iNdEx >= l {
|
|
|
|
return io.ErrUnexpectedEOF
|
|
|
|
}
|
|
|
|
b := dAtA[iNdEx]
|
|
|
|
iNdEx++
|
|
|
|
v |= int(b&0x7F) << shift
|
|
|
|
if b < 0x80 {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
|
|
|
m.Deleted = bool(v != 0)
|
|
|
|
default:
|
|
|
|
iNdEx = preIndex
|
|
|
|
skippy, err := skipStructs(dAtA[iNdEx:])
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2021-05-19 11:30:20 +00:00
|
|
|
if (skippy < 0) || (iNdEx+skippy) < 0 {
|
2020-05-30 07:50:23 +00:00
|
|
|
return ErrInvalidLengthStructs
|
|
|
|
}
|
|
|
|
if (iNdEx + skippy) > l {
|
|
|
|
return io.ErrUnexpectedEOF
|
|
|
|
}
|
|
|
|
iNdEx += skippy
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if iNdEx > l {
|
|
|
|
return io.ErrUnexpectedEOF
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
func (m *VersionListDeprecated) Unmarshal(dAtA []byte) error {
|
|
|
|
l := len(dAtA)
|
|
|
|
iNdEx := 0
|
|
|
|
for iNdEx < l {
|
|
|
|
preIndex := iNdEx
|
|
|
|
var wire uint64
|
|
|
|
for shift := uint(0); ; shift += 7 {
|
|
|
|
if shift >= 64 {
|
|
|
|
return ErrIntOverflowStructs
|
|
|
|
}
|
|
|
|
if iNdEx >= l {
|
|
|
|
return io.ErrUnexpectedEOF
|
|
|
|
}
|
|
|
|
b := dAtA[iNdEx]
|
|
|
|
iNdEx++
|
|
|
|
wire |= uint64(b&0x7F) << shift
|
|
|
|
if b < 0x80 {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
|
|
|
fieldNum := int32(wire >> 3)
|
|
|
|
wireType := int(wire & 0x7)
|
|
|
|
if wireType == 4 {
|
|
|
|
return fmt.Errorf("proto: VersionListDeprecated: wiretype end group for non-group")
|
|
|
|
}
|
|
|
|
if fieldNum <= 0 {
|
|
|
|
return fmt.Errorf("proto: VersionListDeprecated: illegal tag %d (wire type %d)", fieldNum, wire)
|
|
|
|
}
|
|
|
|
switch fieldNum {
|
|
|
|
case 1:
|
|
|
|
if wireType != 2 {
|
|
|
|
return fmt.Errorf("proto: wrong wireType = %d for field Versions", wireType)
|
|
|
|
}
|
|
|
|
var msglen int
|
|
|
|
for shift := uint(0); ; shift += 7 {
|
|
|
|
if shift >= 64 {
|
|
|
|
return ErrIntOverflowStructs
|
|
|
|
}
|
|
|
|
if iNdEx >= l {
|
|
|
|
return io.ErrUnexpectedEOF
|
|
|
|
}
|
|
|
|
b := dAtA[iNdEx]
|
|
|
|
iNdEx++
|
|
|
|
msglen |= int(b&0x7F) << shift
|
|
|
|
if b < 0x80 {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if msglen < 0 {
|
|
|
|
return ErrInvalidLengthStructs
|
|
|
|
}
|
|
|
|
postIndex := iNdEx + msglen
|
|
|
|
if postIndex < 0 {
|
|
|
|
return ErrInvalidLengthStructs
|
|
|
|
}
|
|
|
|
if postIndex > l {
|
|
|
|
return io.ErrUnexpectedEOF
|
|
|
|
}
|
|
|
|
m.Versions = append(m.Versions, FileVersionDeprecated{})
|
|
|
|
if err := m.Versions[len(m.Versions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
iNdEx = postIndex
|
|
|
|
default:
|
|
|
|
iNdEx = preIndex
|
|
|
|
skippy, err := skipStructs(dAtA[iNdEx:])
|
2020-12-17 18:54:31 +00:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2021-05-19 11:30:20 +00:00
|
|
|
if (skippy < 0) || (iNdEx+skippy) < 0 {
|
2020-12-17 18:54:31 +00:00
|
|
|
return ErrInvalidLengthStructs
|
|
|
|
}
|
|
|
|
if (iNdEx + skippy) > l {
|
|
|
|
return io.ErrUnexpectedEOF
|
|
|
|
}
|
|
|
|
iNdEx += skippy
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if iNdEx > l {
|
|
|
|
return io.ErrUnexpectedEOF
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
func (m *ObservedFolder) Unmarshal(dAtA []byte) error {
|
|
|
|
l := len(dAtA)
|
|
|
|
iNdEx := 0
|
|
|
|
for iNdEx < l {
|
|
|
|
preIndex := iNdEx
|
|
|
|
var wire uint64
|
|
|
|
for shift := uint(0); ; shift += 7 {
|
|
|
|
if shift >= 64 {
|
|
|
|
return ErrIntOverflowStructs
|
|
|
|
}
|
|
|
|
if iNdEx >= l {
|
|
|
|
return io.ErrUnexpectedEOF
|
|
|
|
}
|
|
|
|
b := dAtA[iNdEx]
|
|
|
|
iNdEx++
|
|
|
|
wire |= uint64(b&0x7F) << shift
|
|
|
|
if b < 0x80 {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
|
|
|
fieldNum := int32(wire >> 3)
|
|
|
|
wireType := int(wire & 0x7)
|
|
|
|
if wireType == 4 {
|
|
|
|
return fmt.Errorf("proto: ObservedFolder: wiretype end group for non-group")
|
|
|
|
}
|
|
|
|
if fieldNum <= 0 {
|
|
|
|
return fmt.Errorf("proto: ObservedFolder: illegal tag %d (wire type %d)", fieldNum, wire)
|
|
|
|
}
|
|
|
|
switch fieldNum {
|
|
|
|
case 1:
|
|
|
|
if wireType != 2 {
|
|
|
|
return fmt.Errorf("proto: wrong wireType = %d for field Time", wireType)
|
|
|
|
}
|
|
|
|
var msglen int
|
|
|
|
for shift := uint(0); ; shift += 7 {
|
|
|
|
if shift >= 64 {
|
|
|
|
return ErrIntOverflowStructs
|
|
|
|
}
|
|
|
|
if iNdEx >= l {
|
|
|
|
return io.ErrUnexpectedEOF
|
|
|
|
}
|
|
|
|
b := dAtA[iNdEx]
|
|
|
|
iNdEx++
|
|
|
|
msglen |= int(b&0x7F) << shift
|
|
|
|
if b < 0x80 {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if msglen < 0 {
|
|
|
|
return ErrInvalidLengthStructs
|
|
|
|
}
|
|
|
|
postIndex := iNdEx + msglen
|
|
|
|
if postIndex < 0 {
|
|
|
|
return ErrInvalidLengthStructs
|
|
|
|
}
|
|
|
|
if postIndex > l {
|
|
|
|
return io.ErrUnexpectedEOF
|
|
|
|
}
|
|
|
|
if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.Time, dAtA[iNdEx:postIndex]); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
iNdEx = postIndex
|
|
|
|
case 2:
|
|
|
|
if wireType != 2 {
|
|
|
|
return fmt.Errorf("proto: wrong wireType = %d for field Label", wireType)
|
|
|
|
}
|
|
|
|
var stringLen uint64
|
|
|
|
for shift := uint(0); ; shift += 7 {
|
|
|
|
if shift >= 64 {
|
|
|
|
return ErrIntOverflowStructs
|
|
|
|
}
|
|
|
|
if iNdEx >= l {
|
|
|
|
return io.ErrUnexpectedEOF
|
|
|
|
}
|
|
|
|
b := dAtA[iNdEx]
|
|
|
|
iNdEx++
|
|
|
|
stringLen |= uint64(b&0x7F) << shift
|
|
|
|
if b < 0x80 {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
|
|
|
intStringLen := int(stringLen)
|
|
|
|
if intStringLen < 0 {
|
|
|
|
return ErrInvalidLengthStructs
|
|
|
|
}
|
|
|
|
postIndex := iNdEx + intStringLen
|
|
|
|
if postIndex < 0 {
|
|
|
|
return ErrInvalidLengthStructs
|
|
|
|
}
|
|
|
|
if postIndex > l {
|
|
|
|
return io.ErrUnexpectedEOF
|
|
|
|
}
|
|
|
|
m.Label = string(dAtA[iNdEx:postIndex])
|
|
|
|
iNdEx = postIndex
|
2021-02-12 21:51:29 +00:00
|
|
|
case 3:
|
|
|
|
if wireType != 0 {
|
|
|
|
return fmt.Errorf("proto: wrong wireType = %d for field ReceiveEncrypted", wireType)
|
|
|
|
}
|
|
|
|
var v int
|
|
|
|
for shift := uint(0); ; shift += 7 {
|
|
|
|
if shift >= 64 {
|
|
|
|
return ErrIntOverflowStructs
|
|
|
|
}
|
|
|
|
if iNdEx >= l {
|
|
|
|
return io.ErrUnexpectedEOF
|
|
|
|
}
|
|
|
|
b := dAtA[iNdEx]
|
|
|
|
iNdEx++
|
|
|
|
v |= int(b&0x7F) << shift
|
|
|
|
if b < 0x80 {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
|
|
|
m.ReceiveEncrypted = bool(v != 0)
|
2021-06-17 11:53:02 +00:00
|
|
|
case 4:
|
|
|
|
if wireType != 0 {
|
|
|
|
return fmt.Errorf("proto: wrong wireType = %d for field RemoteEncrypted", wireType)
|
|
|
|
}
|
|
|
|
var v int
|
|
|
|
for shift := uint(0); ; shift += 7 {
|
|
|
|
if shift >= 64 {
|
|
|
|
return ErrIntOverflowStructs
|
|
|
|
}
|
|
|
|
if iNdEx >= l {
|
|
|
|
return io.ErrUnexpectedEOF
|
|
|
|
}
|
|
|
|
b := dAtA[iNdEx]
|
|
|
|
iNdEx++
|
|
|
|
v |= int(b&0x7F) << shift
|
|
|
|
if b < 0x80 {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
|
|
|
m.RemoteEncrypted = bool(v != 0)
|
2020-12-17 18:54:31 +00:00
|
|
|
default:
|
|
|
|
iNdEx = preIndex
|
|
|
|
skippy, err := skipStructs(dAtA[iNdEx:])
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2021-05-19 11:30:20 +00:00
|
|
|
if (skippy < 0) || (iNdEx+skippy) < 0 {
|
2020-12-17 18:54:31 +00:00
|
|
|
return ErrInvalidLengthStructs
|
|
|
|
}
|
|
|
|
if (iNdEx + skippy) > l {
|
|
|
|
return io.ErrUnexpectedEOF
|
|
|
|
}
|
|
|
|
iNdEx += skippy
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if iNdEx > l {
|
|
|
|
return io.ErrUnexpectedEOF
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
func (m *ObservedDevice) Unmarshal(dAtA []byte) error {
|
|
|
|
l := len(dAtA)
|
|
|
|
iNdEx := 0
|
|
|
|
for iNdEx < l {
|
|
|
|
preIndex := iNdEx
|
|
|
|
var wire uint64
|
|
|
|
for shift := uint(0); ; shift += 7 {
|
|
|
|
if shift >= 64 {
|
|
|
|
return ErrIntOverflowStructs
|
|
|
|
}
|
|
|
|
if iNdEx >= l {
|
|
|
|
return io.ErrUnexpectedEOF
|
|
|
|
}
|
|
|
|
b := dAtA[iNdEx]
|
|
|
|
iNdEx++
|
|
|
|
wire |= uint64(b&0x7F) << shift
|
|
|
|
if b < 0x80 {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
|
|
|
fieldNum := int32(wire >> 3)
|
|
|
|
wireType := int(wire & 0x7)
|
|
|
|
if wireType == 4 {
|
|
|
|
return fmt.Errorf("proto: ObservedDevice: wiretype end group for non-group")
|
|
|
|
}
|
|
|
|
if fieldNum <= 0 {
|
|
|
|
return fmt.Errorf("proto: ObservedDevice: illegal tag %d (wire type %d)", fieldNum, wire)
|
|
|
|
}
|
|
|
|
switch fieldNum {
|
|
|
|
case 1:
|
|
|
|
if wireType != 2 {
|
|
|
|
return fmt.Errorf("proto: wrong wireType = %d for field Time", wireType)
|
|
|
|
}
|
|
|
|
var msglen int
|
|
|
|
for shift := uint(0); ; shift += 7 {
|
|
|
|
if shift >= 64 {
|
|
|
|
return ErrIntOverflowStructs
|
|
|
|
}
|
|
|
|
if iNdEx >= l {
|
|
|
|
return io.ErrUnexpectedEOF
|
|
|
|
}
|
|
|
|
b := dAtA[iNdEx]
|
|
|
|
iNdEx++
|
|
|
|
msglen |= int(b&0x7F) << shift
|
|
|
|
if b < 0x80 {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if msglen < 0 {
|
|
|
|
return ErrInvalidLengthStructs
|
|
|
|
}
|
|
|
|
postIndex := iNdEx + msglen
|
|
|
|
if postIndex < 0 {
|
|
|
|
return ErrInvalidLengthStructs
|
|
|
|
}
|
|
|
|
if postIndex > l {
|
|
|
|
return io.ErrUnexpectedEOF
|
|
|
|
}
|
|
|
|
if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.Time, dAtA[iNdEx:postIndex]); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
iNdEx = postIndex
|
|
|
|
case 2:
|
|
|
|
if wireType != 2 {
|
|
|
|
return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
|
|
|
|
}
|
|
|
|
var stringLen uint64
|
|
|
|
for shift := uint(0); ; shift += 7 {
|
|
|
|
if shift >= 64 {
|
|
|
|
return ErrIntOverflowStructs
|
|
|
|
}
|
|
|
|
if iNdEx >= l {
|
|
|
|
return io.ErrUnexpectedEOF
|
|
|
|
}
|
|
|
|
b := dAtA[iNdEx]
|
|
|
|
iNdEx++
|
|
|
|
stringLen |= uint64(b&0x7F) << shift
|
|
|
|
if b < 0x80 {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
|
|
|
intStringLen := int(stringLen)
|
|
|
|
if intStringLen < 0 {
|
|
|
|
return ErrInvalidLengthStructs
|
|
|
|
}
|
|
|
|
postIndex := iNdEx + intStringLen
|
|
|
|
if postIndex < 0 {
|
|
|
|
return ErrInvalidLengthStructs
|
|
|
|
}
|
|
|
|
if postIndex > l {
|
|
|
|
return io.ErrUnexpectedEOF
|
|
|
|
}
|
|
|
|
m.Name = string(dAtA[iNdEx:postIndex])
|
|
|
|
iNdEx = postIndex
|
|
|
|
case 3:
|
|
|
|
if wireType != 2 {
|
|
|
|
return fmt.Errorf("proto: wrong wireType = %d for field Address", wireType)
|
|
|
|
}
|
|
|
|
var stringLen uint64
|
|
|
|
for shift := uint(0); ; shift += 7 {
|
|
|
|
if shift >= 64 {
|
|
|
|
return ErrIntOverflowStructs
|
|
|
|
}
|
|
|
|
if iNdEx >= l {
|
|
|
|
return io.ErrUnexpectedEOF
|
|
|
|
}
|
|
|
|
b := dAtA[iNdEx]
|
|
|
|
iNdEx++
|
|
|
|
stringLen |= uint64(b&0x7F) << shift
|
|
|
|
if b < 0x80 {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
|
|
|
intStringLen := int(stringLen)
|
|
|
|
if intStringLen < 0 {
|
|
|
|
return ErrInvalidLengthStructs
|
|
|
|
}
|
|
|
|
postIndex := iNdEx + intStringLen
|
|
|
|
if postIndex < 0 {
|
|
|
|
return ErrInvalidLengthStructs
|
|
|
|
}
|
|
|
|
if postIndex > l {
|
|
|
|
return io.ErrUnexpectedEOF
|
|
|
|
}
|
|
|
|
m.Address = string(dAtA[iNdEx:postIndex])
|
|
|
|
iNdEx = postIndex
|
|
|
|
default:
|
|
|
|
iNdEx = preIndex
|
|
|
|
skippy, err := skipStructs(dAtA[iNdEx:])
|
2017-12-14 09:51:17 +00:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2021-05-19 11:30:20 +00:00
|
|
|
if (skippy < 0) || (iNdEx+skippy) < 0 {
|
2019-09-04 06:33:29 +00:00
|
|
|
return ErrInvalidLengthStructs
|
|
|
|
}
|
2017-12-14 09:51:17 +00:00
|
|
|
if (iNdEx + skippy) > l {
|
|
|
|
return io.ErrUnexpectedEOF
|
|
|
|
}
|
|
|
|
iNdEx += skippy
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if iNdEx > l {
|
|
|
|
return io.ErrUnexpectedEOF
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
2017-01-03 00:16:21 +00:00
|
|
|
func skipStructs(dAtA []byte) (n int, err error) {
|
|
|
|
l := len(dAtA)
|
2016-07-04 10:40:29 +00:00
|
|
|
iNdEx := 0
|
2019-10-18 07:51:04 +00:00
|
|
|
depth := 0
|
2016-07-04 10:40:29 +00:00
|
|
|
for iNdEx < l {
|
|
|
|
var wire uint64
|
|
|
|
for shift := uint(0); ; shift += 7 {
|
|
|
|
if shift >= 64 {
|
|
|
|
return 0, ErrIntOverflowStructs
|
|
|
|
}
|
|
|
|
if iNdEx >= l {
|
|
|
|
return 0, io.ErrUnexpectedEOF
|
|
|
|
}
|
2017-01-03 00:16:21 +00:00
|
|
|
b := dAtA[iNdEx]
|
2016-07-04 10:40:29 +00:00
|
|
|
iNdEx++
|
|
|
|
wire |= (uint64(b) & 0x7F) << shift
|
|
|
|
if b < 0x80 {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
|
|
|
wireType := int(wire & 0x7)
|
|
|
|
switch wireType {
|
|
|
|
case 0:
|
|
|
|
for shift := uint(0); ; shift += 7 {
|
|
|
|
if shift >= 64 {
|
|
|
|
return 0, ErrIntOverflowStructs
|
|
|
|
}
|
|
|
|
if iNdEx >= l {
|
|
|
|
return 0, io.ErrUnexpectedEOF
|
|
|
|
}
|
|
|
|
iNdEx++
|
2017-01-03 00:16:21 +00:00
|
|
|
if dAtA[iNdEx-1] < 0x80 {
|
2016-07-04 10:40:29 +00:00
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
|
|
|
case 1:
|
|
|
|
iNdEx += 8
|
|
|
|
case 2:
|
|
|
|
var length int
|
|
|
|
for shift := uint(0); ; shift += 7 {
|
|
|
|
if shift >= 64 {
|
|
|
|
return 0, ErrIntOverflowStructs
|
|
|
|
}
|
|
|
|
if iNdEx >= l {
|
|
|
|
return 0, io.ErrUnexpectedEOF
|
|
|
|
}
|
2017-01-03 00:16:21 +00:00
|
|
|
b := dAtA[iNdEx]
|
2016-07-04 10:40:29 +00:00
|
|
|
iNdEx++
|
|
|
|
length |= (int(b) & 0x7F) << shift
|
|
|
|
if b < 0x80 {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if length < 0 {
|
|
|
|
return 0, ErrInvalidLengthStructs
|
|
|
|
}
|
2019-09-04 06:33:29 +00:00
|
|
|
iNdEx += length
|
2016-07-04 10:40:29 +00:00
|
|
|
case 3:
|
2019-10-18 07:51:04 +00:00
|
|
|
depth++
|
2016-07-04 10:40:29 +00:00
|
|
|
case 4:
|
2019-10-18 07:51:04 +00:00
|
|
|
if depth == 0 {
|
|
|
|
return 0, ErrUnexpectedEndOfGroupStructs
|
|
|
|
}
|
|
|
|
depth--
|
2016-07-04 10:40:29 +00:00
|
|
|
case 5:
|
|
|
|
iNdEx += 4
|
|
|
|
default:
|
|
|
|
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
|
|
|
|
}
|
2019-10-18 07:51:04 +00:00
|
|
|
if iNdEx < 0 {
|
|
|
|
return 0, ErrInvalidLengthStructs
|
|
|
|
}
|
|
|
|
if depth == 0 {
|
|
|
|
return iNdEx, nil
|
|
|
|
}
|
2016-07-04 10:40:29 +00:00
|
|
|
}
|
2019-10-18 07:51:04 +00:00
|
|
|
return 0, io.ErrUnexpectedEOF
|
2016-07-04 10:40:29 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
var (
|
2019-10-18 07:51:04 +00:00
|
|
|
ErrInvalidLengthStructs = fmt.Errorf("proto: negative length found during unmarshaling")
|
|
|
|
ErrIntOverflowStructs = fmt.Errorf("proto: integer overflow")
|
|
|
|
ErrUnexpectedEndOfGroupStructs = fmt.Errorf("proto: unexpected end of group")
|
2016-07-04 10:40:29 +00:00
|
|
|
)
|