// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: lib/db/structs.proto

package db

import (
	fmt "fmt"
	_ "github.com/gogo/protobuf/gogoproto"
	proto "github.com/gogo/protobuf/proto"
	github_com_gogo_protobuf_types "github.com/gogo/protobuf/types"
	_ "github.com/golang/protobuf/ptypes/timestamp"
	github_com_syncthing_syncthing_lib_protocol "github.com/syncthing/syncthing/lib/protocol"
	protocol "github.com/syncthing/syncthing/lib/protocol"
	_ "github.com/syncthing/syncthing/proto/ext"
	io "io"
	math "math"
	math_bits "math/bits"
	time "time"
)

// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
var _ = time.Kitchen

// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package

type FileVersion struct {
	Version        protocol.Vector `protobuf:"bytes,1,opt,name=version,proto3" json:"version" xml:"version"`
	Deleted        bool            `protobuf:"varint,2,opt,name=deleted,proto3" json:"deleted" xml:"deleted"`
	Devices        [][]byte        `protobuf:"bytes,3,rep,name=devices,proto3" json:"devices" xml:"device"`
	InvalidDevices [][]byte        `protobuf:"bytes,4,rep,name=invalid_devices,json=invalidDevices,proto3" json:"invalidDevices" xml:"invalidDevice"`
}

func (m *FileVersion) Reset()         { *m = FileVersion{} }
func (m *FileVersion) String() string { return proto.CompactTextString(m) }
func (*FileVersion) ProtoMessage()    {}
func (*FileVersion) Descriptor() ([]byte, []int) {
	return fileDescriptor_5465d80e8cba02e3, []int{0}
}
func (m *FileVersion) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *FileVersion) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_FileVersion.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalToSizedBuffer(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (m *FileVersion) XXX_Merge(src proto.Message) {
	xxx_messageInfo_FileVersion.Merge(m, src)
}
func (m *FileVersion) XXX_Size() int {
	return m.ProtoSize()
}
func (m *FileVersion) XXX_DiscardUnknown() {
	xxx_messageInfo_FileVersion.DiscardUnknown(m)
}

var xxx_messageInfo_FileVersion proto.InternalMessageInfo
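
// fileVersionRoundTrip is an illustrative sketch, not part of the generated
// output: it shows a FileVersion surviving a marshal/unmarshal round trip
// through the gogo proto helpers imported above. The device ID bytes are
// placeholder values.
func fileVersionRoundTrip() (FileVersion, error) {
	fv := FileVersion{
		Deleted: true,
		Devices: [][]byte{{0x01, 0x02, 0x03}}, // placeholder device ID bytes
	}
	bs, err := proto.Marshal(&fv)
	if err != nil {
		return FileVersion{}, err
	}
	var out FileVersion
	if err := proto.Unmarshal(bs, &out); err != nil {
		return FileVersion{}, err
	}
	return out, nil
}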

type VersionList struct {
	RawVersions []FileVersion `protobuf:"bytes,1,rep,name=versions,proto3" json:"versions" xml:"version"`
}

func (m *VersionList) Reset()      { *m = VersionList{} }
func (*VersionList) ProtoMessage() {}
func (*VersionList) Descriptor() ([]byte, []int) {
	return fileDescriptor_5465d80e8cba02e3, []int{1}
}
func (m *VersionList) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *VersionList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_VersionList.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalToSizedBuffer(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (m *VersionList) XXX_Merge(src proto.Message) {
	xxx_messageInfo_VersionList.Merge(m, src)
}
func (m *VersionList) XXX_Size() int {
	return m.ProtoSize()
}
func (m *VersionList) XXX_DiscardUnknown() {
	xxx_messageInfo_VersionList.DiscardUnknown(m)
}

var xxx_messageInfo_VersionList proto.InternalMessageInfo
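
// versionListSketch is an illustrative sketch, not part of the generated
// output: a VersionList carries the FileVersions known for one file.
// RawVersions is the raw generated field; the Raw prefix suggests the rest
// of lib/db presumably goes through accessor helpers rather than touching
// it directly.
func versionListSketch(current FileVersion, older ...FileVersion) VersionList {
	return VersionList{
		RawVersions: append([]FileVersion{current}, older...),
	}
}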

// Must be the same as FileInfo but without the blocks field
type FileInfoTruncated struct {
	Name       string                                               `protobuf:"bytes,1,opt,name=name,proto3" json:"name" xml:"name"`
	Size       int64                                                `protobuf:"varint,3,opt,name=size,proto3" json:"size" xml:"size"`
	ModifiedS  int64                                                `protobuf:"varint,5,opt,name=modified_s,json=modifiedS,proto3" json:"modifiedS" xml:"modifiedS"`
	ModifiedBy github_com_syncthing_syncthing_lib_protocol.ShortID `protobuf:"varint,12,opt,name=modified_by,json=modifiedBy,proto3,customtype=github.com/syncthing/syncthing/lib/protocol.ShortID" json:"modifiedBy" xml:"modifiedBy"`
	Version    protocol.Vector                                      `protobuf:"bytes,9,opt,name=version,proto3" json:"version" xml:"version"`
	Sequence   int64                                                `protobuf:"varint,10,opt,name=sequence,proto3" json:"sequence" xml:"sequence"`
	// repeated BlockInfo Blocks = 16
	SymlinkTarget string                `protobuf:"bytes,17,opt,name=symlink_target,json=symlinkTarget,proto3" json:"symlinkTarget" xml:"symlinkTarget"`
	BlocksHash    []byte                `protobuf:"bytes,18,opt,name=blocks_hash,json=blocksHash,proto3" json:"blocksHash" xml:"blocksHash"`
	Encrypted     []byte                `protobuf:"bytes,19,opt,name=encrypted,proto3" json:"encrypted" xml:"encrypted"`
	Type          protocol.FileInfoType `protobuf:"varint,2,opt,name=type,proto3,enum=protocol.FileInfoType" json:"type" xml:"type"`
	Permissions   uint32                `protobuf:"varint,4,opt,name=permissions,proto3" json:"permissions" xml:"permissions"`
	ModifiedNs    int                   `protobuf:"varint,11,opt,name=modified_ns,json=modifiedNs,proto3,casttype=int" json:"modifiedNs" xml:"modifiedNs"`
	RawBlockSize  int                   `protobuf:"varint,13,opt,name=block_size,json=blockSize,proto3,casttype=int" json:"blockSize" xml:"blockSize"`
	// see bep.proto
	LocalFlags    uint32 `protobuf:"varint,1000,opt,name=local_flags,json=localFlags,proto3" json:"localFlags" xml:"localFlags"`
	VersionHash   []byte `protobuf:"bytes,1001,opt,name=version_hash,json=versionHash,proto3" json:"versionHash" xml:"versionHash"`
	Deleted       bool   `protobuf:"varint,6,opt,name=deleted,proto3" json:"deleted" xml:"deleted"`
	RawInvalid    bool   `protobuf:"varint,7,opt,name=invalid,proto3" json:"invalid" xml:"invalid"`
	NoPermissions bool   `protobuf:"varint,8,opt,name=no_permissions,json=noPermissions,proto3" json:"noPermissions" xml:"noPermissions"`
}

func (m *FileInfoTruncated) Reset()      { *m = FileInfoTruncated{} }
func (*FileInfoTruncated) ProtoMessage() {}
func (*FileInfoTruncated) Descriptor() ([]byte, []int) {
	return fileDescriptor_5465d80e8cba02e3, []int{2}
}
func (m *FileInfoTruncated) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *FileInfoTruncated) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_FileInfoTruncated.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalToSizedBuffer(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (m *FileInfoTruncated) XXX_Merge(src proto.Message) {
	xxx_messageInfo_FileInfoTruncated.Merge(m, src)
}
func (m *FileInfoTruncated) XXX_Size() int {
	return m.ProtoSize()
}
func (m *FileInfoTruncated) XXX_DiscardUnknown() {
	xxx_messageInfo_FileInfoTruncated.DiscardUnknown(m)
}

var xxx_messageInfo_FileInfoTruncated proto.InternalMessageInfo
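
// truncatedModTime is an illustrative sketch, not part of the generated
// output. Assuming ModifiedS holds whole seconds and ModifiedNs the
// nanosecond remainder, as the field names suggest, the modification time
// reconstructs as:
func truncatedModTime(f FileInfoTruncated) time.Time {
	return time.Unix(f.ModifiedS, int64(f.ModifiedNs))
}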

// Block lists are deduplicated in the database (see the "lib/db:
// Deduplicate block lists in database" change, #6283): when a FileInfo is
// put, its block list is marshalled separately and stored keyed by the
// SHA-256 of the marshalled block list. Non-truncated gets do one extra
// read and unmarshal for the block list, and stale block lists are cleared
// out by a periodic GC sweep (using a bloom filter over keys to limit RAM
// usage) rather than by reference counting.
//
// BlockList is the structure used to store block lists
type BlockList struct {
	Blocks []protocol.BlockInfo `protobuf:"bytes,1,rep,name=blocks,proto3" json:"blocks" xml:"block"`
}

func (m *BlockList) Reset()         { *m = BlockList{} }
func (m *BlockList) String() string { return proto.CompactTextString(m) }
func (*BlockList) ProtoMessage()    {}
func (*BlockList) Descriptor() ([]byte, []int) {
	return fileDescriptor_5465d80e8cba02e3, []int{3}
}
func (m *BlockList) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *BlockList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_BlockList.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalToSizedBuffer(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (m *BlockList) XXX_Merge(src proto.Message) {
	xxx_messageInfo_BlockList.Merge(m, src)
}
func (m *BlockList) XXX_Size() int {
	return m.ProtoSize()
}
func (m *BlockList) XXX_DiscardUnknown() {
	xxx_messageInfo_BlockList.DiscardUnknown(m)
}

var xxx_messageInfo_BlockList proto.InternalMessageInfo
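
// How a BlockList reaches the database, per the deduplication design noted
// above: the list is marshalled on its own and keyed by the SHA-256 of the
// marshalled bytes. A minimal sketch, assuming "crypto/sha256" is imported
// (it is not imported by this generated file):
//
//	bl := BlockList{Blocks: blocks}
//	bs, err := proto.Marshal(&bl)
//	if err != nil {
//		return err
//	}
//	key := sha256.Sum256(bs) // database key for the deduplicated block list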

// IndirectionHashesOnly is used to only unmarshal the indirection hashes
// from a FileInfo
type IndirectionHashesOnly struct {
	BlocksHash  []byte `protobuf:"bytes,18,opt,name=blocks_hash,json=blocksHash,proto3" json:"blocksHash" xml:"blocksHash"`
	VersionHash []byte `protobuf:"bytes,1001,opt,name=version_hash,json=versionHash,proto3" json:"versionHash" xml:"versionHash"`
}
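
// indirectionHashes is an illustrative sketch, not part of the generated
// output: because IndirectionHashesOnly reuses the FileInfo field numbers
// (18 and 1001, matching BlocksHash and VersionHash in FileInfoTruncated
// above), unmarshalling marshalled FileInfo bytes into it decodes only the
// two hashes and skips every other field.
func indirectionHashes(fileInfoBytes []byte) ([]byte, []byte, error) {
	var h IndirectionHashesOnly
	if err := proto.Unmarshal(fileInfoBytes, &h); err != nil {
		return nil, nil, err
	}
	return h.BlocksHash, h.VersionHash, nil
}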
|
|
|
|
|
2020-05-13 12:28:42 +00:00
|
|
|
func (m *IndirectionHashesOnly) Reset() { *m = IndirectionHashesOnly{} }
|
|
|
|
func (m *IndirectionHashesOnly) String() string { return proto.CompactTextString(m) }
|
|
|
|
func (*IndirectionHashesOnly) ProtoMessage() {}
|
|
|
|
func (*IndirectionHashesOnly) Descriptor() ([]byte, []int) {
|
2020-10-02 06:07:05 +00:00
|
|
|
return fileDescriptor_5465d80e8cba02e3, []int{4}
|
lib/db: Deduplicate block lists in database (fixes #5898) (#6283)
* lib/db: Deduplicate block lists in database (fixes #5898)
This moves the block list in the database out from being just a field on
the FileInfo to being an object of its own. When putting a FileInfo we
marshal the block list separately and store it keyed by the sha256 of
the marshalled block list. When getting, if we are not doing a
"truncated" get, we do an extra read and unmarshal for the block list.
Old block lists are cleared out by a periodic GC sweep. The alternative
would be to use refcounting, but:
- There is a larger risk of getting that wrong and either dropping a
block list in error or keeping them around forever.
- It's tricky with our current database, as we don't have dirty reads.
This means that if we update two FileInfos with identical block lists in
the same transaction we can't just do read/modify/write for the ref
counters as we wouldn't see our own first update. See above about
tracking this and risks about getting it wrong.
GC uses a bloom filter for keys to avoid heavy RAM usage. GC can't run
concurrently with FileInfo updates so there is a new lock around those
operation at the lowlevel.
The end result is a much more compact database, especially for setups
with many peers where files get duplicated many times.
This is per-key-class stats for a large database I'm currently working
with, under the current schema:
```
0x00: 9138161 items, 870876 KB keys + 7397482 KB data, 95 B + 809 B avg, 1637651 B max
0x01: 185656 items, 10388 KB keys + 1790909 KB data, 55 B + 9646 B avg, 924525 B max
0x02: 916890 items, 84795 KB keys + 3667 KB data, 92 B + 4 B avg, 192 B max
0x03: 384 items, 27 KB keys + 5 KB data, 72 B + 15 B avg, 87 B max
0x04: 1109 items, 17 KB keys + 17 KB data, 15 B + 15 B avg, 69 B max
0x06: 383 items, 3 KB keys + 0 KB data, 9 B + 2 B avg, 18 B max
0x07: 510 items, 4 KB keys + 12 KB data, 9 B + 24 B avg, 41 B max
0x08: 1349 items, 12 KB keys + 10 KB data, 9 B + 8 B avg, 17 B max
0x09: 194 items, 0 KB keys + 123 KB data, 5 B + 634 B avg, 11484 B max
0x0a: 3 items, 0 KB keys + 0 KB data, 14 B + 7 B avg, 30 B max
0x0b: 181836 items, 2363 KB keys + 10694 KB data, 13 B + 58 B avg, 173 B max
Total 10426475 items, 968490 KB keys + 9202925 KB data.
```
Note 7.4 GB of data in class 00, total size 9.2 GB. After running the
migration we get this instead:
```
0x00: 9138161 items, 870876 KB keys + 2611392 KB data, 95 B + 285 B avg, 4788 B max
0x01: 185656 items, 10388 KB keys + 1790909 KB data, 55 B + 9646 B avg, 924525 B max
0x02: 916890 items, 84795 KB keys + 3667 KB data, 92 B + 4 B avg, 192 B max
0x03: 384 items, 27 KB keys + 5 KB data, 72 B + 15 B avg, 87 B max
0x04: 1109 items, 17 KB keys + 17 KB data, 15 B + 15 B avg, 69 B max
0x06: 383 items, 3 KB keys + 0 KB data, 9 B + 2 B avg, 18 B max
0x07: 510 items, 4 KB keys + 12 KB data, 9 B + 24 B avg, 41 B max
0x09: 194 items, 0 KB keys + 123 KB data, 5 B + 634 B avg, 11484 B max
0x0a: 3 items, 0 KB keys + 0 KB data, 14 B + 17 B avg, 51 B max
0x0b: 181836 items, 2363 KB keys + 10694 KB data, 13 B + 58 B avg, 173 B max
0x0d: 44282 items, 1461 KB keys + 61081 KB data, 33 B + 1379 B avg, 1637399 B max
Total 10469408 items, 969939 KB keys + 4477905 KB data.
```
Class 00 is now down to 2.6 GB, with just 61 MB added in class 0d.
There will be some additional reads in some cases which theoretically
hurts performance, but this will be more than compensated for by smaller
writes and better compaction.
On my own home setup which just has three devices and a handful of
folders the difference is smaller in absolute numbers of course, but
still less than half the old size:
```
0x00: 297122 items, 20894 KB keys + 306860 KB data, 70 B + 1032 B avg, 103237 B max
0x01: 115299 items, 7738 KB keys + 17542 KB data, 67 B + 152 B avg, 419 B max
0x02: 1430537 items, 121223 KB keys + 5722 KB data, 84 B + 4 B avg, 253 B max
...
Total 1947412 items, 151268 KB keys + 337485 KB data.
```
to:
```
0x00: 297122 items, 20894 KB keys + 37038 KB data, 70 B + 124 B avg, 520 B max
0x01: 115299 items, 7738 KB keys + 17542 KB data, 67 B + 152 B avg, 419 B max
0x02: 1430537 items, 121223 KB keys + 5722 KB data, 84 B + 4 B avg, 253 B max
...
0x0d: 18041 items, 595 KB keys + 71964 KB data, 33 B + 3988 B avg, 101109 B max
Total 1965447 items, 151863 KB keys + 139628 KB data.
```
* wip
* wip
* wip
* wip
2020-01-24 07:35:44 +00:00
|
|
|
}
|
2020-05-13 12:28:42 +00:00
|
|
|
func (m *IndirectionHashesOnly) XXX_Unmarshal(b []byte) error {
|
lib/db: Deduplicate block lists in database (fixes #5898) (#6283)
* lib/db: Deduplicate block lists in database (fixes #5898)
This moves the block list in the database out from being just a field on
the FileInfo to being an object of its own. When putting a FileInfo we
marshal the block list separately and store it keyed by the sha256 of
the marshalled block list. When getting, if we are not doing a
"truncated" get, we do an extra read and unmarshal for the block list.
Old block lists are cleared out by a periodic GC sweep. The alternative
would be to use refcounting, but:
- There is a larger risk of getting that wrong and either dropping a
block list in error or keeping them around forever.
- It's tricky with our current database, as we don't have dirty reads.
This means that if we update two FileInfos with identical block lists in
the same transaction we can't just do read/modify/write for the ref
counters as we wouldn't see our own first update. See above about
tracking this and risks about getting it wrong.
GC uses a bloom filter for keys to avoid heavy RAM usage. GC can't run
concurrently with FileInfo updates so there is a new lock around those
operation at the lowlevel.
The end result is a much more compact database, especially for setups
with many peers where files get duplicated many times.
This is per-key-class stats for a large database I'm currently working
with, under the current schema:
```
0x00: 9138161 items, 870876 KB keys + 7397482 KB data, 95 B + 809 B avg, 1637651 B max
0x01: 185656 items, 10388 KB keys + 1790909 KB data, 55 B + 9646 B avg, 924525 B max
0x02: 916890 items, 84795 KB keys + 3667 KB data, 92 B + 4 B avg, 192 B max
0x03: 384 items, 27 KB keys + 5 KB data, 72 B + 15 B avg, 87 B max
0x04: 1109 items, 17 KB keys + 17 KB data, 15 B + 15 B avg, 69 B max
0x06: 383 items, 3 KB keys + 0 KB data, 9 B + 2 B avg, 18 B max
0x07: 510 items, 4 KB keys + 12 KB data, 9 B + 24 B avg, 41 B max
0x08: 1349 items, 12 KB keys + 10 KB data, 9 B + 8 B avg, 17 B max
0x09: 194 items, 0 KB keys + 123 KB data, 5 B + 634 B avg, 11484 B max
0x0a: 3 items, 0 KB keys + 0 KB data, 14 B + 7 B avg, 30 B max
0x0b: 181836 items, 2363 KB keys + 10694 KB data, 13 B + 58 B avg, 173 B max
Total 10426475 items, 968490 KB keys + 9202925 KB data.
```
Note 7.4 GB of data in class 00, total size 9.2 GB. After running the
migration we get this instead:
```
0x00: 9138161 items, 870876 KB keys + 2611392 KB data, 95 B + 285 B avg, 4788 B max
0x01: 185656 items, 10388 KB keys + 1790909 KB data, 55 B + 9646 B avg, 924525 B max
0x02: 916890 items, 84795 KB keys + 3667 KB data, 92 B + 4 B avg, 192 B max
0x03: 384 items, 27 KB keys + 5 KB data, 72 B + 15 B avg, 87 B max
0x04: 1109 items, 17 KB keys + 17 KB data, 15 B + 15 B avg, 69 B max
0x06: 383 items, 3 KB keys + 0 KB data, 9 B + 2 B avg, 18 B max
0x07: 510 items, 4 KB keys + 12 KB data, 9 B + 24 B avg, 41 B max
0x09: 194 items, 0 KB keys + 123 KB data, 5 B + 634 B avg, 11484 B max
0x0a: 3 items, 0 KB keys + 0 KB data, 14 B + 17 B avg, 51 B max
0x0b: 181836 items, 2363 KB keys + 10694 KB data, 13 B + 58 B avg, 173 B max
0x0d: 44282 items, 1461 KB keys + 61081 KB data, 33 B + 1379 B avg, 1637399 B max
Total 10469408 items, 969939 KB keys + 4477905 KB data.
```
Class 00 is now down to 2.6 GB, with just 61 MB added in class 0d.
There will be some additional reads in some cases which theoretically
hurts performance, but this will be more than compensated for by smaller
writes and better compaction.
On my own home setup which just has three devices and a handful of
folders the difference is smaller in absolute numbers of course, but
still less than half the old size:
```
0x00: 297122 items, 20894 KB keys + 306860 KB data, 70 B + 1032 B avg, 103237 B max
0x01: 115299 items, 7738 KB keys + 17542 KB data, 67 B + 152 B avg, 419 B max
0x02: 1430537 items, 121223 KB keys + 5722 KB data, 84 B + 4 B avg, 253 B max
...
Total 1947412 items, 151268 KB keys + 337485 KB data.
```
to:
```
0x00: 297122 items, 20894 KB keys + 37038 KB data, 70 B + 124 B avg, 520 B max
0x01: 115299 items, 7738 KB keys + 17542 KB data, 67 B + 152 B avg, 419 B max
0x02: 1430537 items, 121223 KB keys + 5722 KB data, 84 B + 4 B avg, 253 B max
...
0x0d: 18041 items, 595 KB keys + 71964 KB data, 33 B + 3988 B avg, 101109 B max
Total 1965447 items, 151863 KB keys + 139628 KB data.
```
* wip
* wip
* wip
* wip
2020-01-24 07:35:44 +00:00
|
|
|
return m.Unmarshal(b)
|
|
|
|
}
|
2020-05-13 12:28:42 +00:00
|
|
|
func (m *IndirectionHashesOnly) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
lib/db: Deduplicate block lists in database (fixes #5898) (#6283)
* lib/db: Deduplicate block lists in database (fixes #5898)
This moves the block list in the database out from being just a field on
the FileInfo to being an object of its own. When putting a FileInfo we
marshal the block list separately and store it keyed by the sha256 of
the marshalled block list. When getting, if we are not doing a
"truncated" get, we do an extra read and unmarshal for the block list.
Old block lists are cleared out by a periodic GC sweep. The alternative
would be to use refcounting, but:
- There is a larger risk of getting that wrong and either dropping a
block list in error or keeping them around forever.
- It's tricky with our current database, as we don't have dirty reads.
This means that if we update two FileInfos with identical block lists in
the same transaction we can't just do read/modify/write for the ref
counters as we wouldn't see our own first update. See above about
tracking this and risks about getting it wrong.
GC uses a bloom filter for keys to avoid heavy RAM usage. GC can't run
concurrently with FileInfo updates so there is a new lock around those
operation at the lowlevel.
The end result is a much more compact database, especially for setups
with many peers where files get duplicated many times.
This is per-key-class stats for a large database I'm currently working
with, under the current schema:
```
0x00: 9138161 items, 870876 KB keys + 7397482 KB data, 95 B + 809 B avg, 1637651 B max
0x01: 185656 items, 10388 KB keys + 1790909 KB data, 55 B + 9646 B avg, 924525 B max
0x02: 916890 items, 84795 KB keys + 3667 KB data, 92 B + 4 B avg, 192 B max
0x03: 384 items, 27 KB keys + 5 KB data, 72 B + 15 B avg, 87 B max
0x04: 1109 items, 17 KB keys + 17 KB data, 15 B + 15 B avg, 69 B max
0x06: 383 items, 3 KB keys + 0 KB data, 9 B + 2 B avg, 18 B max
0x07: 510 items, 4 KB keys + 12 KB data, 9 B + 24 B avg, 41 B max
0x08: 1349 items, 12 KB keys + 10 KB data, 9 B + 8 B avg, 17 B max
0x09: 194 items, 0 KB keys + 123 KB data, 5 B + 634 B avg, 11484 B max
0x0a: 3 items, 0 KB keys + 0 KB data, 14 B + 7 B avg, 30 B max
0x0b: 181836 items, 2363 KB keys + 10694 KB data, 13 B + 58 B avg, 173 B max
Total 10426475 items, 968490 KB keys + 9202925 KB data.
```
Note 7.4 GB of data in class 00, total size 9.2 GB. After running the
migration we get this instead:
```
0x00: 9138161 items, 870876 KB keys + 2611392 KB data, 95 B + 285 B avg, 4788 B max
0x01: 185656 items, 10388 KB keys + 1790909 KB data, 55 B + 9646 B avg, 924525 B max
0x02: 916890 items, 84795 KB keys + 3667 KB data, 92 B + 4 B avg, 192 B max
0x03: 384 items, 27 KB keys + 5 KB data, 72 B + 15 B avg, 87 B max
0x04: 1109 items, 17 KB keys + 17 KB data, 15 B + 15 B avg, 69 B max
0x06: 383 items, 3 KB keys + 0 KB data, 9 B + 2 B avg, 18 B max
0x07: 510 items, 4 KB keys + 12 KB data, 9 B + 24 B avg, 41 B max
0x09: 194 items, 0 KB keys + 123 KB data, 5 B + 634 B avg, 11484 B max
0x0a: 3 items, 0 KB keys + 0 KB data, 14 B + 17 B avg, 51 B max
0x0b: 181836 items, 2363 KB keys + 10694 KB data, 13 B + 58 B avg, 173 B max
0x0d: 44282 items, 1461 KB keys + 61081 KB data, 33 B + 1379 B avg, 1637399 B max
Total 10469408 items, 969939 KB keys + 4477905 KB data.
```
Class 00 is now down to 2.6 GB, with just 61 MB added in class 0d.
There will be some additional reads in some cases which theoretically
hurts performance, but this will be more than compensated for by smaller
writes and better compaction.
On my own home setup which just has three devices and a handful of
folders the difference is smaller in absolute numbers of course, but
still less than half the old size:
```
0x00: 297122 items, 20894 KB keys + 306860 KB data, 70 B + 1032 B avg, 103237 B max
0x01: 115299 items, 7738 KB keys + 17542 KB data, 67 B + 152 B avg, 419 B max
0x02: 1430537 items, 121223 KB keys + 5722 KB data, 84 B + 4 B avg, 253 B max
...
Total 1947412 items, 151268 KB keys + 337485 KB data.
```
to:
```
0x00: 297122 items, 20894 KB keys + 37038 KB data, 70 B + 124 B avg, 520 B max
0x01: 115299 items, 7738 KB keys + 17542 KB data, 67 B + 152 B avg, 419 B max
0x02: 1430537 items, 121223 KB keys + 5722 KB data, 84 B + 4 B avg, 253 B max
...
0x0d: 18041 items, 595 KB keys + 71964 KB data, 33 B + 3988 B avg, 101109 B max
Total 1965447 items, 151863 KB keys + 139628 KB data.
```
* wip
* wip
* wip
* wip
2020-01-24 07:35:44 +00:00
|
|
|
if deterministic {
|
2020-05-13 12:28:42 +00:00
|
|
|
return xxx_messageInfo_IndirectionHashesOnly.Marshal(b, m, deterministic)
|
lib/db: Deduplicate block lists in database (fixes #5898) (#6283)
* lib/db: Deduplicate block lists in database (fixes #5898)
This moves the block list in the database out from being just a field on
the FileInfo to being an object of its own. When putting a FileInfo we
marshal the block list separately and store it keyed by the sha256 of
the marshalled block list. When getting, if we are not doing a
"truncated" get, we do an extra read and unmarshal for the block list.
Old block lists are cleared out by a periodic GC sweep. The alternative
would be to use refcounting, but:
- There is a larger risk of getting that wrong and either dropping a
block list in error or keeping them around forever.
- It's tricky with our current database, as we don't have dirty reads.
This means that if we update two FileInfos with identical block lists in
the same transaction we can't just do read/modify/write for the ref
counters as we wouldn't see our own first update. See above about
tracking this and risks about getting it wrong.
GC uses a bloom filter for keys to avoid heavy RAM usage. GC can't run
concurrently with FileInfo updates so there is a new lock around those
operation at the lowlevel.
The end result is a much more compact database, especially for setups
with many peers where files get duplicated many times.
This is per-key-class stats for a large database I'm currently working
with, under the current schema:
```
0x00: 9138161 items, 870876 KB keys + 7397482 KB data, 95 B + 809 B avg, 1637651 B max
0x01: 185656 items, 10388 KB keys + 1790909 KB data, 55 B + 9646 B avg, 924525 B max
0x02: 916890 items, 84795 KB keys + 3667 KB data, 92 B + 4 B avg, 192 B max
0x03: 384 items, 27 KB keys + 5 KB data, 72 B + 15 B avg, 87 B max
0x04: 1109 items, 17 KB keys + 17 KB data, 15 B + 15 B avg, 69 B max
0x06: 383 items, 3 KB keys + 0 KB data, 9 B + 2 B avg, 18 B max
0x07: 510 items, 4 KB keys + 12 KB data, 9 B + 24 B avg, 41 B max
0x08: 1349 items, 12 KB keys + 10 KB data, 9 B + 8 B avg, 17 B max
0x09: 194 items, 0 KB keys + 123 KB data, 5 B + 634 B avg, 11484 B max
0x0a: 3 items, 0 KB keys + 0 KB data, 14 B + 7 B avg, 30 B max
0x0b: 181836 items, 2363 KB keys + 10694 KB data, 13 B + 58 B avg, 173 B max
Total 10426475 items, 968490 KB keys + 9202925 KB data.
```
Note 7.4 GB of data in class 00, total size 9.2 GB. After running the
migration we get this instead:
```
0x00: 9138161 items, 870876 KB keys + 2611392 KB data, 95 B + 285 B avg, 4788 B max
0x01: 185656 items, 10388 KB keys + 1790909 KB data, 55 B + 9646 B avg, 924525 B max
0x02: 916890 items, 84795 KB keys + 3667 KB data, 92 B + 4 B avg, 192 B max
0x03: 384 items, 27 KB keys + 5 KB data, 72 B + 15 B avg, 87 B max
0x04: 1109 items, 17 KB keys + 17 KB data, 15 B + 15 B avg, 69 B max
0x06: 383 items, 3 KB keys + 0 KB data, 9 B + 2 B avg, 18 B max
0x07: 510 items, 4 KB keys + 12 KB data, 9 B + 24 B avg, 41 B max
0x09: 194 items, 0 KB keys + 123 KB data, 5 B + 634 B avg, 11484 B max
0x0a: 3 items, 0 KB keys + 0 KB data, 14 B + 17 B avg, 51 B max
0x0b: 181836 items, 2363 KB keys + 10694 KB data, 13 B + 58 B avg, 173 B max
0x0d: 44282 items, 1461 KB keys + 61081 KB data, 33 B + 1379 B avg, 1637399 B max
Total 10469408 items, 969939 KB keys + 4477905 KB data.
```
Class 00 is now down to 2.6 GB, with just 61 MB added in class 0d.
There will be some additional reads in some cases which theoretically
hurts performance, but this will be more than compensated for by smaller
writes and better compaction.
On my own home setup which just has three devices and a handful of
folders the difference is smaller in absolute numbers of course, but
still less than half the old size:
```
0x00: 297122 items, 20894 KB keys + 306860 KB data, 70 B + 1032 B avg, 103237 B max
0x01: 115299 items, 7738 KB keys + 17542 KB data, 67 B + 152 B avg, 419 B max
0x02: 1430537 items, 121223 KB keys + 5722 KB data, 84 B + 4 B avg, 253 B max
...
Total 1947412 items, 151268 KB keys + 337485 KB data.
```
to:
```
0x00: 297122 items, 20894 KB keys + 37038 KB data, 70 B + 124 B avg, 520 B max
0x01: 115299 items, 7738 KB keys + 17542 KB data, 67 B + 152 B avg, 419 B max
0x02: 1430537 items, 121223 KB keys + 5722 KB data, 84 B + 4 B avg, 253 B max
...
0x0d: 18041 items, 595 KB keys + 71964 KB data, 33 B + 3988 B avg, 101109 B max
Total 1965447 items, 151863 KB keys + 139628 KB data.
```
* wip
* wip
* wip
* wip
2020-01-24 07:35:44 +00:00
|
|
|
} else {
|
|
|
|
b = b[:cap(b)]
|
|
|
|
n, err := m.MarshalToSizedBuffer(b)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
return b[:n], nil
|
|
|
|
}
|
|
|
|
}
|
2020-05-13 12:28:42 +00:00
|
|
|
func (m *IndirectionHashesOnly) XXX_Merge(src proto.Message) {
|
|
|
|
xxx_messageInfo_IndirectionHashesOnly.Merge(m, src)
|
lib/db: Deduplicate block lists in database (fixes #5898) (#6283)
* lib/db: Deduplicate block lists in database (fixes #5898)
This moves the block list in the database out from being just a field on
the FileInfo to being an object of its own. When putting a FileInfo we
marshal the block list separately and store it keyed by the sha256 of
the marshalled block list. When getting, if we are not doing a
"truncated" get, we do an extra read and unmarshal for the block list.
Old block lists are cleared out by a periodic GC sweep. The alternative
would be to use refcounting, but:
- There is a larger risk of getting that wrong and either dropping a
block list in error or keeping them around forever.
- It's tricky with our current database, as we don't have dirty reads.
This means that if we update two FileInfos with identical block lists in
the same transaction we can't just do read/modify/write for the ref
counters as we wouldn't see our own first update. See above about
tracking this and risks about getting it wrong.
GC uses a bloom filter for keys to avoid heavy RAM usage. GC can't run
concurrently with FileInfo updates so there is a new lock around those
operation at the lowlevel.
The end result is a much more compact database, especially for setups
with many peers where files get duplicated many times.
This is per-key-class stats for a large database I'm currently working
with, under the current schema:
```
0x00: 9138161 items, 870876 KB keys + 7397482 KB data, 95 B + 809 B avg, 1637651 B max
0x01: 185656 items, 10388 KB keys + 1790909 KB data, 55 B + 9646 B avg, 924525 B max
0x02: 916890 items, 84795 KB keys + 3667 KB data, 92 B + 4 B avg, 192 B max
0x03: 384 items, 27 KB keys + 5 KB data, 72 B + 15 B avg, 87 B max
0x04: 1109 items, 17 KB keys + 17 KB data, 15 B + 15 B avg, 69 B max
0x06: 383 items, 3 KB keys + 0 KB data, 9 B + 2 B avg, 18 B max
0x07: 510 items, 4 KB keys + 12 KB data, 9 B + 24 B avg, 41 B max
0x08: 1349 items, 12 KB keys + 10 KB data, 9 B + 8 B avg, 17 B max
0x09: 194 items, 0 KB keys + 123 KB data, 5 B + 634 B avg, 11484 B max
0x0a: 3 items, 0 KB keys + 0 KB data, 14 B + 7 B avg, 30 B max
0x0b: 181836 items, 2363 KB keys + 10694 KB data, 13 B + 58 B avg, 173 B max
Total 10426475 items, 968490 KB keys + 9202925 KB data.
```
Note 7.4 GB of data in class 00, total size 9.2 GB. After running the
migration we get this instead:
```
0x00: 9138161 items, 870876 KB keys + 2611392 KB data, 95 B + 285 B avg, 4788 B max
0x01: 185656 items, 10388 KB keys + 1790909 KB data, 55 B + 9646 B avg, 924525 B max
0x02: 916890 items, 84795 KB keys + 3667 KB data, 92 B + 4 B avg, 192 B max
0x03: 384 items, 27 KB keys + 5 KB data, 72 B + 15 B avg, 87 B max
0x04: 1109 items, 17 KB keys + 17 KB data, 15 B + 15 B avg, 69 B max
0x06: 383 items, 3 KB keys + 0 KB data, 9 B + 2 B avg, 18 B max
0x07: 510 items, 4 KB keys + 12 KB data, 9 B + 24 B avg, 41 B max
0x09: 194 items, 0 KB keys + 123 KB data, 5 B + 634 B avg, 11484 B max
0x0a: 3 items, 0 KB keys + 0 KB data, 14 B + 17 B avg, 51 B max
0x0b: 181836 items, 2363 KB keys + 10694 KB data, 13 B + 58 B avg, 173 B max
0x0d: 44282 items, 1461 KB keys + 61081 KB data, 33 B + 1379 B avg, 1637399 B max
Total 10469408 items, 969939 KB keys + 4477905 KB data.
```
Class 00 is now down to 2.6 GB, with just 61 MB added in class 0d.
There will be some additional reads in some cases which theoretically
hurts performance, but this will be more than compensated for by smaller
writes and better compaction.
On my own home setup which just has three devices and a handful of
folders the difference is smaller in absolute numbers of course, but
still less than half the old size:
```
0x00: 297122 items, 20894 KB keys + 306860 KB data, 70 B + 1032 B avg, 103237 B max
0x01: 115299 items, 7738 KB keys + 17542 KB data, 67 B + 152 B avg, 419 B max
0x02: 1430537 items, 121223 KB keys + 5722 KB data, 84 B + 4 B avg, 253 B max
...
Total 1947412 items, 151268 KB keys + 337485 KB data.
```
to:
```
0x00: 297122 items, 20894 KB keys + 37038 KB data, 70 B + 124 B avg, 520 B max
0x01: 115299 items, 7738 KB keys + 17542 KB data, 67 B + 152 B avg, 419 B max
0x02: 1430537 items, 121223 KB keys + 5722 KB data, 84 B + 4 B avg, 253 B max
...
0x0d: 18041 items, 595 KB keys + 71964 KB data, 33 B + 3988 B avg, 101109 B max
Total 1965447 items, 151863 KB keys + 139628 KB data.
```
* wip
* wip
* wip
* wip
2020-01-24 07:35:44 +00:00
|
|
|
}
|
2020-05-13 12:28:42 +00:00
|
|
|
func (m *IndirectionHashesOnly) XXX_Size() int {
|
lib/db: Deduplicate block lists in database (fixes #5898) (#6283)
* lib/db: Deduplicate block lists in database (fixes #5898)
This moves the block list in the database out from being just a field on
the FileInfo to being an object of its own. When putting a FileInfo we
marshal the block list separately and store it keyed by the sha256 of
the marshalled block list. When getting, if we are not doing a
"truncated" get, we do an extra read and unmarshal for the block list.
Old block lists are cleared out by a periodic GC sweep. The alternative
would be to use refcounting, but:
- There is a larger risk of getting that wrong and either dropping a
block list in error or keeping them around forever.
- It's tricky with our current database, as we don't have dirty reads.
This means that if we update two FileInfos with identical block lists in
the same transaction we can't just do read/modify/write for the ref
counters as we wouldn't see our own first update. See above about
tracking this and risks about getting it wrong.
GC uses a bloom filter for keys to avoid heavy RAM usage. GC can't run
concurrently with FileInfo updates so there is a new lock around those
operation at the lowlevel.
The end result is a much more compact database, especially for setups
with many peers where files get duplicated many times.
This is per-key-class stats for a large database I'm currently working
with, under the current schema:
```
0x00: 9138161 items, 870876 KB keys + 7397482 KB data, 95 B + 809 B avg, 1637651 B max
0x01: 185656 items, 10388 KB keys + 1790909 KB data, 55 B + 9646 B avg, 924525 B max
0x02: 916890 items, 84795 KB keys + 3667 KB data, 92 B + 4 B avg, 192 B max
0x03: 384 items, 27 KB keys + 5 KB data, 72 B + 15 B avg, 87 B max
0x04: 1109 items, 17 KB keys + 17 KB data, 15 B + 15 B avg, 69 B max
0x06: 383 items, 3 KB keys + 0 KB data, 9 B + 2 B avg, 18 B max
0x07: 510 items, 4 KB keys + 12 KB data, 9 B + 24 B avg, 41 B max
0x08: 1349 items, 12 KB keys + 10 KB data, 9 B + 8 B avg, 17 B max
0x09: 194 items, 0 KB keys + 123 KB data, 5 B + 634 B avg, 11484 B max
0x0a: 3 items, 0 KB keys + 0 KB data, 14 B + 7 B avg, 30 B max
0x0b: 181836 items, 2363 KB keys + 10694 KB data, 13 B + 58 B avg, 173 B max
Total 10426475 items, 968490 KB keys + 9202925 KB data.
```
Note 7.4 GB of data in class 00, total size 9.2 GB. After running the
migration we get this instead:
```
0x00: 9138161 items, 870876 KB keys + 2611392 KB data, 95 B + 285 B avg, 4788 B max
0x01: 185656 items, 10388 KB keys + 1790909 KB data, 55 B + 9646 B avg, 924525 B max
0x02: 916890 items, 84795 KB keys + 3667 KB data, 92 B + 4 B avg, 192 B max
0x03: 384 items, 27 KB keys + 5 KB data, 72 B + 15 B avg, 87 B max
0x04: 1109 items, 17 KB keys + 17 KB data, 15 B + 15 B avg, 69 B max
0x06: 383 items, 3 KB keys + 0 KB data, 9 B + 2 B avg, 18 B max
0x07: 510 items, 4 KB keys + 12 KB data, 9 B + 24 B avg, 41 B max
0x09: 194 items, 0 KB keys + 123 KB data, 5 B + 634 B avg, 11484 B max
0x0a: 3 items, 0 KB keys + 0 KB data, 14 B + 17 B avg, 51 B max
0x0b: 181836 items, 2363 KB keys + 10694 KB data, 13 B + 58 B avg, 173 B max
0x0d: 44282 items, 1461 KB keys + 61081 KB data, 33 B + 1379 B avg, 1637399 B max
Total 10469408 items, 969939 KB keys + 4477905 KB data.
```
Class 00 is now down to 2.6 GB, with just 61 MB added in class 0d.
There will be some additional reads in some cases which theoretically
hurts performance, but this will be more than compensated for by smaller
writes and better compaction.
On my own home setup which just has three devices and a handful of
folders the difference is smaller in absolute numbers of course, but
still less than half the old size:
```
0x00: 297122 items, 20894 KB keys + 306860 KB data, 70 B + 1032 B avg, 103237 B max
0x01: 115299 items, 7738 KB keys + 17542 KB data, 67 B + 152 B avg, 419 B max
0x02: 1430537 items, 121223 KB keys + 5722 KB data, 84 B + 4 B avg, 253 B max
...
Total 1947412 items, 151268 KB keys + 337485 KB data.
```
to:
```
0x00: 297122 items, 20894 KB keys + 37038 KB data, 70 B + 124 B avg, 520 B max
0x01: 115299 items, 7738 KB keys + 17542 KB data, 67 B + 152 B avg, 419 B max
0x02: 1430537 items, 121223 KB keys + 5722 KB data, 84 B + 4 B avg, 253 B max
...
0x0d: 18041 items, 595 KB keys + 71964 KB data, 33 B + 3988 B avg, 101109 B max
Total 1965447 items, 151863 KB keys + 139628 KB data.
```
* wip
* wip
* wip
* wip
2020-01-24 07:35:44 +00:00
|
|
|
return m.ProtoSize()
|
|
|
|
}
|
2020-05-13 12:28:42 +00:00
|
|
|
func (m *IndirectionHashesOnly) XXX_DiscardUnknown() {
|
|
|
|
xxx_messageInfo_IndirectionHashesOnly.DiscardUnknown(m)
|
}

var xxx_messageInfo_IndirectionHashesOnly proto.InternalMessageInfo
// For each folder and device we keep one of these to track the current
// counts and sequence. We also keep one for the global state of the folder.
type Counts struct {
	Files       int    `protobuf:"varint,1,opt,name=files,proto3,casttype=int" json:"files" xml:"files"`
	Directories int    `protobuf:"varint,2,opt,name=directories,proto3,casttype=int" json:"directories" xml:"directories"`
	Symlinks    int    `protobuf:"varint,3,opt,name=symlinks,proto3,casttype=int" json:"symlinks" xml:"symlinks"`
	Deleted     int    `protobuf:"varint,4,opt,name=deleted,proto3,casttype=int" json:"deleted" xml:"deleted"`
	Bytes       int64  `protobuf:"varint,5,opt,name=bytes,proto3" json:"bytes" xml:"bytes"`
	Sequence    int64  `protobuf:"varint,6,opt,name=sequence,proto3" json:"sequence" xml:"sequence"`
	DeviceID    []byte `protobuf:"bytes,17,opt,name=device_id,json=deviceId,proto3" json:"deviceId" xml:"deviceId"`
	LocalFlags  uint32 `protobuf:"varint,18,opt,name=local_flags,json=localFlags,proto3" json:"localFlags" xml:"localFlags"`
}
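A hypothetical illustration of how a struct like this gets filled in — not code from this file; the classification flags are invented for the example — one Counts per device, with deleted entries counted separately from present files, directories and symlinks:
```
// accumulate folds one file entry into a running Counts.
func accumulate(c *Counts, size int64, isDir, isSymlink, deleted bool) {
	switch {
	case deleted:
		c.Deleted++
	case isDir:
		c.Directories++
	case isSymlink:
		c.Symlinks++
	default:
		c.Files++
	}
	c.Bytes += size
}
```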
func (m *Counts) Reset()      { *m = Counts{} }
func (*Counts) ProtoMessage() {}
func (*Counts) Descriptor() ([]byte, []int) {
	return fileDescriptor_5465d80e8cba02e3, []int{5}
}
func (m *Counts) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *Counts) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_Counts.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalToSizedBuffer(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (m *Counts) XXX_Merge(src proto.Message) {
	xxx_messageInfo_Counts.Merge(m, src)
}
func (m *Counts) XXX_Size() int {
	return m.ProtoSize()
}
func (m *Counts) XXX_DiscardUnknown() {
	xxx_messageInfo_Counts.DiscardUnknown(m)
}

var xxx_messageInfo_Counts proto.InternalMessageInfo

type CountsSet struct {
	Counts  []Counts `protobuf:"bytes,1,rep,name=counts,proto3" json:"counts" xml:"count"`
	Created int64    `protobuf:"varint,2,opt,name=created,proto3" json:"created" xml:"created"`
}

func (m *CountsSet) Reset()         { *m = CountsSet{} }
func (m *CountsSet) String() string { return proto.CompactTextString(m) }
func (*CountsSet) ProtoMessage()    {}
func (*CountsSet) Descriptor() ([]byte, []int) {
	return fileDescriptor_5465d80e8cba02e3, []int{6}
}
func (m *CountsSet) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *CountsSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_CountsSet.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalToSizedBuffer(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (m *CountsSet) XXX_Merge(src proto.Message) {
	xxx_messageInfo_CountsSet.Merge(m, src)
}
func (m *CountsSet) XXX_Size() int {
	return m.ProtoSize()
}
func (m *CountsSet) XXX_DiscardUnknown() {
	xxx_messageInfo_CountsSet.DiscardUnknown(m)
}

var xxx_messageInfo_CountsSet proto.InternalMessageInfo

type FileVersionDeprecated struct {
	Version protocol.Vector `protobuf:"bytes,1,opt,name=version,proto3" json:"version" xml:"version"`
	Device  []byte          `protobuf:"bytes,2,opt,name=device,proto3" json:"device" xml:"device"`
	Invalid bool            `protobuf:"varint,3,opt,name=invalid,proto3" json:"invalid" xml:"invalid"`
	Deleted bool            `protobuf:"varint,4,opt,name=deleted,proto3" json:"deleted" xml:"deleted"`
}

func (m *FileVersionDeprecated) Reset()         { *m = FileVersionDeprecated{} }
func (m *FileVersionDeprecated) String() string { return proto.CompactTextString(m) }
func (*FileVersionDeprecated) ProtoMessage()    {}
func (*FileVersionDeprecated) Descriptor() ([]byte, []int) {
	return fileDescriptor_5465d80e8cba02e3, []int{7}
}
func (m *FileVersionDeprecated) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *FileVersionDeprecated) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_FileVersionDeprecated.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalToSizedBuffer(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (m *FileVersionDeprecated) XXX_Merge(src proto.Message) {
	xxx_messageInfo_FileVersionDeprecated.Merge(m, src)
}
func (m *FileVersionDeprecated) XXX_Size() int {
	return m.ProtoSize()
}
func (m *FileVersionDeprecated) XXX_DiscardUnknown() {
	xxx_messageInfo_FileVersionDeprecated.DiscardUnknown(m)
}

var xxx_messageInfo_FileVersionDeprecated proto.InternalMessageInfo

type VersionListDeprecated struct {
	Versions []FileVersionDeprecated `protobuf:"bytes,1,rep,name=versions,proto3" json:"versions" xml:"version"`
}

func (m *VersionListDeprecated) Reset()      { *m = VersionListDeprecated{} }
func (*VersionListDeprecated) ProtoMessage() {}
func (*VersionListDeprecated) Descriptor() ([]byte, []int) {
	return fileDescriptor_5465d80e8cba02e3, []int{8}
}
func (m *VersionListDeprecated) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *VersionListDeprecated) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_VersionListDeprecated.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalToSizedBuffer(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (m *VersionListDeprecated) XXX_Merge(src proto.Message) {
	xxx_messageInfo_VersionListDeprecated.Merge(m, src)
}
func (m *VersionListDeprecated) XXX_Size() int {
	return m.ProtoSize()
}
func (m *VersionListDeprecated) XXX_DiscardUnknown() {
	xxx_messageInfo_VersionListDeprecated.DiscardUnknown(m)
}

var xxx_messageInfo_VersionListDeprecated proto.InternalMessageInfo

type ObservedFolder struct {
	Time  time.Time `protobuf:"bytes,1,opt,name=time,proto3,stdtime" json:"time" xml:"time"`
	Label string    `protobuf:"bytes,2,opt,name=label,proto3" json:"label" xml:"label"`
}

func (m *ObservedFolder) Reset()         { *m = ObservedFolder{} }
func (m *ObservedFolder) String() string { return proto.CompactTextString(m) }
func (*ObservedFolder) ProtoMessage()    {}
func (*ObservedFolder) Descriptor() ([]byte, []int) {
	return fileDescriptor_5465d80e8cba02e3, []int{9}
}
func (m *ObservedFolder) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *ObservedFolder) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_ObservedFolder.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalToSizedBuffer(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (m *ObservedFolder) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ObservedFolder.Merge(m, src)
}
func (m *ObservedFolder) XXX_Size() int {
	return m.ProtoSize()
}
func (m *ObservedFolder) XXX_DiscardUnknown() {
	xxx_messageInfo_ObservedFolder.DiscardUnknown(m)
}

var xxx_messageInfo_ObservedFolder proto.InternalMessageInfo

type ObservedDevice struct {
	Time    time.Time `protobuf:"bytes,1,opt,name=time,proto3,stdtime" json:"time" xml:"time"`
	Name    string    `protobuf:"bytes,2,opt,name=name,proto3" json:"name" xml:"name"`
	Address string    `protobuf:"bytes,3,opt,name=address,proto3" json:"address" xml:"address"`
}

func (m *ObservedDevice) Reset()         { *m = ObservedDevice{} }
func (m *ObservedDevice) String() string { return proto.CompactTextString(m) }
func (*ObservedDevice) ProtoMessage()    {}
func (*ObservedDevice) Descriptor() ([]byte, []int) {
	return fileDescriptor_5465d80e8cba02e3, []int{10}
}
func (m *ObservedDevice) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *ObservedDevice) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_ObservedDevice.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalToSizedBuffer(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (m *ObservedDevice) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ObservedDevice.Merge(m, src)
}
func (m *ObservedDevice) XXX_Size() int {
	return m.ProtoSize()
}
func (m *ObservedDevice) XXX_DiscardUnknown() {
	xxx_messageInfo_ObservedDevice.DiscardUnknown(m)
}

var xxx_messageInfo_ObservedDevice proto.InternalMessageInfo

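Because the stdtime option maps the protobuf Timestamp to a native time.Time, round-tripping one of these messages is plain Go. A short usage sketch, assuming the generated Marshal/Unmarshal methods this file defines for its other messages exist for ObservedDevice as well; the values are examples:
```
dev := ObservedDevice{
	Time:    time.Now().UTC(),
	Name:    "laptop",
	Address: "tcp://192.0.2.1:22000",
}
bs, err := dev.Marshal()
if err != nil {
	// handle marshalling error
}
var out ObservedDevice
if err := out.Unmarshal(bs); err != nil {
	// handle unmarshalling error
}
// out.Time is a time.Time again, courtesy of the stdtime mapping.
```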
func init() {
	proto.RegisterType((*FileVersion)(nil), "db.FileVersion")
	proto.RegisterType((*VersionList)(nil), "db.VersionList")
	proto.RegisterType((*FileInfoTruncated)(nil), "db.FileInfoTruncated")
	proto.RegisterType((*BlockList)(nil), "db.BlockList")
	proto.RegisterType((*IndirectionHashesOnly)(nil), "db.IndirectionHashesOnly")
	proto.RegisterType((*Counts)(nil), "db.Counts")
	proto.RegisterType((*CountsSet)(nil), "db.CountsSet")
	proto.RegisterType((*FileVersionDeprecated)(nil), "db.FileVersionDeprecated")
	proto.RegisterType((*VersionListDeprecated)(nil), "db.VersionListDeprecated")
	proto.RegisterType((*ObservedFolder)(nil), "db.ObservedFolder")
	proto.RegisterType((*ObservedDevice)(nil), "db.ObservedDevice")
}

func init() { proto.RegisterFile("lib/db/structs.proto", fileDescriptor_5465d80e8cba02e3) }

var fileDescriptor_5465d80e8cba02e3 = []byte{
	// 1415 bytes of a gzipped FileDescriptorProto
	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x57, 0xcf, 0x6f, 0xdb, 0xc6,
	0x12, 0x36, 0x2d, 0xf9, 0x87, 0x56, 0xb2, 0x13, 0x33, 0x2f, 0x01, 0x9f, 0xdf, 0x7b, 0x5a, 0xbd,
	0x8d, 0x03, 0xa8, 0x2d, 0x20, 0x03, 0x0e, 0x62, 0x14, 0x01, 0xda, 0x20, 0x8c, 0xe1, 0xc4, 0x41,
	0x9a, 0x14, 0xeb, 0x20, 0x2d, 0x7a, 0x11, 0xf8, 0x63, 0x2d, 0x13, 0xa1, 0x48, 0x95, 0x4b, 0xdb,
	0x51, 0x6e, 0xbd, 0x14, 0xe8, 0x2d, 0x08, 0x7a, 0x28, 0x8a, 0xa2, 0xc8, 0xa9, 0x7f, 0x42, 0xff,
	0x82, 0x1e, 0x72, 0xf4, 0xb1, 0xe8, 0x81, 0x45, 0xec, 0x4b, 0xab, 0xa3, 0x4e, 0x45, 0x4f, 0xc5,
	0xce, 0x2e, 0x97, 0x54, 0x8c, 0x14, 0x49, 0xea, 0x1b, 0xe7, 0x9b, 0x6f, 0x46, 0xe4, 0xec, 0x37,
	0xb3, 0x23, 0xf4, 0xaf, 0x30, 0x70, 0x57, 0x7d, 0x77, 0x95, 0xa7, 0xc9, 0x9e, 0x97, 0xf2, 0xce,
	0x20, 0x89, 0xd3, 0xd8, 0x9c, 0xf6, 0xdd, 0xe5, 0x8b, 0x09, 0x1b, 0xc4, 0x7c, 0x15, 0x00, 0x77,
	0x6f, 0x67, 0xb5, 0x17, 0xf7, 0x62, 0x30, 0xe0, 0x49, 0x12, 0x97, 0x71, 0x2f, 0x8e, 0x7b, 0x21,
	0x2b, 0x58, 0x69, 0xd0, 0x67, 0x3c, 0x75, 0xfa, 0x03, 0x45, 0xb8, 0x20, 0xf2, 0xc3, 0xa3, 0x17,
	0x87, 0xab, 0x2e, 0xcb, 0xf1, 0x1a, 0x7b, 0x94, 0xca, 0x47, 0xf2, 0xfd, 0x34, 0xaa, 0x6f, 0x06,
	0x21, 0x7b, 0xc0, 0x12, 0x1e, 0xc4, 0x91, 0x79, 0x07, 0xcd, 0xed, 0xcb, 0x47, 0xcb, 0x68, 0x19,
	0xed, 0xfa, 0xda, 0xd9, 0x4e, 0x9e, 0xa0, 0xf3, 0x80, 0x79, 0x69, 0x9c, 0xd8, 0xad, 0xe7, 0x19,
	0x9e, 0x1a, 0x65, 0x38, 0x27, 0x8e, 0x33, 0xbc, 0xf0, 0xa8, 0x1f, 0x5e, 0x25, 0xca, 0x26, 0x34,
	0xf7, 0x98, 0xeb, 0x68, 0xce, 0x67, 0x21, 0x4b, 0x99, 0x6f, 0x4d, 0xb7, 0x8c, 0xf6, 0xbc, 0xfd,
	0x5f, 0x11, 0xa7, 0x20, 0x1d, 0xa7, 0x6c, 0x42, 0x73, 0x8f, 0x79, 0x45, 0xc4, 0xed, 0x07, 0x1e,
	0xe3, 0x56, 0xa5, 0x55, 0x69, 0x37, 0xec, 0xff, 0xc8, 0x38, 0x80, 0xc6, 0x19, 0x6e, 0xa8, 0x38,
	0x61, 0x43, 0x18, 0x38, 0x4c, 0x8a, 0xce, 0x04, 0xd1, 0xbe, 0x13, 0x06, 0x7e, 0x37, 0x0f, 0xaf,
	0x42, 0xf8, 0x3b, 0xa3, 0x0c, 0x2f, 0x2a, 0xd7, 0x86, 0xce, 0x72, 0x0e, 0xb2, 0x4c, 0xc0, 0x84,
	0xbe, 0x44, 0x23, 0x5f, 0x18, 0xa8, 0xae, 0x8a, 0x73, 0x27, 0xe0, 0xa9, 0x19, 0xa2, 0x79, 0xf5,
	0x75, 0xdc, 0x32, 0x5a, 0x95, 0x76, 0x7d, 0xed, 0x4c, 0xc7, 0x77, 0x3b, 0xa5, 0x1a, 0xda, 0xd7,
	0x44, 0x81, 0x8e, 0x32, 0x5c, 0xa7, 0xce, 0x81, 0xc2, 0xf8, 0x28, 0xc3, 0x3a, 0xee, 0x44, 0xc1,
	0x9e, 0x1e, 0xae, 0x94, 0xb9, 0x54, 0x33, 0xaf, 0x56, 0xbf, 0x79, 0x86, 0xa7, 0xc8, 0x1f, 0x08,
	0x2d, 0x89, 0x1f, 0xd8, 0x8a, 0x76, 0xe2, 0xfb, 0xc9, 0x5e, 0xe4, 0x39, 0xa2, 0x48, 0xef, 0xa2,
	0x6a, 0xe4, 0xf4, 0x19, 0x9c, 0x53, 0xcd, 0xbe, 0x30, 0xca, 0x30, 0xd8, 0xe3, 0x0c, 0x23, 0xc8,
	0x2e, 0x0c, 0x42, 0x01, 0x13, 0x5c, 0x1e, 0x3c, 0x66, 0x56, 0xa5, 0x65, 0xb4, 0x2b, 0x92, 0x2b,
	0x6c, 0xcd, 0x15, 0x06, 0xa1, 0x80, 0x99, 0xd7, 0x10, 0xea, 0xc7, 0x7e, 0xb0, 0x13, 0x30, 0xbf,
	0xcb, 0xad, 0x19, 0x88, 0x68, 0x8d, 0x32, 0x5c, 0xcb, 0xd1, 0xed, 0x71, 0x86, 0xcf, 0x40, 0x98,
	0x46, 0x08, 0x2d, 0xbc, 0xe6, 0x8f, 0x06, 0xaa, 0xeb, 0x0c, 0xee, 0xd0, 0x6a, 0xb4, 0x8c, 0x76,
	0xd5, 0xfe, 0xda, 0x10, 0x65, 0xf9, 0x25, 0xc3, 0x97, 0x7b, 0x41, 0xba, 0xbb, 0xe7, 0x76, 0xbc,
	0xb8, 0xbf, 0xca, 0x87, 0x91, 0x97, 0xee, 0x06, 0x51, 0xaf, 0xf4, 0x54, 0x16, 0x6d, 0x67, 0x7b,
	0x37, 0x4e, 0xd2, 0xad, 0x8d, 0x51, 0x86, 0xf5, 0x4b, 0xd9, 0xc3, 0x71, 0x86, 0xcf, 0x4e, 0xfc,
	0xbe, 0x3d, 0x24, 0xdf, 0x1e, 0xae, 0xbc, 0x4d, 0x62, 0x5a, 0x4a, 0x5b, 0x16, 0x7f, 0xed, 0x9f,
	0x8b, 0xff, 0x2a, 0x9a, 0xe7, 0xec, 0xf3, 0x3d, 0x16, 0x79, 0xcc, 0x42, 0x50, 0xc5, 0xa6, 0x50,
	0x41, 0x8e, 0x8d, 0x33, 0xbc, 0x28, 0x6b, 0xaf, 0x00, 0x42, 0xb5, 0xcf, 0xbc, 0x87, 0x16, 0xf9,
	0xb0, 0x1f, 0x06, 0xd1, 0xc3, 0x6e, 0xea, 0x24, 0x3d, 0x96, 0x5a, 0x4b, 0x70, 0xca, 0xed, 0x51,
	0x86, 0x17, 0x94, 0xe7, 0x3e, 0x38, 0xb4, 0x8e, 0x27, 0x50, 0x42, 0x27, 0x59, 0xe6, 0x0d, 0x54,
	0x77, 0xc3, 0xd8, 0x7b, 0xc8, 0xbb, 0xbb, 0x0e, 0xdf, 0xb5, 0xcc, 0x96, 0xd1, 0x6e, 0xd8, 0x44,
	0x94, 0x55, 0xc2, 0xb7, 0x1c, 0xbe, 0xab, 0xcb, 0x5a, 0x40, 0x84, 0x96, 0xfc, 0xe6, 0x87, 0xa8,
	0xc6, 0x22, 0x2f, 0x19, 0x0e, 0x44, 0x43, 0x9f, 0x83, 0x14, 0x20, 0x0c, 0x0d, 0x6a, 0x61, 0x68,
	0x84, 0xd0, 0xc2, 0x6b, 0xda, 0xa8, 0x9a, 0x0e, 0x07, 0x0c, 0x66, 0xc1, 0xe2, 0xda, 0x85, 0xa2,
	0xb8, 0x5a, 0xdc, 0xc3, 0x01, 0x93, 0xea, 0x14, 0x3c, 0xad, 0x4e, 0x61, 0x10, 0x0a, 0x98, 0xb9,
	0x89, 0xea, 0x03, 0x96, 0xf4, 0x03, 0x2e, 0x5b, 0xb0, 0xda, 0x32, 0xda, 0x0b, 0xf6, 0xca, 0x28,
	0xc3, 0x65, 0x78, 0x9c, 0xe1, 0x25, 0x88, 0x2c, 0x61, 0x84, 0x96, 0x19, 0xe6, 0xed, 0x92, 0x46,
	0x23, 0x6e, 0xd5, 0x5b, 0x46, 0x7b, 0x06, 0xe6, 0x84, 0x16, 0xc4, 0x5d, 0x7e, 0x42, 0x67, 0x77,
	0x39, 0xf9, 0x33, 0xc3, 0x95, 0x20, 0x4a, 0x69, 0x89, 0x66, 0xee, 0x20, 0x59, 0xa5, 0x2e, 0xf4,
	0xd8, 0x02, 0xa4, 0xba, 0x79, 0x94, 0xe1, 0x06, 0x75, 0x0e, 0x6c, 0xe1, 0xd8, 0x0e, 0x1e, 0x33,
	0x51, 0x28, 0x37, 0x37, 0x74, 0xa1, 0x34, 0x92, 0x27, 0x7e, 0x7a, 0xb8, 0x32, 0x11, 0x46, 0x8b,
	0x20, 0x73, 0x03, 0xd5, 0xc3, 0xd8, 0x73, 0xc2, 0xee, 0x4e, 0xe8, 0xf4, 0xb8, 0xf5, 0xdb, 0x1c,
	0x7c, 0x3c, 0x9c, 0x22, 0xe0, 0x9b, 0x02, 0xd6, 0x2f, 0x5d, 0x40, 0x84, 0x96, 0xfc, 0xe6, 0x2d,
	0xd4, 0x50, 0x12, 0x95, 0x5a, 0xf8, 0x7d, 0x0e, 0x4e, 0x12, 0x6a, 0xa8, 0x1c, 0x4a, 0x0d, 0x4b,
	0x65, 0x65, 0x4b, 0x39, 0x94, 0x19, 0xe5, 0xf1, 0x3e, 0xfb, 0x26, 0xe3, 0x9d, 0xa2, 0x39, 0x35,
	0x65, 0xad, 0x39, 0x88, 0x7b, 0xff, 0x28, 0xc3, 0x88, 0x3a, 0x07, 0x5b, 0x12, 0x15, 0x59, 0x14,
	0x41, 0x67, 0x51, 0xb6, 0x98, 0x95, 0x25, 0x26, 0xcd, 0x79, 0xa2, 0x63, 0xa2, 0xb8, 0x5b, 0x96,
	0xc6, 0x3c, 0xa4, 0x86, 0x8e, 0x89, 0xe2, 0x8f, 0x27, 0xc4, 0x21, 0x3b, 0x66, 0x02, 0x25, 0x74,
	0x92, 0xa5, 0x46, 0xef, 0x27, 0xa8, 0x06, 0x47, 0x01, 0xb3, 0xff, 0x36, 0x9a, 0x95, 0xdd, 0xa0,
	0x26, 0xff, 0xb9, 0x42, 0xc1, 0x40, 0x12, 0x12, 0xb6, 0xff, 0xa7, 0x26, 0x84, 0xa2, 0x8e, 0x33,
	0x5c, 0x2f, 0x4e, 0x9a, 0x50, 0x05, 0x93, 0x1f, 0x0c, 0x74, 0x7e, 0x2b, 0xf2, 0x83, 0x84, 0x79,
	0xa9, 0xaa, 0x27, 0xe3, 0xf7, 0xa2, 0x70, 0x78, 0x3a, 0xad, 0x7a, 0x6a, 0x87, 0x4c, 0xbe, 0xab,
	0xa2, 0xd9, 0x1b, 0xf1, 0x5e, 0x94, 0x72, 0xf3, 0x0a, 0x9a, 0xd9, 0x09, 0x42, 0xc6, 0xe1, 0xca,
	0x99, 0xb1, 0xf1, 0x28, 0xc3, 0x12, 0xd0, 0x1f, 0x09, 0x96, 0xee, 0x11, 0xe9, 0x34, 0x3f, 0x42,
	0x75, 0xf9, 0x9d, 0x71, 0x12, 0x30, 0x0e, 0xdd, 0x3f, 0x63, 0xbf, 0x27, 0xde, 0xa4, 0x04, 0xeb,
	0x37, 0x29, 0x61, 0x3a, 0x51, 0x99, 0x68, 0x5e, 0x47, 0xf3, 0x6a, 0xb6, 0x71, 0xb8, 0xcf, 0x66,
	0xec, 0x4b, 0x30, 0x57, 0x15, 0x56, 0xcc, 0x55, 0x05, 0xe8, 0x2c, 0x9a, 0x62, 0x7e, 0x50, 0x08,
	0xb7, 0x0a, 0x19, 0x2e, 0xfe, 0x9d, 0x70, 0xf3, 0x78, 0xad, 0xdf, 0x0e, 0x9a, 0x71, 0x87, 0x29,
	0xcb, 0x2f, 0x47, 0x4b, 0xd4, 0x01, 0x80, 0xe2, 0xb0, 0x85, 0x45, 0xa8, 0x44, 0x27, 0x6e, 0x82,
	0xd9, 0x37, 0xbc, 0x09, 0xb6, 0x51, 0x4d, 0xee, 0x32, 0xdd, 0xc0, 0x87, 0x4b, 0xa0, 0x61, 0xaf,
	0x1f, 0x65, 0x78, 0x5e, 0xee, 0x27, 0x70, 0x33, 0xce, 0x4b, 0xc2, 0x96, 0xaf, 0x13, 0xe5, 0x80,
	0xe8, 0x16, 0xcd, 0xa4, 0x9a, 0x27, 0x24, 0x56, 0x1e, 0x24, 0xe6, 0xdb, 0xcc, 0x11, 0xd5, 0x20,
	0x5f, 0x1a, 0xa8, 0x26, 0xe5, 0xb1, 0xcd, 0x52, 0xf3, 0x3a, 0x9a, 0xf5, 0xc0, 0x50, 0x1d, 0x82,
	0xc4, 0x6e, 0x24, 0xdd, 0x45, 0x63, 0x48, 0x86, 0xae, 0x15, 0x98, 0x84, 0x2a, 0x58, 0x0c, 0x15,
	0x2f, 0x61, 0x4e, 0xbe, 0x33, 0x56, 0xe4, 0x50, 0x51, 0x90, 0x3e, 0x1b, 0x65, 0x13, 0x9a, 0x7b,
	0xc8, 0x57, 0xd3, 0xe8, 0x7c, 0x69, 0x0b, 0xdb, 0x60, 0x83, 0x84, 0xc9, 0x45, 0xe9, 0x74, 0x77,
	0xda, 0x35, 0x34, 0x2b, 0xeb, 0x08, 0xaf, 0xd7, 0xb0, 0x97, 0xc5, 0x27, 0x49, 0xe4, 0xc4, 0x66,
	0xaa, 0x70, 0xf1, 0x4d, 0xf9, 0xc0, 0xab, 0x14, 0x83, 0xf2, 0x55, 0x23, 0xae, 0x18, 0x6a, 0xeb,
	0x93, 0x3a, 0x7d, 0xdd, 0x01, 0x4b, 0x0e, 0xd0, 0xf9, 0xd2, 0xce, 0x5a, 0x2a, 0xc5, 0xa7, 0x27,
	0xb6, 0xd7, 0x7f, 0xbf, 0xb4, 0xbd, 0x16, 0x64, 0xfb, 0xff, 0xaa, 0x28, 0xaf, 0x5e, 0x5c, 0x4f,
	0x6c, 0xaa, 0x4f, 0x0c, 0xb4, 0x78, 0xcf, 0xe5, 0x2c, 0xd9, 0x67, 0xfe, 0x66, 0x1c, 0xfa, 0x2c,
	0x31, 0xef, 0xa2, 0xaa, 0xf8, 0x5f, 0xa2, 0x4a, 0xbf, 0xdc, 0x91, 0x7f, 0x5a, 0x3a, 0xf9, 0x9f,
	0x96, 0xce, 0xfd, 0xfc, 0x4f, 0x8b, 0xdd, 0x54, 0xbf, 0x07, 0xfc, 0xe2, 0xf2, 0x0f, 0xfa, 0x8c,
	0x3c, 0xf9, 0x15, 0x1b, 0x14, 0x70, 0xd1, 0x7c, 0xa1, 0xe3, 0xb2, 0x10, 0xca, 0x5f, 0x93, 0xcd,
	0x07, 0x80, 0x16, 0x14, 0x58, 0x84, 0x4a, 0x94, 0xfc, 0x54, 0x7a, 0x25, 0xd9, 0x0a, 0xa7, 0xfe,
	0x4a, 0xf9, 0x26, 0x3e, 0xfd, 0x1a, 0x9b, 0xf8, 0x3a, 0x9a, 0x73, 0x7c, 0x3f, 0x61, 0x5c, 0x0e,
	0xaf, 0x9a, 0x3c, 0x52, 0x05, 0xe9, 0x02, 0x2b, 0x9b, 0xd0, 0xdc, 0x63, 0xdf, 0x7c, 0xfe, 0xa2,
	0x39, 0x75, 0xf8, 0xa2, 0x39, 0xf5, 0xfc, 0xa8, 0x69, 0x1c, 0x1e, 0x35, 0x8d, 0x27, 0xc7, 0xcd,
	0xa9, 0x67, 0xc7, 0x4d, 0xe3, 0xf0, 0xb8, 0x39, 0xf5, 0xf3, 0x71, 0x73, 0xea, 0xb3, 0x4b, 0xaf,
	0xb1, 0xfe, 0xfa, 0xae, 0x3b, 0x0b, 0x9f, 0x79, 0xf9, 0xaf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x9e,
	0xa0, 0xbe, 0xf2, 0x7d, 0x0e, 0x00, 0x00,
}
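As the comment in the blob says, fileDescriptor_5465d80e8cba02e3 is a gzipped FileDescriptorProto. A small sketch of inflating it for inspection — standard library only, assuming Go 1.16+ for io.ReadAll:
```
zr, err := gzip.NewReader(bytes.NewReader(fileDescriptor_5465d80e8cba02e3))
if err != nil {
	panic(err)
}
raw, err := io.ReadAll(zr) // the raw FileDescriptorProto bytes
if err != nil {
	panic(err)
}
fmt.Printf("uncompressed descriptor: %d bytes\n", len(raw))
```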
func (m *FileVersion) Marshal() (dAtA []byte, err error) {
	size := m.ProtoSize()
	dAtA = make([]byte, size)
	n, err := m.MarshalToSizedBuffer(dAtA[:size])
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

func (m *FileVersion) MarshalTo(dAtA []byte) (int, error) {
	size := m.ProtoSize()
	return m.MarshalToSizedBuffer(dAtA[:size])
}

func (m *FileVersion) MarshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	_ = i
	var l int
	_ = l
	if len(m.InvalidDevices) > 0 {
		for iNdEx := len(m.InvalidDevices) - 1; iNdEx >= 0; iNdEx-- {
			i -= len(m.InvalidDevices[iNdEx])
			copy(dAtA[i:], m.InvalidDevices[iNdEx])
			i = encodeVarintStructs(dAtA, i, uint64(len(m.InvalidDevices[iNdEx])))
			i--
			dAtA[i] = 0x22
		}
	}
	if len(m.Devices) > 0 {
		for iNdEx := len(m.Devices) - 1; iNdEx >= 0; iNdEx-- {
			i -= len(m.Devices[iNdEx])
			copy(dAtA[i:], m.Devices[iNdEx])
			i = encodeVarintStructs(dAtA, i, uint64(len(m.Devices[iNdEx])))
			i--
			dAtA[i] = 0x1a
		}
	}
	if m.Deleted {
		i--
		if m.Deleted {
			dAtA[i] = 1
		} else {
			dAtA[i] = 0
		}
		i--
		dAtA[i] = 0x10
	}
	{
		size, err := m.Version.MarshalToSizedBuffer(dAtA[:i])
		if err != nil {
			return 0, err
		}
		i -= size
		i = encodeVarintStructs(dAtA, i, uint64(size))
	}
	i--
	dAtA[i] = 0xa
	return len(dAtA) - i, nil
}
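The literal bytes assigned above are protobuf field tags, tag = field_number<<3 | wire_type, emitted last because the buffer is filled back-to-front. Checking the single-byte tags used for FileVersion is plain arithmetic — for orientation only, not part of the generated file:
```
const (
	wireVarint = 0 // bool, int fields
	wireBytes  = 2 // length-delimited: bytes, string, embedded message
)

var fileVersionTags = []struct{ field, wire, tag int }{
	{1, wireBytes, 0x0a},  // version:         1<<3 | 2
	{2, wireVarint, 0x10}, // deleted:         2<<3 | 0
	{3, wireBytes, 0x1a},  // devices:         3<<3 | 2
	{4, wireBytes, 0x22},  // invalid_devices: 4<<3 | 2
}
```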
func (m *VersionList) Marshal() (dAtA []byte, err error) {
	size := m.ProtoSize()
	dAtA = make([]byte, size)
	n, err := m.MarshalToSizedBuffer(dAtA[:size])
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

func (m *VersionList) MarshalTo(dAtA []byte) (int, error) {
	size := m.ProtoSize()
	return m.MarshalToSizedBuffer(dAtA[:size])
}

func (m *VersionList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	_ = i
	var l int
	_ = l
	if len(m.RawVersions) > 0 {
		for iNdEx := len(m.RawVersions) - 1; iNdEx >= 0; iNdEx-- {
			{
				size, err := m.RawVersions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
				if err != nil {
					return 0, err
				}
				i -= size
				i = encodeVarintStructs(dAtA, i, uint64(size))
			}
			i--
			dAtA[i] = 0xa
		}
	}
	return len(dAtA) - i, nil
}

func (m *FileInfoTruncated) Marshal() (dAtA []byte, err error) {
	size := m.ProtoSize()
	dAtA = make([]byte, size)
	n, err := m.MarshalToSizedBuffer(dAtA[:size])
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

func (m *FileInfoTruncated) MarshalTo(dAtA []byte) (int, error) {
	size := m.ProtoSize()
	return m.MarshalToSizedBuffer(dAtA[:size])
}

func (m *FileInfoTruncated) MarshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	_ = i
	var l int
	_ = l
	if len(m.VersionHash) > 0 {
		i -= len(m.VersionHash)
		copy(dAtA[i:], m.VersionHash)
		i = encodeVarintStructs(dAtA, i, uint64(len(m.VersionHash)))
		i--
		dAtA[i] = 0x3e
		i--
		dAtA[i] = 0xca
	}
	if m.LocalFlags != 0 {
		i = encodeVarintStructs(dAtA, i, uint64(m.LocalFlags))
		i--
		dAtA[i] = 0x3e
		i--
		dAtA[i] = 0xc0
	}
	if len(m.Encrypted) > 0 {
		i -= len(m.Encrypted)
		copy(dAtA[i:], m.Encrypted)
		i = encodeVarintStructs(dAtA, i, uint64(len(m.Encrypted)))
		i--
		dAtA[i] = 0x1
		i--
		dAtA[i] = 0x9a
	}
	if len(m.BlocksHash) > 0 {
		i -= len(m.BlocksHash)
		copy(dAtA[i:], m.BlocksHash)
		i = encodeVarintStructs(dAtA, i, uint64(len(m.BlocksHash)))
		i--
		dAtA[i] = 0x1
		i--
		dAtA[i] = 0x92
	}
	if len(m.SymlinkTarget) > 0 {
		i -= len(m.SymlinkTarget)
		copy(dAtA[i:], m.SymlinkTarget)
		i = encodeVarintStructs(dAtA, i, uint64(len(m.SymlinkTarget)))
		i--
		dAtA[i] = 0x1
		i--
		dAtA[i] = 0x8a
	}
	if m.RawBlockSize != 0 {
		i = encodeVarintStructs(dAtA, i, uint64(m.RawBlockSize))
		i--
		dAtA[i] = 0x68
	}
	if m.ModifiedBy != 0 {
		i = encodeVarintStructs(dAtA, i, uint64(m.ModifiedBy))
		i--
		dAtA[i] = 0x60
	}
	if m.ModifiedNs != 0 {
		i = encodeVarintStructs(dAtA, i, uint64(m.ModifiedNs))
		i--
		dAtA[i] = 0x58
	}
	if m.Sequence != 0 {
		i = encodeVarintStructs(dAtA, i, uint64(m.Sequence))
		i--
		dAtA[i] = 0x50
	}
	{
		size, err := m.Version.MarshalToSizedBuffer(dAtA[:i])
		if err != nil {
			return 0, err
		}
		i -= size
		i = encodeVarintStructs(dAtA, i, uint64(size))
	}
	i--
	dAtA[i] = 0x4a
	if m.NoPermissions {
		i--
		if m.NoPermissions {
			dAtA[i] = 1
		} else {
			dAtA[i] = 0
		}
		i--
		dAtA[i] = 0x40
	}
	if m.RawInvalid {
		i--
		if m.RawInvalid {
			dAtA[i] = 1
		} else {
			dAtA[i] = 0
		}
		i--
		dAtA[i] = 0x38
	}
	if m.Deleted {
		i--
		if m.Deleted {
			dAtA[i] = 1
		} else {
			dAtA[i] = 0
		}
		i--
		dAtA[i] = 0x30
	}
	if m.ModifiedS != 0 {
		i = encodeVarintStructs(dAtA, i, uint64(m.ModifiedS))
		i--
		dAtA[i] = 0x28
	}
	if m.Permissions != 0 {
		i = encodeVarintStructs(dAtA, i, uint64(m.Permissions))
		i--
		dAtA[i] = 0x20
	}
	if m.Size != 0 {
		i = encodeVarintStructs(dAtA, i, uint64(m.Size))
		i--
		dAtA[i] = 0x18
	}
	if m.Type != 0 {
		i = encodeVarintStructs(dAtA, i, uint64(m.Type))
		i--
		dAtA[i] = 0x10
	}
	if len(m.Name) > 0 {
		i -= len(m.Name)
		copy(dAtA[i:], m.Name)
		i = encodeVarintStructs(dAtA, i, uint64(len(m.Name)))
		i--
		dAtA[i] = 0xa
	}
	return len(dAtA) - i, nil
}
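Field numbers above 15 don't fit a single tag byte, so the generated code writes a two-byte varint tag, again in reverse: dAtA[i] = 0x3e then dAtA[i] = 0xca puts 0xca 0x3e on the wire. Decoding such a pair by hand — ordinary varint arithmetic, for orientation only:
```
// decodeTag undoes tag = field<<3 | wiretype for a two-byte varint tag.
func decodeTag(b0, b1 byte) (field, wire uint32) {
	v := uint32(b0&0x7f) | uint32(b1)<<7 // 7-bit groups, least significant first
	return v >> 3, v & 7
}

// decodeTag(0xca, 0x3e) -> field 1001, wire 2: VersionHash, length-delimited.
// decodeTag(0xc0, 0x3e) -> field 1000, wire 0: LocalFlags, varint.
// decodeTag(0x92, 0x01) -> field 18, wire 2: BlocksHash, length-delimited.
```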
func (m *BlockList) Marshal() (dAtA []byte, err error) {
	size := m.ProtoSize()
	dAtA = make([]byte, size)
	n, err := m.MarshalToSizedBuffer(dAtA[:size])
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

func (m *BlockList) MarshalTo(dAtA []byte) (int, error) {
	size := m.ProtoSize()
	return m.MarshalToSizedBuffer(dAtA[:size])
}

func (m *BlockList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	_ = i
	var l int
	_ = l
	if len(m.Blocks) > 0 {
		for iNdEx := len(m.Blocks) - 1; iNdEx >= 0; iNdEx-- {
			{
				size, err := m.Blocks[iNdEx].MarshalToSizedBuffer(dAtA[:i])
				if err != nil {
					return 0, err
				}
				i -= size
				i = encodeVarintStructs(dAtA, i, uint64(size))
			}
			i--
			dAtA[i] = 0xa
		}
	}
	return len(dAtA) - i, nil
}

func (m *IndirectionHashesOnly) Marshal() (dAtA []byte, err error) {
2020-01-24 07:35:44 +00:00
|
|
|
size := m.ProtoSize()
|
|
|
|
dAtA = make([]byte, size)
|
|
|
|
n, err := m.MarshalToSizedBuffer(dAtA[:size])
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
return dAtA[:n], nil
|
|
|
|
}
|
|
|
|
|
2020-05-13 12:28:42 +00:00
|
|
|
func (m *IndirectionHashesOnly) MarshalTo(dAtA []byte) (int, error) {
|
2020-01-24 07:35:44 +00:00
|
|
|
size := m.ProtoSize()
|
|
|
|
return m.MarshalToSizedBuffer(dAtA[:size])
|
|
|
|
}
|
|
|
|
|
2020-05-13 12:28:42 +00:00
|
|
|
func (m *IndirectionHashesOnly) MarshalToSizedBuffer(dAtA []byte) (int, error) {
|
2020-01-24 07:35:44 +00:00
|
|
|
i := len(dAtA)
|
|
|
|
_ = i
|
|
|
|
var l int
|
|
|
|
_ = l
|
2020-05-13 12:28:42 +00:00
|
|
|
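	// version_hash is field number 1001 with wire type 2, so its tag
	// varint (1001<<3 | 2 = 8010) takes two bytes, 0xca 0x3e. The buffer
	// is filled back to front, which is why the code stores 0x3e first
	// and then 0xca at the preceding index.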
if len(m.VersionHash) > 0 {
|
|
|
|
i -= len(m.VersionHash)
|
|
|
|
copy(dAtA[i:], m.VersionHash)
|
|
|
|
i = encodeVarintStructs(dAtA, i, uint64(len(m.VersionHash)))
|
|
|
|
i--
|
|
|
|
dAtA[i] = 0x3e
|
|
|
|
i--
|
|
|
|
dAtA[i] = 0xca
|
|
|
|
}
|
2020-01-24 07:35:44 +00:00
|
|
|
if len(m.BlocksHash) > 0 {
|
|
|
|
i -= len(m.BlocksHash)
|
|
|
|
copy(dAtA[i:], m.BlocksHash)
|
|
|
|
i = encodeVarintStructs(dAtA, i, uint64(len(m.BlocksHash)))
|
|
|
|
i--
|
|
|
|
dAtA[i] = 0x1
|
|
|
|
i--
|
|
|
|
dAtA[i] = 0x92
|
|
|
|
}
|
|
|
|
return len(dAtA) - i, nil
|
|
|
|
}
|
|
|
|
|
2017-12-14 09:51:17 +00:00
|
|
|
func (m *Counts) Marshal() (dAtA []byte, err error) {
|
|
|
|
size := m.ProtoSize()
|
|
|
|
dAtA = make([]byte, size)
|
2019-09-04 06:33:29 +00:00
|
|
|
n, err := m.MarshalToSizedBuffer(dAtA[:size])
|
2017-12-14 09:51:17 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
return dAtA[:n], nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func (m *Counts) MarshalTo(dAtA []byte) (int, error) {
|
2019-09-04 06:33:29 +00:00
|
|
|
size := m.ProtoSize()
|
|
|
|
return m.MarshalToSizedBuffer(dAtA[:size])
|
|
|
|
}
|
|
|
|
|
|
|
|
func (m *Counts) MarshalToSizedBuffer(dAtA []byte) (int, error) {
|
|
|
|
i := len(dAtA)
|
2017-12-14 09:51:17 +00:00
|
|
|
_ = i
|
|
|
|
var l int
|
|
|
|
_ = l
|
2019-09-04 06:33:29 +00:00
|
|
|
if m.LocalFlags != 0 {
|
|
|
|
i = encodeVarintStructs(dAtA, i, uint64(m.LocalFlags))
|
|
|
|
i--
|
|
|
|
dAtA[i] = 0x1
|
|
|
|
i--
|
|
|
|
dAtA[i] = 0x90
|
2017-12-14 09:51:17 +00:00
|
|
|
}
|
2019-09-04 06:33:29 +00:00
|
|
|
if len(m.DeviceID) > 0 {
|
|
|
|
i -= len(m.DeviceID)
|
|
|
|
copy(dAtA[i:], m.DeviceID)
|
|
|
|
i = encodeVarintStructs(dAtA, i, uint64(len(m.DeviceID)))
|
|
|
|
i--
|
|
|
|
dAtA[i] = 0x1
|
|
|
|
i--
|
|
|
|
dAtA[i] = 0x8a
|
2017-12-14 09:51:17 +00:00
|
|
|
}
|
2019-09-04 06:33:29 +00:00
|
|
|
if m.Sequence != 0 {
|
|
|
|
i = encodeVarintStructs(dAtA, i, uint64(m.Sequence))
|
|
|
|
i--
|
|
|
|
dAtA[i] = 0x30
|
2017-12-14 09:51:17 +00:00
|
|
|
}
|
|
|
|
if m.Bytes != 0 {
|
|
|
|
i = encodeVarintStructs(dAtA, i, uint64(m.Bytes))
|
2019-09-04 06:33:29 +00:00
|
|
|
i--
|
|
|
|
dAtA[i] = 0x28
|
2017-12-14 09:51:17 +00:00
|
|
|
}
|
2019-09-04 06:33:29 +00:00
|
|
|
if m.Deleted != 0 {
|
|
|
|
i = encodeVarintStructs(dAtA, i, uint64(m.Deleted))
|
|
|
|
i--
|
|
|
|
dAtA[i] = 0x20
|
2017-12-14 09:51:17 +00:00
|
|
|
}
|
2019-09-04 06:33:29 +00:00
|
|
|
if m.Symlinks != 0 {
|
|
|
|
i = encodeVarintStructs(dAtA, i, uint64(m.Symlinks))
|
|
|
|
i--
|
|
|
|
dAtA[i] = 0x18
|
2017-12-14 09:51:17 +00:00
|
|
|
}
|
2019-09-04 06:33:29 +00:00
|
|
|
if m.Directories != 0 {
|
|
|
|
i = encodeVarintStructs(dAtA, i, uint64(m.Directories))
|
|
|
|
i--
|
|
|
|
dAtA[i] = 0x10
|
|
|
|
}
|
|
|
|
if m.Files != 0 {
|
|
|
|
i = encodeVarintStructs(dAtA, i, uint64(m.Files))
|
|
|
|
i--
|
|
|
|
dAtA[i] = 0x8
|
2018-07-12 08:15:57 +00:00
|
|
|
}
|
2019-09-04 06:33:29 +00:00
|
|
|
return len(dAtA) - i, nil
|
2017-12-14 09:51:17 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
func (m *CountsSet) Marshal() (dAtA []byte, err error) {
|
|
|
|
size := m.ProtoSize()
|
|
|
|
dAtA = make([]byte, size)
|
2019-09-04 06:33:29 +00:00
|
|
|
n, err := m.MarshalToSizedBuffer(dAtA[:size])
|
2017-12-14 09:51:17 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
return dAtA[:n], nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func (m *CountsSet) MarshalTo(dAtA []byte) (int, error) {
|
2019-09-04 06:33:29 +00:00
|
|
|
size := m.ProtoSize()
|
|
|
|
return m.MarshalToSizedBuffer(dAtA[:size])
|
|
|
|
}
|
|
|
|
|
|
|
|
func (m *CountsSet) MarshalToSizedBuffer(dAtA []byte) (int, error) {
|
|
|
|
i := len(dAtA)
|
2017-12-14 09:51:17 +00:00
|
|
|
_ = i
|
|
|
|
var l int
|
|
|
|
_ = l
|
2019-09-04 06:33:29 +00:00
|
|
|
if m.Created != 0 {
|
|
|
|
i = encodeVarintStructs(dAtA, i, uint64(m.Created))
|
|
|
|
i--
|
|
|
|
dAtA[i] = 0x10
|
|
|
|
}
|
2017-12-14 09:51:17 +00:00
|
|
|
if len(m.Counts) > 0 {
|
2019-09-04 06:33:29 +00:00
|
|
|
for iNdEx := len(m.Counts) - 1; iNdEx >= 0; iNdEx-- {
|
|
|
|
{
|
|
|
|
size, err := m.Counts[iNdEx].MarshalToSizedBuffer(dAtA[:i])
|
|
|
|
if err != nil {
|
|
|
|
return 0, err
|
|
|
|
}
|
|
|
|
i -= size
|
|
|
|
i = encodeVarintStructs(dAtA, i, uint64(size))
|
2017-12-14 09:51:17 +00:00
|
|
|
}
|
2019-09-04 06:33:29 +00:00
|
|
|
i--
|
|
|
|
dAtA[i] = 0xa
|
2017-12-14 09:51:17 +00:00
|
|
|
}
|
|
|
|
}
|
2019-09-04 06:33:29 +00:00
|
|
|
return len(dAtA) - i, nil
|
2017-12-14 09:51:17 +00:00
|
|
|
}
|
|
|
|
|
2020-05-30 07:50:23 +00:00
|
|
|
func (m *FileVersionDeprecated) Marshal() (dAtA []byte, err error) {
|
|
|
|
size := m.ProtoSize()
|
|
|
|
dAtA = make([]byte, size)
|
|
|
|
n, err := m.MarshalToSizedBuffer(dAtA[:size])
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
return dAtA[:n], nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func (m *FileVersionDeprecated) MarshalTo(dAtA []byte) (int, error) {
|
|
|
|
size := m.ProtoSize()
|
|
|
|
return m.MarshalToSizedBuffer(dAtA[:size])
|
|
|
|
}
|
|
|
|
|
|
|
|
func (m *FileVersionDeprecated) MarshalToSizedBuffer(dAtA []byte) (int, error) {
|
|
|
|
i := len(dAtA)
|
|
|
|
_ = i
|
|
|
|
var l int
|
|
|
|
_ = l
|
|
|
|
if m.Deleted {
|
|
|
|
i--
|
|
|
|
if m.Deleted {
|
|
|
|
dAtA[i] = 1
|
|
|
|
} else {
|
|
|
|
dAtA[i] = 0
|
|
|
|
}
|
|
|
|
i--
|
|
|
|
dAtA[i] = 0x20
|
|
|
|
}
|
|
|
|
if m.Invalid {
|
|
|
|
i--
|
|
|
|
if m.Invalid {
|
|
|
|
dAtA[i] = 1
|
|
|
|
} else {
|
|
|
|
dAtA[i] = 0
|
|
|
|
}
|
|
|
|
i--
|
|
|
|
dAtA[i] = 0x18
|
|
|
|
}
|
|
|
|
if len(m.Device) > 0 {
|
|
|
|
i -= len(m.Device)
|
|
|
|
copy(dAtA[i:], m.Device)
|
|
|
|
i = encodeVarintStructs(dAtA, i, uint64(len(m.Device)))
|
|
|
|
i--
|
|
|
|
dAtA[i] = 0x12
|
|
|
|
}
|
|
|
|
{
|
|
|
|
size, err := m.Version.MarshalToSizedBuffer(dAtA[:i])
|
|
|
|
if err != nil {
|
|
|
|
return 0, err
|
|
|
|
}
|
|
|
|
i -= size
|
|
|
|
i = encodeVarintStructs(dAtA, i, uint64(size))
|
|
|
|
}
|
|
|
|
i--
|
|
|
|
dAtA[i] = 0xa
|
|
|
|
return len(dAtA) - i, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func (m *VersionListDeprecated) Marshal() (dAtA []byte, err error) {
|
|
|
|
size := m.ProtoSize()
|
|
|
|
dAtA = make([]byte, size)
|
|
|
|
n, err := m.MarshalToSizedBuffer(dAtA[:size])
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
return dAtA[:n], nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func (m *VersionListDeprecated) MarshalTo(dAtA []byte) (int, error) {
|
|
|
|
size := m.ProtoSize()
|
|
|
|
return m.MarshalToSizedBuffer(dAtA[:size])
|
|
|
|
}
|
|
|
|
|
|
|
|
func (m *VersionListDeprecated) MarshalToSizedBuffer(dAtA []byte) (int, error) {
|
|
|
|
i := len(dAtA)
|
|
|
|
_ = i
|
|
|
|
var l int
|
|
|
|
_ = l
|
|
|
|
if len(m.Versions) > 0 {
|
|
|
|
for iNdEx := len(m.Versions) - 1; iNdEx >= 0; iNdEx-- {
|
|
|
|
{
|
|
|
|
size, err := m.Versions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
|
|
|
|
if err != nil {
|
|
|
|
return 0, err
|
|
|
|
}
|
|
|
|
i -= size
|
|
|
|
i = encodeVarintStructs(dAtA, i, uint64(size))
|
|
|
|
}
|
|
|
|
i--
|
|
|
|
dAtA[i] = 0xa
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return len(dAtA) - i, nil
|
|
|
|
}
|
|
|
|
|
2020-12-17 18:54:31 +00:00
|
|
|
func (m *ObservedFolder) Marshal() (dAtA []byte, err error) {
|
|
|
|
size := m.ProtoSize()
|
|
|
|
dAtA = make([]byte, size)
|
|
|
|
n, err := m.MarshalToSizedBuffer(dAtA[:size])
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
return dAtA[:n], nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func (m *ObservedFolder) MarshalTo(dAtA []byte) (int, error) {
|
|
|
|
size := m.ProtoSize()
|
|
|
|
return m.MarshalToSizedBuffer(dAtA[:size])
|
|
|
|
}
|
|
|
|
|
|
|
|
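// MarshalToSizedBuffer writes the message back to front. The Time field
// is marshalled via gogo's StdTime helpers, which the generator emits
// for timestamp fields using the (gogoproto.stdtime) option, rather
// than through a nested Timestamp message value.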
func (m *ObservedFolder) MarshalToSizedBuffer(dAtA []byte) (int, error) {
|
|
|
|
i := len(dAtA)
|
|
|
|
_ = i
|
|
|
|
var l int
|
|
|
|
_ = l
|
|
|
|
if len(m.Label) > 0 {
|
|
|
|
i -= len(m.Label)
|
|
|
|
copy(dAtA[i:], m.Label)
|
|
|
|
i = encodeVarintStructs(dAtA, i, uint64(len(m.Label)))
|
|
|
|
i--
|
|
|
|
dAtA[i] = 0x12
|
|
|
|
}
|
|
|
|
n4, err4 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Time, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Time):])
|
|
|
|
if err4 != nil {
|
|
|
|
return 0, err4
|
|
|
|
}
|
|
|
|
i -= n4
|
|
|
|
i = encodeVarintStructs(dAtA, i, uint64(n4))
|
|
|
|
i--
|
|
|
|
dAtA[i] = 0xa
|
|
|
|
return len(dAtA) - i, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func (m *ObservedDevice) Marshal() (dAtA []byte, err error) {
|
|
|
|
size := m.ProtoSize()
|
|
|
|
dAtA = make([]byte, size)
|
|
|
|
n, err := m.MarshalToSizedBuffer(dAtA[:size])
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
return dAtA[:n], nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func (m *ObservedDevice) MarshalTo(dAtA []byte) (int, error) {
|
|
|
|
size := m.ProtoSize()
|
|
|
|
return m.MarshalToSizedBuffer(dAtA[:size])
|
|
|
|
}
|
|
|
|
|
|
|
|
func (m *ObservedDevice) MarshalToSizedBuffer(dAtA []byte) (int, error) {
|
|
|
|
i := len(dAtA)
|
|
|
|
_ = i
|
|
|
|
var l int
|
|
|
|
_ = l
|
|
|
|
if len(m.Address) > 0 {
|
|
|
|
i -= len(m.Address)
|
|
|
|
copy(dAtA[i:], m.Address)
|
|
|
|
i = encodeVarintStructs(dAtA, i, uint64(len(m.Address)))
|
|
|
|
i--
|
|
|
|
dAtA[i] = 0x1a
|
|
|
|
}
|
|
|
|
if len(m.Name) > 0 {
|
|
|
|
i -= len(m.Name)
|
|
|
|
copy(dAtA[i:], m.Name)
|
|
|
|
i = encodeVarintStructs(dAtA, i, uint64(len(m.Name)))
|
|
|
|
i--
|
|
|
|
dAtA[i] = 0x12
|
|
|
|
}
|
|
|
|
n5, err5 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Time, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Time):])
|
|
|
|
if err5 != nil {
|
|
|
|
return 0, err5
|
|
|
|
}
|
|
|
|
i -= n5
|
|
|
|
i = encodeVarintStructs(dAtA, i, uint64(n5))
|
|
|
|
i--
|
|
|
|
dAtA[i] = 0xa
|
|
|
|
return len(dAtA) - i, nil
|
|
|
|
}
|
|
|
|
|
2017-01-03 00:16:21 +00:00
|
|
|
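// encodeVarintStructs writes v as a base-128 varint ending just before
// the given offset and returns the index of its first byte. For
// example, v = 300 occupies two bytes, 0xac 0x02: low seven bits first,
// with the high bit set on every byte except the last.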
func encodeVarintStructs(dAtA []byte, offset int, v uint64) int {
|
2019-09-04 06:33:29 +00:00
|
|
|
offset -= sovStructs(v)
|
|
|
|
base := offset
|
2016-07-04 10:40:29 +00:00
|
|
|
for v >= 1<<7 {
|
2017-01-03 00:16:21 +00:00
|
|
|
dAtA[offset] = uint8(v&0x7f | 0x80)
|
2016-07-04 10:40:29 +00:00
|
|
|
v >>= 7
|
|
|
|
offset++
|
|
|
|
}
|
2017-01-03 00:16:21 +00:00
|
|
|
dAtA[offset] = uint8(v)
|
2019-09-04 06:33:29 +00:00
|
|
|
return base
|
2016-07-04 10:40:29 +00:00
|
|
|
}
|
|
|
|
func (m *FileVersion) ProtoSize() (n int) {
|
2019-01-14 10:53:36 +00:00
|
|
|
if m == nil {
|
|
|
|
return 0
|
|
|
|
}
|
2016-07-04 10:40:29 +00:00
|
|
|
var l int
|
|
|
|
_ = l
|
|
|
|
l = m.Version.ProtoSize()
|
|
|
|
n += 1 + l + sovStructs(uint64(l))
|
2020-05-11 13:07:06 +00:00
|
|
|
if m.Deleted {
|
|
|
|
n += 2
|
|
|
|
}
|
2020-05-30 07:50:23 +00:00
|
|
|
if len(m.Devices) > 0 {
|
|
|
|
for _, b := range m.Devices {
|
|
|
|
l = len(b)
|
|
|
|
n += 1 + l + sovStructs(uint64(l))
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if len(m.InvalidDevices) > 0 {
|
|
|
|
for _, b := range m.InvalidDevices {
|
|
|
|
l = len(b)
|
|
|
|
n += 1 + l + sovStructs(uint64(l))
|
|
|
|
}
|
|
|
|
}
|
2016-07-04 10:40:29 +00:00
|
|
|
return n
|
|
|
|
}
|
|
|
|
|
|
|
|
func (m *VersionList) ProtoSize() (n int) {
|
2019-01-14 10:53:36 +00:00
|
|
|
if m == nil {
|
|
|
|
return 0
|
|
|
|
}
|
2016-07-04 10:40:29 +00:00
|
|
|
var l int
|
|
|
|
_ = l
|
2020-05-30 07:50:23 +00:00
|
|
|
if len(m.RawVersions) > 0 {
|
|
|
|
for _, e := range m.RawVersions {
|
2016-07-04 10:40:29 +00:00
|
|
|
l = e.ProtoSize()
|
|
|
|
n += 1 + l + sovStructs(uint64(l))
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return n
|
|
|
|
}
|
|
|
|
|
|
|
|
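// ProtoSize returns the encoded size of the message in bytes: each set
// field costs its tag (one byte for field numbers 1-15, two bytes for
// the higher-numbered fields here) plus, for length-delimited fields, a
// length varint and the payload itself.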
func (m *FileInfoTruncated) ProtoSize() (n int) {
|
2019-01-14 10:53:36 +00:00
|
|
|
if m == nil {
|
|
|
|
return 0
|
|
|
|
}
|
2016-07-04 10:40:29 +00:00
|
|
|
var l int
|
|
|
|
_ = l
|
|
|
|
l = len(m.Name)
|
|
|
|
if l > 0 {
|
|
|
|
n += 1 + l + sovStructs(uint64(l))
|
|
|
|
}
|
|
|
|
if m.Type != 0 {
|
|
|
|
n += 1 + sovStructs(uint64(m.Type))
|
|
|
|
}
|
|
|
|
if m.Size != 0 {
|
|
|
|
n += 1 + sovStructs(uint64(m.Size))
|
|
|
|
}
|
|
|
|
if m.Permissions != 0 {
|
|
|
|
n += 1 + sovStructs(uint64(m.Permissions))
|
|
|
|
}
|
2016-08-06 13:05:59 +00:00
|
|
|
if m.ModifiedS != 0 {
|
|
|
|
n += 1 + sovStructs(uint64(m.ModifiedS))
|
2016-07-04 10:40:29 +00:00
|
|
|
}
|
|
|
|
if m.Deleted {
|
|
|
|
n += 2
|
|
|
|
}
|
2018-06-24 07:50:18 +00:00
|
|
|
if m.RawInvalid {
|
2016-07-04 10:40:29 +00:00
|
|
|
n += 2
|
|
|
|
}
|
|
|
|
if m.NoPermissions {
|
|
|
|
n += 2
|
|
|
|
}
|
|
|
|
l = m.Version.ProtoSize()
|
|
|
|
n += 1 + l + sovStructs(uint64(l))
|
2016-07-29 19:54:24 +00:00
|
|
|
if m.Sequence != 0 {
|
|
|
|
n += 1 + sovStructs(uint64(m.Sequence))
|
2016-07-04 10:40:29 +00:00
|
|
|
}
|
2016-08-06 13:05:59 +00:00
|
|
|
if m.ModifiedNs != 0 {
|
|
|
|
n += 1 + sovStructs(uint64(m.ModifiedNs))
|
|
|
|
}
|
2016-12-21 16:35:20 +00:00
|
|
|
if m.ModifiedBy != 0 {
|
|
|
|
n += 1 + sovStructs(uint64(m.ModifiedBy))
|
|
|
|
}
|
2018-04-16 18:08:50 +00:00
|
|
|
if m.RawBlockSize != 0 {
|
|
|
|
n += 1 + sovStructs(uint64(m.RawBlockSize))
|
|
|
|
}
|
2016-12-09 18:02:18 +00:00
|
|
|
l = len(m.SymlinkTarget)
|
|
|
|
if l > 0 {
|
|
|
|
n += 2 + l + sovStructs(uint64(l))
|
|
|
|
}
|
2020-01-24 07:35:44 +00:00
|
|
|
l = len(m.BlocksHash)
|
|
|
|
if l > 0 {
|
|
|
|
n += 2 + l + sovStructs(uint64(l))
|
|
|
|
}
|
2020-11-09 14:33:32 +00:00
|
|
|
l = len(m.Encrypted)
|
|
|
|
if l > 0 {
|
|
|
|
n += 2 + l + sovStructs(uint64(l))
|
|
|
|
}
|
2018-06-24 07:50:18 +00:00
|
|
|
if m.LocalFlags != 0 {
|
|
|
|
n += 2 + sovStructs(uint64(m.LocalFlags))
|
|
|
|
}
|
2020-05-13 12:28:42 +00:00
|
|
|
l = len(m.VersionHash)
|
|
|
|
if l > 0 {
|
|
|
|
n += 2 + l + sovStructs(uint64(l))
|
|
|
|
}
|
2016-07-04 10:40:29 +00:00
|
|
|
return n
|
|
|
|
}
|
|
|
|
|
2020-01-24 07:35:44 +00:00
|
|
|
func (m *BlockList) ProtoSize() (n int) {
|
|
|
|
if m == nil {
|
|
|
|
return 0
|
|
|
|
}
|
|
|
|
var l int
|
|
|
|
_ = l
|
|
|
|
if len(m.Blocks) > 0 {
|
|
|
|
for _, e := range m.Blocks {
|
|
|
|
l = e.ProtoSize()
|
|
|
|
n += 1 + l + sovStructs(uint64(l))
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return n
|
|
|
|
}
|
|
|
|
|
2020-05-13 12:28:42 +00:00
|
|
|
func (m *IndirectionHashesOnly) ProtoSize() (n int) {
|
2020-01-24 07:35:44 +00:00
|
|
|
if m == nil {
|
|
|
|
return 0
|
|
|
|
}
|
|
|
|
var l int
|
|
|
|
_ = l
|
|
|
|
l = len(m.BlocksHash)
|
|
|
|
if l > 0 {
|
|
|
|
n += 2 + l + sovStructs(uint64(l))
|
|
|
|
}
|
2020-05-13 12:28:42 +00:00
|
|
|
l = len(m.VersionHash)
|
|
|
|
if l > 0 {
|
|
|
|
n += 2 + l + sovStructs(uint64(l))
|
|
|
|
}
|
2020-01-24 07:35:44 +00:00
|
|
|
return n
|
|
|
|
}
|
|
|
|
|
2017-12-14 09:51:17 +00:00
|
|
|
func (m *Counts) ProtoSize() (n int) {
|
2019-01-14 10:53:36 +00:00
|
|
|
if m == nil {
|
|
|
|
return 0
|
|
|
|
}
|
2017-12-14 09:51:17 +00:00
|
|
|
var l int
|
|
|
|
_ = l
|
|
|
|
if m.Files != 0 {
|
|
|
|
n += 1 + sovStructs(uint64(m.Files))
|
|
|
|
}
|
|
|
|
if m.Directories != 0 {
|
|
|
|
n += 1 + sovStructs(uint64(m.Directories))
|
|
|
|
}
|
|
|
|
if m.Symlinks != 0 {
|
|
|
|
n += 1 + sovStructs(uint64(m.Symlinks))
|
|
|
|
}
|
|
|
|
if m.Deleted != 0 {
|
|
|
|
n += 1 + sovStructs(uint64(m.Deleted))
|
|
|
|
}
|
|
|
|
if m.Bytes != 0 {
|
|
|
|
n += 1 + sovStructs(uint64(m.Bytes))
|
|
|
|
}
|
|
|
|
if m.Sequence != 0 {
|
|
|
|
n += 1 + sovStructs(uint64(m.Sequence))
|
|
|
|
}
|
|
|
|
l = len(m.DeviceID)
|
|
|
|
if l > 0 {
|
|
|
|
n += 2 + l + sovStructs(uint64(l))
|
|
|
|
}
|
2018-07-12 08:15:57 +00:00
|
|
|
if m.LocalFlags != 0 {
|
|
|
|
n += 2 + sovStructs(uint64(m.LocalFlags))
|
|
|
|
}
|
2017-12-14 09:51:17 +00:00
|
|
|
return n
|
|
|
|
}
|
|
|
|
|
|
|
|
func (m *CountsSet) ProtoSize() (n int) {
|
2019-01-14 10:53:36 +00:00
|
|
|
if m == nil {
|
|
|
|
return 0
|
|
|
|
}
|
2017-12-14 09:51:17 +00:00
|
|
|
var l int
|
|
|
|
_ = l
|
|
|
|
if len(m.Counts) > 0 {
|
|
|
|
for _, e := range m.Counts {
|
|
|
|
l = e.ProtoSize()
|
|
|
|
n += 1 + l + sovStructs(uint64(l))
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if m.Created != 0 {
|
|
|
|
n += 1 + sovStructs(uint64(m.Created))
|
|
|
|
}
|
|
|
|
return n
|
|
|
|
}
|
|
|
|
|
2020-05-30 07:50:23 +00:00
|
|
|
func (m *FileVersionDeprecated) ProtoSize() (n int) {
|
|
|
|
if m == nil {
|
|
|
|
return 0
|
|
|
|
}
|
|
|
|
var l int
|
|
|
|
_ = l
|
|
|
|
l = m.Version.ProtoSize()
|
|
|
|
n += 1 + l + sovStructs(uint64(l))
|
|
|
|
l = len(m.Device)
|
|
|
|
if l > 0 {
|
|
|
|
n += 1 + l + sovStructs(uint64(l))
|
|
|
|
}
|
|
|
|
if m.Invalid {
|
|
|
|
n += 2
|
|
|
|
}
|
|
|
|
if m.Deleted {
|
|
|
|
n += 2
|
|
|
|
}
|
|
|
|
return n
|
|
|
|
}
|
|
|
|
|
|
|
|
func (m *VersionListDeprecated) ProtoSize() (n int) {
|
|
|
|
if m == nil {
|
|
|
|
return 0
|
|
|
|
}
|
|
|
|
var l int
|
|
|
|
_ = l
|
|
|
|
if len(m.Versions) > 0 {
|
|
|
|
for _, e := range m.Versions {
|
|
|
|
l = e.ProtoSize()
|
|
|
|
n += 1 + l + sovStructs(uint64(l))
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return n
|
|
|
|
}
|
|
|
|
|
2020-12-17 18:54:31 +00:00
|
|
|
func (m *ObservedFolder) ProtoSize() (n int) {
|
|
|
|
if m == nil {
|
|
|
|
return 0
|
|
|
|
}
|
|
|
|
var l int
|
|
|
|
_ = l
|
|
|
|
l = github_com_gogo_protobuf_types.SizeOfStdTime(m.Time)
|
|
|
|
n += 1 + l + sovStructs(uint64(l))
|
|
|
|
l = len(m.Label)
|
|
|
|
if l > 0 {
|
|
|
|
n += 1 + l + sovStructs(uint64(l))
|
|
|
|
}
|
|
|
|
return n
|
|
|
|
}
|
|
|
|
|
|
|
|
func (m *ObservedDevice) ProtoSize() (n int) {
|
|
|
|
if m == nil {
|
|
|
|
return 0
|
|
|
|
}
|
|
|
|
var l int
|
|
|
|
_ = l
|
|
|
|
l = github_com_gogo_protobuf_types.SizeOfStdTime(m.Time)
|
|
|
|
n += 1 + l + sovStructs(uint64(l))
|
|
|
|
l = len(m.Name)
|
|
|
|
if l > 0 {
|
|
|
|
n += 1 + l + sovStructs(uint64(l))
|
|
|
|
}
|
|
|
|
l = len(m.Address)
|
|
|
|
if l > 0 {
|
|
|
|
n += 1 + l + sovStructs(uint64(l))
|
|
|
|
}
|
|
|
|
return n
|
|
|
|
}
|
|
|
|
|
2016-07-04 10:40:29 +00:00
|
|
|
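// sovStructs returns the number of bytes needed to varint-encode x: one
// byte per started group of seven bits. For example, sovStructs(300) is
// 2, since 300 needs nine bits.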
func sovStructs(x uint64) (n int) {
|
2019-09-04 06:33:29 +00:00
|
|
|
return (math_bits.Len64(x|1) + 6) / 7
|
2016-07-04 10:40:29 +00:00
|
|
|
}
|
|
|
|
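// sozStructs sizes x after zigzag encoding, which maps signed values to
// small unsigned ones: 0 -> 0, -1 -> 1, 1 -> 2, -2 -> 3, and so on.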
func sozStructs(x uint64) (n int) {
|
|
|
|
return sovStructs(uint64((x << 1) ^ uint64((int64(x) >> 63))))
|
|
|
|
}
|
2017-01-03 00:16:21 +00:00
|
|
|
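// Unmarshal decodes the wire format in a single pass: each iteration
// reads a tag varint (fieldNum<<3 | wireType), dispatches on the field
// number, and skips unrecognised fields via skipStructs.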
func (m *FileVersion) Unmarshal(dAtA []byte) error {
|
|
|
|
l := len(dAtA)
|
2016-07-04 10:40:29 +00:00
|
|
|
iNdEx := 0
|
|
|
|
for iNdEx < l {
|
|
|
|
preIndex := iNdEx
|
|
|
|
var wire uint64
|
|
|
|
for shift := uint(0); ; shift += 7 {
|
|
|
|
if shift >= 64 {
|
|
|
|
return ErrIntOverflowStructs
|
|
|
|
}
|
|
|
|
if iNdEx >= l {
|
|
|
|
return io.ErrUnexpectedEOF
|
|
|
|
}
|
2017-01-03 00:16:21 +00:00
|
|
|
b := dAtA[iNdEx]
|
2016-07-04 10:40:29 +00:00
|
|
|
iNdEx++
|
2019-09-04 06:33:29 +00:00
|
|
|
wire |= uint64(b&0x7F) << shift
|
2016-07-04 10:40:29 +00:00
|
|
|
if b < 0x80 {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
|
|
|
fieldNum := int32(wire >> 3)
|
|
|
|
wireType := int(wire & 0x7)
|
|
|
|
if wireType == 4 {
|
|
|
|
return fmt.Errorf("proto: FileVersion: wiretype end group for non-group")
|
|
|
|
}
|
|
|
|
if fieldNum <= 0 {
|
|
|
|
return fmt.Errorf("proto: FileVersion: illegal tag %d (wire type %d)", fieldNum, wire)
|
|
|
|
}
|
|
|
|
switch fieldNum {
|
|
|
|
case 1:
|
|
|
|
if wireType != 2 {
|
|
|
|
return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
|
|
|
|
}
|
|
|
|
var msglen int
|
|
|
|
for shift := uint(0); ; shift += 7 {
|
|
|
|
if shift >= 64 {
|
|
|
|
return ErrIntOverflowStructs
|
|
|
|
}
|
|
|
|
if iNdEx >= l {
|
|
|
|
return io.ErrUnexpectedEOF
|
|
|
|
}
|
2017-01-03 00:16:21 +00:00
|
|
|
b := dAtA[iNdEx]
|
2016-07-04 10:40:29 +00:00
|
|
|
iNdEx++
|
2019-09-04 06:33:29 +00:00
|
|
|
msglen |= int(b&0x7F) << shift
|
2016-07-04 10:40:29 +00:00
|
|
|
if b < 0x80 {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if msglen < 0 {
|
|
|
|
return ErrInvalidLengthStructs
|
|
|
|
}
|
|
|
|
postIndex := iNdEx + msglen
|
2019-09-04 06:33:29 +00:00
|
|
|
if postIndex < 0 {
|
|
|
|
return ErrInvalidLengthStructs
|
|
|
|
}
|
2016-07-04 10:40:29 +00:00
|
|
|
if postIndex > l {
|
|
|
|
return io.ErrUnexpectedEOF
|
|
|
|
}
|
2017-01-03 00:16:21 +00:00
|
|
|
if err := m.Version.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
2016-07-04 10:40:29 +00:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
iNdEx = postIndex
|
|
|
|
case 2:
|
2020-05-30 07:50:23 +00:00
|
|
|
if wireType != 0 {
|
|
|
|
return fmt.Errorf("proto: wrong wireType = %d for field Deleted", wireType)
|
2016-07-04 10:40:29 +00:00
|
|
|
}
|
2020-05-30 07:50:23 +00:00
|
|
|
var v int
|
2016-07-04 10:40:29 +00:00
|
|
|
for shift := uint(0); ; shift += 7 {
|
|
|
|
if shift >= 64 {
|
|
|
|
return ErrIntOverflowStructs
|
|
|
|
}
|
|
|
|
if iNdEx >= l {
|
|
|
|
return io.ErrUnexpectedEOF
|
|
|
|
}
|
2017-01-03 00:16:21 +00:00
|
|
|
b := dAtA[iNdEx]
|
2016-07-04 10:40:29 +00:00
|
|
|
iNdEx++
|
2020-05-30 07:50:23 +00:00
|
|
|
v |= int(b&0x7F) << shift
|
2016-07-04 10:40:29 +00:00
|
|
|
if b < 0x80 {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
2020-05-30 07:50:23 +00:00
|
|
|
m.Deleted = bool(v != 0)
|
2017-11-11 19:18:17 +00:00
|
|
|
case 3:
|
2020-05-30 07:50:23 +00:00
|
|
|
if wireType != 2 {
|
|
|
|
return fmt.Errorf("proto: wrong wireType = %d for field Devices", wireType)
|
2017-11-11 19:18:17 +00:00
|
|
|
}
|
2020-05-30 07:50:23 +00:00
|
|
|
var byteLen int
|
2017-11-11 19:18:17 +00:00
|
|
|
for shift := uint(0); ; shift += 7 {
|
|
|
|
if shift >= 64 {
|
|
|
|
return ErrIntOverflowStructs
|
|
|
|
}
|
|
|
|
if iNdEx >= l {
|
|
|
|
return io.ErrUnexpectedEOF
|
|
|
|
}
|
|
|
|
b := dAtA[iNdEx]
|
|
|
|
iNdEx++
|
2020-05-30 07:50:23 +00:00
|
|
|
byteLen |= int(b&0x7F) << shift
|
2017-11-11 19:18:17 +00:00
|
|
|
if b < 0x80 {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
2020-05-30 07:50:23 +00:00
|
|
|
if byteLen < 0 {
|
|
|
|
return ErrInvalidLengthStructs
|
|
|
|
}
|
|
|
|
postIndex := iNdEx + byteLen
|
|
|
|
if postIndex < 0 {
|
|
|
|
return ErrInvalidLengthStructs
|
|
|
|
}
|
|
|
|
if postIndex > l {
|
|
|
|
return io.ErrUnexpectedEOF
|
|
|
|
}
|
|
|
|
m.Devices = append(m.Devices, make([]byte, postIndex-iNdEx))
|
|
|
|
copy(m.Devices[len(m.Devices)-1], dAtA[iNdEx:postIndex])
|
|
|
|
iNdEx = postIndex
|
2020-05-11 13:07:06 +00:00
|
|
|
case 4:
|
2020-05-30 07:50:23 +00:00
|
|
|
if wireType != 2 {
|
|
|
|
return fmt.Errorf("proto: wrong wireType = %d for field InvalidDevices", wireType)
|
2020-05-11 13:07:06 +00:00
|
|
|
}
|
2020-05-30 07:50:23 +00:00
|
|
|
var byteLen int
|
2020-05-11 13:07:06 +00:00
|
|
|
for shift := uint(0); ; shift += 7 {
|
|
|
|
if shift >= 64 {
|
|
|
|
return ErrIntOverflowStructs
|
|
|
|
}
|
|
|
|
if iNdEx >= l {
|
|
|
|
return io.ErrUnexpectedEOF
|
|
|
|
}
|
|
|
|
b := dAtA[iNdEx]
|
|
|
|
iNdEx++
|
2020-05-30 07:50:23 +00:00
|
|
|
byteLen |= int(b&0x7F) << shift
|
2020-05-11 13:07:06 +00:00
|
|
|
if b < 0x80 {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
2020-05-30 07:50:23 +00:00
|
|
|
if byteLen < 0 {
|
|
|
|
return ErrInvalidLengthStructs
|
|
|
|
}
|
|
|
|
postIndex := iNdEx + byteLen
|
|
|
|
if postIndex < 0 {
|
|
|
|
return ErrInvalidLengthStructs
|
|
|
|
}
|
|
|
|
if postIndex > l {
|
|
|
|
return io.ErrUnexpectedEOF
|
|
|
|
}
|
|
|
|
m.InvalidDevices = append(m.InvalidDevices, make([]byte, postIndex-iNdEx))
|
|
|
|
copy(m.InvalidDevices[len(m.InvalidDevices)-1], dAtA[iNdEx:postIndex])
|
|
|
|
iNdEx = postIndex
|
2016-07-04 10:40:29 +00:00
|
|
|
default:
|
|
|
|
iNdEx = preIndex
|
2017-01-03 00:16:21 +00:00
|
|
|
skippy, err := skipStructs(dAtA[iNdEx:])
|
2016-07-04 10:40:29 +00:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
if skippy < 0 {
|
|
|
|
return ErrInvalidLengthStructs
|
|
|
|
}
|
2019-09-04 06:33:29 +00:00
|
|
|
if (iNdEx + skippy) < 0 {
|
|
|
|
return ErrInvalidLengthStructs
|
|
|
|
}
|
2016-07-04 10:40:29 +00:00
|
|
|
if (iNdEx + skippy) > l {
|
|
|
|
return io.ErrUnexpectedEOF
|
|
|
|
}
|
|
|
|
iNdEx += skippy
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if iNdEx > l {
|
|
|
|
return io.ErrUnexpectedEOF
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
2017-01-03 00:16:21 +00:00
|
|
|
func (m *VersionList) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowStructs
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: VersionList: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: VersionList: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field RawVersions", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowStructs
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthStructs
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				return ErrInvalidLengthStructs
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.RawVersions = append(m.RawVersions, FileVersion{})
			if err := m.RawVersions[len(m.RawVersions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			iNdEx = preIndex
			skippy, err := skipStructs(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthStructs
			}
			if (iNdEx + skippy) < 0 {
				return ErrInvalidLengthStructs
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
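
// Length-delimited fields (wireType 2) decode in two steps: a varint
// length prefix, then a bounds-checked sub-slice dAtA[iNdEx:postIndex]
// handed to the element's own Unmarshal. Repeated messages such as
// RawVersions first append a zero value and decode into it in place, so
// unmarshalling into an already-populated VersionList accumulates
// entries. A brief usage sketch, where raw is assumed to hold
// wire-format VersionList bytes:
//
//	var vl VersionList
//	if err := vl.Unmarshal(raw); err != nil {
//		return err // truncated or corrupt record
//	}
//	versions := vl.RawVersions // one FileVersion per encoded element
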
func (m *FileInfoTruncated) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowStructs
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: FileInfoTruncated: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: FileInfoTruncated: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowStructs
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= uint64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthStructs
			}
			postIndex := iNdEx + intStringLen
			if postIndex < 0 {
				return ErrInvalidLengthStructs
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Name = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		case 2:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
			}
			m.Type = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowStructs
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.Type |= protocol.FileInfoType(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 3:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Size", wireType)
			}
			m.Size = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowStructs
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.Size |= int64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 4:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Permissions", wireType)
			}
			m.Permissions = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowStructs
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.Permissions |= uint32(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 5:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field ModifiedS", wireType)
			}
			m.ModifiedS = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowStructs
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.ModifiedS |= int64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 6:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Deleted", wireType)
			}
			var v int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowStructs
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				v |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			m.Deleted = bool(v != 0)
		case 7:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field RawInvalid", wireType)
			}
			var v int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowStructs
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				v |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			m.RawInvalid = bool(v != 0)
		case 8:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field NoPermissions", wireType)
			}
			var v int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowStructs
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				v |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			m.NoPermissions = bool(v != 0)
		case 9:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowStructs
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthStructs
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				return ErrInvalidLengthStructs
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if err := m.Version.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 10:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Sequence", wireType)
			}
			m.Sequence = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowStructs
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.Sequence |= int64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 11:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field ModifiedNs", wireType)
			}
			m.ModifiedNs = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowStructs
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.ModifiedNs |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 12:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field ModifiedBy", wireType)
			}
			m.ModifiedBy = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowStructs
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.ModifiedBy |= github_com_syncthing_syncthing_lib_protocol.ShortID(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 13:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field RawBlockSize", wireType)
			}
			m.RawBlockSize = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowStructs
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.RawBlockSize |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 17:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field SymlinkTarget", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowStructs
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= uint64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthStructs
			}
			postIndex := iNdEx + intStringLen
			if postIndex < 0 {
				return ErrInvalidLengthStructs
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.SymlinkTarget = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		case 18:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field BlocksHash", wireType)
			}
			var byteLen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowStructs
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				byteLen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if byteLen < 0 {
				return ErrInvalidLengthStructs
			}
			postIndex := iNdEx + byteLen
			if postIndex < 0 {
				return ErrInvalidLengthStructs
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.BlocksHash = append(m.BlocksHash[:0], dAtA[iNdEx:postIndex]...)
			if m.BlocksHash == nil {
				m.BlocksHash = []byte{}
			}
			iNdEx = postIndex
		case 19:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Encrypted", wireType)
			}
			var byteLen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowStructs
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				byteLen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if byteLen < 0 {
				return ErrInvalidLengthStructs
			}
			postIndex := iNdEx + byteLen
			if postIndex < 0 {
				return ErrInvalidLengthStructs
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Encrypted = append(m.Encrypted[:0], dAtA[iNdEx:postIndex]...)
			if m.Encrypted == nil {
				m.Encrypted = []byte{}
			}
			iNdEx = postIndex
		case 1000:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field LocalFlags", wireType)
			}
			m.LocalFlags = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowStructs
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.LocalFlags |= uint32(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 1001:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field VersionHash", wireType)
			}
			var byteLen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowStructs
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				byteLen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if byteLen < 0 {
				return ErrInvalidLengthStructs
			}
			postIndex := iNdEx + byteLen
			if postIndex < 0 {
				return ErrInvalidLengthStructs
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.VersionHash = append(m.VersionHash[:0], dAtA[iNdEx:postIndex]...)
			if m.VersionHash == nil {
				m.VersionHash = []byte{}
			}
			iNdEx = postIndex
		default:
			iNdEx = preIndex
			skippy, err := skipStructs(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthStructs
			}
			if (iNdEx + skippy) < 0 {
				return ErrInvalidLengthStructs
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
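
// Field numbers not listed in a switch above fall through to the default
// branch, where the generated skipStructs helper advances past one
// complete key/value pair. That is what keeps these decoders forward
// compatible: records written by a schema with extra fields still parse,
// with the unknown data skipped. The three checks on skippy guard
// against negative lengths and index overflow before moving iNdEx.
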
func (m *BlockList) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowStructs
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: BlockList: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: BlockList: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Blocks", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowStructs
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthStructs
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				return ErrInvalidLengthStructs
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Blocks = append(m.Blocks, protocol.BlockInfo{})
			if err := m.Blocks[len(m.Blocks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			iNdEx = preIndex
			skippy, err := skipStructs(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthStructs
			}
			if (iNdEx + skippy) < 0 {
				return ErrInvalidLengthStructs
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
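
// A minimal usage sketch for the decoder above, assuming raw holds a
// wire-format BlockList (for example, a value read back from the
// database's block-list keyspace):
//
//	var bl BlockList
//	if err := bl.Unmarshal(raw); err != nil {
//		return err
//	}
//	for _, blk := range bl.Blocks {
//		_ = blk // blk is a protocol.BlockInfo
//	}
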
func (m *IndirectionHashesOnly) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowStructs
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: IndirectionHashesOnly: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: IndirectionHashesOnly: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 18:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field BlocksHash", wireType)
			}
			var byteLen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowStructs
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				byteLen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if byteLen < 0 {
				return ErrInvalidLengthStructs
			}
			postIndex := iNdEx + byteLen
			if postIndex < 0 {
				return ErrInvalidLengthStructs
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.BlocksHash = append(m.BlocksHash[:0], dAtA[iNdEx:postIndex]...)
			if m.BlocksHash == nil {
				m.BlocksHash = []byte{}
			}
			iNdEx = postIndex
		case 1001:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field VersionHash", wireType)
			}
			var byteLen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowStructs
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				byteLen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if byteLen < 0 {
				return ErrInvalidLengthStructs
			}
			postIndex := iNdEx + byteLen
			if postIndex < 0 {
				return ErrInvalidLengthStructs
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.VersionHash = append(m.VersionHash[:0], dAtA[iNdEx:postIndex]...)
			if m.VersionHash == nil {
				m.VersionHash = []byte{}
			}
			iNdEx = postIndex
		default:
			iNdEx = preIndex
			skippy, err := skipStructs(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthStructs
			}
			if (iNdEx + skippy) < 0 {
				return ErrInvalidLengthStructs
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
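
// IndirectionHashesOnly decodes only tags 18 (BlocksHash) and 1001
// (VersionHash), the same field numbers FileInfoTruncated uses for those
// fields, and skips everything else. It can therefore be unmarshalled
// over the bytes of a full file record to pull out just the indirection
// hashes without decoding the rest. A sketch, with fileInfoBytes assumed
// to be such a record:
//
//	var ih IndirectionHashesOnly
//	if err := ih.Unmarshal(fileInfoBytes); err != nil {
//		return err
//	}
//	blocksKey, versionKey := ih.BlocksHash, ih.VersionHash
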
func (m *Counts) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowStructs
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: Counts: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: Counts: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Files", wireType)
			}
			m.Files = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowStructs
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.Files |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 2:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Directories", wireType)
			}
			m.Directories = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowStructs
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.Directories |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 3:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Symlinks", wireType)
			}
			m.Symlinks = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowStructs
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.Symlinks |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 4:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Deleted", wireType)
			}
			m.Deleted = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowStructs
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.Deleted |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 5:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Bytes", wireType)
			}
			m.Bytes = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowStructs
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.Bytes |= int64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 6:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Sequence", wireType)
			}
			m.Sequence = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowStructs
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.Sequence |= int64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 17:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field DeviceID", wireType)
			}
			var byteLen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowStructs
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				byteLen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if byteLen < 0 {
				return ErrInvalidLengthStructs
			}
			postIndex := iNdEx + byteLen
			if postIndex < 0 {
				return ErrInvalidLengthStructs
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.DeviceID = append(m.DeviceID[:0], dAtA[iNdEx:postIndex]...)
			if m.DeviceID == nil {
				m.DeviceID = []byte{}
			}
			iNdEx = postIndex
		case 18:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field LocalFlags", wireType)
			}
			m.LocalFlags = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowStructs
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.LocalFlags |= uint32(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		default:
			iNdEx = preIndex
			skippy, err := skipStructs(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthStructs
			}
			if (iNdEx + skippy) < 0 {
				return ErrInvalidLengthStructs
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
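
// Each scalar case above first resets its field to zero and then ORs in
// the varint bytes, so if the same field number occurs more than once in
// a stream the last occurrence wins, per proto3 scalar semantics. As a
// worked example, the two-byte varint 0xAC 0x02 decodes as
//
//	(0xAC & 0x7F) | (0x02 << 7) = 44 + 256 = 300
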
func (m *CountsSet) Unmarshal(dAtA []byte) error {
|
|
|
|
l := len(dAtA)
|
|
|
|
iNdEx := 0
|
|
|
|
for iNdEx < l {
|
|
|
|
preIndex := iNdEx
|
|
|
|
var wire uint64
|
|
|
|
for shift := uint(0); ; shift += 7 {
|
|
|
|
if shift >= 64 {
|
|
|
|
return ErrIntOverflowStructs
|
|
|
|
}
|
|
|
|
if iNdEx >= l {
|
|
|
|
return io.ErrUnexpectedEOF
|
|
|
|
}
|
|
|
|
b := dAtA[iNdEx]
|
|
|
|
iNdEx++
|
2019-09-04 06:33:29 +00:00
|
|
|
wire |= uint64(b&0x7F) << shift
|
2017-12-14 09:51:17 +00:00
|
|
|
if b < 0x80 {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
|
|
|
fieldNum := int32(wire >> 3)
|
|
|
|
wireType := int(wire & 0x7)
|
|
|
|
if wireType == 4 {
|
|
|
|
return fmt.Errorf("proto: CountsSet: wiretype end group for non-group")
|
|
|
|
}
|
|
|
|
if fieldNum <= 0 {
|
|
|
|
return fmt.Errorf("proto: CountsSet: illegal tag %d (wire type %d)", fieldNum, wire)
|
|
|
|
}
|
|
|
|
switch fieldNum {
|
|
|
|
case 1:
|
|
|
|
if wireType != 2 {
|
|
|
|
return fmt.Errorf("proto: wrong wireType = %d for field Counts", wireType)
|
|
|
|
}
|
|
|
|
var msglen int
|
|
|
|
for shift := uint(0); ; shift += 7 {
|
|
|
|
if shift >= 64 {
|
|
|
|
return ErrIntOverflowStructs
|
|
|
|
}
|
|
|
|
if iNdEx >= l {
|
|
|
|
return io.ErrUnexpectedEOF
|
|
|
|
}
|
|
|
|
b := dAtA[iNdEx]
|
|
|
|
iNdEx++
|
2019-09-04 06:33:29 +00:00
|
|
|
msglen |= int(b&0x7F) << shift
|
2017-12-14 09:51:17 +00:00
|
|
|
if b < 0x80 {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if msglen < 0 {
|
|
|
|
return ErrInvalidLengthStructs
|
|
|
|
}
|
|
|
|
postIndex := iNdEx + msglen
|
2019-09-04 06:33:29 +00:00
|
|
|
if postIndex < 0 {
|
|
|
|
return ErrInvalidLengthStructs
|
|
|
|
}
|
2017-12-14 09:51:17 +00:00
|
|
|
if postIndex > l {
|
|
|
|
return io.ErrUnexpectedEOF
|
|
|
|
}
|
|
|
|
m.Counts = append(m.Counts, Counts{})
|
|
|
|
if err := m.Counts[len(m.Counts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
iNdEx = postIndex
|
|
|
|
case 2:
|
|
|
|
if wireType != 0 {
|
|
|
|
return fmt.Errorf("proto: wrong wireType = %d for field Created", wireType)
|
|
|
|
}
|
|
|
|
m.Created = 0
|
|
|
|
for shift := uint(0); ; shift += 7 {
|
|
|
|
if shift >= 64 {
|
|
|
|
return ErrIntOverflowStructs
|
|
|
|
}
|
|
|
|
if iNdEx >= l {
|
|
|
|
return io.ErrUnexpectedEOF
|
|
|
|
}
|
|
|
|
b := dAtA[iNdEx]
|
|
|
|
iNdEx++
|
2019-09-04 06:33:29 +00:00
|
|
|
m.Created |= int64(b&0x7F) << shift
|
2017-12-14 09:51:17 +00:00
|
|
|
if b < 0x80 {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
|
|
|
default:
|
|
|
|
iNdEx = preIndex
|
|
|
|
skippy, err := skipStructs(dAtA[iNdEx:])
|
2020-05-30 07:50:23 +00:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
if skippy < 0 {
|
|
|
|
return ErrInvalidLengthStructs
|
|
|
|
}
|
|
|
|
if (iNdEx + skippy) < 0 {
|
|
|
|
return ErrInvalidLengthStructs
|
|
|
|
}
|
|
|
|
if (iNdEx + skippy) > l {
|
|
|
|
return io.ErrUnexpectedEOF
|
|
|
|
}
|
|
|
|
iNdEx += skippy
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if iNdEx > l {
|
|
|
|
return io.ErrUnexpectedEOF
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
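
// roundTripCountsSetSketch is an illustrative, hand-written usage sketch (not
// part of the generated code): it encodes a CountsSet and decodes it again
// with the Unmarshal method above. It assumes only the generated API of this
// file, including the generated Marshal method defined earlier.
func roundTripCountsSetSketch(in CountsSet) (CountsSet, error) {
	buf, err := in.Marshal()
	if err != nil {
		return CountsSet{}, err
	}
	var out CountsSet
	if err := out.Unmarshal(buf); err != nil {
		return CountsSet{}, err
	}
	return out, nil
}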

func (m *FileVersionDeprecated) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowStructs
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: FileVersionDeprecated: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: FileVersionDeprecated: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowStructs
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthStructs
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				return ErrInvalidLengthStructs
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if err := m.Version.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Device", wireType)
			}
			var byteLen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowStructs
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				byteLen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if byteLen < 0 {
				return ErrInvalidLengthStructs
			}
			postIndex := iNdEx + byteLen
			if postIndex < 0 {
				return ErrInvalidLengthStructs
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Device = append(m.Device[:0], dAtA[iNdEx:postIndex]...)
			if m.Device == nil {
				m.Device = []byte{}
			}
			iNdEx = postIndex
		case 3:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Invalid", wireType)
			}
			var v int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowStructs
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				v |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			m.Invalid = bool(v != 0)
		case 4:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Deleted", wireType)
			}
			var v int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowStructs
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				v |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			m.Deleted = bool(v != 0)
		default:
			iNdEx = preIndex
			skippy, err := skipStructs(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthStructs
			}
			if (iNdEx + skippy) < 0 {
				return ErrInvalidLengthStructs
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
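
// splitTagSketch is an illustrative, hand-written helper (not part of the
// generated code) showing how the key varint read at the top of each decode
// loop above is split: the low three bits carry the wire type (0 varint,
// 1 fixed64, 2 length-delimited, 5 fixed32) and the remaining bits the field
// number. A sketch mirroring the generated expressions.
func splitTagSketch(wire uint64) (fieldNum int32, wireType int) {
	return int32(wire >> 3), int(wire & 0x7)
}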

func (m *VersionListDeprecated) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowStructs
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: VersionListDeprecated: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: VersionListDeprecated: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Versions", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowStructs
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthStructs
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				return ErrInvalidLengthStructs
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Versions = append(m.Versions, FileVersionDeprecated{})
			if err := m.Versions[len(m.Versions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			iNdEx = preIndex
			skippy, err := skipStructs(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthStructs
			}
			if (iNdEx + skippy) < 0 {
				return ErrInvalidLengthStructs
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
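
// appendDecodedVersionSketch is an illustrative, hand-written helper (not part
// of the generated code) showing the repeated-message pattern used above: each
// occurrence of field 1 carries a length-delimited sub-message that is decoded
// into a fresh element appended to the slice. It assumes only the generated
// Unmarshal on FileVersionDeprecated from this file.
func appendDecodedVersionSketch(dst []FileVersionDeprecated, payload []byte) ([]FileVersionDeprecated, error) {
	dst = append(dst, FileVersionDeprecated{})
	if err := dst[len(dst)-1].Unmarshal(payload); err != nil {
		return nil, err
	}
	return dst, nil
}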

func (m *ObservedFolder) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowStructs
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: ObservedFolder: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: ObservedFolder: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Time", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowStructs
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthStructs
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				return ErrInvalidLengthStructs
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.Time, dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Label", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowStructs
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= uint64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthStructs
			}
			postIndex := iNdEx + intStringLen
			if postIndex < 0 {
				return ErrInvalidLengthStructs
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Label = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		default:
			iNdEx = preIndex
			skippy, err := skipStructs(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthStructs
			}
			if (iNdEx + skippy) < 0 {
				return ErrInvalidLengthStructs
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
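
// decodeLengthDelimitedSketch is an illustrative, hand-written helper (not
// part of the generated code) showing the wire type 2 pattern used above for
// fields such as Label: a varint length prefix followed by that many payload
// bytes. It builds on decodeUvarintSketch defined earlier in this file and
// returns the payload plus the total number of bytes consumed.
func decodeLengthDelimitedSketch(dAtA []byte) ([]byte, int, error) {
	length, n, err := decodeUvarintSketch(dAtA)
	if err != nil {
		return nil, 0, err
	}
	if length > uint64(len(dAtA)-n) {
		return nil, 0, io.ErrUnexpectedEOF
	}
	end := n + int(length)
	return dAtA[n:end], end, nil
}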

func (m *ObservedDevice) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowStructs
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: ObservedDevice: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: ObservedDevice: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Time", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowStructs
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthStructs
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				return ErrInvalidLengthStructs
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.Time, dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowStructs
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= uint64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthStructs
			}
			postIndex := iNdEx + intStringLen
			if postIndex < 0 {
				return ErrInvalidLengthStructs
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Name = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		case 3:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Address", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowStructs
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= uint64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthStructs
			}
			postIndex := iNdEx + intStringLen
			if postIndex < 0 {
				return ErrInvalidLengthStructs
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Address = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		default:
			iNdEx = preIndex
			skippy, err := skipStructs(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthStructs
			}
			if (iNdEx + skippy) < 0 {
				return ErrInvalidLengthStructs
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
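
// decodeStdTimeSketch is an illustrative, hand-written helper (not part of
// the generated code) showing the timestamp pattern used above for the Time
// fields: the length-delimited payload is a google.protobuf.Timestamp, which
// the gogo helper StdTimeUnmarshal converts into a time.Time in place. A
// sketch assuming only the imports already present in this file.
func decodeStdTimeSketch(payload []byte) (time.Time, error) {
	var t time.Time
	if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&t, payload); err != nil {
		return time.Time{}, err
	}
	return t, nil
}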

func skipStructs(dAtA []byte) (n int, err error) {
	l := len(dAtA)
	iNdEx := 0
	depth := 0
	for iNdEx < l {
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return 0, ErrIntOverflowStructs
			}
			if iNdEx >= l {
				return 0, io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		wireType := int(wire & 0x7)
		switch wireType {
		case 0:
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return 0, ErrIntOverflowStructs
				}
				if iNdEx >= l {
					return 0, io.ErrUnexpectedEOF
				}
				iNdEx++
				if dAtA[iNdEx-1] < 0x80 {
					break
				}
			}
		case 1:
			iNdEx += 8
		case 2:
			var length int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return 0, ErrIntOverflowStructs
				}
				if iNdEx >= l {
					return 0, io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				length |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if length < 0 {
				return 0, ErrInvalidLengthStructs
			}
			iNdEx += length
		case 3:
			depth++
		case 4:
			if depth == 0 {
				return 0, ErrUnexpectedEndOfGroupStructs
			}
			depth--
		case 5:
			iNdEx += 4
		default:
			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
		}
		if iNdEx < 0 {
			return 0, ErrInvalidLengthStructs
		}
		if depth == 0 {
			return iNdEx, nil
		}
	}
	return 0, io.ErrUnexpectedEOF
}
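
// advancePastUnknownSketch is an illustrative, hand-written helper (not part
// of the generated code) showing how the default branches above use
// skipStructs: the unknown field starting at iNdEx is measured and skipped,
// which is what lets older code ignore fields added by newer schemas. It
// returns the index of the first byte after the skipped field, with the same
// bounds checks the generated default branches perform.
func advancePastUnknownSketch(dAtA []byte, iNdEx int) (int, error) {
	skippy, err := skipStructs(dAtA[iNdEx:])
	if err != nil {
		return 0, err
	}
	if skippy < 0 || iNdEx+skippy < 0 {
		return 0, ErrInvalidLengthStructs
	}
	if iNdEx+skippy > len(dAtA) {
		return 0, io.ErrUnexpectedEOF
	}
	return iNdEx + skippy, nil
}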

var (
	ErrInvalidLengthStructs        = fmt.Errorf("proto: negative length found during unmarshaling")
	ErrIntOverflowStructs          = fmt.Errorf("proto: integer overflow")
	ErrUnexpectedEndOfGroupStructs = fmt.Errorf("proto: unexpected end of group")
)