// Copyright (C) 2014 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.

package db_test

import (
	"bytes"
	"fmt"
	"os"
	"path/filepath"
	"sort"
	"testing"
	"time"

	"github.com/d4l3k/messagediff"

	"github.com/syncthing/syncthing/lib/db"
	"github.com/syncthing/syncthing/lib/db/backend"
	"github.com/syncthing/syncthing/lib/events"
	"github.com/syncthing/syncthing/lib/protocol"
)

var remoteDevice0, remoteDevice1 protocol.DeviceID

func init() {
	remoteDevice0, _ = protocol.DeviceIDFromString("AIR6LPZ-7K4PTTV-UXQSMUU-CPQ5YWH-OEDFIIQ-JUG777G-2YQXXR5-YD6AWQR")
	remoteDevice1, _ = protocol.DeviceIDFromString("I6KAH76-66SLLLB-5PFXSOA-UFJCDZC-YAOMLEK-CP2GB32-BV5RQST-3PSROAU")
}

const myID = 1

// genBlocks returns n deterministic test blocks; block i has Size i and a
// 32-byte hash whose byte j is i+j.
func genBlocks(n int) []protocol.BlockInfo {
	b := make([]protocol.BlockInfo, n)
	for i := range b {
		h := make([]byte, 32)
		for j := range h {
			h[j] = byte(i + j)
		}
		b[i].Size = i
		b[i].Hash = h
	}
	return b
}

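// As a worked example, genBlocks(2) produces blocks equivalent to:
//
//	[]protocol.BlockInfo{
//		{Size: 0, Hash: []byte{0, 1, 2 /* ... */, 31}},
//		{Size: 1, Hash: []byte{1, 2, 3 /* ... */, 32}},
//	}
//
// The determinism matters: the same argument always yields the same block
// list, so the BlocksHash values set below are stable across assertions.
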
func globalList(t testing.TB, s *db.FileSet) []protocol.FileInfo {
	var fs []protocol.FileInfo
	snap := snapshot(t, s)
	defer snap.Release()
	snap.WithGlobal(func(fi protocol.FileInfo) bool {
		fs = append(fs, fi)
		return true
	})
	return fs
}

func globalListPrefixed(t testing.TB, s *db.FileSet, prefix string) []protocol.FileInfo {
	var fs []protocol.FileInfo
	snap := snapshot(t, s)
	defer snap.Release()
	snap.WithPrefixedGlobalTruncated(prefix, func(fi protocol.FileInfo) bool {
		fs = append(fs, fi)
		return true
	})
	return fs
}

func haveList(t testing.TB, s *db.FileSet, n protocol.DeviceID) []protocol.FileInfo {
	var fs []protocol.FileInfo
	snap := snapshot(t, s)
	defer snap.Release()
	snap.WithHave(n, func(fi protocol.FileInfo) bool {
		fs = append(fs, fi)
		return true
	})
	return fs
}

func haveListPrefixed(t testing.TB, s *db.FileSet, n protocol.DeviceID, prefix string) []protocol.FileInfo {
	var fs []protocol.FileInfo
	snap := snapshot(t, s)
	defer snap.Release()
	snap.WithPrefixedHaveTruncated(n, prefix, func(fi protocol.FileInfo) bool {
		fs = append(fs, fi)
		return true
	})
	return fs
}

func needList(t testing.TB, s *db.FileSet, n protocol.DeviceID) []protocol.FileInfo {
	var fs []protocol.FileInfo
	snap := snapshot(t, s)
	defer snap.Release()
	snap.WithNeed(n, func(fi protocol.FileInfo) bool {
		fs = append(fs, fi)
		return true
	})
	return fs
}

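// The query helpers above all follow the same shape: take a metadata
// snapshot, iterate one view of it (global, have, or need, optionally
// truncated and prefix-filtered), and collect the results into a slice.
// They rely on snapshot, newFileSet, and the other small constructors
// defined elsewhere in this test package.
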
// fileList is a list of FileInfos, sortable by name, with a compact String
// form for readable test failure output.
type fileList []protocol.FileInfo

func (l fileList) Len() int {
	return len(l)
}

func (l fileList) Less(a, b int) bool {
	return l[a].Name < l[b].Name
}

func (l fileList) Swap(a, b int) {
	l[a], l[b] = l[b], l[a]
}

func (l fileList) String() string {
	var b bytes.Buffer
	b.WriteString("[]protocol.FileList{\n")
	for _, f := range l {
		fmt.Fprintf(&b, "  %q: #%v, %d bytes, %d blocks, perms=%o\n", f.Name, f.Version, f.Size, len(f.Blocks), f.Permissions)
	}
	b.WriteString("}")
	return b.String()
}

// setSequence assigns consecutive sequence numbers starting at seq+1 and
// returns the last number used, so a second list can continue where the
// first left off.
func setSequence(seq int64, files fileList) int64 {
	for i := range files {
		seq++
		files[i].Sequence = seq
	}
	return seq
}

func setBlocksHash(files fileList) {
	for i, f := range files {
		files[i].BlocksHash = protocol.BlocksHash(f.Blocks)
	}
}

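// Most assertions below follow one pattern: wrap a query helper in
// fileList, sort it, and compare the formatted lists. A sketch of the
// pattern (not an additional test of its own):
//
//	g := fileList(globalList(t, m))
//	sort.Sort(g)
//	if fmt.Sprint(g) != fmt.Sprint(expectedGlobal) {
//		t.Errorf("Global incorrect;\n A: %v !=\n E: %v", g, expectedGlobal)
//	}
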
func TestGlobalSet(t *testing.T) {
	ldb := newLowlevelMemory(t)
	defer ldb.Close()

	m := newFileSet(t, "test", ldb)

	local0 := fileList{
		protocol.FileInfo{Name: "a", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1000}}}, Blocks: genBlocks(1)},
		protocol.FileInfo{Name: "b", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1000}}}, Blocks: genBlocks(2)},
		protocol.FileInfo{Name: "c", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1000}}}, Blocks: genBlocks(3)},
		protocol.FileInfo{Name: "d", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1000}}}, Blocks: genBlocks(4)},
		protocol.FileInfo{Name: "z", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1000}}}, Blocks: genBlocks(8)},
	}
	localSeq := setSequence(0, local0)
	setBlocksHash(local0)
	local1 := fileList{
		protocol.FileInfo{Name: "a", Sequence: 6, Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1000}}}, Blocks: genBlocks(1)},
		protocol.FileInfo{Name: "b", Sequence: 7, Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1000}}}, Blocks: genBlocks(2)},
		protocol.FileInfo{Name: "c", Sequence: 8, Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1000}}}, Blocks: genBlocks(3)},
		protocol.FileInfo{Name: "d", Sequence: 9, Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1000}}}, Blocks: genBlocks(4)},
		protocol.FileInfo{Name: "z", Sequence: 10, Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1001}}}, Deleted: true},
	}
	setSequence(localSeq, local1)
	setBlocksHash(local1)
	localTot := fileList{
		local1[0],
		local1[1],
		local1[2],
		local1[3],
		protocol.FileInfo{Name: "z", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1001}}}, Deleted: true},
	}

	remote0 := fileList{
		protocol.FileInfo{Name: "a", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1000}}}, Blocks: genBlocks(1)},
		protocol.FileInfo{Name: "b", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1000}}}, Blocks: genBlocks(2)},
		protocol.FileInfo{Name: "c", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1001}}}, Blocks: genBlocks(5)},
	}
	remoteSeq := setSequence(0, remote0)
	setBlocksHash(remote0)
	remote1 := fileList{
		protocol.FileInfo{Name: "b", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1001}}}, Blocks: genBlocks(6)},
		protocol.FileInfo{Name: "e", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1000}}}, Blocks: genBlocks(7)},
	}
	setSequence(remoteSeq, remote1)
	setBlocksHash(remote1)
	remoteTot := fileList{
		remote0[0],
		remote1[0],
		remote0[2],
		remote1[1],
	}

	expectedGlobal := fileList{
		remote0[0],  // a
		remote1[0],  // b
		remote0[2],  // c
		localTot[3], // d
		remote1[1],  // e
		localTot[4], // z
	}

	expectedLocalNeed := fileList{
		remote1[0],
		remote0[2],
		remote1[1],
	}

	expectedRemoteNeed := fileList{
		local0[3],
	}

	replace(m, protocol.LocalDeviceID, local0)
	replace(m, protocol.LocalDeviceID, local1)
	replace(m, remoteDevice0, remote0)
	m.Update(remoteDevice0, remote1)

	check := func() {
		t.Helper()

		g := fileList(globalList(t, m))
		sort.Sort(g)

		if fmt.Sprint(g) != fmt.Sprint(expectedGlobal) {
			t.Errorf("Global incorrect;\n A: %v !=\n E: %v", g, expectedGlobal)
		}

		var globalFiles, globalDirectories, globalDeleted int
		var globalBytes int64
		for _, f := range g {
			if f.IsInvalid() {
				continue
			}
			switch {
			case f.IsDeleted():
				globalDeleted++
			case f.IsDirectory():
				globalDirectories++
			default:
				globalFiles++
			}
			globalBytes += f.FileSize()
		}
		gs := globalSize(t, m)
		if gs.Files != globalFiles {
			t.Errorf("Incorrect GlobalSize files; %d != %d", gs.Files, globalFiles)
		}
		if gs.Directories != globalDirectories {
			t.Errorf("Incorrect GlobalSize directories; %d != %d", gs.Directories, globalDirectories)
		}
		if gs.Deleted != globalDeleted {
			t.Errorf("Incorrect GlobalSize deleted; %d != %d", gs.Deleted, globalDeleted)
		}
		if gs.Bytes != globalBytes {
			t.Errorf("Incorrect GlobalSize bytes; %d != %d", gs.Bytes, globalBytes)
		}

		h := fileList(haveList(t, m, protocol.LocalDeviceID))
		sort.Sort(h)

		if fmt.Sprint(h) != fmt.Sprint(localTot) {
			t.Errorf("Have incorrect (local);\n A: %v !=\n E: %v", h, localTot)
		}

		var haveFiles, haveDirectories, haveDeleted int
		var haveBytes int64
		for _, f := range h {
			if f.IsInvalid() {
				continue
			}
			switch {
			case f.IsDeleted():
				haveDeleted++
			case f.IsDirectory():
				haveDirectories++
			default:
				haveFiles++
			}
			haveBytes += f.FileSize()
		}
		ls := localSize(t, m)
		if ls.Files != haveFiles {
			t.Errorf("Incorrect LocalSize files; %d != %d", ls.Files, haveFiles)
		}
		if ls.Directories != haveDirectories {
			t.Errorf("Incorrect LocalSize directories; %d != %d", ls.Directories, haveDirectories)
		}
		if ls.Deleted != haveDeleted {
			t.Errorf("Incorrect LocalSize deleted; %d != %d", ls.Deleted, haveDeleted)
		}
		if ls.Bytes != haveBytes {
			t.Errorf("Incorrect LocalSize bytes; %d != %d", ls.Bytes, haveBytes)
		}

		h = fileList(haveList(t, m, remoteDevice0))
		sort.Sort(h)

		if fmt.Sprint(h) != fmt.Sprint(remoteTot) {
			t.Errorf("Have incorrect (remote);\n A: %v !=\n E: %v", h, remoteTot)
		}

		n := fileList(needList(t, m, protocol.LocalDeviceID))
		sort.Sort(n)

		if fmt.Sprint(n) != fmt.Sprint(expectedLocalNeed) {
			t.Errorf("Need incorrect (local);\n A: %v !=\n E: %v", n, expectedLocalNeed)
		}

		checkNeed(t, m, protocol.LocalDeviceID, expectedLocalNeed)

		n = fileList(needList(t, m, remoteDevice0))
		sort.Sort(n)

		if fmt.Sprint(n) != fmt.Sprint(expectedRemoteNeed) {
			t.Errorf("Need incorrect (remote);\n A: %v !=\n E: %v", n, expectedRemoteNeed)
		}

		checkNeed(t, m, remoteDevice0, expectedRemoteNeed)

		snap := snapshot(t, m)
		defer snap.Release()
		f, ok := snap.Get(protocol.LocalDeviceID, "b")
		if !ok {
			t.Error("Unexpectedly not OK")
		}
		if fmt.Sprint(f) != fmt.Sprint(localTot[1]) {
			t.Errorf("Get incorrect;\n A: %v !=\n E: %v", f, localTot[1])
		}

		f, ok = snap.Get(remoteDevice0, "b")
		if !ok {
			t.Error("Unexpectedly not OK")
		}
		if fmt.Sprint(f) != fmt.Sprint(remote1[0]) {
			t.Errorf("Get incorrect (remote);\n A: %v !=\n E: %v", f, remote1[0])
		}

		f, ok = snap.GetGlobal("b")
		if !ok {
			t.Error("Unexpectedly not OK")
		}
		if fmt.Sprint(f) != fmt.Sprint(expectedGlobal[1]) {
			t.Errorf("GetGlobal incorrect;\n A: %v !=\n E: %v", f, expectedGlobal[1])
		}

		f, ok = snap.Get(protocol.LocalDeviceID, "zz")
		if ok {
			t.Error("Unexpectedly OK")
		}
		if f.Name != "" {
			t.Errorf("Get incorrect (local);\n A: %v !=\n E: %v", f, protocol.FileInfo{})
		}

		f, ok = snap.GetGlobal("zz")
		if ok {
			t.Error("Unexpectedly OK")
		}
		if f.Name != "" {
			t.Errorf("GetGlobal incorrect;\n A: %v !=\n E: %v", f, protocol.FileInfo{})
		}
	}

	check()

	snap := snapshot(t, m)

	av := []protocol.DeviceID{protocol.LocalDeviceID, remoteDevice0}
	a := snap.Availability("a")
	if !(len(a) == 2 && (a[0] == av[0] && a[1] == av[1] || a[0] == av[1] && a[1] == av[0])) {
		t.Errorf("Availability incorrect;\n A: %v !=\n E: %v", a, av)
	}
	a = snap.Availability("b")
	if len(a) != 1 || a[0] != remoteDevice0 {
		t.Errorf("Availability incorrect;\n A: %v !=\n E: %v", a, remoteDevice0)
	}
	a = snap.Availability("d")
	if len(a) != 1 || a[0] != protocol.LocalDeviceID {
		t.Errorf("Availability incorrect;\n A: %v !=\n E: %v", a, protocol.LocalDeviceID)
	}

	snap.Release()

	// Now bring another remote into play

	secRemote := fileList{
		local1[0],  // a
		remote1[0], // b
		local1[3],  // d
		remote1[1], // e
		local1[4],  // z
	}
	secRemote[0].Version = secRemote[0].Version.Update(remoteDevice1.Short())
	secRemote[1].Version = secRemote[1].Version.Update(remoteDevice1.Short())
	secRemote[4].Version = secRemote[4].Version.Update(remoteDevice1.Short())
	secRemote[4].Deleted = false
	secRemote[4].Blocks = genBlocks(1)
	setSequence(0, secRemote)

	expectedGlobal = fileList{
		secRemote[0], // a
		secRemote[1], // b
		remote0[2],   // c
		localTot[3],  // d
		secRemote[3], // e
		secRemote[4], // z
	}

	expectedLocalNeed = fileList{
		secRemote[0], // a
		secRemote[1], // b
		remote0[2],   // c
		secRemote[3], // e
		secRemote[4], // z
	}

	expectedRemoteNeed = fileList{
		secRemote[0], // a
		secRemote[1], // b
		local0[3],    // d
		secRemote[4], // z
	}

	expectedSecRemoteNeed := fileList{
		remote0[2], // c
	}

	m.Update(remoteDevice1, secRemote)

	check()

	h := fileList(haveList(t, m, remoteDevice1))
	sort.Sort(h)

	if fmt.Sprint(h) != fmt.Sprint(secRemote) {
		t.Errorf("Have incorrect (secRemote);\n A: %v !=\n E: %v", h, secRemote)
	}

	n := fileList(needList(t, m, remoteDevice1))
	sort.Sort(n)

	if fmt.Sprint(n) != fmt.Sprint(expectedSecRemoteNeed) {
		t.Errorf("Need incorrect (secRemote);\n A: %v !=\n E: %v", n, expectedSecRemoteNeed)
	}

	checkNeed(t, m, remoteDevice1, expectedSecRemoteNeed)
}

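// TestNeedWithInvalid (below) exercises need calculation when some remote
// announcements are marked invalid: a valid copy from another device is
// still needed, while a file that is invalid on every remote ("e") must not
// show up in the need list at all.
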
func TestNeedWithInvalid(t *testing.T) {
	ldb := newLowlevelMemory(t)
	defer ldb.Close()

	s := newFileSet(t, "test", ldb)

	localHave := fileList{
		protocol.FileInfo{Name: "a", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1000}}}, Blocks: genBlocks(1)},
	}
	remote0Have := fileList{
		protocol.FileInfo{Name: "b", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1001}}}, Blocks: genBlocks(2)},
		protocol.FileInfo{Name: "c", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1002}}}, Blocks: genBlocks(5), RawInvalid: true},
		protocol.FileInfo{Name: "d", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1003}}}, Blocks: genBlocks(7)},
	}
	remote1Have := fileList{
		protocol.FileInfo{Name: "c", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1002}}}, Blocks: genBlocks(7)},
		protocol.FileInfo{Name: "d", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1003}}}, Blocks: genBlocks(5), RawInvalid: true},
		protocol.FileInfo{Name: "e", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1004}}}, Blocks: genBlocks(5), RawInvalid: true},
	}

	expectedNeed := fileList{
		remote0Have[0],
		remote1Have[0],
		remote0Have[2],
	}

	replace(s, protocol.LocalDeviceID, localHave)
	replace(s, remoteDevice0, remote0Have)
	replace(s, remoteDevice1, remote1Have)

	need := fileList(needList(t, s, protocol.LocalDeviceID))
	sort.Sort(need)

	if fmt.Sprint(need) != fmt.Sprint(expectedNeed) {
		t.Errorf("Need incorrect;\n A: %v !=\n E: %v", need, expectedNeed)
	}
	checkNeed(t, s, protocol.LocalDeviceID, expectedNeed)
}

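// TestUpdateToInvalid (below) checks the blockmap side effects of flipping
// a file's local ignore flag: ignoring a file drops its blocks from the
// blockmap, while clearing the flag on a previously ignored file puts its
// blocks back.
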
func TestUpdateToInvalid(t *testing.T) {
	ldb := newLowlevelMemory(t)
	defer ldb.Close()

	folder := "test"
	s := newFileSet(t, folder, ldb)
	f := db.NewBlockFinder(ldb)

	localHave := fileList{
		protocol.FileInfo{Name: "a", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1000}}}, Blocks: genBlocks(1), Size: 1},
		protocol.FileInfo{Name: "b", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1001}}}, Blocks: genBlocks(2), Size: 1},
		protocol.FileInfo{Name: "c", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1002}}}, Blocks: genBlocks(5), LocalFlags: protocol.FlagLocalIgnored, Size: 1},
		protocol.FileInfo{Name: "d", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1003}}}, Blocks: genBlocks(7), Size: 1},
		protocol.FileInfo{Name: "e", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1003}}}, LocalFlags: protocol.FlagLocalIgnored, Size: 1},
	}

	replace(s, protocol.LocalDeviceID, localHave)

	have := fileList(haveList(t, s, protocol.LocalDeviceID))
	sort.Sort(have)

	if fmt.Sprint(have) != fmt.Sprint(localHave) {
		t.Errorf("Have incorrect before invalidation;\n A: %v !=\n E: %v", have, localHave)
	}

	oldBlockHash := localHave[1].Blocks[0].Hash

	localHave[1].LocalFlags = protocol.FlagLocalIgnored
	localHave[1].Blocks = nil

	localHave[4].LocalFlags = 0
	localHave[4].Blocks = genBlocks(3)

	s.Update(protocol.LocalDeviceID, append(fileList{}, localHave[1], localHave[4]))

	have = fileList(haveList(t, s, protocol.LocalDeviceID))
	sort.Sort(have)

	if fmt.Sprint(have) != fmt.Sprint(localHave) {
		t.Errorf("Have incorrect after invalidation;\n A: %v !=\n E: %v", have, localHave)
	}

	f.Iterate([]string{folder}, oldBlockHash, func(folder, file string, index int32) bool {
		if file == localHave[1].Name {
			t.Errorf("Found unexpected block in blockmap for invalidated file")
			return true
		}
		return false
	})

	if !f.Iterate([]string{folder}, localHave[4].Blocks[0].Hash, func(folder, file string, index int32) bool {
		return file == localHave[4].Name
	}) {
		t.Errorf("First block of un-invalidated file is missing from blockmap")
	}
}

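// TestInvalidAvailability (below) asserts that Availability only reports
// devices whose announced copy of a file is valid; a file announced as
// invalid everywhere has no availability at all.
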
func TestInvalidAvailability(t *testing.T) {
	ldb := newLowlevelMemory(t)
	defer ldb.Close()

	s := newFileSet(t, "test", ldb)

	remote0Have := fileList{
		protocol.FileInfo{Name: "both", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1001}}}, Blocks: genBlocks(2)},
		protocol.FileInfo{Name: "r1only", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1002}}}, Blocks: genBlocks(5), RawInvalid: true},
		protocol.FileInfo{Name: "r0only", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1003}}}, Blocks: genBlocks(7)},
		protocol.FileInfo{Name: "none", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1004}}}, Blocks: genBlocks(5), RawInvalid: true},
	}
	remote1Have := fileList{
		protocol.FileInfo{Name: "both", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1001}}}, Blocks: genBlocks(2)},
		protocol.FileInfo{Name: "r1only", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1002}}}, Blocks: genBlocks(7)},
		protocol.FileInfo{Name: "r0only", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1003}}}, Blocks: genBlocks(5), RawInvalid: true},
		protocol.FileInfo{Name: "none", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1004}}}, Blocks: genBlocks(5), RawInvalid: true},
	}

	replace(s, remoteDevice0, remote0Have)
	replace(s, remoteDevice1, remote1Have)

	snap := snapshot(t, s)
	defer snap.Release()

	if av := snap.Availability("both"); len(av) != 2 {
		t.Error("Incorrect availability for 'both':", av)
	}

	if av := snap.Availability("r0only"); len(av) != 1 || av[0] != remoteDevice0 {
		t.Error("Incorrect availability for 'r0only':", av)
	}

	if av := snap.Availability("r1only"); len(av) != 1 || av[0] != remoteDevice1 {
		t.Error("Incorrect availability for 'r1only':", av)
	}

	if av := snap.Availability("none"); len(av) != 0 {
		t.Error("Incorrect availability for 'none':", av)
	}
}

2014-03-28 13:36:57 +00:00
|
|
|
|
|
|
|
func TestGlobalReset(t *testing.T) {
	ldb := newLowlevelMemory(t)
	defer ldb.Close()

	m := newFileSet(t, "test", ldb)

	local := []protocol.FileInfo{
		{Name: "a", Sequence: 1, Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1000}}}},
		{Name: "b", Sequence: 2, Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1000}}}},
		{Name: "c", Sequence: 3, Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1000}}}},
		{Name: "d", Sequence: 4, Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1000}}}},
	}

	remote := []protocol.FileInfo{
		{Name: "a", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1000}}}},
		{Name: "b", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1001}}}},
		{Name: "c", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1002}}}},
		{Name: "e", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1000}}}},
	}

	replace(m, protocol.LocalDeviceID, local)
	g := globalList(t, m)
	sort.Sort(fileList(g))

	if diff, equal := messagediff.PrettyDiff(local, g); !equal {
		t.Errorf("Global incorrect;\nglobal: %v\n!=\nlocal: %v\ndiff:\n%s", g, local, diff)
	}

	replace(m, remoteDevice0, remote)
	replace(m, remoteDevice0, nil)

	g = globalList(t, m)
	sort.Sort(fileList(g))

	if diff, equal := messagediff.PrettyDiff(local, g); !equal {
		t.Errorf("Global incorrect;\nglobal: %v\n!=\nlocal: %v\ndiff:\n%s", g, local, diff)
	}
}

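// TestNeed checks that the local need list contains exactly those remote
// files that are newer than, or missing from, the local set.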
func TestNeed(t *testing.T) {
	ldb := newLowlevelMemory(t)
	defer ldb.Close()

	m := newFileSet(t, "test", ldb)

	local := []protocol.FileInfo{
		{Name: "b", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1000}}}},
		{Name: "a", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1000}}}},
		{Name: "c", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1000}}}},
		{Name: "d", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1000}}}},
	}

	remote := []protocol.FileInfo{
		{Name: "a", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1000}}}},
		{Name: "b", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1001}}}},
		{Name: "c", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1002}}}},
		{Name: "e", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1000}}}},
	}

	shouldNeed := []protocol.FileInfo{
		{Name: "b", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1001}}}},
		{Name: "c", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1002}}}},
		{Name: "e", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1000}}}},
	}

	replace(m, protocol.LocalDeviceID, local)
	replace(m, remoteDevice0, remote)

	need := needList(t, m, protocol.LocalDeviceID)

	sort.Sort(fileList(need))
	sort.Sort(fileList(shouldNeed))

	if fmt.Sprint(need) != fmt.Sprint(shouldNeed) {
		t.Errorf("Need incorrect;\n%v !=\n%v", need, shouldNeed)
	}

	checkNeed(t, m, protocol.LocalDeviceID, shouldNeed)
}

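// TestSequence checks that the local sequence number increases when the
// local file set is updated.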
func TestSequence(t *testing.T) {
	ldb := newLowlevelMemory(t)
	defer ldb.Close()

	m := newFileSet(t, "test", ldb)

	local1 := []protocol.FileInfo{
		{Name: "a", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1000}}}},
		{Name: "b", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1000}}}},
		{Name: "c", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1000}}}},
		{Name: "d", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1000}}}},
	}

	local2 := []protocol.FileInfo{
		local1[0],
		// [1] deleted
		local1[2],
		{Name: "d", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1002}}}},
		{Name: "e", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1000}}}},
	}

	replace(m, protocol.LocalDeviceID, local1)
	c0 := m.Sequence(protocol.LocalDeviceID)

	replace(m, protocol.LocalDeviceID, local2)
	c1 := m.Sequence(protocol.LocalDeviceID)
	if !(c1 > c0) {
		t.Fatal("Sequence should have incremented")
	}
}

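// TestListDropFolder checks that ListFolders reports every folder with data
// in the database and that DropFolder removes one folder's data without
// touching the other.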
func TestListDropFolder(t *testing.T) {
	ldb := newLowlevelMemory(t)
	defer ldb.Close()

	s0 := newFileSet(t, "test0", ldb)
	local1 := []protocol.FileInfo{
		{Name: "a", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1000}}}},
		{Name: "b", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1000}}}},
		{Name: "c", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1000}}}},
	}
	replace(s0, protocol.LocalDeviceID, local1)

	s1 := newFileSet(t, "test1", ldb)
	local2 := []protocol.FileInfo{
		{Name: "d", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1002}}}},
		{Name: "e", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1002}}}},
		{Name: "f", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1002}}}},
	}
	replace(s1, remoteDevice0, local2)

	// Check that we have both folders and their data is in the global list

	expectedFolderList := []string{"test0", "test1"}
	actualFolderList := ldb.ListFolders()
	if diff, equal := messagediff.PrettyDiff(expectedFolderList, actualFolderList); !equal {
		t.Fatalf("FolderList mismatch. Diff:\n%s", diff)
	}
	if l := len(globalList(t, s0)); l != 3 {
		t.Errorf("Incorrect global length %d != 3 for s0", l)
	}
	if l := len(globalList(t, s1)); l != 3 {
		t.Errorf("Incorrect global length %d != 3 for s1", l)
	}

	// Drop one of them and check that it's gone.

	db.DropFolder(ldb, "test1")

	expectedFolderList = []string{"test0"}
	actualFolderList = ldb.ListFolders()
	if diff, equal := messagediff.PrettyDiff(expectedFolderList, actualFolderList); !equal {
		t.Fatalf("FolderList mismatch. Diff:\n%s", diff)
	}
	if l := len(globalList(t, s0)); l != 3 {
		t.Errorf("Incorrect global length %d != 3 for s0", l)
	}
	if l := len(globalList(t, s1)); l != 0 {
		t.Errorf("Incorrect global length %d != 0 for s1", l)
	}
}

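// TestGlobalNeedWithInvalid checks that invalid remote files never become
// global: the newest valid copy wins, even over a conflicting or newer
// invalid one.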
func TestGlobalNeedWithInvalid(t *testing.T) {
	ldb := newLowlevelMemory(t)
	defer ldb.Close()

	s := newFileSet(t, "test1", ldb)

	rem0 := fileList{
		protocol.FileInfo{Name: "a", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1002}}}, Blocks: genBlocks(4)},
		protocol.FileInfo{Name: "b", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1002}}}, RawInvalid: true},
		protocol.FileInfo{Name: "c", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1002}}}, Blocks: genBlocks(4)},
		protocol.FileInfo{Name: "d", Version: protocol.Vector{Counters: []protocol.Counter{{ID: remoteDevice0.Short(), Value: 1002}}}},
	}
	replace(s, remoteDevice0, rem0)

	rem1 := fileList{
		protocol.FileInfo{Name: "a", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1002}}}, Blocks: genBlocks(4)},
		protocol.FileInfo{Name: "b", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1002}}}, Blocks: genBlocks(4)},
		protocol.FileInfo{Name: "c", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1002}}}, RawInvalid: true},
		protocol.FileInfo{Name: "d", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1002}}}, RawInvalid: true, ModifiedS: 10},
	}
	replace(s, remoteDevice1, rem1)

	total := fileList{
		// There's a valid copy of each file, so it should be merged
		protocol.FileInfo{Name: "a", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1002}}}, Blocks: genBlocks(4)},
		protocol.FileInfo{Name: "b", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1002}}}, Blocks: genBlocks(4)},
		protocol.FileInfo{Name: "c", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1002}}}, Blocks: genBlocks(4)},
		// in conflict and older, but still wins as the other is invalid
		protocol.FileInfo{Name: "d", Version: protocol.Vector{Counters: []protocol.Counter{{ID: remoteDevice0.Short(), Value: 1002}}}},
	}

	need := fileList(needList(t, s, protocol.LocalDeviceID))
	if fmt.Sprint(need) != fmt.Sprint(total) {
		t.Errorf("Need incorrect;\n A: %v !=\n E: %v", need, total)
	}
	checkNeed(t, s, protocol.LocalDeviceID, total)

	global := fileList(globalList(t, s))
	if fmt.Sprint(global) != fmt.Sprint(total) {
		t.Errorf("Global incorrect;\n A: %v !=\n E: %v", global, total)
	}
}

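// TestLongPath checks that a file with a very long name survives the round
// trip through the database unchanged.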
func TestLongPath(t *testing.T) {
	ldb := newLowlevelMemory(t)
	defer ldb.Close()

	s := newFileSet(t, "test", ldb)

	var b bytes.Buffer
	for i := 0; i < 100; i++ {
		b.WriteString("012345678901234567890123456789012345678901234567890")
	}
	name := b.String() // 5100 characters

	local := []protocol.FileInfo{
		{Name: name, Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1000}}}},
	}

	replace(s, protocol.LocalDeviceID, local)

	gf := globalList(t, s)
	if l := len(gf); l != 1 {
		t.Fatalf("Incorrect len %d != 1 for global list", l)
	}
	if gf[0].Name != local[0].Name {
		t.Errorf("Incorrect long filename;\n%q !=\n%q",
			gf[0].Name, local[0].Name)
	}
}

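// BenchmarkUpdateOneFile measures the cost of bumping the version of a
// single file and writing the set back to the database.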
func BenchmarkUpdateOneFile(b *testing.B) {
	local0 := fileList{
		protocol.FileInfo{Name: "a", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1000}}}, Blocks: genBlocks(1)},
		protocol.FileInfo{Name: "b", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1000}}}, Blocks: genBlocks(2)},
		protocol.FileInfo{Name: "c", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1000}}}, Blocks: genBlocks(3)},
		protocol.FileInfo{Name: "d", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1000}}}, Blocks: genBlocks(4)},
		// A longer name is more realistic and causes more allocations
		protocol.FileInfo{Name: "zajksdhaskjdh/askjdhaskjdashkajshd/kasjdhaskjdhaskdjhaskdjash/dkjashdaksjdhaskdjahskdjh", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1000}}}, Blocks: genBlocks(8)},
	}

	be, err := backend.Open("testdata/benchmarkupdate.db", backend.TuningAuto)
	if err != nil {
		b.Fatal(err)
	}
	ldb := newLowlevel(b, be)
	defer func() {
		ldb.Close()
		os.RemoveAll("testdata/benchmarkupdate.db")
	}()

	m := newFileSet(b, "test", ldb)
	replace(m, protocol.LocalDeviceID, local0)
	l := local0[4:5]

	for i := 0; i < b.N; i++ {
		l[0].Version = l[0].Version.Update(myID)
		m.Update(protocol.LocalDeviceID, local0)
	}

	b.ReportAllocs()
}

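// TestIndexID checks that index IDs default to zero for remote devices, can
// be set explicitly, and are generated randomly but stably for the local
// device.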
func TestIndexID(t *testing.T) {
	ldb := newLowlevelMemory(t)
	defer ldb.Close()

	s := newFileSet(t, "test", ldb)

	// The Index ID for some random device is zero by default.
	id := s.IndexID(remoteDevice0)
	if id != 0 {
		t.Errorf("index ID for remote device should default to zero, not %d", id)
	}

	// The Index ID for someone else should be settable
	s.SetIndexID(remoteDevice0, 42)
	id = s.IndexID(remoteDevice0)
	if id != 42 {
		t.Errorf("index ID for remote device should be remembered; got %d, expected %d", id, 42)
	}

	// Our own index ID should be generated randomly.
	id = s.IndexID(protocol.LocalDeviceID)
	if id == 0 {
		t.Errorf("index ID for local device should be random, not zero")
	}
	t.Logf("random index ID is 0x%016x", id)

	// But of course always the same after that.
	again := s.IndexID(protocol.LocalDeviceID)
	if again != id {
		t.Errorf("index ID changed; %d != %d", again, id)
	}
}

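// TestDropFiles checks that Drop removes one device's files while leaving
// the other device's files and the global list intact.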
func TestDropFiles(t *testing.T) {
	ldb := newLowlevelMemory(t)

	m := newFileSet(t, "test", ldb)

	local0 := fileList{
		protocol.FileInfo{Name: "a", Sequence: 1, Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1000}}}, Blocks: genBlocks(1)},
		protocol.FileInfo{Name: "b", Sequence: 2, Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1000}}}, Blocks: genBlocks(2)},
		protocol.FileInfo{Name: "c", Sequence: 3, Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1000}}}, Blocks: genBlocks(3)},
		protocol.FileInfo{Name: "d", Sequence: 4, Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1000}}}, Blocks: genBlocks(4)},
		protocol.FileInfo{Name: "z", Sequence: 5, Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1000}}}, Blocks: genBlocks(8)},
	}

	remote0 := fileList{
		protocol.FileInfo{Name: "a", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1000}}}, Blocks: genBlocks(1)},
		protocol.FileInfo{Name: "b", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1000}}}, Blocks: genBlocks(2)},
		protocol.FileInfo{Name: "c", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1001}}}, Blocks: genBlocks(5)},
	}

	// Insert files

	m.Update(protocol.LocalDeviceID, local0)
	m.Update(remoteDevice0, remote0)

	// Check that they're there

	h := haveList(t, m, protocol.LocalDeviceID)
	if len(h) != len(local0) {
		t.Errorf("Incorrect number of files after update, %d != %d", len(h), len(local0))
	}

	h = haveList(t, m, remoteDevice0)
	if len(h) != len(remote0) {
		t.Errorf("Incorrect number of files after update, %d != %d", len(h), len(remote0))
	}

	g := globalList(t, m)
	if len(g) != len(local0) {
		// local0 covers all files
		t.Errorf("Incorrect global files after update, %d != %d", len(g), len(local0))
	}

	// Drop the local files and recheck

	m.Drop(protocol.LocalDeviceID)

	h = haveList(t, m, protocol.LocalDeviceID)
	if len(h) != 0 {
		t.Errorf("Incorrect number of files after drop, %d != %d", len(h), 0)
	}

	h = haveList(t, m, remoteDevice0)
	if len(h) != len(remote0) {
		t.Errorf("Incorrect number of files after update, %d != %d", len(h), len(remote0))
	}

	g = globalList(t, m)
	if len(g) != len(remote0) {
		// the ones in remote0 remain
		t.Errorf("Incorrect global files after update, %d != %d", len(g), len(remote0))
	}
}

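// TestIssue4701 checks that ignored files are excluded from the local and
// global counts, and are counted again once unignored.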
func TestIssue4701(t *testing.T) {
	ldb := newLowlevelMemory(t)
	defer ldb.Close()

	s := newFileSet(t, "test", ldb)

	localHave := fileList{
		protocol.FileInfo{Name: "a", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1000}}}},
		protocol.FileInfo{Name: "b", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1000}}}, LocalFlags: protocol.FlagLocalIgnored},
	}

	s.Update(protocol.LocalDeviceID, localHave)

	if c := localSize(t, s); c.Files != 1 {
		t.Errorf("Expected 1 local file, got %v", c.Files)
	}
	if c := globalSize(t, s); c.Files != 1 {
		t.Errorf("Expected 1 global file, got %v", c.Files)
	}

	localHave[1].LocalFlags = 0
	s.Update(protocol.LocalDeviceID, localHave)

	if c := localSize(t, s); c.Files != 2 {
		t.Errorf("Expected 2 local files, got %v", c.Files)
	}
	if c := globalSize(t, s); c.Files != 2 {
		t.Errorf("Expected 2 global files, got %v", c.Files)
	}

	localHave[0].LocalFlags = protocol.FlagLocalIgnored
	localHave[1].LocalFlags = protocol.FlagLocalIgnored
	s.Update(protocol.LocalDeviceID, localHave)

	if c := localSize(t, s); c.Files != 0 {
		t.Errorf("Expected 0 local files, got %v", c.Files)
	}
	if c := globalSize(t, s); c.Files != 0 {
		t.Errorf("Expected 0 global files, got %v", c.Files)
	}
}

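// TestWithHaveSequence checks that WithHaveSequence iterates the local files
// in sequence order, starting at the given sequence number.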
func TestWithHaveSequence(t *testing.T) {
	ldb := newLowlevelMemory(t)
	defer ldb.Close()

	folder := "test"
	s := newFileSet(t, folder, ldb)

	// The files must not be in alphabetical order
	localHave := fileList{
		protocol.FileInfo{Name: "e", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1003}}}, RawInvalid: true},
		protocol.FileInfo{Name: "b", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1001}}}, Blocks: genBlocks(2)},
		protocol.FileInfo{Name: "d", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1003}}}, Blocks: genBlocks(7)},
		protocol.FileInfo{Name: "a", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1000}}}, Blocks: genBlocks(1)},
		protocol.FileInfo{Name: "c", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1002}}}, Blocks: genBlocks(5), RawInvalid: true},
	}

	replace(s, protocol.LocalDeviceID, localHave)

	i := 2
	snap := snapshot(t, s)
	defer snap.Release()
	snap.WithHaveSequence(int64(i), func(fi protocol.FileInfo) bool {
		if !fi.IsEquivalent(localHave[i-1], 0) {
			t.Fatalf("Got %v\nExpected %v", fi, localHave[i-1])
		}
		i++
		return true
	})
}

func TestStressWithHaveSequence(t *testing.T) {
	// This races two loops against each other: one that continuously does
	// updates, and one that continuously does sequence walks. The test fails
	// if the sequence walker sees a discontinuity.

	if testing.Short() {
		t.Skip("Takes a long time")
	}

	ldb := newLowlevelMemory(t)
	defer ldb.Close()

	folder := "test"
	s := newFileSet(t, folder, ldb)

	var localHave []protocol.FileInfo
	for i := 0; i < 100; i++ {
		localHave = append(localHave, protocol.FileInfo{Name: fmt.Sprintf("file%d", i), Blocks: genBlocks(i * 10)})
	}

	done := make(chan struct{})
	t0 := time.Now()
	go func() {
		for time.Since(t0) < 10*time.Second {
			for j, f := range localHave {
				localHave[j].Version = f.Version.Update(42)
			}

			s.Update(protocol.LocalDeviceID, localHave)
		}
		close(done)
	}()

	var prevSeq int64
loop:
	for {
		select {
		case <-done:
			break loop
		default:
		}
		snap := snapshot(t, s)
		snap.WithHaveSequence(prevSeq+1, func(fi protocol.FileInfo) bool {
			if fi.SequenceNo() < prevSeq+1 {
				t.Fatal("Skipped ", prevSeq+1, fi.SequenceNo())
			}
			prevSeq = fi.SequenceNo()
			return true
		})
		snap.Release()
	}
}

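// TestIssue4925 checks that prefixed listings treat "dir" and "dir/" the
// same and do not include similarly named siblings such as "dir.file".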
func TestIssue4925(t *testing.T) {
	ldb := newLowlevelMemory(t)
	defer ldb.Close()

	folder := "test"
	s := newFileSet(t, folder, ldb)

	localHave := fileList{
		protocol.FileInfo{Name: "dir"},
		protocol.FileInfo{Name: "dir.file"},
		protocol.FileInfo{Name: "dir/file"},
	}

	replace(s, protocol.LocalDeviceID, localHave)

	for _, prefix := range []string{"dir", "dir/"} {
		pl := haveListPrefixed(t, s, protocol.LocalDeviceID, prefix)
		if l := len(pl); l != 2 {
			t.Errorf("Expected 2, got %v local items below %v", l, prefix)
		}
		pl = globalListPrefixed(t, s, prefix)
		if l := len(pl); l != 2 {
			t.Errorf("Expected 2, got %v global items below %v", l, prefix)
		}
	}
}

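// TestMoveGlobalBack checks need lists and size accounting when a remote
// file first becomes global and the local version then takes over again.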
func TestMoveGlobalBack(t *testing.T) {
	ldb := newLowlevelMemory(t)
	defer ldb.Close()

	folder := "test"
	file := "foo"
	s := newFileSet(t, folder, ldb)

	localHave := fileList{{Name: file, Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1}}}, Blocks: genBlocks(1), ModifiedS: 10, Size: 1}}
	remote0Have := fileList{{Name: file, Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1}, {ID: remoteDevice0.Short(), Value: 1}}}, Blocks: genBlocks(2), ModifiedS: 0, Size: 2}}

	s.Update(protocol.LocalDeviceID, localHave)
	s.Update(remoteDevice0, remote0Have)

	if need := needList(t, s, protocol.LocalDeviceID); len(need) != 1 {
		t.Error("Expected 1 local need, got", need)
	} else if !need[0].IsEquivalent(remote0Have[0], 0) {
		t.Errorf("Local need incorrect;\n A: %v !=\n E: %v", need[0], remote0Have[0])
	}
	checkNeed(t, s, protocol.LocalDeviceID, remote0Have[:1])

	if need := needList(t, s, remoteDevice0); len(need) != 0 {
		t.Error("Expected no need for remote 0, got", need)
	}
	checkNeed(t, s, remoteDevice0, nil)

	ls := localSize(t, s)
	if haveBytes := localHave[0].Size; ls.Bytes != haveBytes {
		t.Errorf("Incorrect LocalSize bytes; %d != %d", ls.Bytes, haveBytes)
	}

	gs := globalSize(t, s)
	if globalBytes := remote0Have[0].Size; gs.Bytes != globalBytes {
		t.Errorf("Incorrect GlobalSize bytes; %d != %d", gs.Bytes, globalBytes)
	}

	// This is roughly what happens when a file becomes unignored, for
	// example. Either way the file is moved back from the first spot in the
	// global list, which is the scenario to be tested here.
	remote0Have[0].Version = remote0Have[0].Version.Update(remoteDevice0.Short()).DropOthers(remoteDevice0.Short())
	s.Update(remoteDevice0, remote0Have)

	if need := needList(t, s, remoteDevice0); len(need) != 1 {
		t.Error("Expected 1 need for remote 0, got", need)
	} else if !need[0].IsEquivalent(localHave[0], 0) {
		t.Errorf("Need for remote 0 incorrect;\n A: %v !=\n E: %v", need[0], localHave[0])
	}
	checkNeed(t, s, remoteDevice0, localHave[:1])

	if need := needList(t, s, protocol.LocalDeviceID); len(need) != 0 {
		t.Error("Expected no local need, got", need)
	}
	checkNeed(t, s, protocol.LocalDeviceID, nil)

	ls = localSize(t, s)
	if haveBytes := localHave[0].Size; ls.Bytes != haveBytes {
		t.Errorf("Incorrect LocalSize bytes; %d != %d", ls.Bytes, haveBytes)
	}

	gs = globalSize(t, s)
	if globalBytes := localHave[0].Size; gs.Bytes != globalBytes {
		t.Errorf("Incorrect GlobalSize bytes; %d != %d", gs.Bytes, globalBytes)
	}
}

// TestIssue5007 checks that updating the local device with an invalid file
// info that has the newest version does indeed remove that file from the
// list of needed files.
// https://github.com/syncthing/syncthing/issues/5007
func TestIssue5007(t *testing.T) {
	ldb := newLowlevelMemory(t)
	defer ldb.Close()

	folder := "test"
	file := "foo"
	s := newFileSet(t, folder, ldb)

	fs := fileList{{Name: file, Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1}}}}}

	s.Update(remoteDevice0, fs)

	if need := needList(t, s, protocol.LocalDeviceID); len(need) != 1 {
		t.Fatal("Expected 1 local need, got", need)
	} else if !need[0].IsEquivalent(fs[0], 0) {
		t.Fatalf("Local need incorrect;\n A: %v !=\n E: %v", need[0], fs[0])
	}
	checkNeed(t, s, protocol.LocalDeviceID, fs[:1])

	fs[0].LocalFlags = protocol.FlagLocalIgnored
	s.Update(protocol.LocalDeviceID, fs)

	if need := needList(t, s, protocol.LocalDeviceID); len(need) != 0 {
		t.Fatal("Expected no local need, got", need)
	}
	checkNeed(t, s, protocol.LocalDeviceID, nil)
}

// TestNeedDeleted checks that a file that doesn't exist locally isn't needed
// when the global file is deleted.
func TestNeedDeleted(t *testing.T) {
	ldb := newLowlevelMemory(t)
	defer ldb.Close()

	folder := "test"
	file := "foo"
	s := newFileSet(t, folder, ldb)

	fs := fileList{{Name: file, Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1}}}, Deleted: true}}

	s.Update(remoteDevice0, fs)

	if need := needList(t, s, protocol.LocalDeviceID); len(need) != 0 {
		t.Fatal("Expected no local need, got", need)
	}
	checkNeed(t, s, protocol.LocalDeviceID, nil)

	fs[0].Deleted = false
	fs[0].Version = fs[0].Version.Update(remoteDevice0.Short())
	s.Update(remoteDevice0, fs)

	if need := needList(t, s, protocol.LocalDeviceID); len(need) != 1 {
		t.Fatal("Expected 1 local need, got", need)
	} else if !need[0].IsEquivalent(fs[0], 0) {
		t.Fatalf("Local need incorrect;\n A: %v !=\n E: %v", need[0], fs[0])
	}
	checkNeed(t, s, protocol.LocalDeviceID, fs[:1])

	fs[0].Deleted = true
	fs[0].Version = fs[0].Version.Update(remoteDevice0.Short())
	s.Update(remoteDevice0, fs)

	if need := needList(t, s, protocol.LocalDeviceID); len(need) != 0 {
		t.Fatal("Expected no local need, got", need)
	}
	checkNeed(t, s, protocol.LocalDeviceID, nil)
}

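// TestReceiveOnlyAccounting checks that locally changed files in a receive
// only folder are accounted separately from the regular local and global
// totals, and that the numbers return to the initial state after a revert.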
func TestReceiveOnlyAccounting(t *testing.T) {
	ldb := newLowlevelMemory(t)
	defer ldb.Close()

	folder := "test"
	s := newFileSet(t, folder, ldb)

	local := protocol.DeviceID{1}
	remote := protocol.DeviceID{2}

	// Three files that have been created by the remote device

	version := protocol.Vector{Counters: []protocol.Counter{{ID: remote.Short(), Value: 1}}}
	files := fileList{
		protocol.FileInfo{Name: "f1", Size: 10, Sequence: 1, Version: version},
		protocol.FileInfo{Name: "f2", Size: 10, Sequence: 1, Version: version},
		protocol.FileInfo{Name: "f3", Size: 10, Sequence: 1, Version: version},
	}

	// We have synced them locally

	replace(s, protocol.LocalDeviceID, files)
	replace(s, remote, files)

	if n := localSize(t, s).Files; n != 3 {
		t.Fatal("expected 3 local files initially, not", n)
	}
	if n := localSize(t, s).Bytes; n != 30 {
		t.Fatal("expected 30 local bytes initially, not", n)
	}
	if n := globalSize(t, s).Files; n != 3 {
		t.Fatal("expected 3 global files initially, not", n)
	}
	if n := globalSize(t, s).Bytes; n != 30 {
		t.Fatal("expected 30 global bytes initially, not", n)
	}
	if n := receiveOnlyChangedSize(t, s).Files; n != 0 {
		t.Fatal("expected 0 receive only changed files initially, not", n)
	}
	if n := receiveOnlyChangedSize(t, s).Bytes; n != 0 {
		t.Fatal("expected 0 receive only changed bytes initially, not", n)
	}

	// Detected a local change in a receive only folder

	changed := files[0]
	changed.Version = changed.Version.Update(local.Short())
	changed.Size = 100
	changed.ModifiedBy = local.Short()
	changed.LocalFlags = protocol.FlagLocalReceiveOnly
	s.Update(protocol.LocalDeviceID, []protocol.FileInfo{changed})

	// Check that we see the files

	if n := localSize(t, s).Files; n != 3 {
		t.Fatal("expected 3 local files after local change, not", n)
	}
	if n := localSize(t, s).Bytes; n != 120 {
		t.Fatal("expected 120 local bytes after local change, not", n)
	}
	if n := globalSize(t, s).Files; n != 3 {
		t.Fatal("expected 3 global files after local change, not", n)
	}
	if n := globalSize(t, s).Bytes; n != 30 {
		t.Fatal("expected 30 global bytes after local change, not", n)
	}
	if n := receiveOnlyChangedSize(t, s).Files; n != 1 {
		t.Fatal("expected 1 receive only changed file after local change, not", n)
	}
	if n := receiveOnlyChangedSize(t, s).Bytes; n != 100 {
		t.Fatal("expected 100 receive only changed bytes after local change, not", n)
	}

	// Fake a revert. That's a two step process, first converting our
	// changed file into a less preferred variant, then pulling down the old
	// version.

	changed.Version = protocol.Vector{}
	changed.LocalFlags &^= protocol.FlagLocalReceiveOnly
	s.Update(protocol.LocalDeviceID, []protocol.FileInfo{changed})

	s.Update(protocol.LocalDeviceID, []protocol.FileInfo{files[0]})

	// Check that we see the files, same data as initially

	if n := localSize(t, s).Files; n != 3 {
		t.Fatal("expected 3 local files after revert, not", n)
	}
	if n := localSize(t, s).Bytes; n != 30 {
		t.Fatal("expected 30 local bytes after revert, not", n)
	}
	if n := globalSize(t, s).Files; n != 3 {
		t.Fatal("expected 3 global files after revert, not", n)
	}
	if n := globalSize(t, s).Bytes; n != 30 {
		t.Fatal("expected 30 global bytes after revert, not", n)
	}
	if n := receiveOnlyChangedSize(t, s).Files; n != 0 {
		t.Fatal("expected 0 receive only changed files after revert, not", n)
	}
	if n := receiveOnlyChangedSize(t, s).Bytes; n != 0 {
		t.Fatal("expected 0 receive only changed bytes after revert, not", n)
	}
}

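// TestNeedAfterUnignore checks that unignoring a file that conflicts with
// the remote copy leaves the newer remote version as the one we need.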
func TestNeedAfterUnignore(t *testing.T) {
	ldb := newLowlevelMemory(t)
	defer ldb.Close()

	folder := "test"
	file := "foo"
	s := newFileSet(t, folder, ldb)

	remID := remoteDevice0.Short()

	// Initial state: Devices in sync, locally ignored
	local := protocol.FileInfo{Name: file, Version: protocol.Vector{Counters: []protocol.Counter{{ID: remID, Value: 1}, {ID: myID, Value: 1}}}, ModifiedS: 10}
	local.SetIgnored()
	remote := protocol.FileInfo{Name: file, Version: protocol.Vector{Counters: []protocol.Counter{{ID: remID, Value: 1}, {ID: myID, Value: 1}}}, ModifiedS: 10}
	s.Update(protocol.LocalDeviceID, fileList{local})
	s.Update(remoteDevice0, fileList{remote})

	// Unignore locally -> conflicting changes. Remote is newer, thus winning.
	local.Version = local.Version.Update(myID)
	local.Version = local.Version.DropOthers(myID)
	local.LocalFlags = 0
	local.ModifiedS = 0
	s.Update(protocol.LocalDeviceID, fileList{local})

	if need := needList(t, s, protocol.LocalDeviceID); len(need) != 1 {
		t.Fatal("Expected one local need, got", need)
	} else if !need[0].IsEquivalent(remote, 0) {
		t.Fatalf("Got %v, expected %v", need[0], remote)
	}
	checkNeed(t, s, protocol.LocalDeviceID, []protocol.FileInfo{remote})
}

func TestRemoteInvalidNotAccounted(t *testing.T) {
	// Remote files with the invalid bit should not count.

	ldb := newLowlevelMemory(t)
	defer ldb.Close()
	s := newFileSet(t, "test", ldb)

	files := []protocol.FileInfo{
		{Name: "a", Size: 1234, Sequence: 42, Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1003}}}},                   // valid, should count
		{Name: "b", Size: 1234, Sequence: 43, Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1003}}}, RawInvalid: true}, // invalid, doesn't count
	}
	s.Update(remoteDevice0, files)

	global := globalSize(t, s)
	if global.Files != 1 {
		t.Error("Expected one file in global size, not", global.Files)
	}
	if global.Bytes != 1234 {
		t.Error("Expected 1234 bytes in global size, not", global.Bytes)
	}
}

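// TestNeedWithNewerInvalid checks that a newer but invalid remote version
// does not hide the need for the newest valid version of a file.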
func TestNeedWithNewerInvalid(t *testing.T) {
	ldb := newLowlevelMemory(t)
	defer ldb.Close()

	s := newFileSet(t, "default", ldb)

	rem0ID := remoteDevice0.Short()
	rem1ID := remoteDevice1.Short()

	// Initial state: file present on rem0 and rem1, but not locally.
	file := protocol.FileInfo{Name: "foo"}
	file.Version = file.Version.Update(rem0ID)
	s.Update(remoteDevice0, fileList{file})
	s.Update(remoteDevice1, fileList{file})

	need := needList(t, s, protocol.LocalDeviceID)
	if len(need) != 1 {
		t.Fatal("Locally missing file should be needed")
	}
	if !need[0].IsEquivalent(file, 0) {
		t.Fatalf("Got needed file %v, expected %v", need[0], file)
	}
	checkNeed(t, s, protocol.LocalDeviceID, []protocol.FileInfo{file})

	// rem1 sends an invalid file with increased version
	inv := file
	inv.Version = inv.Version.Update(rem1ID)
	inv.RawInvalid = true
	s.Update(remoteDevice1, fileList{inv})

	// We still have an old file, we need the newest valid file
	need = needList(t, s, protocol.LocalDeviceID)
	if len(need) != 1 {
		t.Fatal("Locally missing file should be needed regardless of invalid files")
	}
	if !need[0].IsEquivalent(file, 0) {
		t.Fatalf("Got needed file %v, expected %v", need[0], file)
	}
	checkNeed(t, s, protocol.LocalDeviceID, []protocol.FileInfo{file})
}

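// TestNeedAfterDeviceRemove checks that needs caused by a remote device's
// files disappear when that device is dropped from the set.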
func TestNeedAfterDeviceRemove(t *testing.T) {
	ldb := newLowlevelMemory(t)
	defer ldb.Close()

	file := "foo"
	s := newFileSet(t, "test", ldb)

	fs := fileList{{Name: file, Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1}}}}}

	s.Update(protocol.LocalDeviceID, fs)

	fs[0].Version = fs[0].Version.Update(myID)

	s.Update(remoteDevice0, fs)

	if need := needList(t, s, protocol.LocalDeviceID); len(need) != 1 {
		t.Fatal("Expected one local need, got", need)
	}

	s.Drop(remoteDevice0)

	if need := needList(t, s, protocol.LocalDeviceID); len(need) != 0 {
		t.Fatal("Expected no local need, got", need)
	}
	checkNeed(t, s, protocol.LocalDeviceID, nil)
}

func TestCaseSensitive(t *testing.T) {
	// Normal case sensitive lookup should work

	ldb := newLowlevelMemory(t)
	defer ldb.Close()
	s := newFileSet(t, "test", ldb)

	local := []protocol.FileInfo{
		{Name: filepath.FromSlash("D1/f1"), Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1000}}}},
		{Name: filepath.FromSlash("F1"), Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1000}}}},
		{Name: filepath.FromSlash("d1/F1"), Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1000}}}},
		{Name: filepath.FromSlash("d1/f1"), Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1000}}}},
		{Name: filepath.FromSlash("f1"), Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1000}}}},
	}

	replace(s, protocol.LocalDeviceID, local)

	gf := globalList(t, s)
	if l := len(gf); l != len(local) {
		t.Fatalf("Incorrect len %d != %d for global list", l, len(local))
	}
	for i := range local {
		if gf[i].Name != local[i].Name {
			t.Errorf("Incorrect filename;\n%q !=\n%q",
				gf[i].Name, local[i].Name)
		}
	}
}

func TestSequenceIndex(t *testing.T) {
|
|
|
|
// This test attempts to verify correct operation of the sequence index.
|
|
|
|
|
|
|
|
// It's a stress test and needs to run for a long time, but we don't
|
|
|
|
// really have time for that in normal builds.
|
|
|
|
runtime := time.Minute
|
|
|
|
if testing.Short() {
|
|
|
|
runtime = time.Second
|
|
|
|
}
|
|
|
|
|
|
|
|
// Set up a db and a few files that we will manipulate.
|
|
|
|
|
2020-12-21 11:59:22 +00:00
|
|
|
ldb := newLowlevelMemory(t)
|
2020-02-11 13:31:43 +00:00
|
|
|
defer ldb.Close()
|
2022-01-31 09:12:52 +00:00
|
|
|
s := newFileSet(t, "test", ldb)
|
2019-01-18 10:34:18 +00:00
|
|
|
|
|
|
|
local := []protocol.FileInfo{
|
|
|
|
{Name: filepath.FromSlash("banana"), Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1000}}}},
|
|
|
|
{Name: filepath.FromSlash("pineapple"), Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1000}}}},
|
|
|
|
{Name: filepath.FromSlash("orange"), Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1000}}}},
|
|
|
|
{Name: filepath.FromSlash("apple"), Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1000}}}},
|
|
|
|
{Name: filepath.FromSlash("jackfruit"), Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1000}}}},
|
|
|
|
}
|
|
|
|
|
|
|
|
// Start a background routine that makes updates to these files as fast
|
|
|
|
// as it can. We always update the same files in the same order.
|
|
|
|
|
|
|
|
done := make(chan struct{})
|
|
|
|
defer close(done)
|
|
|
|
|
|
|
|
go func() {
|
|
|
|
for {
|
|
|
|
select {
|
|
|
|
case <-done:
|
|
|
|
return
|
|
|
|
default:
|
|
|
|
}
|
|
|
|
|
|
|
|
for i := range local {
|
|
|
|
local[i].Version = local[i].Version.Update(42)
|
|
|
|
}
|
|
|
|
s.Update(protocol.LocalDeviceID, local)
|
|
|
|
}
|
|
|
|
}()
|
|
|
|
|
|
|
|
// Start a routine to walk the sequence index and inspect the result.
|
|
|
|
|
refactor: use modern Protobuf encoder (#9817)
At a high level, this is what I've done and why:
- I'm moving the protobuf generation for the `protocol`, `discovery` and
`db` packages to the modern alternatives, and using `buf` to generate
because it's nice and simple.
- After trying various approaches on how to integrate the new types with
the existing code, I opted for splitting off our own data model types
from the on-the-wire generated types. This means we can have a
`FileInfo` type with nicer ergonomics and lots of methods, while the
protobuf generated type stays clean and close to the wire protocol. It
does mean copying between the two when required, which certainly adds a
small amount of inefficiency. If we want to walk this back in the future
and use the raw generated type throughout, that's possible, this however
makes the refactor smaller (!) as it doesn't change everything about the
type for everyone at the same time.
- I have simply removed in cold blood a significant number of old
database migrations. These depended on previous generations of generated
messages of various kinds and were annoying to support in the new
fashion. The oldest supported database version now is the one from
Syncthing 1.9.0 from Sep 7, 2020.
- I changed config structs to be regular manually defined structs.
For the sake of discussion, some things I tried that turned out not to
work...
### Embedding / wrapping
Embedding the protobuf generated structs in our existing types as a data
container and keeping our methods and stuff:
```
package protocol
type FileInfo struct {
*generated.FileInfo
}
```
This generates a lot of problems because the internal shape of the
generated struct is quite different (different names, different types,
more pointers), because initializing it doesn't work like you'd expect
(i.e., you end up with an embedded nil pointer and a panic), and because
the types of child types don't get wrapped. That is, even if we also
have a similar wrapper around a `Vector`, that's not the type you get
when accessing `someFileInfo.Version`, you get the `*generated.Vector`
that doesn't have methods, etc.
### Aliasing
```
package protocol
type FileInfo = generated.FileInfo
```
Doesn't help because you can't attach methods to it, plus all the above.
### Generating the types into the target package like we do now and
attaching methods
This fails because of the different shape of the generated type (as in
the embedding case above) plus the generated struct already has a bunch
of methods that we can't necessarily override properly (like `String()`
and a bunch of getters).
### Methods to functions
I considered just moving all the methods we attach to functions in a
specific package, so that for example
```
package protocol
func (f FileInfo) Equal(other FileInfo) bool
```
would become
```
package fileinfos
func Equal(a, b *generated.FileInfo) bool
```
and this would mostly work, but becomes quite verbose and cumbersome,
and somewhat limits discoverability (you can't see what methods are
available on the type in auto completions, etc). In the end I did this
in some cases, like in the database layer where a lot of things like
`func (fv *FileVersion) IsEmpty() bool` becomes `func fvIsEmpty(fv
*generated.FileVersion)` because they were anyway just internal methods.
Fixes #8247
2024-12-01 15:50:17 +00:00
|
|
|
seen := make(map[string]protocol.FileInfo)
|
|
|
|
latest := make([]protocol.FileInfo, 0, len(local))
|
2019-01-18 10:34:18 +00:00
|
|
|
var seq int64
|
|
|
|
t0 := time.Now()
|
|
|
|
|
|
|
|
for time.Since(t0) < runtime {
|
|
|
|
		// Walk the changes since our last iteration. This should give us
		// one instance each of the files that are changed all the time, or
		// a subset of those files if we manage to run before a complete
		// update has happened since our last iteration.
		latest = latest[:0]
		snap := snapshot(t, s)
		snap.WithHaveSequence(seq+1, func(f protocol.FileInfo) bool {
			seen[f.FileName()] = f
			latest = append(latest, f)
			seq = f.SequenceNo()
			return true
		})
		snap.Release()

		// Calculate the spread in sequence number.
		var max, min int64
		for _, v := range seen {
			s := v.SequenceNo()
			if max == 0 || max < s {
				max = s
			}
			if min == 0 || min > s {
				min = s
			}
		}

		// We shouldn't see a spread larger than the number of files, as
		// that would mean we have missed updates. For example, if we were
		// to see the following:
		//
		// banana N
		// pineapple N+1
		// orange N+2
		// apple N+10
		// jackfruit N+11
		//
		// that would mean that there have been updates to banana, pineapple
		// and orange that we didn't see in this pass. If those files aren't
		// updated again, those updates are permanently lost.
		if max-min > int64(len(local)) {
			for _, v := range seen {
				t.Log("seen", v.FileName(), v.SequenceNo())
			}
			for _, v := range latest {
				t.Log("latest", v.FileName(), v.SequenceNo())
			}
			t.Fatal("large spread")
		}
		time.Sleep(time.Millisecond)
	}
}

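// TestIgnoreAfterReceiveOnly checks that marking a receive-only changed file
// as ignored clears the receive-only changed state while keeping the file in
// the database.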
func TestIgnoreAfterReceiveOnly(t *testing.T) {
	ldb := newLowlevelMemory(t)
	defer ldb.Close()

	file := "foo"
	s := newFileSet(t, "test", ldb)

	fs := fileList{{
		Name:       file,
		Version:    protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1}}},
		LocalFlags: protocol.FlagLocalReceiveOnly,
	}}

	s.Update(protocol.LocalDeviceID, fs)

	fs[0].LocalFlags = protocol.FlagLocalIgnored

	s.Update(protocol.LocalDeviceID, fs)

	snap := snapshot(t, s)
	defer snap.Release()
	if f, ok := snap.Get(protocol.LocalDeviceID, file); !ok {
		t.Error("File missing in db")
	} else if f.IsReceiveOnlyChanged() {
		t.Error("File is still receive-only changed")
	} else if !f.IsIgnored() {
		t.Error("File is not ignored")
	}
}

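// TestUpdateWithOneFileTwice ensures that an update batch containing the same
// file twice results in a single entry in the sequence index.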
// https://github.com/syncthing/syncthing/issues/6650
func TestUpdateWithOneFileTwice(t *testing.T) {
	ldb := newLowlevelMemory(t)
	defer ldb.Close()

	file := "foo"
	s := newFileSet(t, "test", ldb)

	fs := fileList{{
		Name:     file,
		Version:  protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1}}},
		Sequence: 1,
	}}

	s.Update(protocol.LocalDeviceID, fs)

	fs = append(fs, fs[0])
	for i := range fs {
		fs[i].Sequence++
		fs[i].Version = fs[i].Version.Update(myID)
	}
	fs[1].Sequence++
	fs[1].Version = fs[1].Version.Update(myID)

	s.Update(protocol.LocalDeviceID, fs)

	snap := snapshot(t, s)
	defer snap.Release()
	count := 0
	snap.WithHaveSequence(0, func(_ protocol.FileInfo) bool {
		count++
		return true
	})
	if count != 1 {
		t.Error("Expected to have one file, got", count)
	}
}

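// TestNeedRemoteOnly ensures that a remote device which is the only holder of
// a file is not reported as needing anything.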
// https://github.com/syncthing/syncthing/issues/6668
func TestNeedRemoteOnly(t *testing.T) {
	ldb := newLowlevelMemory(t)
	defer ldb.Close()

	s := newFileSet(t, "test", ldb)

	remote0Have := fileList{
		protocol.FileInfo{Name: "b", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1001}}}, Blocks: genBlocks(2)},
	}
	s.Update(remoteDevice0, remote0Have)

	need := needSize(t, s, remoteDevice0)
	if !need.Equal(db.Counts{}) {
		t.Error("Expected nothing needed, got", need)
	}
}

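// TestNeedRemoteAfterReset ensures that dropping a remote device's files makes
// it need the files we still have locally.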
// https://github.com/syncthing/syncthing/issues/6784
func TestNeedRemoteAfterReset(t *testing.T) {
	ldb := newLowlevelMemory(t)
	defer ldb.Close()

	s := newFileSet(t, "test", ldb)

	files := fileList{
		protocol.FileInfo{Name: "b", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1001}}}, Blocks: genBlocks(2)},
	}
	s.Update(protocol.LocalDeviceID, files)
	s.Update(remoteDevice0, files)

	need := needSize(t, s, remoteDevice0)
	if !need.Equal(db.Counts{}) {
		t.Error("Expected nothing needed, got", need)
	}

	s.Drop(remoteDevice0)

	need = needSize(t, s, remoteDevice0)
	if exp := (db.Counts{Files: 1}); !need.Equal(exp) {
		t.Errorf("Expected %v, got %v", exp, need)
	}
}

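// TestIgnoreLocalChanged checks that a receive-only (locally changed) file
// counts as local but not global, and that ignoring it removes it from both
// counts.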
// https://github.com/syncthing/syncthing/issues/6850
func TestIgnoreLocalChanged(t *testing.T) {
	ldb := newLowlevelMemory(t)
	defer ldb.Close()

	s := newFileSet(t, "test", ldb)

	// Add locally changed file
	files := fileList{
		protocol.FileInfo{Name: "b", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1001}}}, Blocks: genBlocks(2), LocalFlags: protocol.FlagLocalReceiveOnly},
	}
	s.Update(protocol.LocalDeviceID, files)

	if c := globalSize(t, s).Files; c != 0 {
		t.Error("Expected no global file, got", c)
	}
	if c := localSize(t, s).Files; c != 1 {
		t.Error("Expected one local file, got", c)
	}

	// Change file to ignored
	files[0].LocalFlags = protocol.FlagLocalIgnored
	s.Update(protocol.LocalDeviceID, files)

	if c := globalSize(t, s).Files; c != 0 {
		t.Error("Expected no global file, got", c)
	}
	if c := localSize(t, s).Files; c != 0 {
		t.Error("Expected no local file, got", c)
	}
}

// Dropping the index ID on Drop is bad, because Drop gets called when receiving
// an Index (as opposed to an IndexUpdate), and we don't want to lose the index
// ID when that happens.
func TestNoIndexIDResetOnDrop(t *testing.T) {
	ldb := newLowlevelMemory(t)
	defer ldb.Close()

	s := newFileSet(t, "test", ldb)

	s.SetIndexID(remoteDevice0, 1)
	s.Drop(remoteDevice0)
	if got := s.IndexID(remoteDevice0); got != 1 {
		t.Errorf("Expected unchanged (%v), got %v", 1, got)
	}
}

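// TestConcurrentIndexID checks that concurrent IndexID calls on a fresh
// FileSet agree on the same, consistently allocated ID.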
func TestConcurrentIndexID(t *testing.T) {
	done := make(chan struct{})
	var ids [2]protocol.IndexID
	setID := func(s *db.FileSet, i int) {
		ids[i] = s.IndexID(protocol.LocalDeviceID)
		done <- struct{}{}
	}

	max := 100
	if testing.Short() {
		max = 10
	}
	for i := 0; i < max; i++ {
		ldb := newLowlevelMemory(t)
		s := newFileSet(t, "test", ldb)
		go setID(s, 0)
		go setID(s, 1)
		<-done
		<-done
		ldb.Close()
		if ids[0] != ids[1] {
			t.Fatalf("IDs differ after %v rounds", i)
		}
	}
}

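// TestNeedRemoveLastValid checks that dropping the remote that holds the only
// valid copy of the newest version also removes the need for it on other
// devices.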
func TestNeedRemoveLastValid(t *testing.T) {
	db := newLowlevelMemory(t)
	defer db.Close()

	folder := "test"

	fs := newFileSet(t, folder, db)

	files := []protocol.FileInfo{
		{Name: "foo", Version: protocol.Vector{}.Update(myID), Sequence: 1},
	}
	fs.Update(remoteDevice0, files)
	files[0].Version = files[0].Version.Update(myID)
	fs.Update(remoteDevice1, files)
	files[0].LocalFlags = protocol.FlagLocalIgnored
	fs.Update(protocol.LocalDeviceID, files)

	snap := snapshot(t, fs)
	c := snap.NeedSize(remoteDevice0)
	if c.Files != 1 {
		t.Errorf("Expected 1 needed file initially, got %v", c.Files)
	}
	snap.Release()

	fs.Drop(remoteDevice1)

	snap = snapshot(t, fs)
	c = snap.NeedSize(remoteDevice0)
	if c.Files != 0 {
		t.Errorf("Expected no needed files, got %v", c.Files)
	}
	snap.Release()
}

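// replace drops everything the given device has announced and then records
// the given set of files, i.e. a full replacement rather than an incremental
// update.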
func replace(fs *db.FileSet, device protocol.DeviceID, files []protocol.FileInfo) {
	fs.Drop(device)
	fs.Update(device, files)
}

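// The size helpers below take a short-lived snapshot of the FileSet and
// return the corresponding counts.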
func localSize(t testing.TB, fs *db.FileSet) db.Counts {
	snap := snapshot(t, fs)
	defer snap.Release()
	return snap.LocalSize()
}

func globalSize(t testing.TB, fs *db.FileSet) db.Counts {
	snap := snapshot(t, fs)
	defer snap.Release()
	return snap.GlobalSize()
}

func needSize(t testing.TB, fs *db.FileSet, id protocol.DeviceID) db.Counts {
	snap := snapshot(t, fs)
	defer snap.Release()
	return snap.NeedSize(id)
}

func receiveOnlyChangedSize(t testing.TB, fs *db.FileSet) db.Counts {
	snap := snapshot(t, fs)
	defer snap.Release()
	return snap.ReceiveOnlyChangedSize()
}

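// filesToCounts calculates the db.Counts that the given file list should
// correspond to, for comparison with counts reported by the database.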
func filesToCounts(files []protocol.FileInfo) db.Counts {
	cp := db.Counts{}
	for _, f := range files {
		switch {
		case f.IsDeleted():
			cp.Deleted++
		case f.IsDirectory() && !f.IsSymlink():
			cp.Directories++
		case f.IsSymlink():
			cp.Symlinks++
		default:
			cp.Files++
		}
		cp.Bytes += f.FileSize()
	}
	return cp
}

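// checkNeed asserts that the need counts reported for dev match what the
// expected file list implies.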
func checkNeed(t testing.TB, s *db.FileSet, dev protocol.DeviceID, expected []protocol.FileInfo) {
	t.Helper()
	counts := needSize(t, s, dev)
	if exp := filesToCounts(expected); !exp.Equal(counts) {
		t.Errorf("Count incorrect (%v): expected %v, got %v", dev, exp, counts)
	}
}

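// The constructors below set up database, FileSet and Snapshot instances for
// tests, failing the test on any error.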
func newLowlevel(t testing.TB, backend backend.Backend) *db.Lowlevel {
	t.Helper()
	ll, err := db.NewLowlevel(backend, events.NoopLogger)
	if err != nil {
		t.Fatal(err)
	}
	return ll
}

func newLowlevelMemory(t testing.TB) *db.Lowlevel {
	return newLowlevel(t, backend.OpenMemory())
}

func newFileSet(t testing.TB, folder string, ll *db.Lowlevel) *db.FileSet {
	t.Helper()
	fset, err := db.NewFileSet(folder, ll)
	if err != nil {
		t.Fatal(err)
	}
	return fset
}

func snapshot(t testing.TB, fset *db.FileSet) *db.Snapshot {
	t.Helper()
	snap, err := fset.Snapshot()
	if err != nil {
		t.Fatal(err)
	}
	return snap
}