Mirror of https://github.com/octoleo/syncthing.git, synced 2024-11-14 01:04:14 +00:00
This adds a cache to the expensive key generation operations. It uses fixed-size LRU (2Q) caches, so memory usage stays bounded even under absurd conditions. Also closes #8600.
commit 466b56ded1
parent 3ffe859fe8
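The diff below threads a new protocol.KeyGenerator through the decrypt CLI, the connection service and the model, and memoizes derived keys in fixed-size 2Q LRU caches from github.com/hashicorp/golang-lru/v2. As a quick illustration of that caching pattern, here is a minimal, self-contained Go sketch; the cache size, salt prefix and scrypt parameters are taken from the diff, while the package wrapper and main() are made up for the example and are not part of the commit.

// Illustrative sketch only (not part of the commit): a KeyGenerator that
// memoizes scrypt-derived folder keys in a fixed-size 2Q LRU cache, as the
// diff below does in lib/protocol.
package main

import (
    "fmt"
    "sync"

    lru "github.com/hashicorp/golang-lru/v2"
    "golang.org/x/crypto/scrypt"
)

const keySize = 32

type folderKeyCacheKey struct {
    folderID string
    password string
}

type KeyGenerator struct {
    mut        sync.Mutex
    folderKeys *lru.TwoQueueCache[folderKeyCacheKey, *[keySize]byte]
}

func NewKeyGenerator() *KeyGenerator {
    // Fixed-size cache: old entries are evicted, so memory stays bounded.
    cache, _ := lru.New2Q[folderKeyCacheKey, *[keySize]byte](1000)
    return &KeyGenerator{folderKeys: cache}
}

// KeyFromPassword returns the cached key when present; otherwise it runs the
// expensive scrypt derivation once and stores the result.
func (g *KeyGenerator) KeyFromPassword(folderID, password string) *[keySize]byte {
    cacheKey := folderKeyCacheKey{folderID, password}
    g.mut.Lock()
    defer g.mut.Unlock()
    if key, ok := g.folderKeys.Get(cacheKey); ok {
        return key
    }
    bs, err := scrypt.Key([]byte(password), []byte("syncthing"+folderID), 32768, 8, 1, keySize)
    if err != nil {
        panic("key derivation failure: " + err.Error())
    }
    var key [keySize]byte
    copy(key[:], bs)
    g.folderKeys.Add(cacheKey, &key)
    return &key
}

func main() {
    kg := NewKeyGenerator()
    k1 := kg.KeyFromPassword("folder", "password") // slow path: scrypt runs
    k2 := kg.KeyFromPassword("folder", "password") // fast path: cache hit
    fmt.Println(k1 == k2)                          // true: the same cached pointer is returned
}

The actual change additionally caches per-file HKDF keys (keyed by filename plus folder key) and passes a single shared *protocol.KeyGenerator into NewService, NewModel and NewConnection, as shown in the diff below.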
@@ -35,6 +35,7 @@ type CLI struct {
     TokenPath string `placeholder:"PATH" help:"Path to the token file within the folder (used to determine folder ID)"`

     folderKey *[32]byte
+    keyGen *protocol.KeyGenerator
 }

 type storedEncryptionToken struct {
@@ -68,7 +69,8 @@ func (c *CLI) Run() error {
         }
     }

-    c.folderKey = protocol.KeyFromPassword(c.FolderID, c.Password)
+    c.keyGen = protocol.NewKeyGenerator()
+    c.folderKey = c.keyGen.KeyFromPassword(c.FolderID, c.Password)

     return c.walk()
 }
@@ -151,7 +153,7 @@ func (c *CLI) process(srcFs fs.Filesystem, dstFs fs.Filesystem, path string) err
     // in native format, while protocol expects wire format (slashes).
     encFi.Name = osutil.NormalizedFilename(encFi.Name)

-    plainFi, err := protocol.DecryptFileInfo(*encFi, c.folderKey)
+    plainFi, err := protocol.DecryptFileInfo(c.keyGen, *encFi, c.folderKey)
     if err != nil {
         return fmt.Errorf("%s: decrypting metadata: %w", path, err)
     }
@@ -162,7 +164,7 @@ func (c *CLI) process(srcFs fs.Filesystem, dstFs fs.Filesystem, path string) err

     var plainFd fs.File
     if dstFs != nil {
-        if err := dstFs.MkdirAll(filepath.Dir(plainFi.Name), 0700); err != nil {
+        if err := dstFs.MkdirAll(filepath.Dir(plainFi.Name), 0o700); err != nil {
             return fmt.Errorf("%s: %w", plainFi.Name, err)
         }

@@ -209,7 +211,7 @@ func (c *CLI) decryptFile(encFi *protocol.FileInfo, plainFi *protocol.FileInfo,
         return fmt.Errorf("block count mismatch: encrypted %d != plaintext %d", len(encFi.Blocks), len(plainFi.Blocks))
     }

-    fileKey := protocol.FileKey(plainFi.Name, c.folderKey)
+    fileKey := c.keyGen.FileKey(plainFi.Name, c.folderKey)
     for i, encBlock := range encFi.Blocks {
         // Read the encrypted block
         buf := make([]byte, encBlock.Size)
@@ -161,6 +161,7 @@ type service struct {
     natService *nat.Service
     evLogger events.Logger
     registry *registry.Registry
+    keyGen *protocol.KeyGenerator

     dialNow chan struct{}
     dialNowDevices map[protocol.DeviceID]struct{}
@@ -171,7 +172,7 @@ type service struct {
     listenerTokens map[string]suture.ServiceToken
 }

-func NewService(cfg config.Wrapper, myID protocol.DeviceID, mdl Model, tlsCfg *tls.Config, discoverer discover.Finder, bepProtocolName string, tlsDefaultCommonName string, evLogger events.Logger, registry *registry.Registry) Service {
+func NewService(cfg config.Wrapper, myID protocol.DeviceID, mdl Model, tlsCfg *tls.Config, discoverer discover.Finder, bepProtocolName string, tlsDefaultCommonName string, evLogger events.Logger, registry *registry.Registry, keyGen *protocol.KeyGenerator) Service {
     spec := svcutil.SpecWithInfoLogger(l)
     service := &service{
         Supervisor: suture.New("connections.Service", spec),
@@ -190,6 +191,7 @@ func NewService(cfg config.Wrapper, myID protocol.DeviceID, mdl Model, tlsCfg *t
         natService: nat.NewService(myID, cfg),
         evLogger: evLogger,
         registry: registry,
+        keyGen: keyGen,

         dialNowDevicesMut: sync.NewMutex(),
         dialNow: make(chan struct{}, 1),
@@ -411,7 +413,7 @@ func (s *service) handleHellos(ctx context.Context) error {
         // connections are limited.
         rd, wr := s.limiter.getLimiters(remoteID, c, c.IsLocal())

-        protoConn := protocol.NewConnection(remoteID, rd, wr, c, s.model, c, deviceCfg.Compression, s.cfg.FolderPasswords(remoteID))
+        protoConn := protocol.NewConnection(remoteID, rd, wr, c, s.model, c, deviceCfg.Compression, s.cfg.FolderPasswords(remoteID), s.keyGen)
         go func() {
             <-protoConn.Closed()
             s.dialNowDevicesMut.Lock()
@@ -426,6 +428,7 @@ func (s *service) handleHellos(ctx context.Context) error {
         continue
     }
 }

 func (s *service) connect(ctx context.Context) error {
     // Map of when to earliest dial each given device + address again
     nextDialAt := make(nextDialRegistry)
@@ -1020,8 +1023,10 @@ func urlsToStrings(urls []*url.URL) []string {
     return strings
 }

-var warningLimiters = make(map[protocol.DeviceID]*rate.Limiter)
-var warningLimitersMut = sync.NewMutex()
+var (
+    warningLimiters = make(map[protocol.DeviceID]*rate.Limiter)
+    warningLimitersMut = sync.NewMutex()
+)

 func warningFor(dev protocol.DeviceID, msg string) {
     warningLimitersMut.Lock()
@@ -142,6 +142,7 @@ type model struct {
     folderIOLimiter *util.Semaphore
     fatalChan chan error
     started chan struct{}
+    keyGen *protocol.KeyGenerator

     // fields protected by fmut
     fmut sync.RWMutex
@@ -174,9 +175,7 @@ var _ config.Verifier = &model{}

 type folderFactory func(*model, *db.FileSet, *ignore.Matcher, config.FolderConfiguration, versioner.Versioner, events.Logger, *util.Semaphore) service

-var (
-    folderFactories = make(map[config.FolderType]folderFactory)
-)
+var folderFactories = make(map[config.FolderType]folderFactory)

 var (
     errDeviceUnknown = errors.New("unknown device")
@@ -205,7 +204,7 @@ var (
 // NewModel creates and starts a new model. The model starts in read-only mode,
 // where it sends index information to connected peers and responds to requests
 // for file data without altering the local folder in any way.
-func NewModel(cfg config.Wrapper, id protocol.DeviceID, clientName, clientVersion string, ldb *db.Lowlevel, protectedFiles []string, evLogger events.Logger) Model {
+func NewModel(cfg config.Wrapper, id protocol.DeviceID, clientName, clientVersion string, ldb *db.Lowlevel, protectedFiles []string, evLogger events.Logger, keyGen *protocol.KeyGenerator) Model {
     spec := svcutil.SpecWithDebugLogger(l)
     m := &model{
         Supervisor: suture.New("model", spec),
@@ -227,6 +226,7 @@ func NewModel(cfg config.Wrapper, id protocol.DeviceID, clientName, clientVersio
         folderIOLimiter: util.NewSemaphore(cfg.Options().MaxFolderConcurrency()),
         fatalChan: make(chan error),
         started: make(chan struct{}),
+        keyGen: keyGen,

         // fields protected by fmut
         fmut: sync.NewRWMutex(),
@@ -1462,7 +1462,7 @@ func (m *model) ccCheckEncryption(fcfg config.FolderConfiguration, folderDevice
     }

     if isEncryptedRemote {
-        passwordToken := protocol.PasswordToken(fcfg.ID, folderDevice.EncryptionPassword)
+        passwordToken := protocol.PasswordToken(m.keyGen, fcfg.ID, folderDevice.EncryptionPassword)
         match := false
         if hasTokenLocal {
             match = bytes.Equal(passwordToken, ccDeviceInfos.local.EncryptionPasswordToken)
@@ -2483,7 +2483,7 @@ func (m *model) generateClusterConfig(device protocol.DeviceID) (protocol.Cluste
         if deviceCfg.DeviceID == m.id && hasEncryptionToken {
             protocolDevice.EncryptionPasswordToken = encryptionToken
         } else if folderDevice.EncryptionPassword != "" {
-            protocolDevice.EncryptionPasswordToken = protocol.PasswordToken(folderCfg.ID, folderDevice.EncryptionPassword)
+            protocolDevice.EncryptionPasswordToken = protocol.PasswordToken(m.keyGen, folderCfg.ID, folderDevice.EncryptionPassword)
             if folderDevice.DeviceID == device {
                 passwords[folderCfg.ID] = folderDevice.EncryptionPassword
             }
@@ -3264,7 +3264,7 @@ func readEncryptionToken(cfg config.FolderConfiguration) ([]byte, error) {

 func writeEncryptionToken(token []byte, cfg config.FolderConfiguration) error {
     tokenName := encryptionTokenPath(cfg)
-    fd, err := cfg.Filesystem(nil).OpenFile(tokenName, fs.OptReadWrite|fs.OptCreate, 0666)
+    fd, err := cfg.Filesystem(nil).OpenFile(tokenName, fs.OptReadWrite|fs.OptCreate, 0o666)
     if err != nil {
         return err
     }
@@ -271,7 +271,7 @@ func BenchmarkRequestOut(b *testing.B) {

     fc := newFakeConnection(device1, m)
     for _, f := range files {
-        fc.addFile(f.Name, 0644, protocol.FileInfoTypeFile, []byte("some data to return"))
+        fc.addFile(f.Name, 0o644, protocol.FileInfoTypeFile, []byte("some data to return"))
     }
     m.AddConnection(fc, protocol.Hello{})
     must(b, m.Index(device1, "default", files))
@@ -296,7 +296,7 @@ func BenchmarkRequestInSingleFile(b *testing.B) {
     rand.Read(buf)
     mustRemove(b, defaultFs.RemoveAll("request"))
     defer func() { mustRemove(b, defaultFs.RemoveAll("request")) }()
-    must(b, defaultFs.MkdirAll("request/for/a/file/in/a/couple/of/dirs", 0755))
+    must(b, defaultFs.MkdirAll("request/for/a/file/in/a/couple/of/dirs", 0o755))
     writeFile(b, defaultFs, "request/for/a/file/in/a/couple/of/dirs/128k", buf)

     b.ResetTimer()
@@ -1148,8 +1148,8 @@ func TestAutoAcceptNameConflict(t *testing.T) {

     id := srand.String(8)
     label := srand.String(8)
-    testOs.MkdirAll(id, 0777)
-    testOs.MkdirAll(label, 0777)
+    testOs.MkdirAll(id, 0o777)
+    testOs.MkdirAll(label, 0o777)
     defer os.RemoveAll(id)
     defer os.RemoveAll(label)
     m, cancel := newState(t, defaultAutoAcceptCfg)
@@ -1198,7 +1198,7 @@ func TestAutoAcceptFallsBackToID(t *testing.T) {
     id := srand.String(8)
     label := srand.String(8)
     t.Log(id, label)
-    testOs.MkdirAll(label, 0777)
+    testOs.MkdirAll(label, 0o777)
     defer os.RemoveAll(label)
     defer os.RemoveAll(id)
     defer cleanupModel(m)
@@ -1330,7 +1330,8 @@ func TestAutoAcceptEnc(t *testing.T) {
             Folders: []protocol.Folder{{
                 ID: id,
                 Label: id,
-            }}}
+            }},
+        }
     }

     // Earlier tests might cause the connection to get closed, thus ClusterConfig
@@ -1486,7 +1487,7 @@ func changeIgnores(t *testing.T, m *testModel, expected []string) {
 func TestIgnores(t *testing.T) {
     // Assure a clean start state
     mustRemove(t, defaultFs.RemoveAll(config.DefaultMarkerName))
-    mustRemove(t, defaultFs.MkdirAll(config.DefaultMarkerName, 0644))
+    mustRemove(t, defaultFs.MkdirAll(config.DefaultMarkerName, 0o644))
     writeFile(t, defaultFs, ".stignore", []byte(".*\nquux\n"))

     m := setupModel(t, defaultCfgWrapper)
@@ -1548,7 +1549,7 @@ func TestIgnores(t *testing.T) {
 func TestEmptyIgnores(t *testing.T) {
     // Assure a clean start state
     mustRemove(t, defaultFs.RemoveAll(config.DefaultMarkerName))
-    must(t, defaultFs.MkdirAll(config.DefaultMarkerName, 0644))
+    must(t, defaultFs.MkdirAll(config.DefaultMarkerName, 0o644))

     m := setupModel(t, defaultCfgWrapper)
     defer cleanupModel(m)
@@ -1634,7 +1635,7 @@ func TestROScanRecovery(t *testing.T) {

     waitForState(t, sub, "default", "folder path missing")

-    testOs.Mkdir(fcfg.Path, 0700)
+    testOs.Mkdir(fcfg.Path, 0o700)

     waitForState(t, sub, "default", config.ErrMarkerMissing.Error())

@@ -1687,7 +1688,7 @@ func TestRWScanRecovery(t *testing.T) {

     waitForState(t, sub, "default", "folder path missing")

-    testOs.Mkdir(fcfg.Path, 0700)
+    testOs.Mkdir(fcfg.Path, 0o700)

     waitForState(t, sub, "default", config.ErrMarkerMissing.Error())

@@ -2147,10 +2148,10 @@ func TestIssue2782(t *testing.T) {
     if err := os.RemoveAll(testDir); err != nil {
         t.Skip(err)
     }
-    if err := os.MkdirAll(testDir+"/syncdir", 0755); err != nil {
+    if err := os.MkdirAll(testDir+"/syncdir", 0o755); err != nil {
         t.Skip(err)
     }
-    if err := os.WriteFile(testDir+"/syncdir/file", []byte("hello, world\n"), 0644); err != nil {
+    if err := os.WriteFile(testDir+"/syncdir/file", []byte("hello, world\n"), 0o644); err != nil {
         t.Skip(err)
     }
     if err := os.Symlink("syncdir", testDir+"/synclink"); err != nil {
@@ -2480,7 +2481,7 @@ func TestIssue2571(t *testing.T) {
     defer os.RemoveAll(testFs.URI())

     for _, dir := range []string{"toLink", "linkTarget"} {
-        must(t, testFs.MkdirAll(dir, 0775))
+        must(t, testFs.MkdirAll(dir, 0o775))
         fd, err := testFs.Create(filepath.Join(dir, "a"))
         must(t, err)
         fd.Close()
@@ -2518,8 +2519,8 @@ func TestIssue4573(t *testing.T) {
     testFs := fcfg.Filesystem(nil)
     defer os.RemoveAll(testFs.URI())

-    must(t, testFs.MkdirAll("inaccessible", 0755))
-    defer testFs.Chmod("inaccessible", 0777)
+    must(t, testFs.MkdirAll("inaccessible", 0o755))
+    defer testFs.Chmod("inaccessible", 0o777)

     file := filepath.Join("inaccessible", "a")
     fd, err := testFs.Create(file)
@@ -2529,7 +2530,7 @@ func TestIssue4573(t *testing.T) {
     m := setupModel(t, w)
     defer cleanupModel(m)

-    must(t, testFs.Chmod("inaccessible", 0000))
+    must(t, testFs.Chmod("inaccessible", 0o000))

     m.ScanFolder("default")

@@ -2561,7 +2562,7 @@ func TestInternalScan(t *testing.T) {
     for _, dir := range baseDirs {
         sub := filepath.Join(dir, "subDir")
         for _, dir := range []string{dir, sub} {
-            if err := testFs.MkdirAll(dir, 0775); err != nil {
+            if err := testFs.MkdirAll(dir, 0o775); err != nil {
                 t.Fatalf("%v: %v", dir, err)
             }
         }
@@ -2633,7 +2634,7 @@ func TestCustomMarkerName(t *testing.T) {

     waitForState(t, sub, "default", "folder path missing")

-    testOs.Mkdir(fcfg.Path, 0700)
+    testOs.Mkdir(fcfg.Path, 0o700)
     fd := testOs.Create(filepath.Join(fcfg.Path, "myfile"))
     fd.Close()

@@ -2646,7 +2647,7 @@ func TestRemoveDirWithContent(t *testing.T) {
     tfs := fcfg.Filesystem(nil)
     defer cleanupModelAndRemoveDir(m, tfs.URI())

-    tfs.MkdirAll("dirwith", 0755)
+    tfs.MkdirAll("dirwith", 0o755)
     content := filepath.Join("dirwith", "content")
     fd, err := tfs.Create(content)
     must(t, err)
@@ -2712,7 +2713,7 @@ func TestIssue4475(t *testing.T) {
     // This should result in the directory being recreated and added to the
     // db locally.

-    must(t, testFs.MkdirAll("delDir", 0755))
+    must(t, testFs.MkdirAll("delDir", 0o755))

     m.ScanFolder("default")

@@ -2721,7 +2722,7 @@ func TestIssue4475(t *testing.T) {
     }

     fileName := filepath.Join("delDir", "file")
-    conn.addFile(fileName, 0644, protocol.FileInfoTypeFile, nil)
+    conn.addFile(fileName, 0o644, protocol.FileInfoTypeFile, nil)
     conn.sendIndexUpdate()

     // Is there something we could trigger on instead of just waiting?
@@ -2805,7 +2806,7 @@ func TestVersionRestore(t *testing.T) {
             file = filepath.FromSlash(file)
         }
         dir := filepath.Dir(file)
-        must(t, filesystem.MkdirAll(dir, 0755))
+        must(t, filesystem.MkdirAll(dir, 0o755))
         if fd, err := filesystem.Create(file); err != nil {
             t.Fatal(err)
         } else if _, err := fd.Write([]byte(file)); err != nil {
@@ -3185,7 +3186,7 @@ func TestConnCloseOnRestart(t *testing.T) {

     br := &testutils.BlockingRW{}
     nw := &testutils.NoopRW{}
-    m.AddConnection(protocol.NewConnection(device1, br, nw, testutils.NoopCloser{}, m, new(protocolmocks.ConnectionInfo), protocol.CompressionNever, nil), protocol.Hello{})
+    m.AddConnection(protocol.NewConnection(device1, br, nw, testutils.NoopCloser{}, m, new(protocolmocks.ConnectionInfo), protocol.CompressionNever, nil, m.keyGen), protocol.Hello{})
     m.pmut.RLock()
     if len(m.closed) != 1 {
         t.Fatalf("Expected just one conn (len(m.conn) == %v)", len(m.conn))
@@ -3654,7 +3655,7 @@ func TestBlockListMap(t *testing.T) {

     // Change type
     must(t, ffs.Remove("four"))
-    must(t, ffs.Mkdir("four", 0644))
+    must(t, ffs.Mkdir("four", 0o644))

     m.ScanFolders()

@@ -3933,7 +3934,7 @@ func TestIssue6961(t *testing.T) {
     // Remote, invalid (receive-only) and existing file
     must(t, m.Index(device2, fcfg.ID, []protocol.FileInfo{{Name: name, RawInvalid: true, Sequence: 1}}))
     // Create a local file
-    if fd, err := tfs.OpenFile(name, fs.OptCreate, 0666); err != nil {
+    if fd, err := tfs.OpenFile(name, fs.OptCreate, 0o666); err != nil {
         t.Fatal(err)
     } else {
         fd.Close()
@@ -4038,7 +4039,7 @@ func TestCcCheckEncryption(t *testing.T) {
     defer cleanupModel(m)

     pw := "foo"
-    token := protocol.PasswordToken(fcfg.ID, pw)
+    token := protocol.PasswordToken(m.keyGen, fcfg.ID, pw)
     m.folderEncryptionPasswordTokens[fcfg.ID] = token

     testCases := []struct {
@@ -55,7 +55,7 @@ func TestRequestSimple(t *testing.T) {

     // Send an update for the test file, wait for it to sync and be reported back.
     contents := []byte("test file contents\n")
-    fc.addFile("testfile", 0644, protocol.FileInfoTypeFile, contents)
+    fc.addFile("testfile", 0o644, protocol.FileInfoTypeFile, contents)
     fc.sendIndexUpdate()
     select {
     case <-done:
@@ -101,7 +101,7 @@ func TestSymlinkTraversalRead(t *testing.T) {

     // Send an update for the symlink, wait for it to sync and be reported back.
     contents := []byte("..")
-    fc.addFile("symlink", 0644, protocol.FileInfoTypeSymlink, contents)
+    fc.addFile("symlink", 0o644, protocol.FileInfoTypeSymlink, contents)
     fc.sendIndexUpdate()
     <-done

@@ -151,7 +151,7 @@ func TestSymlinkTraversalWrite(t *testing.T) {

     // Send an update for the symlink, wait for it to sync and be reported back.
     contents := []byte("..")
-    fc.addFile("symlink", 0644, protocol.FileInfoTypeSymlink, contents)
+    fc.addFile("symlink", 0o644, protocol.FileInfoTypeSymlink, contents)
     fc.sendIndexUpdate()
     <-done

@@ -159,9 +159,9 @@ func TestSymlinkTraversalWrite(t *testing.T) {
     // blocks for any of them to come back, or index entries. Hopefully none
     // of that should happen.
     contents = []byte("testdata testdata\n")
-    fc.addFile("symlink/testfile", 0644, protocol.FileInfoTypeFile, contents)
-    fc.addFile("symlink/testdir", 0644, protocol.FileInfoTypeDirectory, contents)
-    fc.addFile("symlink/testsyml", 0644, protocol.FileInfoTypeSymlink, contents)
+    fc.addFile("symlink/testfile", 0o644, protocol.FileInfoTypeFile, contents)
+    fc.addFile("symlink/testdir", 0o644, protocol.FileInfoTypeDirectory, contents)
+    fc.addFile("symlink/testsyml", 0o644, protocol.FileInfoTypeSymlink, contents)
     fc.sendIndexUpdate()

     select {
@@ -203,7 +203,7 @@ func TestRequestCreateTmpSymlink(t *testing.T) {
     })

     // Send an update for the test file, wait for it to sync and be reported back.
-    fc.addFile(name, 0644, protocol.FileInfoTypeSymlink, []byte(".."))
+    fc.addFile(name, 0o644, protocol.FileInfoTypeSymlink, []byte(".."))
     fc.sendIndexUpdate()

     select {
@@ -257,7 +257,7 @@ func TestRequestVersioningSymlinkAttack(t *testing.T) {
     }

     // Send an update for the test file, wait for it to sync and be reported back.
-    fc.addFile("foo", 0644, protocol.FileInfoTypeSymlink, []byte(tmpdir))
+    fc.addFile("foo", 0o644, protocol.FileInfoTypeSymlink, []byte(tmpdir))
     fc.sendIndexUpdate()
     waitForIdx()

@@ -267,8 +267,8 @@ func TestRequestVersioningSymlinkAttack(t *testing.T) {
     waitForIdx()

     // Recreate foo and a file in it with some data
-    fc.updateFile("foo", 0755, protocol.FileInfoTypeDirectory, nil)
-    fc.addFile("foo/test", 0644, protocol.FileInfoTypeFile, []byte("testtesttest"))
+    fc.updateFile("foo", 0o755, protocol.FileInfoTypeDirectory, nil)
+    fc.addFile("foo/test", 0o644, protocol.FileInfoTypeFile, []byte("testtesttest"))
     fc.sendIndexUpdate()
     waitForIdx()

@@ -286,7 +286,6 @@ func TestRequestVersioningSymlinkAttack(t *testing.T) {
 func TestPullInvalidIgnoredSO(t *testing.T) {
     t.Skip("flaky")
     pullInvalidIgnored(t, config.FolderTypeSendOnly)
-
 }

 func TestPullInvalidIgnoredSR(t *testing.T) {
@@ -322,11 +321,11 @@ func pullInvalidIgnored(t *testing.T, ft config.FolderType) {
     ign := "ignoredNonExisting"
     ignExisting := "ignoredExisting"

-    fc.addFile(invIgn, 0644, protocol.FileInfoTypeFile, contents)
-    fc.addFile(invDel, 0644, protocol.FileInfoTypeFile, contents)
+    fc.addFile(invIgn, 0o644, protocol.FileInfoTypeFile, contents)
+    fc.addFile(invDel, 0o644, protocol.FileInfoTypeFile, contents)
     fc.deleteFile(invDel)
-    fc.addFile(ign, 0644, protocol.FileInfoTypeFile, contents)
-    fc.addFile(ignExisting, 0644, protocol.FileInfoTypeFile, contents)
+    fc.addFile(ign, 0o644, protocol.FileInfoTypeFile, contents)
+    fc.addFile(ignExisting, 0o644, protocol.FileInfoTypeFile, contents)
     writeFile(t, fss, ignExisting, otherContents)

     done := make(chan struct{})
@@ -549,8 +548,8 @@ func TestParentDeletion(t *testing.T) {
     child := filepath.Join(parent, "bar")

     received := make(chan []protocol.FileInfo)
-    fc.addFile(parent, 0777, protocol.FileInfoTypeDirectory, nil)
-    fc.addFile(child, 0777, protocol.FileInfoTypeDirectory, nil)
+    fc.addFile(parent, 0o777, protocol.FileInfoTypeDirectory, nil)
+    fc.addFile(child, 0o777, protocol.FileInfoTypeDirectory, nil)
     fc.setIndexFn(func(_ context.Context, folder string, fs []protocol.FileInfo) error {
         received <- fs
         return nil
@@ -588,7 +587,7 @@ func TestParentDeletion(t *testing.T) {
     }

     // Recreate the child dir on the remote
-    fc.updateFile(child, 0777, protocol.FileInfoTypeDirectory, nil)
+    fc.updateFile(child, 0o777, protocol.FileInfoTypeDirectory, nil)
     fc.sendIndexUpdate()

     // Wait for the child dir to be recreated and sent to the remote
@@ -634,7 +633,7 @@ func TestRequestSymlinkWindows(t *testing.T) {
         return nil
     })

-    fc.addFile("link", 0644, protocol.FileInfoTypeSymlink, nil)
+    fc.addFile("link", 0o644, protocol.FileInfoTypeSymlink, nil)
     fc.sendIndexUpdate()

     select {
@@ -714,7 +713,7 @@ func TestRequestRemoteRenameChanged(t *testing.T) {
         b: []byte("bData"),
     }
     for _, n := range [2]string{a, b} {
-        fc.addFile(n, 0644, protocol.FileInfoTypeFile, data[n])
+        fc.addFile(n, 0o644, protocol.FileInfoTypeFile, data[n])
     }
     fc.sendIndexUpdate()
     select {
@@ -772,7 +771,7 @@ func TestRequestRemoteRenameChanged(t *testing.T) {
         return nil
     })

-    fd, err := tfs.OpenFile(b, fs.OptReadWrite, 0644)
+    fd, err := tfs.OpenFile(b, fs.OptReadWrite, 0o644)
     if err != nil {
         t.Fatal(err)
     }
@@ -784,7 +783,7 @@ func TestRequestRemoteRenameChanged(t *testing.T) {

     // rename
     fc.deleteFile(a)
-    fc.updateFile(b, 0644, protocol.FileInfoTypeFile, data[a])
+    fc.updateFile(b, 0o644, protocol.FileInfoTypeFile, data[a])
     // Make sure the remote file for b is newer and thus stays global -> local conflict
     fc.mut.Lock()
     for i := range fc.files {
@@ -843,7 +842,7 @@ func TestRequestRemoteRenameConflict(t *testing.T) {
         b: []byte("bData"),
     }
     for _, n := range [2]string{a, b} {
-        fc.addFile(n, 0644, protocol.FileInfoTypeFile, data[n])
+        fc.addFile(n, 0o644, protocol.FileInfoTypeFile, data[n])
     }
     fc.sendIndexUpdate()
     select {
@@ -859,7 +858,7 @@ func TestRequestRemoteRenameConflict(t *testing.T) {
         must(t, equalContents(filepath.Join(tmpDir, n), data[n]))
     }

-    fd, err := tfs.OpenFile(b, fs.OptReadWrite, 0644)
+    fd, err := tfs.OpenFile(b, fs.OptReadWrite, 0o644)
     if err != nil {
         t.Fatal(err)
     }
@@ -883,7 +882,7 @@ func TestRequestRemoteRenameConflict(t *testing.T) {

     // rename
     fc.deleteFile(a)
-    fc.updateFile(b, 0644, protocol.FileInfoTypeFile, data[a])
+    fc.updateFile(b, 0o644, protocol.FileInfoTypeFile, data[a])
     fc.sendIndexUpdate()
     select {
     case <-recv:
@@ -933,7 +932,7 @@ func TestRequestDeleteChanged(t *testing.T) {
     // setup
     a := "a"
     data := []byte("aData")
-    fc.addFile(a, 0644, protocol.FileInfoTypeFile, data)
+    fc.addFile(a, 0o644, protocol.FileInfoTypeFile, data)
     fc.sendIndexUpdate()
     select {
     case <-done:
@@ -952,7 +951,7 @@ func TestRequestDeleteChanged(t *testing.T) {
         return nil
     })

-    fd, err := tfs.OpenFile(a, fs.OptReadWrite, 0644)
+    fd, err := tfs.OpenFile(a, fs.OptReadWrite, 0o644)
     if err != nil {
         t.Fatal(err)
     }
@@ -999,7 +998,7 @@ func TestNeedFolderFiles(t *testing.T) {
     data := []byte("foo")
     num := 20
     for i := 0; i < num; i++ {
-        fc.addFile(strconv.Itoa(i), 0644, protocol.FileInfoTypeFile, data)
+        fc.addFile(strconv.Itoa(i), 0o644, protocol.FileInfoTypeFile, data)
     }
     fc.sendIndexUpdate()

@@ -1146,7 +1145,7 @@ func TestRequestLastFileProgress(t *testing.T) {
     })

     contents := []byte("test file contents\n")
-    fc.addFile("testfile", 0644, protocol.FileInfoTypeFile, contents)
+    fc.addFile("testfile", 0o644, protocol.FileInfoTypeFile, contents)
     fc.sendIndexUpdate()

     select {
@@ -1280,7 +1279,7 @@ func TestRequestIndexSenderClusterConfigBeforeStart(t *testing.T) {
     dir2 := "bar"

     // Initialise db with an entry and then stop everything again
-    must(t, tfs.Mkdir(dir1, 0777))
+    must(t, tfs.Mkdir(dir1, 0o777))
     m := newModel(t, w, myID, "syncthing", "dev", nil)
     defer cleanupModelAndRemoveDir(m, tfs.URI())
     m.ServeBackground()
@@ -1290,7 +1289,7 @@ func TestRequestIndexSenderClusterConfigBeforeStart(t *testing.T) {

     // Add connection (sends incoming cluster config) before starting the new model
     m = &testModel{
-        model: NewModel(m.cfg, m.id, m.clientName, m.clientVersion, m.db, m.protectedFiles, m.evLogger).(*model),
+        model: NewModel(m.cfg, m.id, m.clientName, m.clientVersion, m.db, m.protectedFiles, m.evLogger, protocol.NewKeyGenerator()).(*model),
         evCancel: m.evCancel,
         stopped: make(chan struct{}),
     }
@@ -1326,7 +1325,7 @@ func TestRequestIndexSenderClusterConfigBeforeStart(t *testing.T) {
     }

     // Check that an index is sent for the newly added item
-    must(t, tfs.Mkdir(dir2, 0777))
+    must(t, tfs.Mkdir(dir2, 0o777))
     m.ScanFolders()
     select {
     case <-timeout:
@@ -1346,8 +1345,9 @@ func TestRequestReceiveEncrypted(t *testing.T) {
     fcfg.Type = config.FolderTypeReceiveEncrypted
     setFolder(t, w, fcfg)

-    encToken := protocol.PasswordToken(fcfg.ID, "pw")
-    must(t, tfs.Mkdir(config.DefaultMarkerName, 0777))
+    keyGen := protocol.NewKeyGenerator()
+    encToken := protocol.PasswordToken(keyGen, fcfg.ID, "pw")
+    must(t, tfs.Mkdir(config.DefaultMarkerName, 0o777))
     must(t, writeEncryptionToken(encToken, fcfg))

     m := setupModel(t, w)
@@ -1407,7 +1407,7 @@ func TestRequestReceiveEncrypted(t *testing.T) {
     name := "foo"
     data := make([]byte, 2000)
     rand.Read(data)
-    fc.addFile(name, 0664, protocol.FileInfoTypeFile, data)
+    fc.addFile(name, 0o664, protocol.FileInfoTypeFile, data)
     fc.sendIndexUpdate()

     select {
@@ -1467,7 +1467,7 @@ func TestRequestGlobalInvalidToValid(t *testing.T) {

     // Setup device with valid file, do not send index yet
     contents := []byte("test file contents\n")
-    fc.addFile(name, 0644, protocol.FileInfoTypeFile, contents)
+    fc.addFile(name, 0o644, protocol.FileInfoTypeFile, contents)

     // Third device ignoring the same file
     fc.mut.Lock()
@@ -154,7 +154,7 @@ func newModel(t testing.TB, cfg config.Wrapper, id protocol.DeviceID, clientName
     if err != nil {
         t.Fatal(err)
     }
-    m := NewModel(cfg, id, clientName, clientVersion, ldb, protectedFiles, evLogger).(*model)
+    m := NewModel(cfg, id, clientName, clientVersion, ldb, protectedFiles, evLogger, protocol.NewKeyGenerator()).(*model)
     ctx, cancel := context.WithCancel(context.Background())
     go evLogger.Serve(ctx)
     return &testModel{
@@ -60,9 +60,9 @@ func benchmarkRequestsTLS(b *testing.B, conn0, conn1 net.Conn) {

 func benchmarkRequestsConnPair(b *testing.B, conn0, conn1 net.Conn) {
     // Start up Connections on them
-    c0 := NewConnection(LocalDeviceID, conn0, conn0, testutils.NoopCloser{}, new(fakeModel), new(mockedConnectionInfo), CompressionMetadata, nil)
+    c0 := NewConnection(LocalDeviceID, conn0, conn0, testutils.NoopCloser{}, new(fakeModel), new(mockedConnectionInfo), CompressionMetadata, nil, testKeyGen)
     c0.Start()
-    c1 := NewConnection(LocalDeviceID, conn1, conn1, testutils.NoopCloser{}, new(fakeModel), new(mockedConnectionInfo), CompressionMetadata, nil)
+    c1 := NewConnection(LocalDeviceID, conn1, conn1, testutils.NoopCloser{}, new(fakeModel), new(mockedConnectionInfo), CompressionMetadata, nil, testKeyGen)
     c1.Start()

     // Satisfy the assertions in the protocol by sending an initial cluster config
@ -17,6 +17,7 @@ import (
|
|||||||
"sync"
|
"sync"
|
||||||
|
|
||||||
"github.com/gogo/protobuf/proto"
|
"github.com/gogo/protobuf/proto"
|
||||||
|
lru "github.com/hashicorp/golang-lru/v2"
|
||||||
"github.com/miscreant/miscreant.go"
|
"github.com/miscreant/miscreant.go"
|
||||||
"github.com/syncthing/syncthing/lib/rand"
|
"github.com/syncthing/syncthing/lib/rand"
|
||||||
"github.com/syncthing/syncthing/lib/sha256"
|
"github.com/syncthing/syncthing/lib/sha256"
|
||||||
@ -34,6 +35,8 @@ const (
|
|||||||
maxPathComponent = 200 // characters
|
maxPathComponent = 200 // characters
|
||||||
encryptedDirExtension = ".syncthing-enc" // for top level dirs
|
encryptedDirExtension = ".syncthing-enc" // for top level dirs
|
||||||
miscreantAlgo = "AES-SIV"
|
miscreantAlgo = "AES-SIV"
|
||||||
|
folderKeyCacheEntries = 1000
|
||||||
|
fileKeyCacheEntries = 5000
|
||||||
)
|
)
|
||||||
|
|
||||||
// The encryptedModel sits between the encrypted device and the model. It
|
// The encryptedModel sits between the encrypted device and the model. It
|
||||||
@ -42,12 +45,21 @@ const (
|
|||||||
type encryptedModel struct {
|
type encryptedModel struct {
|
||||||
model Model
|
model Model
|
||||||
folderKeys *folderKeyRegistry
|
folderKeys *folderKeyRegistry
|
||||||
|
keyGen *KeyGenerator
|
||||||
|
}
|
||||||
|
|
||||||
|
func newEncryptedModel(model Model, folderKeys *folderKeyRegistry, keyGen *KeyGenerator) encryptedModel {
|
||||||
|
return encryptedModel{
|
||||||
|
model: model,
|
||||||
|
folderKeys: folderKeys,
|
||||||
|
keyGen: keyGen,
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (e encryptedModel) Index(deviceID DeviceID, folder string, files []FileInfo) error {
|
func (e encryptedModel) Index(deviceID DeviceID, folder string, files []FileInfo) error {
|
||||||
if folderKey, ok := e.folderKeys.get(folder); ok {
|
if folderKey, ok := e.folderKeys.get(folder); ok {
|
||||||
// incoming index data to be decrypted
|
// incoming index data to be decrypted
|
||||||
if err := decryptFileInfos(files, folderKey); err != nil {
|
if err := decryptFileInfos(e.keyGen, files, folderKey); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -57,7 +69,7 @@ func (e encryptedModel) Index(deviceID DeviceID, folder string, files []FileInfo
|
|||||||
func (e encryptedModel) IndexUpdate(deviceID DeviceID, folder string, files []FileInfo) error {
|
func (e encryptedModel) IndexUpdate(deviceID DeviceID, folder string, files []FileInfo) error {
|
||||||
if folderKey, ok := e.folderKeys.get(folder); ok {
|
if folderKey, ok := e.folderKeys.get(folder); ok {
|
||||||
// incoming index data to be decrypted
|
// incoming index data to be decrypted
|
||||||
if err := decryptFileInfos(files, folderKey); err != nil {
|
if err := decryptFileInfos(e.keyGen, files, folderKey); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -86,7 +98,7 @@ func (e encryptedModel) Request(deviceID DeviceID, folder, name string, blockNo,
|
|||||||
|
|
||||||
// Decrypt the block hash.
|
// Decrypt the block hash.
|
||||||
|
|
||||||
fileKey := FileKey(realName, folderKey)
|
fileKey := e.keyGen.FileKey(realName, folderKey)
|
||||||
var additional [8]byte
|
var additional [8]byte
|
||||||
binary.BigEndian.PutUint64(additional[:], uint64(realOffset))
|
binary.BigEndian.PutUint64(additional[:], uint64(realOffset))
|
||||||
realHash, err := decryptDeterministic(hash, fileKey, additional[:])
|
realHash, err := decryptDeterministic(hash, fileKey, additional[:])
|
||||||
@ -145,6 +157,16 @@ type encryptedConnection struct {
|
|||||||
ConnectionInfo
|
ConnectionInfo
|
||||||
conn *rawConnection
|
conn *rawConnection
|
||||||
folderKeys *folderKeyRegistry
|
folderKeys *folderKeyRegistry
|
||||||
|
keyGen *KeyGenerator
|
||||||
|
}
|
||||||
|
|
||||||
|
func newEncryptedConnection(ci ConnectionInfo, conn *rawConnection, folderKeys *folderKeyRegistry, keyGen *KeyGenerator) encryptedConnection {
|
||||||
|
return encryptedConnection{
|
||||||
|
ConnectionInfo: ci,
|
||||||
|
conn: conn,
|
||||||
|
folderKeys: folderKeys,
|
||||||
|
keyGen: keyGen,
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (e encryptedConnection) Start() {
|
func (e encryptedConnection) Start() {
|
||||||
@ -161,14 +183,14 @@ func (e encryptedConnection) ID() DeviceID {
|
|||||||
|
|
||||||
func (e encryptedConnection) Index(ctx context.Context, folder string, files []FileInfo) error {
|
func (e encryptedConnection) Index(ctx context.Context, folder string, files []FileInfo) error {
|
||||||
if folderKey, ok := e.folderKeys.get(folder); ok {
|
if folderKey, ok := e.folderKeys.get(folder); ok {
|
||||||
encryptFileInfos(files, folderKey)
|
encryptFileInfos(e.keyGen, files, folderKey)
|
||||||
}
|
}
|
||||||
return e.conn.Index(ctx, folder, files)
|
return e.conn.Index(ctx, folder, files)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (e encryptedConnection) IndexUpdate(ctx context.Context, folder string, files []FileInfo) error {
|
func (e encryptedConnection) IndexUpdate(ctx context.Context, folder string, files []FileInfo) error {
|
||||||
if folderKey, ok := e.folderKeys.get(folder); ok {
|
if folderKey, ok := e.folderKeys.get(folder); ok {
|
||||||
encryptFileInfos(files, folderKey)
|
encryptFileInfos(e.keyGen, files, folderKey)
|
||||||
}
|
}
|
||||||
return e.conn.IndexUpdate(ctx, folder, files)
|
return e.conn.IndexUpdate(ctx, folder, files)
|
||||||
}
|
}
|
||||||
@ -200,7 +222,7 @@ func (e encryptedConnection) Request(ctx context.Context, folder string, name st
|
|||||||
|
|
||||||
// Return the decrypted block (or an error if it fails decryption)
|
// Return the decrypted block (or an error if it fails decryption)
|
||||||
|
|
||||||
fileKey := FileKey(name, folderKey)
|
fileKey := e.keyGen.FileKey(name, folderKey)
|
||||||
bs, err = DecryptBytes(bs, fileKey)
|
bs, err = DecryptBytes(bs, fileKey)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
@ -232,16 +254,16 @@ func (e encryptedConnection) Statistics() Statistics {
|
|||||||
return e.conn.Statistics()
|
return e.conn.Statistics()
|
||||||
}
|
}
|
||||||
|
|
||||||
func encryptFileInfos(files []FileInfo, folderKey *[keySize]byte) {
|
func encryptFileInfos(keyGen *KeyGenerator, files []FileInfo, folderKey *[keySize]byte) {
|
||||||
for i, fi := range files {
|
for i, fi := range files {
|
||||||
files[i] = encryptFileInfo(fi, folderKey)
|
files[i] = encryptFileInfo(keyGen, fi, folderKey)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// encryptFileInfo encrypts a FileInfo and wraps it into a new fake FileInfo
|
// encryptFileInfo encrypts a FileInfo and wraps it into a new fake FileInfo
|
||||||
// with an encrypted name.
|
// with an encrypted name.
|
||||||
func encryptFileInfo(fi FileInfo, folderKey *[keySize]byte) FileInfo {
|
func encryptFileInfo(keyGen *KeyGenerator, fi FileInfo, folderKey *[keySize]byte) FileInfo {
|
||||||
fileKey := FileKey(fi.Name, folderKey)
|
fileKey := keyGen.FileKey(fi.Name, folderKey)
|
||||||
|
|
||||||
// The entire FileInfo is encrypted with a random nonce, and concatenated
|
// The entire FileInfo is encrypted with a random nonce, and concatenated
|
||||||
// with that nonce.
|
// with that nonce.
|
||||||
@ -319,7 +341,7 @@ func encryptFileInfo(fi FileInfo, folderKey *[keySize]byte) FileInfo {
|
|||||||
enc := FileInfo{
|
enc := FileInfo{
|
||||||
Name: encryptName(fi.Name, folderKey),
|
Name: encryptName(fi.Name, folderKey),
|
||||||
Type: typ,
|
Type: typ,
|
||||||
Permissions: 0644,
|
Permissions: 0o644,
|
||||||
ModifiedS: 1234567890, // Sat Feb 14 00:31:30 CET 2009
|
ModifiedS: 1234567890, // Sat Feb 14 00:31:30 CET 2009
|
||||||
Deleted: fi.Deleted,
|
Deleted: fi.Deleted,
|
||||||
RawInvalid: fi.IsInvalid(),
|
RawInvalid: fi.IsInvalid(),
|
||||||
@ -336,9 +358,9 @@ func encryptFileInfo(fi FileInfo, folderKey *[keySize]byte) FileInfo {
|
|||||||
return enc
|
return enc
|
||||||
}
|
}
|
||||||
|
|
||||||
func decryptFileInfos(files []FileInfo, folderKey *[keySize]byte) error {
|
func decryptFileInfos(keyGen *KeyGenerator, files []FileInfo, folderKey *[keySize]byte) error {
|
||||||
for i, fi := range files {
|
for i, fi := range files {
|
||||||
decFI, err := DecryptFileInfo(fi, folderKey)
|
decFI, err := DecryptFileInfo(keyGen, fi, folderKey)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@ -349,13 +371,13 @@ func decryptFileInfos(files []FileInfo, folderKey *[keySize]byte) error {
|
|||||||
|
|
||||||
// DecryptFileInfo extracts the encrypted portion of a FileInfo, decrypts it
|
// DecryptFileInfo extracts the encrypted portion of a FileInfo, decrypts it
|
||||||
// and returns that.
|
// and returns that.
|
||||||
func DecryptFileInfo(fi FileInfo, folderKey *[keySize]byte) (FileInfo, error) {
|
func DecryptFileInfo(keyGen *KeyGenerator, fi FileInfo, folderKey *[keySize]byte) (FileInfo, error) {
|
||||||
realName, err := decryptName(fi.Name, folderKey)
|
realName, err := decryptName(fi.Name, folderKey)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return FileInfo{}, err
|
return FileInfo{}, err
|
||||||
}
|
}
|
||||||
|
|
||||||
fileKey := FileKey(realName, folderKey)
|
fileKey := keyGen.FileKey(realName, folderKey)
|
||||||
dec, err := DecryptBytes(fi.Encrypted, fileKey)
|
dec, err := DecryptBytes(fi.Encrypted, fileKey)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return FileInfo{}, err
|
return FileInfo{}, err
|
||||||
@ -476,10 +498,10 @@ func randomNonce() *[nonceSize]byte {
|
|||||||
|
|
||||||
// keysFromPasswords converts a set of folder ID to password into a set of
|
// keysFromPasswords converts a set of folder ID to password into a set of
|
||||||
// folder ID to encryption key, using our key derivation function.
|
// folder ID to encryption key, using our key derivation function.
|
||||||
func keysFromPasswords(passwords map[string]string) map[string]*[keySize]byte {
|
func keysFromPasswords(keyGen *KeyGenerator, passwords map[string]string) map[string]*[keySize]byte {
|
||||||
res := make(map[string]*[keySize]byte, len(passwords))
|
res := make(map[string]*[keySize]byte, len(passwords))
|
||||||
for folder, password := range passwords {
|
for folder, password := range passwords {
|
||||||
res[folder] = KeyFromPassword(folder, password)
|
res[folder] = keyGen.KeyFromPassword(folder, password)
|
||||||
}
|
}
|
||||||
return res
|
return res
|
||||||
}
|
}
|
||||||
@ -488,9 +510,35 @@ func knownBytes(folderID string) []byte {
|
|||||||
return []byte("syncthing" + folderID)
|
return []byte("syncthing" + folderID)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type KeyGenerator struct {
|
||||||
|
mut sync.Mutex
|
||||||
|
folderKeys *lru.TwoQueueCache[folderKeyCacheKey, *[keySize]byte]
|
||||||
|
fileKeys *lru.TwoQueueCache[fileKeyCacheKey, *[keySize]byte]
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewKeyGenerator() *KeyGenerator {
|
||||||
|
folderKeys, _ := lru.New2Q[folderKeyCacheKey, *[keySize]byte](folderKeyCacheEntries)
|
||||||
|
fileKeys, _ := lru.New2Q[fileKeyCacheKey, *[keySize]byte](fileKeyCacheEntries)
|
||||||
|
return &KeyGenerator{
|
||||||
|
folderKeys: folderKeys,
|
||||||
|
fileKeys: fileKeys,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type folderKeyCacheKey struct {
|
||||||
|
folderID string
|
||||||
|
password string
|
||||||
|
}
|
||||||
|
|
||||||
// KeyFromPassword uses key derivation to generate a stronger key from a
|
// KeyFromPassword uses key derivation to generate a stronger key from a
|
||||||
// probably weak password.
|
// probably weak password.
|
||||||
func KeyFromPassword(folderID, password string) *[keySize]byte {
|
func (g *KeyGenerator) KeyFromPassword(folderID, password string) *[keySize]byte {
|
||||||
|
cacheKey := folderKeyCacheKey{folderID, password}
|
||||||
|
g.mut.Lock()
|
||||||
|
defer g.mut.Unlock()
|
||||||
|
if key, ok := g.folderKeys.Get(cacheKey); ok {
|
||||||
|
return key
|
||||||
|
}
|
||||||
bs, err := scrypt.Key([]byte(password), knownBytes(folderID), 32768, 8, 1, keySize)
|
bs, err := scrypt.Key([]byte(password), knownBytes(folderID), 32768, 8, 1, keySize)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
panic("key derivation failure: " + err.Error())
|
panic("key derivation failure: " + err.Error())
|
||||||
@ -500,23 +548,36 @@ func KeyFromPassword(folderID, password string) *[keySize]byte {
|
|||||||
}
|
}
|
||||||
var key [keySize]byte
|
var key [keySize]byte
|
||||||
copy(key[:], bs)
|
copy(key[:], bs)
|
||||||
|
g.folderKeys.Add(cacheKey, &key)
|
||||||
return &key
|
return &key
|
||||||
}
|
}
|
||||||
|
|
||||||
var hkdfSalt = []byte("syncthing")
|
var hkdfSalt = []byte("syncthing")
|
||||||
|
|
||||||
func FileKey(filename string, folderKey *[keySize]byte) *[keySize]byte {
|
type fileKeyCacheKey struct {
|
||||||
|
file string
|
||||||
|
key [keySize]byte
|
||||||
|
}
|
||||||
|
|
||||||
|
func (g *KeyGenerator) FileKey(filename string, folderKey *[keySize]byte) *[keySize]byte {
|
||||||
|
g.mut.Lock()
|
||||||
|
defer g.mut.Unlock()
|
||||||
|
cacheKey := fileKeyCacheKey{filename, *folderKey}
|
||||||
|
if key, ok := g.fileKeys.Get(cacheKey); ok {
|
||||||
|
return key
|
||||||
|
}
|
||||||
kdf := hkdf.New(sha256.New, append(folderKey[:], filename...), hkdfSalt, nil)
|
kdf := hkdf.New(sha256.New, append(folderKey[:], filename...), hkdfSalt, nil)
|
||||||
var fileKey [keySize]byte
|
var fileKey [keySize]byte
|
||||||
n, err := io.ReadFull(kdf, fileKey[:])
|
n, err := io.ReadFull(kdf, fileKey[:])
|
||||||
if err != nil || n != keySize {
|
if err != nil || n != keySize {
|
||||||
panic("hkdf failure")
|
panic("hkdf failure")
|
||||||
}
|
}
|
||||||
|
g.fileKeys.Add(cacheKey, &fileKey)
|
||||||
return &fileKey
|
return &fileKey
|
||||||
}
|
}
|
||||||
|
|
||||||
func PasswordToken(folderID, password string) []byte {
|
func PasswordToken(keyGen *KeyGenerator, folderID, password string) []byte {
|
||||||
return encryptDeterministic(knownBytes(folderID), KeyFromPassword(folderID, password), nil)
|
return encryptDeterministic(knownBytes(folderID), keyGen.KeyFromPassword(folderID, password), nil)
|
||||||
}
|
}
|
||||||
|
|
||||||
// slashify inserts slashes (and file extension) in the string to create an
|
// slashify inserts slashes (and file extension) in the string to create an
|
||||||
@ -593,13 +654,15 @@ func IsEncryptedParent(pathComponents []string) bool {
 }

 type folderKeyRegistry struct {
+	keyGen *KeyGenerator
 	keys   map[string]*[keySize]byte // folder ID -> key
 	mut    sync.RWMutex
 }

-func newFolderKeyRegistry(passwords map[string]string) *folderKeyRegistry {
+func newFolderKeyRegistry(keyGen *KeyGenerator, passwords map[string]string) *folderKeyRegistry {
 	return &folderKeyRegistry{
-		keys: keysFromPasswords(passwords),
+		keyGen: keyGen,
+		keys:   keysFromPasswords(keyGen, passwords),
 	}
 }

@ -612,6 +675,6 @@ func (r *folderKeyRegistry) get(folder string) (*[keySize]byte, bool) {
 func (r *folderKeyRegistry) setPasswords(passwords map[string]string) {
 	r.mut.Lock()
-	r.keys = keysFromPasswords(passwords)
+	r.keys = keysFromPasswords(r.keyGen, passwords)
 	r.mut.Unlock()
 }
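The keysFromPasswords helper that gains the extra argument is not shown in this excerpt. Judging from its call sites, a plausible shape would be the following; this is an assumption, not the actual code:

// keysFromPasswords derives one folder key per configured folder password,
// going through the shared generator so repeated derivations hit its cache.
func keysFromPasswords(keyGen *KeyGenerator, passwords map[string]string) map[string]*[keySize]byte {
	res := make(map[string]*[keySize]byte, len(passwords))
	for folder, password := range passwords {
		res[folder] = keyGen.KeyFromPassword(folder, password)
	}
	return res
}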
@ -12,13 +12,13 @@ import (
 	"reflect"
 	"regexp"
 	"strings"
-	"sync"
 	"testing"

 	"github.com/syncthing/syncthing/lib/rand"
-	"github.com/syncthing/syncthing/lib/sha256"
 )

+var testKeyGen = NewKeyGenerator()
+
 func TestEnDecryptName(t *testing.T) {
 	pattern := regexp.MustCompile(
 		fmt.Sprintf("^[0-9A-V]%s/[0-9A-V]{2}/([0-9A-V]{%d}/)*[0-9A-V]{1,%d}$",
@ -72,13 +72,13 @@ func TestEnDecryptName(t *testing.T) {
 }

 func TestKeyDerivation(t *testing.T) {
-	folderKey := KeyFromPassword("my folder", "my password")
+	folderKey := testKeyGen.KeyFromPassword("my folder", "my password")
 	encryptedName := encryptDeterministic([]byte("filename.txt"), folderKey, nil)
 	if base32Hex.EncodeToString(encryptedName) != "3T5957I4IOA20VEIEER6JSQG0PEPIRV862II3K7LOF75Q" {
 		t.Error("encrypted name mismatch")
 	}

-	fileKey := FileKey("filename.txt", folderKey)
+	fileKey := testKeyGen.FileKey("filename.txt", folderKey)
 	// fmt.Println(base32Hex.EncodeToString(encryptBytes([]byte("hello world"), fileKey))) => A1IPD...
 	const encrypted = `A1IPD28ISL7VNPRSSSQM2L31L3IJPC08283RO89J5UG0TI9P38DO9RFGK12DK0KD7PKQP6U51UL2B6H96O`
 	bs, _ := base32Hex.DecodeString(encrypted)
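The existing key-derivation vectors stay valid because caching only changes when scrypt and HKDF run, not what they produce. If one wanted to pin the caching behaviour itself, a hypothetical extra test (not part of this commit) could check that the same pointer comes back on a repeated call:

func TestFolderKeyCacheReuse(t *testing.T) {
	// Both calls use identical inputs, so the second should be served
	// from the generator's LRU cache and return the stored pointer.
	k1 := testKeyGen.KeyFromPassword("my folder", "my password")
	k2 := testKeyGen.KeyFromPassword("my folder", "my password")
	if k1 != k2 {
		t.Error("expected the cached folder key pointer to be reused")
	}
}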
@ -137,7 +137,7 @@ func encFileInfo() FileInfo {
 	return FileInfo{
 		Name:        "hello",
 		Size:        45,
-		Permissions: 0755,
+		Permissions: 0o755,
 		ModifiedS:   8080,
 		Sequence:    1000,
 		Blocks: []BlockInfo{
@ -159,7 +159,7 @@ func TestEnDecryptFileInfo(t *testing.T) {
 	var key [32]byte
 	fi := encFileInfo()

-	enc := encryptFileInfo(fi, &key)
+	enc := encryptFileInfo(testKeyGen, fi, &key)
 	if bytes.Equal(enc.Blocks[0].Hash, enc.Blocks[1].Hash) {
 		t.Error("block hashes should not repeat when on different offsets")
 	}
@ -169,7 +169,7 @@ func TestEnDecryptFileInfo(t *testing.T) {
 	if enc.Sequence != fi.Sequence {
 		t.Error("encrypted fileinfo didn't maintain sequence number")
 	}
-	again := encryptFileInfo(fi, &key)
+	again := encryptFileInfo(testKeyGen, fi, &key)
 	if !bytes.Equal(enc.Blocks[0].Hash, again.Blocks[0].Hash) {
 		t.Error("block hashes should remain stable (0)")
 	}
@ -180,7 +180,7 @@ func TestEnDecryptFileInfo(t *testing.T) {
 	// Simulate the remote setting the sequence number when writing to db
 	enc.Sequence = 10

-	dec, err := DecryptFileInfo(enc, &key)
+	dec, err := DecryptFileInfo(testKeyGen, enc, &key)
 	if err != nil {
 		t.Error(err)
 	}
@ -201,7 +201,7 @@ func TestEncryptedFileInfoConsistency(t *testing.T) {
 	}
 	files[1].SetIgnored()
 	for i, f := range files {
-		enc := encryptFileInfo(f, &key)
+		enc := encryptFileInfo(testKeyGen, f, &key)
 		if err := checkFileInfoConsistency(enc); err != nil {
 			t.Errorf("%v: %v", i, err)
 		}
@ -235,22 +235,3 @@ func TestIsEncryptedParent(t *testing.T) {
 		}
 	}
 }
-
-var benchmarkFileKey struct {
-	key [keySize]byte
-	sync.Once
-}
-
-func BenchmarkFileKey(b *testing.B) {
-	benchmarkFileKey.Do(func() {
-		sha256.SelectAlgo()
-		rand.Read(benchmarkFileKey.key[:])
-	})
-
-	b.ResetTimer()
-	b.ReportAllocs()
-
-	for i := 0; i < b.N; i++ {
-		FileKey("a_kind_of_long_filename.ext", &benchmarkFileKey.key)
-	}
-}
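The package-level BenchmarkFileKey above is removed together with the free FileKey function, which is also what makes the sync and sha256 imports unnecessary. If an equivalent benchmark were still wanted against the new API, it could look roughly like this sketch (name is hypothetical; note that after the first iteration it mostly measures the cache-hit path):

func BenchmarkFileKeyCached(b *testing.B) {
	keyGen := NewKeyGenerator()
	var key [keySize]byte
	rand.Read(key[:])

	b.ResetTimer()
	b.ReportAllocs()
	for i := 0; i < b.N; i++ {
		// Only the first call performs HKDF; the rest hit the LRU cache.
		keyGen.FileKey("a_kind_of_long_filename.ext", &key)
	}
}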
@ -63,9 +63,7 @@ var sha256OfEmptyBlock = map[int][sha256.Size]byte{
 	16 << MiB: {0x8, 0xa, 0xcf, 0x35, 0xa5, 0x7, 0xac, 0x98, 0x49, 0xcf, 0xcb, 0xa4, 0x7d, 0xc2, 0xad, 0x83, 0xe0, 0x1b, 0x75, 0x66, 0x3a, 0x51, 0x62, 0x79, 0xc8, 0xb9, 0xd2, 0x43, 0xb7, 0x19, 0x64, 0x3e},
 }

-var (
-	errNotCompressible = errors.New("not compressible")
-)
+var errNotCompressible = errors.New("not compressible")

 func init() {
 	for blockSize := MinBlockSize; blockSize <= MaxBlockSize; blockSize *= 2 {
@ -231,16 +229,16 @@ const (
 // Should not be modified in production code, just for testing.
 var CloseTimeout = 10 * time.Second

-func NewConnection(deviceID DeviceID, reader io.Reader, writer io.Writer, closer io.Closer, receiver Model, connInfo ConnectionInfo, compress Compression, passwords map[string]string) Connection {
+func NewConnection(deviceID DeviceID, reader io.Reader, writer io.Writer, closer io.Closer, receiver Model, connInfo ConnectionInfo, compress Compression, passwords map[string]string, keyGen *KeyGenerator) Connection {
 	// Encryption / decryption is first (outermost) before conversion to
 	// native path formats.
 	nm := makeNative(receiver)
-	em := &encryptedModel{model: nm, folderKeys: newFolderKeyRegistry(passwords)}
+	em := newEncryptedModel(nm, newFolderKeyRegistry(keyGen, passwords), keyGen)

 	// We do the wire format conversion first (outermost) so that the
 	// metadata is in wire format when it reaches the encryption step.
 	rc := newRawConnection(deviceID, reader, writer, closer, em, connInfo, compress)
-	ec := encryptedConnection{ConnectionInfo: rc, conn: rc, folderKeys: em.folderKeys}
+	ec := newEncryptedConnection(rc, rc, em.folderKeys, keyGen)
 	wc := wireFormatConnection{ec}

 	return wc
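newEncryptedModel and newEncryptedConnection are new constructors that are not part of this excerpt. Judging only from the call sites above, plausible shapes would be roughly the following; the field names and parameter types are assumptions, not the actual implementation:

// Assumed constructor shapes, inferred from how they are called in
// NewConnection above; the real structs may carry different fields.
func newEncryptedModel(model Model, folderKeys *folderKeyRegistry, keyGen *KeyGenerator) *encryptedModel {
	return &encryptedModel{
		model:      model,
		folderKeys: folderKeys,
		keyGen:     keyGen,
	}
}

func newEncryptedConnection(ci ConnectionInfo, conn Connection, folderKeys *folderKeyRegistry, keyGen *KeyGenerator) encryptedConnection {
	return encryptedConnection{
		ConnectionInfo: ci,
		conn:           conn,
		folderKeys:     folderKeys,
		keyGen:         keyGen,
	}
}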
@ -32,10 +32,10 @@ func TestPing(t *testing.T) {
 	ar, aw := io.Pipe()
 	br, bw := io.Pipe()

-	c0 := getRawConnection(NewConnection(c0ID, ar, bw, testutils.NoopCloser{}, newTestModel(), new(mockedConnectionInfo), CompressionAlways, nil))
+	c0 := getRawConnection(NewConnection(c0ID, ar, bw, testutils.NoopCloser{}, newTestModel(), new(mockedConnectionInfo), CompressionAlways, nil, testKeyGen))
 	c0.Start()
 	defer closeAndWait(c0, ar, bw)
-	c1 := getRawConnection(NewConnection(c1ID, br, aw, testutils.NoopCloser{}, newTestModel(), new(mockedConnectionInfo), CompressionAlways, nil))
+	c1 := getRawConnection(NewConnection(c1ID, br, aw, testutils.NoopCloser{}, newTestModel(), new(mockedConnectionInfo), CompressionAlways, nil, testKeyGen))
 	c1.Start()
 	defer closeAndWait(c1, ar, bw)
 	c0.ClusterConfig(ClusterConfig{})
@ -58,10 +58,10 @@ func TestClose(t *testing.T) {
 	ar, aw := io.Pipe()
 	br, bw := io.Pipe()

-	c0 := getRawConnection(NewConnection(c0ID, ar, bw, testutils.NoopCloser{}, m0, new(mockedConnectionInfo), CompressionAlways, nil))
+	c0 := getRawConnection(NewConnection(c0ID, ar, bw, testutils.NoopCloser{}, m0, new(mockedConnectionInfo), CompressionAlways, nil, testKeyGen))
 	c0.Start()
 	defer closeAndWait(c0, ar, bw)
-	c1 := NewConnection(c1ID, br, aw, testutils.NoopCloser{}, m1, new(mockedConnectionInfo), CompressionAlways, nil)
+	c1 := NewConnection(c1ID, br, aw, testutils.NoopCloser{}, m1, new(mockedConnectionInfo), CompressionAlways, nil, testKeyGen)
 	c1.Start()
 	defer closeAndWait(c1, ar, bw)
 	c0.ClusterConfig(ClusterConfig{})
@ -103,7 +103,7 @@ func TestCloseOnBlockingSend(t *testing.T) {
 	m := newTestModel()

 	rw := testutils.NewBlockingRW()
-	c := getRawConnection(NewConnection(c0ID, rw, rw, testutils.NoopCloser{}, m, new(mockedConnectionInfo), CompressionAlways, nil))
+	c := getRawConnection(NewConnection(c0ID, rw, rw, testutils.NoopCloser{}, m, new(mockedConnectionInfo), CompressionAlways, nil, testKeyGen))
 	c.Start()
 	defer closeAndWait(c, rw)

@ -154,10 +154,10 @@ func TestCloseRace(t *testing.T) {
 	ar, aw := io.Pipe()
 	br, bw := io.Pipe()

-	c0 := getRawConnection(NewConnection(c0ID, ar, bw, testutils.NoopCloser{}, m0, new(mockedConnectionInfo), CompressionNever, nil))
+	c0 := getRawConnection(NewConnection(c0ID, ar, bw, testutils.NoopCloser{}, m0, new(mockedConnectionInfo), CompressionNever, nil, testKeyGen))
 	c0.Start()
 	defer closeAndWait(c0, ar, bw)
-	c1 := NewConnection(c1ID, br, aw, testutils.NoopCloser{}, m1, new(mockedConnectionInfo), CompressionNever, nil)
+	c1 := NewConnection(c1ID, br, aw, testutils.NoopCloser{}, m1, new(mockedConnectionInfo), CompressionNever, nil, testKeyGen)
 	c1.Start()
 	defer closeAndWait(c1, ar, bw)
 	c0.ClusterConfig(ClusterConfig{})
@ -194,7 +194,7 @@ func TestClusterConfigFirst(t *testing.T) {
 	m := newTestModel()

 	rw := testutils.NewBlockingRW()
-	c := getRawConnection(NewConnection(c0ID, rw, &testutils.NoopRW{}, testutils.NoopCloser{}, m, new(mockedConnectionInfo), CompressionAlways, nil))
+	c := getRawConnection(NewConnection(c0ID, rw, &testutils.NoopRW{}, testutils.NoopCloser{}, m, new(mockedConnectionInfo), CompressionAlways, nil, testKeyGen))
 	c.Start()
 	defer closeAndWait(c, rw)

@ -246,7 +246,7 @@ func TestCloseTimeout(t *testing.T) {
 	m := newTestModel()

 	rw := testutils.NewBlockingRW()
-	c := getRawConnection(NewConnection(c0ID, rw, rw, testutils.NoopCloser{}, m, new(mockedConnectionInfo), CompressionAlways, nil))
+	c := getRawConnection(NewConnection(c0ID, rw, rw, testutils.NoopCloser{}, m, new(mockedConnectionInfo), CompressionAlways, nil, testKeyGen))
 	c.Start()
 	defer closeAndWait(c, rw)

@ -432,8 +432,8 @@ func testMarshal(t *testing.T, prefix string, m1, m2 message) bool {
 	bs1, _ := json.MarshalIndent(m1, "", " ")
 	bs2, _ := json.MarshalIndent(m2, "", " ")
 	if !bytes.Equal(bs1, bs2) {
-		os.WriteFile(prefix+"-1.txt", bs1, 0644)
-		os.WriteFile(prefix+"-2.txt", bs2, 0644)
+		os.WriteFile(prefix+"-1.txt", bs1, 0o644)
+		os.WriteFile(prefix+"-2.txt", bs2, 0o644)
 		return false
 	}

@ -794,16 +794,16 @@ func TestIsEquivalent(t *testing.T) {

 		// Difference in permissions is not OK.
 		{
-			a:        FileInfo{Permissions: 0444},
-			b:        FileInfo{Permissions: 0666},
+			a:        FileInfo{Permissions: 0o444},
+			b:        FileInfo{Permissions: 0o666},
 			ignPerms: b(false),
 			eq:       false,
 		},

 		// ... unless we say it is
 		{
-			a:        FileInfo{Permissions: 0666},
-			b:        FileInfo{Permissions: 0444},
+			a:        FileInfo{Permissions: 0o666},
+			b:        FileInfo{Permissions: 0o444},
 			ignPerms: b(true),
 			eq:       true,
 		},
@ -852,8 +852,8 @@ func TestIsEquivalent(t *testing.T) {
 		// On windows we only check the user writable bit of the permission
 		// set, so these are equivalent.
 		cases = append(cases, testCase{
-			a:        FileInfo{Permissions: 0777},
-			b:        FileInfo{Permissions: 0600},
+			a:        FileInfo{Permissions: 0o777},
+			b:        FileInfo{Permissions: 0o600},
 			ignPerms: b(false),
 			eq:       true,
 		})
@ -899,7 +899,7 @@ func TestClusterConfigAfterClose(t *testing.T) {
 	m := newTestModel()

 	rw := testutils.NewBlockingRW()
-	c := getRawConnection(NewConnection(c0ID, rw, rw, testutils.NoopCloser{}, m, new(mockedConnectionInfo), CompressionAlways, nil))
+	c := getRawConnection(NewConnection(c0ID, rw, rw, testutils.NoopCloser{}, m, new(mockedConnectionInfo), CompressionAlways, nil, testKeyGen))
 	c.Start()
 	defer closeAndWait(c, rw)

@ -923,7 +923,7 @@ func TestDispatcherToCloseDeadlock(t *testing.T) {
 	// the model callbacks (ClusterConfig).
 	m := newTestModel()
 	rw := testutils.NewBlockingRW()
-	c := getRawConnection(NewConnection(c0ID, rw, &testutils.NoopRW{}, testutils.NoopCloser{}, m, new(mockedConnectionInfo), CompressionAlways, nil))
+	c := getRawConnection(NewConnection(c0ID, rw, &testutils.NoopRW{}, testutils.NoopCloser{}, m, new(mockedConnectionInfo), CompressionAlways, nil, testKeyGen))
 	m.ccFn = func(devID DeviceID, cc ClusterConfig) {
 		c.Close(errManual)
 	}
@ -248,7 +248,8 @@ func (a *App) startup() error {
 		return err
 	}

-	m := model.NewModel(a.cfg, a.myID, "syncthing", build.Version, a.ll, protectedFiles, a.evLogger)
+	keyGen := protocol.NewKeyGenerator()
+	m := model.NewModel(a.cfg, a.myID, "syncthing", build.Version, a.ll, protectedFiles, a.evLogger, keyGen)

 	if a.opts.DeadlockTimeoutS > 0 {
 		m.StartDeadlockDetector(time.Duration(a.opts.DeadlockTimeoutS) * time.Second)
@ -283,7 +284,7 @@ func (a *App) startup() error {

 	connRegistry := registry.New()
 	discoveryManager := discover.NewManager(a.myID, a.cfg, a.cert, a.evLogger, addrLister, connRegistry)
-	connectionsService := connections.NewService(a.cfg, a.myID, m, tlsCfg, discoveryManager, bepProtocolName, tlsDefaultCommonName, a.evLogger, connRegistry)
+	connectionsService := connections.NewService(a.cfg, a.myID, m, tlsCfg, discoveryManager, bepProtocolName, tlsDefaultCommonName, a.evLogger, connRegistry, keyGen)

 	addrLister.AddressLister = connectionsService