lib/protocol: Further interface refactor (#9396)

This is a symmetric change to #9375 -- where that PR changed the
protocol->model interface, this changes the model->protocol one.
Jakob Borg 2024-08-24 12:45:10 +02:00 committed by GitHub
parent 7df75e681d
commit 5342bec1b7
13 changed files with 170 additions and 221 deletions
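For readers of the diff below, the gist is that the outgoing-message methods on protocol.Connection now take the wire message structs themselves instead of a long positional parameter list. A minimal sketch of a call site in the new style (assuming the post-refactor lib/protocol package; the requestBlock helper is illustrative only, not part of this change):

package example

import (
	"context"

	"github.com/syncthing/syncthing/lib/protocol"
)

// requestBlock builds the protocol.Request message itself rather than passing
// folder, name, offset and size as separate arguments. The connection may
// alter the message while sending, so it is not reused after the call.
func requestBlock(ctx context.Context, conn protocol.Connection, folder, name string, offset int64, size int) ([]byte, error) {
	return conn.Request(ctx, &protocol.Request{
		Folder: folder,
		Name:   name,
		Offset: offset,
		Size:   size,
	})
}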

View File

@@ -30,8 +30,8 @@ func newFakeConnection(id protocol.DeviceID, model Model) *fakeConnection {
         model: model,
         closed: make(chan struct{}),
     }
-    f.RequestCalls(func(ctx context.Context, folder, name string, blockNo int, offset int64, size int, hash []byte, weakHash uint32, fromTemporary bool) ([]byte, error) {
-        return f.fileData[name], nil
+    f.RequestCalls(func(ctx context.Context, req *protocol.Request) ([]byte, error) {
+        return f.fileData[req.Name], nil
     })
     f.DeviceIDReturns(id)
     f.ConnectionIDReturns(rand.String(16))
@@ -60,14 +60,16 @@ type fakeConnection struct {
 }
 func (f *fakeConnection) setIndexFn(fn func(_ context.Context, folder string, fs []protocol.FileInfo) error) {
-    f.IndexCalls(fn)
-    f.IndexUpdateCalls(fn)
+    f.IndexCalls(func(ctx context.Context, idx *protocol.Index) error { return fn(ctx, idx.Folder, idx.Files) })
+    f.IndexUpdateCalls(func(ctx context.Context, idxUp *protocol.IndexUpdate) error {
+        return fn(ctx, idxUp.Folder, idxUp.Files)
+    })
 }
-func (f *fakeConnection) DownloadProgress(_ context.Context, folder string, updates []protocol.FileDownloadProgressUpdate) {
+func (f *fakeConnection) DownloadProgress(_ context.Context, dp *protocol.DownloadProgress) {
     f.downloadProgressMessages = append(f.downloadProgressMessages, downloadProgressMessage{
-        folder:  folder,
-        updates: updates,
+        folder:  dp.Folder,
+        updates: dp.Updates,
     })
 }

View File

@@ -228,9 +228,9 @@ func (s *indexHandler) sendIndexTo(ctx context.Context, fset *db.FileSet) error
     l.Debugf("%v: Sending %d files (<%d bytes)", s, len(fs), batch.Size())
     if initial {
         initial = false
-        return s.conn.Index(ctx, s.folder, fs)
+        return s.conn.Index(ctx, &protocol.Index{Folder: s.folder, Files: fs})
     }
-    return s.conn.IndexUpdate(ctx, s.folder, fs)
+    return s.conn.IndexUpdate(ctx, &protocol.IndexUpdate{Folder: s.folder, Files: fs})
 })
 var err error

View File

@@ -40,10 +40,10 @@ func TestIndexhandlerConcurrency(t *testing.T) {
     c2.Start()
     defer c2.Close(io.EOF)
-    c1.ClusterConfig(protocol.ClusterConfig{})
-    c2.ClusterConfig(protocol.ClusterConfig{})
-    c1.Index(ctx, "foo", nil)
-    c2.Index(ctx, "foo", nil)
+    c1.ClusterConfig(&protocol.ClusterConfig{})
+    c2.ClusterConfig(&protocol.ClusterConfig{})
+    c1.Index(ctx, &protocol.Index{Folder: "foo"})
+    c2.Index(ctx, &protocol.Index{Folder: "foo"})
     const msgs = 5e2
     const files = 1e3
@@ -64,7 +64,7 @@ func TestIndexhandlerConcurrency(t *testing.T) {
     })
     b1 := db.NewFileInfoBatch(func(fs []protocol.FileInfo) error {
-        return c1.IndexUpdate(ctx, "foo", fs)
+        return c1.IndexUpdate(ctx, &protocol.IndexUpdate{Folder: "foo", Files: fs})
     })
     sentEntries := 0
     for i := 0; i < msgs; i++ {

View File

@@ -2414,7 +2414,7 @@ func (m *model) promoteConnections() {
         if conn.Statistics().StartedAt.IsZero() {
             conn.SetFolderPasswords(passwords)
             conn.Start()
-            conn.ClusterConfig(protocol.ClusterConfig{Secondary: true})
+            conn.ClusterConfig(&protocol.ClusterConfig{Secondary: true})
         }
     }
 }
@@ -2469,7 +2469,7 @@ func (m *model) RequestGlobal(ctx context.Context, deviceID protocol.DeviceID, f
     }
     l.Debugf("%v REQ(out): %s (%s): %q / %q b=%d o=%d s=%d h=%x wh=%x ft=%t", m, deviceID.Short(), conn, folder, name, blockNo, offset, size, hash, weakHash, fromTemporary)
-    return conn.Request(ctx, folder, name, blockNo, offset, size, hash, weakHash, fromTemporary)
+    return conn.Request(ctx, &protocol.Request{Folder: folder, Name: name, BlockNo: blockNo, Offset: offset, Size: size, Hash: hash, WeakHash: weakHash, FromTemporary: fromTemporary})
 }
 // requestConnectionForDevice returns a connection to the given device, to
@@ -2586,14 +2586,14 @@ func (m *model) numHashers(folder string) int {
 // generateClusterConfig returns a ClusterConfigMessage that is correct and the
 // set of folder passwords for the given peer device
-func (m *model) generateClusterConfig(device protocol.DeviceID) (protocol.ClusterConfig, map[string]string) {
+func (m *model) generateClusterConfig(device protocol.DeviceID) (*protocol.ClusterConfig, map[string]string) {
     m.mut.RLock()
     defer m.mut.RUnlock()
     return m.generateClusterConfigRLocked(device)
 }
-func (m *model) generateClusterConfigRLocked(device protocol.DeviceID) (protocol.ClusterConfig, map[string]string) {
-    var message protocol.ClusterConfig
+func (m *model) generateClusterConfigRLocked(device protocol.DeviceID) (*protocol.ClusterConfig, map[string]string) {
+    message := &protocol.ClusterConfig{}
     folders := m.cfg.FolderList()
     passwords := make(map[string]string, len(folders))
     for _, folderCfg := range folders {

View File

@@ -3631,11 +3631,11 @@ func testConfigChangeTriggersClusterConfigs(t *testing.T, expectFirst, expectSec
     cc1 := make(chan struct{}, 1)
     cc2 := make(chan struct{}, 1)
     fc1 := newFakeConnection(device1, m)
-    fc1.ClusterConfigCalls(func(_ protocol.ClusterConfig) {
+    fc1.ClusterConfigCalls(func(_ *protocol.ClusterConfig) {
         cc1 <- struct{}{}
     })
     fc2 := newFakeConnection(device2, m)
-    fc2.ClusterConfigCalls(func(_ protocol.ClusterConfig) {
+    fc2.ClusterConfigCalls(func(_ *protocol.ClusterConfig) {
         cc2 <- struct{}{}
     })
     m.AddConnection(fc1, protocol.Hello{})

View File

@@ -39,7 +39,7 @@ type progressUpdate struct {
 }
 func (p progressUpdate) send(ctx context.Context) {
-    p.conn.DownloadProgress(ctx, p.folder, p.updates)
+    p.conn.DownloadProgress(ctx, &protocol.DownloadProgress{Folder: p.folder, Updates: p.updates})
 }
 // NewProgressEmitter creates a new progress emitter which emits
@@ -334,7 +334,7 @@ func (t *ProgressEmitter) clearLocked() {
     }
     for _, folder := range state.folders() {
         if updates := state.cleanup(folder); len(updates) > 0 {
-            conn.DownloadProgress(context.Background(), folder, updates)
+            conn.DownloadProgress(context.Background(), &protocol.DownloadProgress{Folder: folder, Updates: updates})
         }
     }
 }

View File

@@ -143,11 +143,11 @@ func TestSymlinkTraversalWrite(t *testing.T) {
         }
         return nil
     })
-    fc.RequestCalls(func(ctx context.Context, folder, name string, blockNo int, offset int64, size int, hash []byte, weakHash uint32, fromTemporary bool) ([]byte, error) {
-        if name != "symlink" && strings.HasPrefix(name, "symlink") {
-            badReq <- name
+    fc.RequestCalls(func(ctx context.Context, req *protocol.Request) ([]byte, error) {
+        if req.Name != "symlink" && strings.HasPrefix(req.Name, "symlink") {
+            badReq <- req.Name
         }
-        return fc.fileData[name], nil
+        return fc.fileData[req.Name], nil
     })
     // Send an update for the symlink, wait for it to sync and be reported back.
@@ -338,7 +338,7 @@ func pullInvalidIgnored(t *testing.T, ft config.FolderType) {
     })
     // Make sure pulling doesn't interfere, as index updates are racy and
     // thus we cannot distinguish between scan and pull results.
-    fc.RequestCalls(func(ctx context.Context, folder, name string, blockNo int, offset int64, size int, hash []byte, weakHash uint32, fromTemporary bool) ([]byte, error) {
+    fc.RequestCalls(func(_ context.Context, _ *protocol.Request) ([]byte, error) {
         return nil, nil
     })
@@ -926,7 +926,7 @@ func TestNeedFolderFiles(t *testing.T) {
     defer sub.Unsubscribe()
     errPreventSync := errors.New("you aren't getting any of this")
-    fc.RequestCalls(func(ctx context.Context, folder, name string, blockNo int, offset int64, size int, hash []byte, weakHash uint32, fromTemporary bool) ([]byte, error) {
+    fc.RequestCalls(func(_ context.Context, _ *protocol.Request) ([]byte, error) {
         return nil, errPreventSync
     })
@@ -1065,9 +1065,9 @@ func TestRequestLastFileProgress(t *testing.T) {
     done := make(chan struct{})
-    fc.RequestCalls(func(ctx context.Context, folder, name string, blockNo int, offset int64, size int, hash []byte, weakHash uint32, fromTemporary bool) ([]byte, error) {
+    fc.RequestCalls(func(_ context.Context, req *protocol.Request) ([]byte, error) {
         defer close(done)
-        progress, queued, rest, err := m.NeedFolderFiles(folder, 1, 10)
+        progress, queued, rest, err := m.NeedFolderFiles(req.Folder, 1, 10)
         must(t, err)
         if len(queued)+len(rest) != 0 {
             t.Error(`There should not be any queued or "rest" items`)
@@ -1075,7 +1075,7 @@ func TestRequestLastFileProgress(t *testing.T) {
         if len(progress) != 1 {
             t.Error("Expected exactly one item in progress.")
         }
-        return fc.fileData[name], nil
+        return fc.fileData[req.Name], nil
     })
     contents := []byte("test file contents\n")
@@ -1232,7 +1232,7 @@ func TestRequestIndexSenderClusterConfigBeforeStart(t *testing.T) {
     done := make(chan struct{})
     defer close(done) // Must be the last thing to be deferred, thus first to run.
     indexChan := make(chan []protocol.FileInfo, 1)
-    ccChan := make(chan protocol.ClusterConfig, 1)
+    ccChan := make(chan *protocol.ClusterConfig, 1)
     fc.setIndexFn(func(_ context.Context, folder string, fs []protocol.FileInfo) error {
         select {
         case indexChan <- fs:
@@ -1240,7 +1240,7 @@ func TestRequestIndexSenderClusterConfigBeforeStart(t *testing.T) {
         }
         return nil
     })
-    fc.ClusterConfigCalls(func(cc protocol.ClusterConfig) {
+    fc.ClusterConfigCalls(func(cc *protocol.ClusterConfig) {
         select {
         case ccChan <- cc:
         case <-done:

View File

@@ -66,8 +66,8 @@ func benchmarkRequestsConnPair(b *testing.B, conn0, conn1 net.Conn) {
     c1.Start()
     // Satisfy the assertions in the protocol by sending an initial cluster config
-    c0.ClusterConfig(ClusterConfig{})
-    c1.ClusterConfig(ClusterConfig{})
+    c0.ClusterConfig(&ClusterConfig{})
+    c1.ClusterConfig(&ClusterConfig{})
     // Report some useful stats and reset the timer for the actual test
     b.ReportAllocs()
@@ -82,9 +82,9 @@ func benchmarkRequestsConnPair(b *testing.B, conn0, conn1 net.Conn) {
         // Use c0 and c1 for each alternating request, so we get as much
         // data flowing in both directions.
         if i%2 == 0 {
-            buf, err = c0.Request(context.Background(), "folder", "file", i, int64(i), 128<<10, nil, 0, false)
+            buf, err = c0.Request(context.Background(), &Request{Folder: "folder", Name: "file", BlockNo: i, Offset: int64(i), Size: 128 << 10})
         } else {
-            buf, err = c1.Request(context.Background(), "folder", "file", i, int64(i), 128<<10, nil, 0, false)
+            buf, err = c1.Request(context.Background(), &Request{Folder: "folder", Name: "file", BlockNo: i, Offset: int64(i), Size: 128 << 10})
         }
         if err != nil {

View File

@@ -193,48 +193,52 @@ func (e encryptedConnection) DeviceID() DeviceID {
     return e.conn.DeviceID()
 }
-func (e encryptedConnection) Index(ctx context.Context, folder string, files []FileInfo) error {
-    if folderKey, ok := e.folderKeys.get(folder); ok {
-        encryptFileInfos(e.keyGen, files, folderKey)
+func (e encryptedConnection) Index(ctx context.Context, idx *Index) error {
+    if folderKey, ok := e.folderKeys.get(idx.Folder); ok {
+        encryptFileInfos(e.keyGen, idx.Files, folderKey)
     }
-    return e.conn.Index(ctx, folder, files)
+    return e.conn.Index(ctx, idx)
 }
-func (e encryptedConnection) IndexUpdate(ctx context.Context, folder string, files []FileInfo) error {
-    if folderKey, ok := e.folderKeys.get(folder); ok {
-        encryptFileInfos(e.keyGen, files, folderKey)
+func (e encryptedConnection) IndexUpdate(ctx context.Context, idxUp *IndexUpdate) error {
+    if folderKey, ok := e.folderKeys.get(idxUp.Folder); ok {
+        encryptFileInfos(e.keyGen, idxUp.Files, folderKey)
     }
-    return e.conn.IndexUpdate(ctx, folder, files)
+    return e.conn.IndexUpdate(ctx, idxUp)
 }
-func (e encryptedConnection) Request(ctx context.Context, folder string, name string, blockNo int, offset int64, size int, hash []byte, weakHash uint32, fromTemporary bool) ([]byte, error) {
-    folderKey, ok := e.folderKeys.get(folder)
+func (e encryptedConnection) Request(ctx context.Context, req *Request) ([]byte, error) {
+    folderKey, ok := e.folderKeys.get(req.Folder)
     if !ok {
-        return e.conn.Request(ctx, folder, name, blockNo, offset, size, hash, weakHash, fromTemporary)
+        return e.conn.Request(ctx, req)
     }
     // Encrypt / adjust the request parameters.
-    origSize := size
-    if size < minPaddedSize {
+    origSize := req.Size
+    origName := req.Name
+    if req.Size < minPaddedSize {
         // Make a request for minPaddedSize data instead of the smaller
         // block. We'll chop of the extra data later.
-        size = minPaddedSize
+        req.Size = minPaddedSize
     }
-    encName := encryptName(name, folderKey)
-    encOffset := offset + int64(blockNo*blockOverhead)
-    encSize := size + blockOverhead
+    encName := encryptName(req.Name, folderKey)
+    encOffset := req.Offset + int64(req.BlockNo*blockOverhead)
+    encSize := req.Size + blockOverhead
-    // Perform that request, getting back and encrypted block.
-    bs, err := e.conn.Request(ctx, folder, encName, blockNo, encOffset, encSize, nil, 0, false)
+    // Perform that request, getting back an encrypted block.
+    req.Name = encName
+    req.Offset = encOffset
+    req.Size = encSize
+    bs, err := e.conn.Request(ctx, req)
     if err != nil {
         return nil, err
     }
     // Return the decrypted block (or an error if it fails decryption)
-    fileKey := e.keyGen.FileKey(name, folderKey)
+    fileKey := e.keyGen.FileKey(origName, folderKey)
     bs, err = DecryptBytes(bs, fileKey)
     if err != nil {
         return nil, err
@@ -242,15 +246,15 @@ func (e encryptedConnection) Request(ctx context.Context, folder string, name st
     return bs[:origSize], nil
 }
-func (e encryptedConnection) DownloadProgress(ctx context.Context, folder string, updates []FileDownloadProgressUpdate) {
-    if _, ok := e.folderKeys.get(folder); !ok {
-        e.conn.DownloadProgress(ctx, folder, updates)
+func (e encryptedConnection) DownloadProgress(ctx context.Context, dp *DownloadProgress) {
+    if _, ok := e.folderKeys.get(dp.Folder); !ok {
+        e.conn.DownloadProgress(ctx, dp)
     }
     // No need to send these
 }
-func (e encryptedConnection) ClusterConfig(config ClusterConfig) {
+func (e encryptedConnection) ClusterConfig(config *ClusterConfig) {
     e.conn.ClusterConfig(config)
 }

View File

@@ -26,10 +26,10 @@ type Connection struct {
     closedReturnsOnCall map[int]struct {
         result1 <-chan struct{}
     }
-    ClusterConfigStub func(protocol.ClusterConfig)
+    ClusterConfigStub func(*protocol.ClusterConfig)
     clusterConfigMutex sync.RWMutex
     clusterConfigArgsForCall []struct {
-        arg1 protocol.ClusterConfig
+        arg1 *protocol.ClusterConfig
     }
     ConnectionIDStub func() string
     connectionIDMutex sync.RWMutex
@@ -61,12 +61,11 @@ type Connection struct {
     deviceIDReturnsOnCall map[int]struct {
         result1 protocol.DeviceID
     }
-    DownloadProgressStub func(context.Context, string, []protocol.FileDownloadProgressUpdate)
+    DownloadProgressStub func(context.Context, *protocol.DownloadProgress)
     downloadProgressMutex sync.RWMutex
     downloadProgressArgsForCall []struct {
         arg1 context.Context
-        arg2 string
-        arg3 []protocol.FileDownloadProgressUpdate
+        arg2 *protocol.DownloadProgress
     }
     EstablishedAtStub func() time.Time
     establishedAtMutex sync.RWMutex
@@ -78,12 +77,11 @@ type Connection struct {
     establishedAtReturnsOnCall map[int]struct {
         result1 time.Time
     }
-    IndexStub func(context.Context, string, []protocol.FileInfo) error
+    IndexStub func(context.Context, *protocol.Index) error
     indexMutex sync.RWMutex
     indexArgsForCall []struct {
         arg1 context.Context
-        arg2 string
-        arg3 []protocol.FileInfo
+        arg2 *protocol.Index
     }
     indexReturns struct {
         result1 error
@@ -91,12 +89,11 @@ type Connection struct {
    indexReturnsOnCall map[int]struct {
        result1 error
    }
-    IndexUpdateStub func(context.Context, string, []protocol.FileInfo) error
+    IndexUpdateStub func(context.Context, *protocol.IndexUpdate) error
     indexUpdateMutex sync.RWMutex
     indexUpdateArgsForCall []struct {
         arg1 context.Context
-        arg2 string
-        arg3 []protocol.FileInfo
+        arg2 *protocol.IndexUpdate
     }
     indexUpdateReturns struct {
         result1 error
@@ -134,18 +131,11 @@ type Connection struct {
     remoteAddrReturnsOnCall map[int]struct {
         result1 net.Addr
     }
-    RequestStub func(context.Context, string, string, int, int64, int, []byte, uint32, bool) ([]byte, error)
+    RequestStub func(context.Context, *protocol.Request) ([]byte, error)
     requestMutex sync.RWMutex
     requestArgsForCall []struct {
         arg1 context.Context
-        arg2 string
-        arg3 string
-        arg4 int
-        arg5 int64
-        arg6 int
-        arg7 []byte
-        arg8 uint32
-        arg9 bool
+        arg2 *protocol.Request
     }
     requestReturns struct {
         result1 []byte
@@ -293,10 +283,10 @@ func (fake *Connection) ClosedReturnsOnCall(i int, result1 <-chan struct{}) {
     }{result1}
 }
-func (fake *Connection) ClusterConfig(arg1 protocol.ClusterConfig) {
+func (fake *Connection) ClusterConfig(arg1 *protocol.ClusterConfig) {
     fake.clusterConfigMutex.Lock()
     fake.clusterConfigArgsForCall = append(fake.clusterConfigArgsForCall, struct {
-        arg1 protocol.ClusterConfig
+        arg1 *protocol.ClusterConfig
     }{arg1})
     stub := fake.ClusterConfigStub
     fake.recordInvocation("ClusterConfig", []interface{}{arg1})
@@ -312,13 +302,13 @@ func (fake *Connection) ClusterConfigCallCount() int {
     return len(fake.clusterConfigArgsForCall)
 }
-func (fake *Connection) ClusterConfigCalls(stub func(protocol.ClusterConfig)) {
+func (fake *Connection) ClusterConfigCalls(stub func(*protocol.ClusterConfig)) {
     fake.clusterConfigMutex.Lock()
     defer fake.clusterConfigMutex.Unlock()
     fake.ClusterConfigStub = stub
 }
-func (fake *Connection) ClusterConfigArgsForCall(i int) protocol.ClusterConfig {
+func (fake *Connection) ClusterConfigArgsForCall(i int) *protocol.ClusterConfig {
     fake.clusterConfigMutex.RLock()
     defer fake.clusterConfigMutex.RUnlock()
     argsForCall := fake.clusterConfigArgsForCall[i]
@@ -484,23 +474,17 @@ func (fake *Connection) DeviceIDReturnsOnCall(i int, result1 protocol.DeviceID)
     }{result1}
 }
-func (fake *Connection) DownloadProgress(arg1 context.Context, arg2 string, arg3 []protocol.FileDownloadProgressUpdate) {
-    var arg3Copy []protocol.FileDownloadProgressUpdate
-    if arg3 != nil {
-        arg3Copy = make([]protocol.FileDownloadProgressUpdate, len(arg3))
-        copy(arg3Copy, arg3)
-    }
+func (fake *Connection) DownloadProgress(arg1 context.Context, arg2 *protocol.DownloadProgress) {
     fake.downloadProgressMutex.Lock()
     fake.downloadProgressArgsForCall = append(fake.downloadProgressArgsForCall, struct {
         arg1 context.Context
-        arg2 string
-        arg3 []protocol.FileDownloadProgressUpdate
-    }{arg1, arg2, arg3Copy})
+        arg2 *protocol.DownloadProgress
+    }{arg1, arg2})
     stub := fake.DownloadProgressStub
-    fake.recordInvocation("DownloadProgress", []interface{}{arg1, arg2, arg3Copy})
+    fake.recordInvocation("DownloadProgress", []interface{}{arg1, arg2})
     fake.downloadProgressMutex.Unlock()
     if stub != nil {
-        fake.DownloadProgressStub(arg1, arg2, arg3)
+        fake.DownloadProgressStub(arg1, arg2)
     }
 }
@@ -510,17 +494,17 @@ func (fake *Connection) DownloadProgressCallCount() int {
     return len(fake.downloadProgressArgsForCall)
 }
-func (fake *Connection) DownloadProgressCalls(stub func(context.Context, string, []protocol.FileDownloadProgressUpdate)) {
+func (fake *Connection) DownloadProgressCalls(stub func(context.Context, *protocol.DownloadProgress)) {
     fake.downloadProgressMutex.Lock()
     defer fake.downloadProgressMutex.Unlock()
     fake.DownloadProgressStub = stub
 }
-func (fake *Connection) DownloadProgressArgsForCall(i int) (context.Context, string, []protocol.FileDownloadProgressUpdate) {
+func (fake *Connection) DownloadProgressArgsForCall(i int) (context.Context, *protocol.DownloadProgress) {
     fake.downloadProgressMutex.RLock()
     defer fake.downloadProgressMutex.RUnlock()
     argsForCall := fake.downloadProgressArgsForCall[i]
-    return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3
+    return argsForCall.arg1, argsForCall.arg2
 }
 func (fake *Connection) EstablishedAt() time.Time {
@@ -576,25 +560,19 @@ func (fake *Connection) EstablishedAtReturnsOnCall(i int, result1 time.Time) {
     }{result1}
 }
-func (fake *Connection) Index(arg1 context.Context, arg2 string, arg3 []protocol.FileInfo) error {
-    var arg3Copy []protocol.FileInfo
-    if arg3 != nil {
-        arg3Copy = make([]protocol.FileInfo, len(arg3))
-        copy(arg3Copy, arg3)
-    }
+func (fake *Connection) Index(arg1 context.Context, arg2 *protocol.Index) error {
     fake.indexMutex.Lock()
     ret, specificReturn := fake.indexReturnsOnCall[len(fake.indexArgsForCall)]
     fake.indexArgsForCall = append(fake.indexArgsForCall, struct {
         arg1 context.Context
-        arg2 string
-        arg3 []protocol.FileInfo
-    }{arg1, arg2, arg3Copy})
+        arg2 *protocol.Index
+    }{arg1, arg2})
     stub := fake.IndexStub
     fakeReturns := fake.indexReturns
-    fake.recordInvocation("Index", []interface{}{arg1, arg2, arg3Copy})
+    fake.recordInvocation("Index", []interface{}{arg1, arg2})
     fake.indexMutex.Unlock()
     if stub != nil {
-        return stub(arg1, arg2, arg3)
+        return stub(arg1, arg2)
     }
     if specificReturn {
         return ret.result1
@@ -608,17 +586,17 @@ func (fake *Connection) IndexCallCount() int {
     return len(fake.indexArgsForCall)
 }
-func (fake *Connection) IndexCalls(stub func(context.Context, string, []protocol.FileInfo) error) {
+func (fake *Connection) IndexCalls(stub func(context.Context, *protocol.Index) error) {
     fake.indexMutex.Lock()
     defer fake.indexMutex.Unlock()
     fake.IndexStub = stub
 }
-func (fake *Connection) IndexArgsForCall(i int) (context.Context, string, []protocol.FileInfo) {
+func (fake *Connection) IndexArgsForCall(i int) (context.Context, *protocol.Index) {
     fake.indexMutex.RLock()
     defer fake.indexMutex.RUnlock()
     argsForCall := fake.indexArgsForCall[i]
-    return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3
+    return argsForCall.arg1, argsForCall.arg2
 }
 func (fake *Connection) IndexReturns(result1 error) {
@@ -644,25 +622,19 @@ func (fake *Connection) IndexReturnsOnCall(i int, result1 error) {
     }{result1}
 }
-func (fake *Connection) IndexUpdate(arg1 context.Context, arg2 string, arg3 []protocol.FileInfo) error {
-    var arg3Copy []protocol.FileInfo
-    if arg3 != nil {
-        arg3Copy = make([]protocol.FileInfo, len(arg3))
-        copy(arg3Copy, arg3)
-    }
+func (fake *Connection) IndexUpdate(arg1 context.Context, arg2 *protocol.IndexUpdate) error {
     fake.indexUpdateMutex.Lock()
     ret, specificReturn := fake.indexUpdateReturnsOnCall[len(fake.indexUpdateArgsForCall)]
     fake.indexUpdateArgsForCall = append(fake.indexUpdateArgsForCall, struct {
         arg1 context.Context
-        arg2 string
-        arg3 []protocol.FileInfo
-    }{arg1, arg2, arg3Copy})
+        arg2 *protocol.IndexUpdate
+    }{arg1, arg2})
     stub := fake.IndexUpdateStub
     fakeReturns := fake.indexUpdateReturns
-    fake.recordInvocation("IndexUpdate", []interface{}{arg1, arg2, arg3Copy})
+    fake.recordInvocation("IndexUpdate", []interface{}{arg1, arg2})
     fake.indexUpdateMutex.Unlock()
     if stub != nil {
-        return stub(arg1, arg2, arg3)
+        return stub(arg1, arg2)
     }
     if specificReturn {
         return ret.result1
@@ -676,17 +648,17 @@ func (fake *Connection) IndexUpdateCallCount() int {
     return len(fake.indexUpdateArgsForCall)
 }
-func (fake *Connection) IndexUpdateCalls(stub func(context.Context, string, []protocol.FileInfo) error) {
+func (fake *Connection) IndexUpdateCalls(stub func(context.Context, *protocol.IndexUpdate) error) {
     fake.indexUpdateMutex.Lock()
     defer fake.indexUpdateMutex.Unlock()
     fake.IndexUpdateStub = stub
 }
-func (fake *Connection) IndexUpdateArgsForCall(i int) (context.Context, string, []protocol.FileInfo) {
+func (fake *Connection) IndexUpdateArgsForCall(i int) (context.Context, *protocol.IndexUpdate) {
     fake.indexUpdateMutex.RLock()
     defer fake.indexUpdateMutex.RUnlock()
     argsForCall := fake.indexUpdateArgsForCall[i]
-    return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3
+    return argsForCall.arg1, argsForCall.arg2
 }
 func (fake *Connection) IndexUpdateReturns(result1 error) {
@@ -871,31 +843,19 @@ func (fake *Connection) RemoteAddrReturnsOnCall(i int, result1 net.Addr) {
     }{result1}
 }
-func (fake *Connection) Request(arg1 context.Context, arg2 string, arg3 string, arg4 int, arg5 int64, arg6 int, arg7 []byte, arg8 uint32, arg9 bool) ([]byte, error) {
-    var arg7Copy []byte
-    if arg7 != nil {
-        arg7Copy = make([]byte, len(arg7))
-        copy(arg7Copy, arg7)
-    }
+func (fake *Connection) Request(arg1 context.Context, arg2 *protocol.Request) ([]byte, error) {
     fake.requestMutex.Lock()
     ret, specificReturn := fake.requestReturnsOnCall[len(fake.requestArgsForCall)]
     fake.requestArgsForCall = append(fake.requestArgsForCall, struct {
         arg1 context.Context
-        arg2 string
-        arg3 string
-        arg4 int
-        arg5 int64
-        arg6 int
-        arg7 []byte
-        arg8 uint32
-        arg9 bool
-    }{arg1, arg2, arg3, arg4, arg5, arg6, arg7Copy, arg8, arg9})
+        arg2 *protocol.Request
+    }{arg1, arg2})
     stub := fake.RequestStub
     fakeReturns := fake.requestReturns
-    fake.recordInvocation("Request", []interface{}{arg1, arg2, arg3, arg4, arg5, arg6, arg7Copy, arg8, arg9})
+    fake.recordInvocation("Request", []interface{}{arg1, arg2})
     fake.requestMutex.Unlock()
     if stub != nil {
-        return stub(arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9)
+        return stub(arg1, arg2)
     }
     if specificReturn {
         return ret.result1, ret.result2
@@ -909,17 +869,17 @@ func (fake *Connection) RequestCallCount() int {
     return len(fake.requestArgsForCall)
 }
-func (fake *Connection) RequestCalls(stub func(context.Context, string, string, int, int64, int, []byte, uint32, bool) ([]byte, error)) {
+func (fake *Connection) RequestCalls(stub func(context.Context, *protocol.Request) ([]byte, error)) {
     fake.requestMutex.Lock()
     defer fake.requestMutex.Unlock()
     fake.RequestStub = stub
 }
-func (fake *Connection) RequestArgsForCall(i int) (context.Context, string, string, int, int64, int, []byte, uint32, bool) {
+func (fake *Connection) RequestArgsForCall(i int) (context.Context, *protocol.Request) {
     fake.requestMutex.RLock()
     defer fake.requestMutex.RUnlock()
     argsForCall := fake.requestArgsForCall[i]
-    return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3, argsForCall.arg4, argsForCall.arg5, argsForCall.arg6, argsForCall.arg7, argsForCall.arg8, argsForCall.arg9
+    return argsForCall.arg1, argsForCall.arg2
 }
 func (fake *Connection) RequestReturns(result1 []byte, result2 error) {

View File

@@ -154,30 +154,30 @@ type RequestResponse interface {
 }
 type Connection interface {
-    // Send an index message. The connection will read and marshal the
-    // parameters asynchronously, so they should not be modified after
-    // calling Index().
-    Index(ctx context.Context, folder string, files []FileInfo) error
+    // Send an Index message to the peer device. The message in the
+    // parameter may be altered by the connection and should not be used
+    // further by the caller.
+    Index(ctx context.Context, idx *Index) error
-    // Send an index update message. The connection will read and marshal
-    // the parameters asynchronously, so they should not be modified after
-    // calling IndexUpdate().
-    IndexUpdate(ctx context.Context, folder string, files []FileInfo) error
+    // Send an Index Update message to the peer device. The message in the
+    // parameter may be altered by the connection and should not be used
+    // further by the caller.
+    IndexUpdate(ctx context.Context, idxUp *IndexUpdate) error
-    // Send a request message. The connection will read and marshal the
-    // parameters asynchronously, so they should not be modified after
-    // calling Request().
-    Request(ctx context.Context, folder string, name string, blockNo int, offset int64, size int, hash []byte, weakHash uint32, fromTemporary bool) ([]byte, error)
+    // Send a Request message to the peer device. The message in the
+    // parameter may be altered by the connection and should not be used
+    // further by the caller.
+    Request(ctx context.Context, req *Request) ([]byte, error)
-    // Send a cluster configuration message. The connection will read and
-    // marshal the message asynchronously, so it should not be modified
-    // after calling ClusterConfig().
-    ClusterConfig(config ClusterConfig)
+    // Send a Cluster Configuration message to the peer device. The message
+    // in the parameter may be altered by the connection and should not be
+    // used further by the caller.
+    ClusterConfig(config *ClusterConfig)
-    // Send a download progress message. The connection will read and
-    // marshal the parameters asynchronously, so they should not be modified
-    // after calling DownloadProgress().
-    DownloadProgress(ctx context.Context, folder string, updates []FileDownloadProgressUpdate)
+    // Send a Download Progress message to the peer device. The message in
+    // the parameter may be altered by the connection and should not be used
+    // further by the caller.
+    DownloadProgress(ctx context.Context, dp *DownloadProgress)
     Start()
     SetFolderPasswords(passwords map[string]string)
@@ -185,6 +185,7 @@ type Connection interface {
     DeviceID() DeviceID
     Statistics() Statistics
     Closed() <-chan struct{}
     ConnectionInfo
 }
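As the new doc comments above spell out, ownership of the message passes to the connection: wrappers such as wireFormatConnection and encryptedConnection (see the other hunks in this commit) rewrite fields like Name, Offset and Size in place before forwarding. A hedged caller-side sketch of that contract, reusing the imports from the earlier example (the sendIndex helper is hypothetical, not part of this change):

// sendIndex hands a freshly built Index message to the connection and does not
// touch it afterwards, since the connection (or a wrapper around it) may have
// mutated the message and its Files slice while sending.
func sendIndex(ctx context.Context, conn protocol.Connection, folder string, files []protocol.FileInfo) error {
	idx := &protocol.Index{Folder: folder, Files: files}
	// After this call, idx and idx.Files must be treated as consumed.
	return conn.Index(ctx, idx)
}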
@@ -349,39 +350,33 @@ func (c *rawConnection) DeviceID() DeviceID {
 }
 // Index writes the list of file information to the connected peer device
-func (c *rawConnection) Index(ctx context.Context, folder string, idx []FileInfo) error {
+func (c *rawConnection) Index(ctx context.Context, idx *Index) error {
     select {
     case <-c.closed:
         return ErrClosed
     default:
     }
     c.idxMut.Lock()
-    c.send(ctx, &Index{
-        Folder: folder,
-        Files:  idx,
-    }, nil)
+    c.send(ctx, idx, nil)
     c.idxMut.Unlock()
     return nil
 }
 // IndexUpdate writes the list of file information to the connected peer device as an update
-func (c *rawConnection) IndexUpdate(ctx context.Context, folder string, idx []FileInfo) error {
+func (c *rawConnection) IndexUpdate(ctx context.Context, idxUp *IndexUpdate) error {
     select {
     case <-c.closed:
         return ErrClosed
     default:
     }
     c.idxMut.Lock()
-    c.send(ctx, &IndexUpdate{
-        Folder: folder,
-        Files:  idx,
-    }, nil)
+    c.send(ctx, idxUp, nil)
     c.idxMut.Unlock()
     return nil
 }
 // Request returns the bytes for the specified block after fetching them from the connected peer.
-func (c *rawConnection) Request(ctx context.Context, folder string, name string, blockNo int, offset int64, size int, hash []byte, weakHash uint32, fromTemporary bool) ([]byte, error) {
+func (c *rawConnection) Request(ctx context.Context, req *Request) ([]byte, error) {
     rc := make(chan asyncResult, 1)
     c.awaitingMut.Lock()
@@ -394,17 +389,8 @@ func (c *rawConnection) Request(ctx context.Context, folder string, name string,
     c.awaiting[id] = rc
     c.awaitingMut.Unlock()
-    ok := c.send(ctx, &Request{
-        ID:            id,
-        Folder:        folder,
-        Name:          name,
-        Offset:        offset,
-        Size:          size,
-        BlockNo:       blockNo,
-        Hash:          hash,
-        WeakHash:      weakHash,
-        FromTemporary: fromTemporary,
-    }, nil)
+    req.ID = id
+    ok := c.send(ctx, req, nil)
     if !ok {
         return nil, ErrClosed
     }
@@ -421,9 +407,9 @@ func (c *rawConnection) Request(ctx context.Context, folder string, name string,
 }
 // ClusterConfig sends the cluster configuration message to the peer.
-func (c *rawConnection) ClusterConfig(config ClusterConfig) {
+func (c *rawConnection) ClusterConfig(config *ClusterConfig) {
     select {
-    case c.clusterConfigBox <- &config:
+    case c.clusterConfigBox <- config:
     case <-c.closed:
     }
 }
@@ -433,11 +419,8 @@ func (c *rawConnection) Closed() <-chan struct{} {
 }
 // DownloadProgress sends the progress updates for the files that are currently being downloaded.
-func (c *rawConnection) DownloadProgress(ctx context.Context, folder string, updates []FileDownloadProgressUpdate) {
-    c.send(ctx, &DownloadProgress{
-        Folder:  folder,
-        Updates: updates,
-    }, nil)
+func (c *rawConnection) DownloadProgress(ctx context.Context, dp *DownloadProgress) {
+    c.send(ctx, dp, nil)
 }
 func (c *rawConnection) ping() bool {

View File

@@ -38,8 +38,8 @@ func TestPing(t *testing.T) {
     c1 := getRawConnection(NewConnection(c1ID, br, aw, testutil.NoopCloser{}, newTestModel(), new(mockedConnectionInfo), CompressionAlways, nil, testKeyGen))
     c1.Start()
     defer closeAndWait(c1, ar, bw)
-    c0.ClusterConfig(ClusterConfig{})
-    c1.ClusterConfig(ClusterConfig{})
+    c0.ClusterConfig(&ClusterConfig{})
+    c1.ClusterConfig(&ClusterConfig{})
     if ok := c0.ping(); !ok {
         t.Error("c0 ping failed")
@@ -64,8 +64,8 @@ func TestClose(t *testing.T) {
     c1 := NewConnection(c1ID, br, aw, testutil.NoopCloser{}, m1, new(mockedConnectionInfo), CompressionAlways, nil, testKeyGen)
     c1.Start()
     defer closeAndWait(c1, ar, bw)
-    c0.ClusterConfig(ClusterConfig{})
-    c1.ClusterConfig(ClusterConfig{})
+    c0.ClusterConfig(&ClusterConfig{})
+    c1.ClusterConfig(&ClusterConfig{})
     c0.internalClose(errManual)
@@ -82,10 +82,10 @@ func TestClose(t *testing.T) {
     ctx := context.Background()
-    c0.Index(ctx, "default", nil)
-    c0.Index(ctx, "default", nil)
-    if _, err := c0.Request(ctx, "default", "foo", 0, 0, 0, nil, 0, false); err == nil {
+    c0.Index(ctx, &Index{Folder: "default"})
+    c0.Index(ctx, &Index{Folder: "default"})
+    if _, err := c0.Request(ctx, &Request{Folder: "default", Name: "foo"}); err == nil {
         t.Error("Request should return an error")
     }
 }
@@ -111,7 +111,7 @@ func TestCloseOnBlockingSend(t *testing.T) {
     wg.Add(1)
     go func() {
-        c.ClusterConfig(ClusterConfig{})
+        c.ClusterConfig(&ClusterConfig{})
         wg.Done()
     }()
@@ -160,10 +160,10 @@ func TestCloseRace(t *testing.T) {
     c1 := NewConnection(c1ID, br, aw, testutil.NoopCloser{}, m1, new(mockedConnectionInfo), CompressionNever, nil, testKeyGen)
     c1.Start()
     defer closeAndWait(c1, ar, bw)
-    c0.ClusterConfig(ClusterConfig{})
-    c1.ClusterConfig(ClusterConfig{})
-    c1.Index(context.Background(), "default", nil)
+    c0.ClusterConfig(&ClusterConfig{})
+    c1.ClusterConfig(&ClusterConfig{})
+    c1.Index(context.Background(), &Index{Folder: "default"})
     select {
     case <-indexReceived:
     case <-time.After(time.Second):
@@ -205,7 +205,7 @@ func TestClusterConfigFirst(t *testing.T) {
         // Allow some time for c.writerLoop to setup after c.Start
     }
-    c.ClusterConfig(ClusterConfig{})
+    c.ClusterConfig(&ClusterConfig{})
     done := make(chan struct{})
     if ok := c.send(context.Background(), &Ping{}, done); !ok {
@@ -907,7 +907,7 @@ func TestClusterConfigAfterClose(t *testing.T) {
     done := make(chan struct{})
     go func() {
-        c.ClusterConfig(ClusterConfig{})
+        c.ClusterConfig(&ClusterConfig{})
         close(done)
     }()

View File

@@ -13,23 +13,23 @@ type wireFormatConnection struct {
     Connection
 }
-func (c wireFormatConnection) Index(ctx context.Context, folder string, fs []FileInfo) error {
-    for i := range fs {
-        fs[i].Name = norm.NFC.String(filepath.ToSlash(fs[i].Name))
+func (c wireFormatConnection) Index(ctx context.Context, idx *Index) error {
+    for i := range idx.Files {
+        idx.Files[i].Name = norm.NFC.String(filepath.ToSlash(idx.Files[i].Name))
     }
-    return c.Connection.Index(ctx, folder, fs)
+    return c.Connection.Index(ctx, idx)
 }
-func (c wireFormatConnection) IndexUpdate(ctx context.Context, folder string, fs []FileInfo) error {
-    for i := range fs {
-        fs[i].Name = norm.NFC.String(filepath.ToSlash(fs[i].Name))
+func (c wireFormatConnection) IndexUpdate(ctx context.Context, idxUp *IndexUpdate) error {
+    for i := range idxUp.Files {
+        idxUp.Files[i].Name = norm.NFC.String(filepath.ToSlash(idxUp.Files[i].Name))
     }
-    return c.Connection.IndexUpdate(ctx, folder, fs)
+    return c.Connection.IndexUpdate(ctx, idxUp)
 }
-func (c wireFormatConnection) Request(ctx context.Context, folder string, name string, blockNo int, offset int64, size int, hash []byte, weakHash uint32, fromTemporary bool) ([]byte, error) {
-    name = norm.NFC.String(filepath.ToSlash(name))
-    return c.Connection.Request(ctx, folder, name, blockNo, offset, size, hash, weakHash, fromTemporary)
+func (c wireFormatConnection) Request(ctx context.Context, req *Request) ([]byte, error) {
+    req.Name = norm.NFC.String(filepath.ToSlash(req.Name))
+    return c.Connection.Request(ctx, req)
 }