Mirror of https://github.com/octoleo/syncthing.git, synced 2025-01-31 19:08:30 +00:00
This commit is contained in:
parent af13f0cd35
commit a9764fc16c
@@ -754,7 +754,11 @@ func (s *service) getDBNeed(w http.ResponseWriter, r *http.Request) {
 
 	page, perpage := getPagingParams(qs)
 
-	progress, queued, rest := s.model.NeedFolderFiles(folder, page, perpage)
+	progress, queued, rest, err := s.model.NeedFolderFiles(folder, page, perpage)
+	if err != nil {
+		http.Error(w, err.Error(), http.StatusNotFound)
+		return
+	}
 
 	// Convert the struct to a more loose structure, and inject the size.
 	sendJSON(w, map[string]interface{}{
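
Note: with the error now returned from NeedFolderFiles, getDBNeed reports a failed model lookup (for example an unknown folder) to the API client as an HTTP 404 instead of silently returning empty lists. A minimal, hypothetical sketch of the same pattern; handleNeed, lookup and the JSON shape are illustrative, not the real Syncthing handler:

package example

import (
	"encoding/json"
	"net/http"
)

// handleNeed shows the error-propagation pattern used above:
// a failed model lookup becomes a 404 for the client.
func handleNeed(w http.ResponseWriter, lookup func() ([]string, error)) {
	files, err := lookup()
	if err != nil {
		http.Error(w, err.Error(), http.StatusNotFound)
		return
	}
	_ = json.NewEncoder(w).Encode(map[string]interface{}{"files": files})
}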
@@ -779,13 +783,12 @@ func (s *service) getDBRemoteNeed(w http.ResponseWriter, r *http.Request) {
 
 	page, perpage := getPagingParams(qs)
 
-	snap, err := s.model.DBSnapshot(folder)
+	files, err := s.model.RemoteNeedFolderFiles(folder, deviceID, page, perpage)
 	if err != nil {
 		http.Error(w, err.Error(), http.StatusNotFound)
 		return
 	}
-	defer snap.Release()
-	files := snap.RemoteNeedFolderFiles(deviceID, page, perpage)
 
 	sendJSON(w, map[string]interface{}{
 		"files": toJsonFileInfoSlice(files),
 		"page":  page,
@@ -800,13 +803,11 @@ func (s *service) getDBLocalChanged(w http.ResponseWriter, r *http.Request) {
 
 	page, perpage := getPagingParams(qs)
 
-	snap, err := s.model.DBSnapshot(folder)
+	files, err := s.model.LocalChangedFolderFiles(folder, page, perpage)
 	if err != nil {
 		http.Error(w, err.Error(), http.StatusNotFound)
 		return
 	}
-	defer snap.Release()
-	files := snap.LocalChangedFiles(page, perpage)
 
 	sendJSON(w, map[string]interface{}{
 		"files": toJsonFileInfoSlice(files),
@@ -34,8 +34,16 @@ func (m *mockedModel) Override(folder string) {}
 
 func (m *mockedModel) Revert(folder string) {}
 
-func (m *mockedModel) NeedFolderFiles(folder string, page, perpage int) ([]db.FileInfoTruncated, []db.FileInfoTruncated, []db.FileInfoTruncated) {
-	return nil, nil, nil
+func (m *mockedModel) NeedFolderFiles(folder string, page, perpage int) ([]db.FileInfoTruncated, []db.FileInfoTruncated, []db.FileInfoTruncated, error) {
+	return nil, nil, nil, nil
+}
+
+func (*mockedModel) RemoteNeedFolderFiles(folder string, device protocol.DeviceID, page, perpage int) ([]db.FileInfoTruncated, error) {
+	return nil, nil
+}
+
+func (*mockedModel) LocalChangedFolderFiles(folder string, page, perpage int) ([]db.FileInfoTruncated, error) {
+	return nil, nil
 }
 
 func (m *mockedModel) FolderProgressBytesCompleted(_ string) int64 {
@@ -336,53 +336,6 @@ func (s *Snapshot) NeedSize(device protocol.DeviceID) Counts {
 	return s.meta.Counts(device, needFlag)
 }
 
-// LocalChangedFiles returns a paginated list of files that were changed locally.
-func (s *Snapshot) LocalChangedFiles(page, perpage int) []FileInfoTruncated {
-	if s.ReceiveOnlyChangedSize().TotalItems() == 0 {
-		return nil
-	}
-
-	files := make([]FileInfoTruncated, 0, perpage)
-
-	skip := (page - 1) * perpage
-	get := perpage
-
-	s.WithHaveTruncated(protocol.LocalDeviceID, func(f protocol.FileIntf) bool {
-		if !f.IsReceiveOnlyChanged() {
-			return true
-		}
-		if skip > 0 {
-			skip--
-			return true
-		}
-		ft := f.(FileInfoTruncated)
-		files = append(files, ft)
-		get--
-		return get > 0
-	})
-
-	return files
-}
-
-// RemoteNeedFolderFiles returns paginated list of currently needed files in
-// progress, queued, and to be queued on next puller iteration, as well as the
-// total number of files currently needed.
-func (s *Snapshot) RemoteNeedFolderFiles(device protocol.DeviceID, page, perpage int) []FileInfoTruncated {
-	files := make([]FileInfoTruncated, 0, perpage)
-	skip := (page - 1) * perpage
-	get := perpage
-	s.WithNeedTruncated(device, func(f protocol.FileIntf) bool {
-		if skip > 0 {
-			skip--
-			return true
-		}
-		files = append(files, f.(FileInfoTruncated))
-		get--
-		return get > 0
-	})
-	return files
-}
-
 func (s *Snapshot) WithBlocksHash(hash []byte, fn Iterator) {
 	opStr := fmt.Sprintf(`%s WithBlocksHash("%x")`, s.folder, hash)
 	l.Debugf(opStr)
@@ -90,7 +90,9 @@ type Model interface {
 	RestoreFolderVersions(folder string, versions map[string]time.Time) (map[string]string, error)
 
 	DBSnapshot(folder string) (*db.Snapshot, error)
-	NeedFolderFiles(folder string, page, perpage int) ([]db.FileInfoTruncated, []db.FileInfoTruncated, []db.FileInfoTruncated)
+	NeedFolderFiles(folder string, page, perpage int) ([]db.FileInfoTruncated, []db.FileInfoTruncated, []db.FileInfoTruncated, error)
+	RemoteNeedFolderFiles(folder string, device protocol.DeviceID, page, perpage int) ([]db.FileInfoTruncated, error)
+	LocalChangedFolderFiles(folder string, page, perpage int) ([]db.FileInfoTruncated, error)
 	FolderProgressBytesCompleted(folder string) int64
 
 	CurrentFolderFile(folder string, file string) (protocol.FileInfo, bool)
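
Since the Model interface now returns an error from all three pagination methods, any caller (the API service above, or a test) must check the error before using the returned slices. A hedged sketch of such a caller, assuming the Model interface from this hunk is in scope; countNeeded is illustrative only:

// countNeeded returns the total number of needed items on the first page
// of up to 100 entries, or the model's error (e.g. for an unknown folder).
func countNeeded(m Model, folder string) (int, error) {
	progress, queued, rest, err := m.NeedFolderFiles(folder, 1, 100)
	if err != nil {
		return 0, err
	}
	return len(progress) + len(queued) + len(rest), nil
}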
@@ -891,7 +893,7 @@ func (m *model) FolderProgressBytesCompleted(folder string) int64 {
 
 // NeedFolderFiles returns paginated list of currently needed files in
 // progress, queued, and to be queued on next puller iteration.
-func (m *model) NeedFolderFiles(folder string, page, perpage int) ([]db.FileInfoTruncated, []db.FileInfoTruncated, []db.FileInfoTruncated) {
+func (m *model) NeedFolderFiles(folder string, page, perpage int) ([]db.FileInfoTruncated, []db.FileInfoTruncated, []db.FileInfoTruncated, error) {
 	m.fmut.RLock()
 	rf, rfOk := m.folderFiles[folder]
 	runner, runnerOk := m.folderRunners[folder]
@@ -899,7 +901,7 @@ func (m *model) NeedFolderFiles(folder string, page, perpage int) ([]db.FileInfo
 	m.fmut.RUnlock()
 
 	if !rfOk {
-		return nil, nil, nil
+		return nil, nil, nil, errFolderMissing
 	}
 
 	snap := rf.Snapshot()
@@ -907,8 +909,7 @@ func (m *model) NeedFolderFiles(folder string, page, perpage int) ([]db.FileInfo
 	var progress, queued, rest []db.FileInfoTruncated
 	var seen map[string]struct{}
 
-	skip := (page - 1) * perpage
-	get := perpage
+	p := newPager(page, perpage)
 
 	if runnerOk {
 		progressNames, queuedNames, skipped := runner.Jobs(page, perpage)
@@ -931,11 +932,11 @@ func (m *model) NeedFolderFiles(folder string, page, perpage int) ([]db.FileInfo
 			}
 		}
 
-		get -= len(seen)
-		if get == 0 {
-			return progress, queued, nil
+		p.get -= len(seen)
+		if p.get == 0 {
+			return progress, queued, nil, nil
 		}
-		skip -= skipped
+		p.toSkip -= skipped
 	}
 
 	rest = make([]db.FileInfoTruncated, 0, perpage)
@@ -944,19 +945,107 @@ func (m *model) NeedFolderFiles(folder string, page, perpage int) ([]db.FileInfo
 			return true
 		}
 
-		if skip > 0 {
-			skip--
+		if p.skip() {
 			return true
 		}
 		ft := f.(db.FileInfoTruncated)
 		if _, ok := seen[ft.Name]; !ok {
 			rest = append(rest, ft)
-			get--
+			p.get--
 		}
-		return get > 0
+		return p.get > 0
 	})
 
-	return progress, queued, rest
+	return progress, queued, rest, nil
 }
 
+// RemoteNeedFolderFiles returns paginated list of currently needed files in
+// progress, queued, and to be queued on next puller iteration, as well as the
+// total number of files currently needed.
+func (m *model) RemoteNeedFolderFiles(folder string, device protocol.DeviceID, page, perpage int) ([]db.FileInfoTruncated, error) {
+	m.fmut.RLock()
+	rf, ok := m.folderFiles[folder]
+	m.fmut.RUnlock()
+
+	if !ok {
+		return nil, errFolderMissing
+	}
+
+	snap := rf.Snapshot()
+	defer snap.Release()
+
+	files := make([]db.FileInfoTruncated, 0, perpage)
+	p := newPager(page, perpage)
+	snap.WithNeedTruncated(device, func(f protocol.FileIntf) bool {
+		if p.skip() {
+			return true
+		}
+		files = append(files, f.(db.FileInfoTruncated))
+		return !p.done()
+	})
+	return files, nil
+}
+
+func (m *model) LocalChangedFolderFiles(folder string, page, perpage int) ([]db.FileInfoTruncated, error) {
+	m.fmut.RLock()
+	rf, ok := m.folderFiles[folder]
+	cfg := m.folderCfgs[folder]
+	m.fmut.RUnlock()
+
+	if !ok {
+		return nil, errFolderMissing
+	}
+
+	snap := rf.Snapshot()
+	defer snap.Release()
+
+	if snap.ReceiveOnlyChangedSize().TotalItems() == 0 {
+		return nil, nil
+	}
+
+	p := newPager(page, perpage)
+	recvEnc := cfg.Type == config.FolderTypeReceiveEncrypted
+	files := make([]db.FileInfoTruncated, 0, perpage)
+
+	snap.WithHaveTruncated(protocol.LocalDeviceID, func(f protocol.FileIntf) bool {
+		if !f.IsReceiveOnlyChanged() || (recvEnc && f.IsDeleted()) {
+			return true
+		}
+		if p.skip() {
+			return true
+		}
+		ft := f.(db.FileInfoTruncated)
+		files = append(files, ft)
+		return !p.done()
+	})
+
+	return files, nil
+}
+
+type pager struct {
+	toSkip, get int
+}
+
+func newPager(page, perpage int) *pager {
+	return &pager{
+		toSkip: (page - 1) * perpage,
+		get:    perpage,
+	}
+}
+
+func (p *pager) skip() bool {
+	if p.toSkip == 0 {
+		return false
+	}
+	p.toSkip--
+	return true
+}
+
+func (p *pager) done() bool {
+	if p.get > 0 {
+		p.get--
+	}
+	return p.get == 0
+}
+
 // Index is called when a new device is connected and we receive their full index.
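
The new pager type centralizes the skip/take arithmetic that was previously duplicated as raw skip and get counters: skip() returns true while items belonging to earlier pages should still be discarded, and done() counts down the current page and reports when it is full, which is why the iterator callbacks return !p.done(). A self-contained sketch of the same semantics over a plain slice; the pager is copied here purely for illustration, in the change itself it is unexported inside the model package:

package main

import "fmt"

type pager struct {
	toSkip, get int
}

func newPager(page, perpage int) *pager {
	return &pager{toSkip: (page - 1) * perpage, get: perpage}
}

// skip reports whether the current item still belongs to an earlier page.
func (p *pager) skip() bool {
	if p.toSkip == 0 {
		return false
	}
	p.toSkip--
	return true
}

// done consumes one slot on the current page and reports whether it is full.
func (p *pager) done() bool {
	if p.get > 0 {
		p.get--
	}
	return p.get == 0
}

func main() {
	items := []string{"a", "b", "c", "d", "e", "f", "g"}
	p := newPager(2, 3) // page 2 with 3 items per page selects d, e, f
	var page []string
	for _, it := range items {
		if p.skip() {
			continue
		}
		page = append(page, it)
		if p.done() {
			break
		}
	}
	fmt.Println(page) // [d e f]
}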
@@ -1002,14 +1002,16 @@ func TestNeedFolderFiles(t *testing.T) {
 		t.Fatal("Timed out before receiving index")
 	}
 
-	progress, queued, rest := m.NeedFolderFiles(fcfg.ID, 1, 100)
+	progress, queued, rest, err := m.NeedFolderFiles(fcfg.ID, 1, 100)
+	must(t, err)
 	if got := len(progress) + len(queued) + len(rest); got != num {
 		t.Errorf("Got %v needed items, expected %v", got, num)
 	}
 
 	exp := 10
 	for page := 1; page < 3; page++ {
-		progress, queued, rest := m.NeedFolderFiles(fcfg.ID, page, exp)
+		progress, queued, rest, err := m.NeedFolderFiles(fcfg.ID, page, exp)
+		must(t, err)
 		if got := len(progress) + len(queued) + len(rest); got != exp {
 			t.Errorf("Got %v needed items on page %v, expected %v", got, page, exp)
 		}
@@ -1128,7 +1130,8 @@ func TestRequestLastFileProgress(t *testing.T) {
 	fc.mut.Lock()
 	fc.requestFn = func(_ context.Context, folder, name string, _ int64, _ int, _ []byte, _ bool) ([]byte, error) {
 		defer close(done)
-		progress, queued, rest := m.NeedFolderFiles(folder, 1, 10)
+		progress, queued, rest, err := m.NeedFolderFiles(folder, 1, 10)
+		must(t, err)
 		if len(queued)+len(rest) != 0 {
 			t.Error(`There should not be any queued or "rest" items`)
 		}
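
The updated tests wrap the new error returns with must(t, err). That helper is defined elsewhere in the test package; a minimal sketch of what such a helper typically looks like, assuming it simply aborts the test on a non-nil error (the real implementation may differ):

// must fails the calling test immediately if err is non-nil.
func must(t *testing.T, err error) {
	t.Helper()
	if err != nil {
		t.Fatal(err)
	}
}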