Rename Repository -> Folder, Node -> Device (fixes #739)
This commit is contained in:
parent 9d816694ba
commit 6c352dca74
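The rename is mechanical but runs through the whole configuration vocabulary: `Repositories` becomes `Folders`, `Nodes` becomes `Devices`, and the derived names (`NodeID` to `DeviceID`, `RepositoryConfiguration` to `FolderConfiguration`, and so on) follow. The sketch below is not the real syncthing config package, just a minimal, self-contained stand-in with locally defined types, to show how code that walks the renamed structures reads after this commit.

// Illustrative sketch only: stand-in types mirroring the post-rename
// vocabulary (Folders/Devices instead of Repositories/Nodes). Not the
// actual syncthing config package.
package main

import "fmt"

type DeviceID string

type FolderDeviceConfiguration struct {
	DeviceID DeviceID
}

type FolderConfiguration struct {
	ID      string
	Devices []FolderDeviceConfiguration
}

type DeviceConfiguration struct {
	DeviceID  DeviceID
	Name      string
	Addresses []string
}

type Configuration struct {
	Folders []FolderConfiguration
	Devices []DeviceConfiguration
}

func main() {
	// Mirrors the default configuration created in syncthingMain below:
	// one folder "default" shared with the local device.
	myID := DeviceID("AAAAAAA") // hypothetical placeholder ID
	cfg := Configuration{
		Folders: []FolderConfiguration{
			{ID: "default", Devices: []FolderDeviceConfiguration{{DeviceID: myID}}},
		},
		Devices: []DeviceConfiguration{
			{DeviceID: myID, Name: "example", Addresses: []string{"dynamic"}},
		},
	}
	for _, folder := range cfg.Folders {
		for _, device := range folder.Devices {
			fmt.Printf("folder %q is shared with device %s\n", folder.ID, device.DeviceID)
		}
	}
}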
@@ -12,7 +12,7 @@ least the following:
 - What operating system, operating system version and version of
   Syncthing you are running
 
-- The same for other connected nodes, where relevant
+- The same for other connected devices, where relevant
 
 - Screenshot if the issue concerns something visible in the GUI
 
@@ -7,8 +7,8 @@ syncthing
 
 This is the `syncthing` project. The following are the project goals:
 
-1. Define a protocol for synchronization of a file repository between a
-   number of collaborating nodes. The protocol should be well defined,
+1. Define a protocol for synchronization of a file folder between a
+   number of collaborating devices. The protocol should be well defined,
    unambiguous, easily understood, free to use, efficient, secure and
    language neutral. This is the [Block Exchange
    Protocol](https://github.com/syncthing/syncthing/blob/master/protocol/PROTOCOL.md).
@@ -19,8 +19,8 @@ func main() {
 	log.SetFlags(0)
 	log.SetOutput(os.Stdout)
 
-	repo := flag.String("repo", "default", "Repository ID")
-	node := flag.String("node", "", "Node ID (blank for global)")
+	folder := flag.String("folder", "default", "Folder ID")
+	device := flag.String("device", "", "Device ID (blank for global)")
 	flag.Parse()
 
 	db, err := leveldb.OpenFile(flag.Arg(0), nil)
@@ -28,10 +28,10 @@ func main() {
 		log.Fatal(err)
 	}
 
-	fs := files.NewSet(*repo, db)
+	fs := files.NewSet(*folder, db)
 
-	if *node == "" {
-		log.Printf("*** Global index for repo %q", *repo)
+	if *device == "" {
+		log.Printf("*** Global index for folder %q", *folder)
 		fs.WithGlobalTruncated(func(fi protocol.FileIntf) bool {
 			f := fi.(protocol.FileInfoTruncated)
 			fmt.Println(f)
@@ -39,11 +39,11 @@ func main() {
 			return true
 		})
 	} else {
-		n, err := protocol.NodeIDFromString(*node)
+		n, err := protocol.DeviceIDFromString(*device)
 		if err != nil {
 			log.Fatal(err)
 		}
-		log.Printf("*** Have index for repo %q node %q", *repo, n)
+		log.Printf("*** Have index for folder %q device %q", *folder, n)
 		fs.WithHaveTruncated(n, func(fi protocol.FileIntf) bool {
 			f := fi.(protocol.FileInfoTruncated)
 			fmt.Println(f)
@@ -88,12 +88,12 @@ func startGUI(cfg config.GUIConfiguration, assetDir string, m *model.Model) erro
 	getRestMux.HandleFunc("/rest/lang", restGetLang)
 	getRestMux.HandleFunc("/rest/model", withModel(m, restGetModel))
 	getRestMux.HandleFunc("/rest/need", withModel(m, restGetNeed))
-	getRestMux.HandleFunc("/rest/nodeid", restGetNodeID)
+	getRestMux.HandleFunc("/rest/deviceid", restGetDeviceID)
 	getRestMux.HandleFunc("/rest/report", withModel(m, restGetReport))
 	getRestMux.HandleFunc("/rest/system", restGetSystem)
 	getRestMux.HandleFunc("/rest/upgrade", restGetUpgrade)
 	getRestMux.HandleFunc("/rest/version", restGetVersion)
-	getRestMux.HandleFunc("/rest/stats/node", withModel(m, restGetNodeStats))
+	getRestMux.HandleFunc("/rest/stats/device", withModel(m, restGetDeviceStats))
 
 	// Debug endpoints, not for general use
 	getRestMux.HandleFunc("/rest/debug/peerCompletion", withModel(m, restGetPeerCompletion))
@@ -220,17 +220,17 @@ func restGetVersion(w http.ResponseWriter, r *http.Request) {
 
 func restGetCompletion(m *model.Model, w http.ResponseWriter, r *http.Request) {
 	var qs = r.URL.Query()
-	var repo = qs.Get("repo")
-	var nodeStr = qs.Get("node")
+	var folder = qs.Get("folder")
+	var deviceStr = qs.Get("device")
 
-	node, err := protocol.NodeIDFromString(nodeStr)
+	device, err := protocol.DeviceIDFromString(deviceStr)
 	if err != nil {
 		http.Error(w, err.Error(), 500)
 		return
 	}
 
 	res := map[string]float64{
-		"completion": m.Completion(node, repo),
+		"completion": m.Completion(device, folder),
 	}
 
 	w.Header().Set("Content-Type", "application/json; charset=utf-8")
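Clients of the REST interface have to follow the rename as well: restGetCompletion now reads the `folder` and `device` query parameters instead of `repo` and `node`. Below is a hedged sketch of such a call. The `/rest/completion` path is inferred from the GUI code later in this diff (urlbase + '/completion'); the listen address and the absence of authentication are assumptions made for the example, not part of this commit.

// Illustrative sketch only: query the renamed completion endpoint.
// The GUI address 127.0.0.1:8080 is an assumption for the example.
package main

import (
	"fmt"
	"io"
	"log"
	"net/http"
	"net/url"
)

func main() {
	params := url.Values{}
	params.Set("folder", "default")     // was "repo" before this commit
	params.Set("device", "ABCDEFG-...") // was "node"; placeholder device ID
	resp, err := http.Get("http://127.0.0.1:8080/rest/completion?" + params.Encode())
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(body)) // e.g. {"completion": 100}
}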
@@ -239,29 +239,29 @@ func restGetCompletion(m *model.Model, w http.ResponseWriter, r *http.Request) {
 
 func restGetModel(m *model.Model, w http.ResponseWriter, r *http.Request) {
 	var qs = r.URL.Query()
-	var repo = qs.Get("repo")
+	var folder = qs.Get("folder")
 	var res = make(map[string]interface{})
 
-	for _, cr := range cfg.Repositories {
-		if cr.ID == repo {
+	for _, cr := range cfg.Folders {
+		if cr.ID == folder {
 			res["invalid"] = cr.Invalid
 			break
 		}
 	}
 
-	globalFiles, globalDeleted, globalBytes := m.GlobalSize(repo)
+	globalFiles, globalDeleted, globalBytes := m.GlobalSize(folder)
 	res["globalFiles"], res["globalDeleted"], res["globalBytes"] = globalFiles, globalDeleted, globalBytes
 
-	localFiles, localDeleted, localBytes := m.LocalSize(repo)
+	localFiles, localDeleted, localBytes := m.LocalSize(folder)
 	res["localFiles"], res["localDeleted"], res["localBytes"] = localFiles, localDeleted, localBytes
 
-	needFiles, needBytes := m.NeedSize(repo)
+	needFiles, needBytes := m.NeedSize(folder)
 	res["needFiles"], res["needBytes"] = needFiles, needBytes
 
 	res["inSyncFiles"], res["inSyncBytes"] = globalFiles-needFiles, globalBytes-needBytes
 
-	res["state"], res["stateChanged"] = m.State(repo)
-	res["version"] = m.CurrentLocalVersion(repo) + m.RemoteLocalVersion(repo)
+	res["state"], res["stateChanged"] = m.State(folder)
+	res["version"] = m.CurrentLocalVersion(folder) + m.RemoteLocalVersion(folder)
 
 	w.Header().Set("Content-Type", "application/json; charset=utf-8")
 	json.NewEncoder(w).Encode(res)
@@ -269,15 +269,15 @@ func restGetModel(m *model.Model, w http.ResponseWriter, r *http.Request) {
 
 func restPostOverride(m *model.Model, w http.ResponseWriter, r *http.Request) {
 	var qs = r.URL.Query()
-	var repo = qs.Get("repo")
-	go m.Override(repo)
+	var folder = qs.Get("folder")
+	go m.Override(folder)
 }
 
 func restGetNeed(m *model.Model, w http.ResponseWriter, r *http.Request) {
 	var qs = r.URL.Query()
-	var repo = qs.Get("repo")
+	var folder = qs.Get("folder")
 
-	files := m.NeedFilesRepoLimited(repo, 100, 2500) // max 100 files or 2500 blocks
+	files := m.NeedFilesFolderLimited(folder, 100, 2500) // max 100 files or 2500 blocks
 
 	w.Header().Set("Content-Type", "application/json; charset=utf-8")
 	json.NewEncoder(w).Encode(files)
@@ -289,8 +289,8 @@ func restGetConnections(m *model.Model, w http.ResponseWriter, r *http.Request)
 	json.NewEncoder(w).Encode(res)
 }
 
-func restGetNodeStats(m *model.Model, w http.ResponseWriter, r *http.Request) {
-	var res = m.NodeStatistics()
+func restGetDeviceStats(m *model.Model, w http.ResponseWriter, r *http.Request) {
+	var res = m.DeviceStatistics()
 	w.Header().Set("Content-Type", "application/json; charset=utf-8")
 	json.NewEncoder(w).Encode(res)
 }
@@ -357,8 +357,8 @@ func restPostRestart(w http.ResponseWriter, r *http.Request) {
 }
 
 func restPostReset(w http.ResponseWriter, r *http.Request) {
-	flushResponse(`{"ok": "resetting repos"}`, w)
-	resetRepositories()
+	flushResponse(`{"ok": "resetting folders"}`, w)
+	resetFolders()
 	go restart()
 }
 
@@ -431,10 +431,10 @@ func showGuiError(l logger.LogLevel, err string) {
 
 func restPostDiscoveryHint(w http.ResponseWriter, r *http.Request) {
 	var qs = r.URL.Query()
-	var node = qs.Get("node")
+	var device = qs.Get("device")
 	var addr = qs.Get("addr")
-	if len(node) != 0 && len(addr) != 0 && discoverer != nil {
-		discoverer.Hint(node, []string{addr})
+	if len(device) != 0 && len(addr) != 0 && discoverer != nil {
+		discoverer.Hint(device, []string{addr})
 	}
 }
 
@@ -451,7 +451,7 @@ func restGetIgnores(m *model.Model, w http.ResponseWriter, r *http.Request) {
 	qs := r.URL.Query()
 	w.Header().Set("Content-Type", "application/json; charset=utf-8")
 
-	ignores, err := m.GetIgnores(qs.Get("repo"))
+	ignores, err := m.GetIgnores(qs.Get("folder"))
 	if err != nil {
 		http.Error(w, err.Error(), 500)
 		return
@@ -473,7 +473,7 @@ func restPostIgnores(m *model.Model, w http.ResponseWriter, r *http.Request) {
 		return
 	}
 
-	err = m.SetIgnores(qs.Get("repo"), data["ignore"])
+	err = m.SetIgnores(qs.Get("folder"), data["ignore"])
 	if err != nil {
 		http.Error(w, err.Error(), 500)
 		return
@@ -519,10 +519,10 @@ func restGetUpgrade(w http.ResponseWriter, r *http.Request) {
 	json.NewEncoder(w).Encode(res)
 }
 
-func restGetNodeID(w http.ResponseWriter, r *http.Request) {
+func restGetDeviceID(w http.ResponseWriter, r *http.Request) {
 	qs := r.URL.Query()
 	idStr := qs.Get("id")
-	id, err := protocol.NodeIDFromString(idStr)
+	id, err := protocol.DeviceIDFromString(idStr)
 	w.Header().Set("Content-Type", "application/json; charset=utf-8")
 	if err == nil {
 		json.NewEncoder(w).Encode(map[string]string{
@@ -570,9 +570,9 @@ func restPostUpgrade(w http.ResponseWriter, r *http.Request) {
 
 func restPostScan(m *model.Model, w http.ResponseWriter, r *http.Request) {
 	qs := r.URL.Query()
-	repo := qs.Get("repo")
+	folder := qs.Get("folder")
 	sub := qs.Get("sub")
-	err := m.ScanRepoSub(repo, sub)
+	err := m.ScanFolderSub(folder, sub)
 	if err != nil {
 		http.Error(w, err.Error(), 500)
 	}
@@ -595,21 +595,21 @@ func restGetPeerCompletion(m *model.Model, w http.ResponseWriter, r *http.Reques
 	tot := map[string]float64{}
 	count := map[string]float64{}
 
-	for _, repo := range cfg.Repositories {
-		for _, node := range repo.NodeIDs() {
-			nodeStr := node.String()
-			if m.ConnectedTo(node) {
-				tot[nodeStr] += m.Completion(node, repo.ID)
+	for _, folder := range cfg.Folders {
+		for _, device := range folder.DeviceIDs() {
+			deviceStr := device.String()
+			if m.ConnectedTo(device) {
+				tot[deviceStr] += m.Completion(device, folder.ID)
 			} else {
-				tot[nodeStr] = 0
+				tot[deviceStr] = 0
 			}
-			count[nodeStr]++
+			count[deviceStr]++
 		}
 	}
 
 	comp := map[string]int{}
-	for node := range tot {
-		comp[node] = int(tot[node] / count[node])
+	for device := range tot {
+		comp[device] = int(tot[device] / count[device])
 	}
 
 	w.Header().Set("Content-Type", "application/json; charset=utf-8")
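restGetPeerCompletion averages, per device, the completion percentage over every folder shared with it: tot accumulates m.Completion per folder, count tracks how many folders contributed, and the reported value is the integer quotient (with disconnected devices pinned to zero). Here is a small self-contained sketch of that aggregation, with a plain map standing in for the model; the sample numbers are made up for illustration.

// Self-contained sketch of the per-device completion averaging done in
// restGetPeerCompletion. The completion table below is invented input;
// in syncthing the values come from m.Completion(device, folder.ID).
package main

import "fmt"

func peerCompletion(completion map[string]map[string]float64, connected map[string]bool) map[string]int {
	tot := map[string]float64{}
	count := map[string]float64{}
	for _, devices := range completion { // one entry per folder
		for device, pct := range devices {
			if connected[device] {
				tot[device] += pct
			} else {
				tot[device] = 0 // disconnected devices report 0
			}
			count[device]++
		}
	}
	comp := map[string]int{}
	for device := range tot {
		comp[device] = int(tot[device] / count[device])
	}
	return comp
}

func main() {
	completion := map[string]map[string]float64{
		"default": {"device-a": 100, "device-b": 40},
		"photos":  {"device-a": 80},
	}
	connected := map[string]bool{"device-a": true} // device-b is offline
	fmt.Println(peerCompletion(completion, connected))
	// device-a: (100+80)/2 = 90, device-b: 0
}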
@@ -84,7 +84,7 @@ func init() {
 
 var (
 	cfg            config.Configuration
-	myID           protocol.NodeID
+	myID           protocol.DeviceID
 	confDir        string
 	logFlags       int = log.Ltime
 	writeRateLimit *ratelimit.Bucket
@@ -208,7 +208,7 @@ func main() {
 		cert, err := loadCert(dir, "")
 		if err == nil {
 			l.Warnln("Key exists; will not overwrite.")
-			l.Infoln("Node ID:", protocol.NewNodeID(cert.Certificate[0]))
+			l.Infoln("Device ID:", protocol.NewDeviceID(cert.Certificate[0]))
 			return
 		}
 
@@ -218,7 +218,7 @@ func main() {
 			l.Fatalln("load cert:", err)
 		}
 		if err == nil {
-			l.Infoln("Node ID:", protocol.NewNodeID(cert.Certificate[0]))
+			l.Infoln("Device ID:", protocol.NewDeviceID(cert.Certificate[0]))
 		}
 		return
 	}
@@ -264,7 +264,7 @@ func main() {
 	}
 
 	if reset {
-		resetRepositories()
+		resetFolders()
 		return
 	}
 
@@ -319,7 +319,7 @@ func syncthingMain() {
 		}
 	}
 
-	myID = protocol.NewNodeID(cert.Certificate[0])
+	myID = protocol.NewDeviceID(cert.Certificate[0])
 	l.SetPrefix(fmt.Sprintf("[%s] ", myID.String()[:5]))
 
 	l.Infoln(LongVersion)
@@ -336,7 +336,7 @@ func syncthingMain() {
 
 	cfg, err = config.Load(cfgFile, myID)
 	if err == nil {
-		myCfg := cfg.GetNodeConfiguration(myID)
+		myCfg := cfg.GetDeviceConfiguration(myID)
 		if myCfg == nil || myCfg.Name == "" {
 			myName, _ = os.Hostname()
 		} else {
@@ -345,20 +345,20 @@ func syncthingMain() {
 	} else {
 		l.Infoln("No config file; starting with empty defaults")
 		myName, _ = os.Hostname()
-		defaultRepo := filepath.Join(getHomeDir(), "Sync")
+		defaultFolder := filepath.Join(getHomeDir(), "Sync")
 
 		cfg = config.New(cfgFile, myID)
-		cfg.Repositories = []config.RepositoryConfiguration{
+		cfg.Folders = []config.FolderConfiguration{
 			{
 				ID:              "default",
-				Directory:       defaultRepo,
+				Directory:       defaultFolder,
 				RescanIntervalS: 60,
-				Nodes:           []config.RepositoryNodeConfiguration{{NodeID: myID}},
+				Devices:         []config.FolderDeviceConfiguration{{DeviceID: myID}},
 			},
 		}
-		cfg.Nodes = []config.NodeConfiguration{
+		cfg.Devices = []config.DeviceConfiguration{
 			{
-				NodeID:    myID,
+				DeviceID:  myID,
 				Addresses: []string{"dynamic"},
 				Name:      myName,
 			},
@@ -422,48 +422,48 @@ func syncthingMain() {
 		l.Fatalln("Cannot open database:", err, "- Is another copy of Syncthing already running?")
 	}
 
-	// Remove database entries for repos that no longer exist in the config
-	repoMap := cfg.RepoMap()
-	for _, repo := range files.ListRepos(db) {
-		if _, ok := repoMap[repo]; !ok {
-			l.Infof("Cleaning data for dropped repo %q", repo)
-			files.DropRepo(db, repo)
+	// Remove database entries for folders that no longer exist in the config
+	folderMap := cfg.FolderMap()
+	for _, folder := range files.ListFolders(db) {
+		if _, ok := folderMap[folder]; !ok {
+			l.Infof("Cleaning data for dropped folder %q", folder)
+			files.DropFolder(db, folder)
 		}
 	}
 
 	m := model.NewModel(confDir, &cfg, myName, "syncthing", Version, db)
 
-nextRepo:
-	for i, repo := range cfg.Repositories {
-		if repo.Invalid != "" {
+nextFolder:
+	for i, folder := range cfg.Folders {
+		if folder.Invalid != "" {
 			continue
 		}
-		repo.Directory = expandTilde(repo.Directory)
-		m.AddRepo(repo)
+		folder.Directory = expandTilde(folder.Directory)
+		m.AddFolder(folder)
 
-		fi, err := os.Stat(repo.Directory)
-		if m.CurrentLocalVersion(repo.ID) > 0 {
+		fi, err := os.Stat(folder.Directory)
+		if m.CurrentLocalVersion(folder.ID) > 0 {
 			// Safety check. If the cached index contains files but the
-			// repository doesn't exist, we have a problem. We would assume
+			// folder doesn't exist, we have a problem. We would assume
 			// that all files have been deleted which might not be the case,
 			// so mark it as invalid instead.
 			if err != nil || !fi.IsDir() {
-				l.Warnf("Stopping repository %q - directory missing, but has files in index", repo.ID)
-				cfg.Repositories[i].Invalid = "repo directory missing"
-				continue nextRepo
+				l.Warnf("Stopping folder %q - directory missing, but has files in index", folder.ID)
+				cfg.Folders[i].Invalid = "folder directory missing"
+				continue nextFolder
 			}
 		} else if os.IsNotExist(err) {
 			// If we don't have any files in the index, and the directory
 			// doesn't exist, try creating it.
-			err = os.MkdirAll(repo.Directory, 0700)
+			err = os.MkdirAll(folder.Directory, 0700)
 		}
 
 		if err != nil {
 			// If there was another error or we could not create the
-			// directory, the repository is invalid.
-			l.Warnf("Stopping repository %q - %v", err)
-			cfg.Repositories[i].Invalid = err.Error()
-			continue nextRepo
+			// directory, the folder is invalid.
+			l.Warnf("Stopping folder %q - %v", err)
+			cfg.Folders[i].Invalid = err.Error()
+			continue nextFolder
 		}
 	}
 
@@ -507,33 +507,33 @@ nextRepo:
 		}
 	}
 
-	// Clear out old indexes for other nodes. Otherwise we'll start up and
+	// Clear out old indexes for other devices. Otherwise we'll start up and
 	// start needing a bunch of files which are nowhere to be found. This
 	// needs to be changed when we correctly do persistent indexes.
-	for _, repoCfg := range cfg.Repositories {
-		if repoCfg.Invalid != "" {
+	for _, folderCfg := range cfg.Folders {
+		if folderCfg.Invalid != "" {
 			continue
 		}
-		for _, node := range repoCfg.NodeIDs() {
-			if node == myID {
+		for _, device := range folderCfg.DeviceIDs() {
+			if device == myID {
 				continue
 			}
-			m.Index(node, repoCfg.ID, nil)
+			m.Index(device, folderCfg.ID, nil)
 		}
 	}
 
-	// Walk the repository and update the local model before establishing any
-	// connections to other nodes.
+	// Walk the folder and update the local model before establishing any
+	// connections to other devices.
 
-	m.CleanRepos()
-	l.Infoln("Performing initial repository scan")
-	m.ScanRepos()
+	m.CleanFolders()
+	l.Infoln("Performing initial folder scan")
+	m.ScanFolders()
 
-	// Remove all .idx* files that don't belong to an active repo.
+	// Remove all .idx* files that don't belong to an active folder.
 
 	validIndexes := make(map[string]bool)
-	for _, repo := range cfg.Repositories {
-		dir := expandTilde(repo.Directory)
+	for _, folder := range cfg.Folders {
+		dir := expandTilde(folder.Directory)
 		id := fmt.Sprintf("%x", sha1.Sum([]byte(dir)))
 		validIndexes[id] = true
 	}
@@ -566,23 +566,23 @@ nextRepo:
 		setupUPnP()
 	}
 
-	// Routine to connect out to configured nodes
+	// Routine to connect out to configured devices
 	discoverer = discovery(externalPort)
 	go listenConnect(myID, m, tlsCfg)
 
-	for _, repo := range cfg.Repositories {
-		if repo.Invalid != "" {
+	for _, folder := range cfg.Folders {
+		if folder.Invalid != "" {
 			continue
 		}
 
-		// Routine to pull blocks from other nodes to synchronize the local
-		// repository. Does not run when we are in read only (publish only) mode.
-		if repo.ReadOnly {
-			l.Okf("Ready to synchronize %s (read only; no external updates accepted)", repo.ID)
-			m.StartRepoRO(repo.ID)
+		// Routine to pull blocks from other devices to synchronize the local
+		// folder. Does not run when we are in read only (publish only) mode.
+		if folder.ReadOnly {
+			l.Okf("Ready to synchronize %s (read only; no external updates accepted)", folder.ID)
+			m.StartFolderRO(folder.ID)
 		} else {
-			l.Okf("Ready to synchronize %s (read-write)", repo.ID)
-			m.StartRepoRW(repo.ID)
+			l.Okf("Ready to synchronize %s (read-write)", folder.ID)
+			m.StartFolderRW(folder.ID)
 		}
 	}
 
@@ -595,9 +595,9 @@ nextRepo:
 		defer pprof.StopCPUProfile()
 	}
 
-	for _, node := range cfg.Nodes {
-		if len(node.Name) > 0 {
-			l.Infof("Node %s is %q at %v", node.NodeID, node.Name, node.Addresses)
+	for _, device := range cfg.Devices {
+		if len(device.Name) > 0 {
+			l.Infof("Device %s is %q at %v", device.DeviceID, device.Name, device.Addresses)
 		}
 	}
 
@@ -668,7 +668,7 @@ func setupUPnP() {
 }
 
 func setupExternalPort(igd *upnp.IGD, port int) int {
-	// We seed the random number generator with the node ID to get a
+	// We seed the random number generator with the device ID to get a
 	// repeatable sequence of random external ports.
 	rnd := rand.NewSource(certSeed(cert.Certificate[0]))
 	for i := 0; i < 10; i++ {
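setupExternalPort derives its random sequence from the device certificate (certSeed over cert.Certificate[0]), so the same device retries the same candidate UPnP ports in the same order across restarts. The sketch below only illustrates that idea: hashing the certificate bytes into an int64 seed with SHA-256 and the 1024-65535 port range are assumptions made for the example; certSeed's actual definition is not part of this commit.

// Illustrative sketch: derive a repeatable sequence of candidate external
// ports from certificate bytes. The SHA-256 hashing and port range are
// assumptions, not syncthing's certSeed implementation.
package main

import (
	"crypto/sha256"
	"encoding/binary"
	"fmt"
	"math/rand"
)

func certSeedSketch(certBytes []byte) int64 {
	sum := sha256.Sum256(certBytes)
	return int64(binary.BigEndian.Uint64(sum[:8]))
}

func main() {
	cert := []byte("stand-in for cert.Certificate[0]")
	rnd := rand.New(rand.NewSource(certSeedSketch(cert)))
	// Same certificate bytes -> same ten candidate ports, run after run.
	for i := 0; i < 10; i++ {
		port := 1024 + rnd.Intn(65535-1024)
		fmt.Println("candidate external port:", port)
	}
}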
@@ -714,12 +714,12 @@ func renewUPnP(port int) {
 	}
 }
 
-func resetRepositories() {
+func resetFolders() {
 	suffix := fmt.Sprintf(".syncthing-reset-%d", time.Now().UnixNano())
-	for _, repo := range cfg.Repositories {
-		if _, err := os.Stat(repo.Directory); err == nil {
-			l.Infof("Reset: Moving %s -> %s", repo.Directory, repo.Directory+suffix)
-			os.Rename(repo.Directory, repo.Directory+suffix)
+	for _, folder := range cfg.Folders {
+		if _, err := os.Stat(folder.Directory); err == nil {
+			l.Infof("Reset: Moving %s -> %s", folder.Directory, folder.Directory+suffix)
+			os.Rename(folder.Directory, folder.Directory+suffix)
 		}
 	}
 
@@ -773,7 +773,7 @@ func shutdown() {
 	stop <- exitSuccess
 }
 
-func listenConnect(myID protocol.NodeID, m *model.Model, tlsCfg *tls.Config) {
+func listenConnect(myID protocol.DeviceID, m *model.Model, tlsCfg *tls.Config) {
 	var conns = make(chan *tls.Conn)
 
 	// Listen
@@ -793,7 +793,7 @@ next:
 			continue
 		}
 		remoteCert := certs[0]
-		remoteID := protocol.NewNodeID(remoteCert.Raw)
+		remoteID := protocol.NewDeviceID(remoteCert.Raw)
 
 		if remoteID == myID {
 			l.Infof("Connected to myself (%s) - should not happen", remoteID)
@@ -802,17 +802,17 @@ next:
 		}
 
 		if m.ConnectedTo(remoteID) {
-			l.Infof("Connected to already connected node (%s)", remoteID)
+			l.Infof("Connected to already connected device (%s)", remoteID)
 			conn.Close()
 			continue
 		}
 
-		for _, nodeCfg := range cfg.Nodes {
-			if nodeCfg.NodeID == remoteID {
+		for _, deviceCfg := range cfg.Devices {
+			if deviceCfg.DeviceID == remoteID {
 				// Verify the name on the certificate. By default we set it to
 				// "syncthing" when generating, but the user may have replaced
 				// the certificate and used another name.
-				certName := nodeCfg.CertName
+				certName := deviceCfg.CertName
 				if certName == "" {
 					certName = "syncthing"
 				}
@@ -839,13 +839,13 @@ next:
 				}
 
 				name := fmt.Sprintf("%s-%s", conn.LocalAddr(), conn.RemoteAddr())
-				protoConn := protocol.NewConnection(remoteID, rd, wr, m, name, nodeCfg.Compression)
+				protoConn := protocol.NewConnection(remoteID, rd, wr, m, name, deviceCfg.Compression)
 
 				l.Infof("Established secure connection to %s at %s", remoteID, name)
 				if debugNet {
 					l.Debugf("cipher suite %04X", conn.ConnectionState().CipherSuite)
 				}
-				events.Default.Log(events.NodeConnected, map[string]string{
+				events.Default.Log(events.DeviceConnected, map[string]string{
 					"id":   remoteID.String(),
 					"addr": conn.RemoteAddr().String(),
 				})
@@ -855,11 +855,11 @@ next:
 			}
 		}
 
-		events.Default.Log(events.NodeRejected, map[string]string{
-			"node":    remoteID.String(),
+		events.Default.Log(events.DeviceRejected, map[string]string{
+			"device":  remoteID.String(),
 			"address": conn.RemoteAddr().String(),
 		})
-		l.Infof("Connection from %s with unknown node ID %s; ignoring", conn.RemoteAddr(), remoteID)
+		l.Infof("Connection from %s with unknown device ID %s; ignoring", conn.RemoteAddr(), remoteID)
 		conn.Close()
 	}
 }
@@ -908,21 +908,21 @@ func listenTLS(conns chan *tls.Conn, addr string, tlsCfg *tls.Config) {
 func dialTLS(m *model.Model, conns chan *tls.Conn, tlsCfg *tls.Config) {
 	var delay time.Duration = 1 * time.Second
 	for {
-	nextNode:
-		for _, nodeCfg := range cfg.Nodes {
-			if nodeCfg.NodeID == myID {
+	nextDevice:
+		for _, deviceCfg := range cfg.Devices {
+			if deviceCfg.DeviceID == myID {
 				continue
 			}
 
-			if m.ConnectedTo(nodeCfg.NodeID) {
+			if m.ConnectedTo(deviceCfg.DeviceID) {
 				continue
 			}
 
 			var addrs []string
-			for _, addr := range nodeCfg.Addresses {
+			for _, addr := range deviceCfg.Addresses {
 				if addr == "dynamic" {
 					if discoverer != nil {
-						t := discoverer.Lookup(nodeCfg.NodeID)
+						t := discoverer.Lookup(deviceCfg.DeviceID)
 						if len(t) == 0 {
 							continue
 						}
@@ -943,7 +943,7 @@ func dialTLS(m *model.Model, conns chan *tls.Conn, tlsCfg *tls.Config) {
 				addr = net.JoinHostPort(host, "22000")
 			}
 			if debugNet {
-				l.Debugln("dial", nodeCfg.NodeID, addr)
+				l.Debugln("dial", deviceCfg.DeviceID, addr)
 			}
 
 			raddr, err := net.ResolveTCPAddr("tcp", addr)
@@ -973,7 +973,7 @@ func dialTLS(m *model.Model, conns chan *tls.Conn, tlsCfg *tls.Config) {
 			}
 
 			conns <- tc
-			continue nextNode
+			continue nextDevice
 		}
 	}
 
@@ -31,13 +31,13 @@ func reportData(m *model.Model) map[string]interface{} {
 	res["version"] = Version
 	res["longVersion"] = LongVersion
 	res["platform"] = runtime.GOOS + "-" + runtime.GOARCH
-	res["numRepos"] = len(cfg.Repositories)
-	res["numNodes"] = len(cfg.Nodes)
+	res["numFolders"] = len(cfg.Folders)
+	res["numDevices"] = len(cfg.Devices)
 
 	var totFiles, maxFiles int
 	var totBytes, maxBytes int64
-	for _, repo := range cfg.Repositories {
-		files, _, bytes := m.GlobalSize(repo.ID)
+	for _, folder := range cfg.Folders {
+		files, _, bytes := m.GlobalSize(folder.ID)
 		totFiles += files
 		totBytes += bytes
 		if files > maxFiles {
@@ -49,9 +49,9 @@ func reportData(m *model.Model) map[string]interface{} {
 	}
 
 	res["totFiles"] = totFiles
-	res["repoMaxFiles"] = maxFiles
+	res["folderMaxFiles"] = maxFiles
 	res["totMiB"] = totBytes / 1024 / 1024
-	res["repoMaxMiB"] = maxBytes / 1024 / 1024
+	res["folderMaxMiB"] = maxBytes / 1024 / 1024
 
 	var mem runtime.MemStats
 	runtime.ReadMemStats(&mem)
gui/app.js (458 changed lines)
@@ -83,11 +83,11 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http, $translate, $loca
     $scope.errors = [];
     $scope.model = {};
     $scope.myID = '';
-    $scope.nodes = [];
+    $scope.devices = [];
     $scope.protocolChanged = false;
     $scope.reportData = {};
     $scope.reportPreview = false;
-    $scope.repos = {};
+    $scope.folders = {};
     $scope.seenError = '';
     $scope.upgradeInfo = {};
     $scope.stats = {};
@@ -180,33 +180,33 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http, $translate, $loca
 
     $scope.$on('StateChanged', function (event, arg) {
         var data = arg.data;
-        if ($scope.model[data.repo]) {
-            $scope.model[data.repo].state = data.to;
+        if ($scope.model[data.folder]) {
+            $scope.model[data.folder].state = data.to;
         }
     });
 
     $scope.$on('LocalIndexUpdated', function (event, arg) {
         var data = arg.data;
-        refreshRepo(data.repo);
+        refreshFolder(data.folder);
 
-        // Update completion status for all nodes that we share this repo with.
-        $scope.repos[data.repo].Nodes.forEach(function (nodeCfg) {
-            refreshCompletion(nodeCfg.NodeID, data.repo);
+        // Update completion status for all devices that we share this folder with.
+        $scope.folders[data.folder].Devices.forEach(function (deviceCfg) {
+            refreshCompletion(deviceCfg.DeviceID, data.folder);
         });
     });
 
     $scope.$on('RemoteIndexUpdated', function (event, arg) {
         var data = arg.data;
-        refreshRepo(data.repo);
-        refreshCompletion(data.node, data.repo);
+        refreshFolder(data.folder);
+        refreshCompletion(data.device, data.folder);
     });
 
-    $scope.$on('NodeDisconnected', function (event, arg) {
+    $scope.$on('DeviceDisconnected', function (event, arg) {
         delete $scope.connections[arg.data.id];
-        refreshNodeStats();
+        refreshDeviceStats();
     });
 
-    $scope.$on('NodeConnected', function (event, arg) {
+    $scope.$on('DeviceConnected', function (event, arg) {
         if (!$scope.connections[arg.data.id]) {
             $scope.connections[arg.data.id] = {
                 inbps: 0,
@@ -251,13 +251,13 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http, $translate, $loca
 
     var debouncedFuncs = {};
 
-    function refreshRepo(repo) {
-        var key = "refreshRepo" + repo;
+    function refreshFolder(folder) {
+        var key = "refreshFolder" + folder;
         if (!debouncedFuncs[key]) {
             debouncedFuncs[key] = debounce(function () {
-                $http.get(urlbase + '/model?repo=' + encodeURIComponent(repo)).success(function (data) {
-                    $scope.model[repo] = data;
-                    console.log("refreshRepo", repo, data);
+                $http.get(urlbase + '/model?folder=' + encodeURIComponent(folder)).success(function (data) {
+                    $scope.model[folder] = data;
+                    console.log("refreshFolder", folder, data);
                 });
             }, 1000, true);
         }
@@ -270,19 +270,19 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http, $translate, $loca
     $scope.config = config;
     $scope.config.Options.ListenStr = $scope.config.Options.ListenAddress.join(', ');
 
-    $scope.nodes = $scope.config.Nodes;
-    $scope.nodes.forEach(function (nodeCfg) {
-        $scope.completion[nodeCfg.NodeID] = {
+    $scope.devices = $scope.config.Devices;
+    $scope.devices.forEach(function (deviceCfg) {
+        $scope.completion[deviceCfg.DeviceID] = {
            _total: 100,
        };
    });
-    $scope.nodes.sort(nodeCompare);
+    $scope.devices.sort(deviceCompare);
 
-    $scope.repos = repoMap($scope.config.Repositories);
-    Object.keys($scope.repos).forEach(function (repo) {
-        refreshRepo(repo);
-        $scope.repos[repo].Nodes.forEach(function (nodeCfg) {
-            refreshCompletion(nodeCfg.NodeID, repo);
+    $scope.folders = folderMap($scope.config.Folders);
+    Object.keys($scope.folders).forEach(function (folder) {
+        refreshFolder(folder);
+        $scope.folders[folder].Devices.forEach(function (deviceCfg) {
+            refreshCompletion(deviceCfg.DeviceID, folder);
         });
     });
 
@@ -299,32 +299,32 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http, $translate, $loca
         });
     }
 
-    function refreshCompletion(node, repo) {
-        if (node === $scope.myID) {
+    function refreshCompletion(device, folder) {
+        if (device === $scope.myID) {
             return;
         }
 
-        var key = "refreshCompletion" + node + repo;
+        var key = "refreshCompletion" + device + folder;
         if (!debouncedFuncs[key]) {
             debouncedFuncs[key] = debounce(function () {
-                $http.get(urlbase + '/completion?node=' + node + '&repo=' + encodeURIComponent(repo)).success(function (data) {
-                    if (!$scope.completion[node]) {
-                        $scope.completion[node] = {};
+                $http.get(urlbase + '/completion?device=' + device + '&folder=' + encodeURIComponent(folder)).success(function (data) {
+                    if (!$scope.completion[device]) {
+                        $scope.completion[device] = {};
                     }
-                    $scope.completion[node][repo] = data.completion;
+                    $scope.completion[device][folder] = data.completion;
 
                     var tot = 0,
                         cnt = 0;
-                    for (var cmp in $scope.completion[node]) {
+                    for (var cmp in $scope.completion[device]) {
                         if (cmp === "_total") {
                             continue;
                         }
-                        tot += $scope.completion[node][cmp];
+                        tot += $scope.completion[device][cmp];
                         cnt += 1;
                     }
-                    $scope.completion[node]._total = tot / cnt;
+                    $scope.completion[device]._total = tot / cnt;
 
-                    console.log("refreshCompletion", node, repo, $scope.completion[node]);
+                    console.log("refreshCompletion", device, folder, $scope.completion[device]);
                 });
             }, 1000, true);
         }
@@ -373,14 +373,14 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http, $translate, $loca
         });
     }
 
-    var refreshNodeStats = debounce(function () {
-        $http.get(urlbase + "/stats/node").success(function (data) {
+    var refreshDeviceStats = debounce(function () {
+        $http.get(urlbase + "/stats/device").success(function (data) {
             $scope.stats = data;
-            for (var node in $scope.stats) {
-                $scope.stats[node].LastSeen = new Date($scope.stats[node].LastSeen);
-                $scope.stats[node].LastSeenDays = (new Date() - $scope.stats[node].LastSeen) / 1000 / 86400;
+            for (var device in $scope.stats) {
+                $scope.stats[device].LastSeen = new Date($scope.stats[device].LastSeen);
+                $scope.stats[device].LastSeenDays = (new Date() - $scope.stats[device].LastSeen) / 1000 / 86400;
             }
-            console.log("refreshNodeStats", data);
+            console.log("refreshDeviceStats", data);
         });
     }, 500);
 
@@ -388,7 +388,7 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http, $translate, $loca
     refreshSystem();
     refreshConfig();
     refreshConnectionStats();
-    refreshNodeStats();
+    refreshDeviceStats();
 
     $http.get(urlbase + '/version').success(function (data) {
         $scope.version = data.version;
@@ -411,28 +411,28 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http, $translate, $loca
         refreshErrors();
     };
 
-    $scope.repoStatus = function (repo) {
-        if (typeof $scope.model[repo] === 'undefined') {
+    $scope.folderStatus = function (folder) {
+        if (typeof $scope.model[folder] === 'undefined') {
             return 'unknown';
         }
 
-        if ($scope.model[repo].invalid !== '') {
+        if ($scope.model[folder].invalid !== '') {
             return 'stopped';
         }
 
-        return '' + $scope.model[repo].state;
+        return '' + $scope.model[folder].state;
     };
 
-    $scope.repoClass = function (repo) {
-        if (typeof $scope.model[repo] === 'undefined') {
+    $scope.folderClass = function (folder) {
+        if (typeof $scope.model[folder] === 'undefined') {
             return 'info';
         }
 
-        if ($scope.model[repo].invalid !== '') {
+        if ($scope.model[folder].invalid !== '') {
             return 'danger';
         }
 
-        var state = '' + $scope.model[repo].state;
+        var state = '' + $scope.model[folder].state;
         if (state == 'idle') {
             return 'success';
         }
@@ -445,21 +445,21 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http, $translate, $loca
         return 'info';
     };
 
-    $scope.syncPercentage = function (repo) {
-        if (typeof $scope.model[repo] === 'undefined') {
+    $scope.syncPercentage = function (folder) {
+        if (typeof $scope.model[folder] === 'undefined') {
             return 100;
         }
-        if ($scope.model[repo].globalBytes === 0) {
+        if ($scope.model[folder].globalBytes === 0) {
             return 100;
         }
 
-        var pct = 100 * $scope.model[repo].inSyncBytes / $scope.model[repo].globalBytes;
+        var pct = 100 * $scope.model[folder].inSyncBytes / $scope.model[folder].globalBytes;
         return Math.floor(pct);
     };
 
-    $scope.nodeIcon = function (nodeCfg) {
-        if ($scope.connections[nodeCfg.NodeID]) {
-            if ($scope.completion[nodeCfg.NodeID] && $scope.completion[nodeCfg.NodeID]._total === 100) {
+    $scope.deviceIcon = function (deviceCfg) {
+        if ($scope.connections[deviceCfg.DeviceID]) {
+            if ($scope.completion[deviceCfg.DeviceID] && $scope.completion[deviceCfg.DeviceID]._total === 100) {
                 return 'ok';
             } else {
                 return 'refresh';
@@ -469,9 +469,9 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http, $translate, $loca
         return 'minus';
     };
 
-    $scope.nodeClass = function (nodeCfg) {
-        if ($scope.connections[nodeCfg.NodeID]) {
-            if ($scope.completion[nodeCfg.NodeID] && $scope.completion[nodeCfg.NodeID]._total === 100) {
+    $scope.deviceClass = function (deviceCfg) {
+        if ($scope.connections[deviceCfg.DeviceID]) {
+            if ($scope.completion[deviceCfg.DeviceID] && $scope.completion[deviceCfg.DeviceID]._total === 100) {
                 return 'success';
             } else {
                 return 'primary';
@@ -481,25 +481,25 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http, $translate, $loca
         return 'info';
     };
 
-    $scope.nodeAddr = function (nodeCfg) {
-        var conn = $scope.connections[nodeCfg.NodeID];
+    $scope.deviceAddr = function (deviceCfg) {
+        var conn = $scope.connections[deviceCfg.DeviceID];
         if (conn) {
             return conn.Address;
         }
         return '?';
     };
 
-    $scope.nodeCompletion = function (nodeCfg) {
-        var conn = $scope.connections[nodeCfg.NodeID];
+    $scope.deviceCompletion = function (deviceCfg) {
+        var conn = $scope.connections[deviceCfg.DeviceID];
         if (conn) {
             return conn.Completion + '%';
         }
         return '';
     };
 
-    $scope.findNode = function (nodeID) {
-        var matches = $scope.nodes.filter(function (n) {
-            return n.NodeID == nodeID;
+    $scope.findDevice = function (deviceID) {
+        var matches = $scope.devices.filter(function (n) {
+            return n.DeviceID == deviceID;
         });
         if (matches.length != 1) {
             return undefined;
@@ -507,32 +507,32 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http, $translate, $loca
         return matches[0];
     };
 
-    $scope.nodeName = function (nodeCfg) {
-        if (typeof nodeCfg === 'undefined') {
+    $scope.deviceName = function (deviceCfg) {
+        if (typeof deviceCfg === 'undefined') {
             return "";
         }
-        if (nodeCfg.Name) {
-            return nodeCfg.Name;
+        if (deviceCfg.Name) {
+            return deviceCfg.Name;
         }
-        return nodeCfg.NodeID.substr(0, 6);
+        return deviceCfg.DeviceID.substr(0, 6);
     };
 
-    $scope.thisNodeName = function () {
-        var node = $scope.thisNode();
-        if (typeof node === 'undefined') {
-            return "(unknown node)";
+    $scope.thisDeviceName = function () {
+        var device = $scope.thisDevice();
+        if (typeof device === 'undefined') {
+            return "(unknown device)";
         }
-        if (node.Name) {
-            return node.Name;
+        if (device.Name) {
+            return device.Name;
         }
-        return node.NodeID.substr(0, 6);
+        return device.DeviceID.substr(0, 6);
     };
 
     $scope.editSettings = function () {
         // Make a working copy
         $scope.tmpOptions = angular.copy($scope.config.Options);
         $scope.tmpOptions.UREnabled = ($scope.tmpOptions.URAccepted > 0);
-        $scope.tmpOptions.NodeName = $scope.thisNode().Name;
+        $scope.tmpOptions.DeviceName = $scope.thisDevice().Name;
         $scope.tmpGUI = angular.copy($scope.config.GUI);
         $('#settings').modal();
     };
@@ -569,7 +569,7 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http, $translate, $loca
     }
 
     // Apply new settings locally
-    $scope.thisNode().Name = $scope.tmpOptions.NodeName;
+    $scope.thisDevice().Name = $scope.tmpOptions.DeviceName;
     $scope.config.Options = angular.copy($scope.tmpOptions);
    $scope.config.GUI = angular.copy($scope.tmpGUI);
    $scope.config.Options.ListenAddress = $scope.config.Options.ListenStr.split(',').map(function (x) {
@@ -623,100 +623,100 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http, $translate, $loca
     $scope.configInSync = true;
     };
 
-    $scope.editNode = function (nodeCfg) {
-        $scope.currentNode = $.extend({}, nodeCfg);
+    $scope.editDevice = function (deviceCfg) {
+        $scope.currentDevice = $.extend({}, deviceCfg);
         $scope.editingExisting = true;
-        $scope.editingSelf = (nodeCfg.NodeID == $scope.myID);
-        $scope.currentNode.AddressesStr = nodeCfg.Addresses.join(', ');
-        $scope.nodeEditor.$setPristine();
-        $('#editNode').modal();
+        $scope.editingSelf = (deviceCfg.DeviceID == $scope.myID);
+        $scope.currentDevice.AddressesStr = deviceCfg.Addresses.join(', ');
+        $scope.deviceEditor.$setPristine();
+        $('#editDevice').modal();
     };
 
-    $scope.idNode = function () {
+    $scope.idDevice = function () {
         $('#idqr').modal('show');
     };
 
-    $scope.addNode = function () {
-        $scope.currentNode = {
+    $scope.addDevice = function () {
+        $scope.currentDevice = {
             AddressesStr: 'dynamic',
             Compression: true,
             Introducer: true
         };
         $scope.editingExisting = false;
         $scope.editingSelf = false;
-        $scope.nodeEditor.$setPristine();
-        $('#editNode').modal();
+        $scope.deviceEditor.$setPristine();
+        $('#editDevice').modal();
     };
 
-    $scope.deleteNode = function () {
-        $('#editNode').modal('hide');
+    $scope.deleteDevice = function () {
+        $('#editDevice').modal('hide');
         if (!$scope.editingExisting) {
             return;
         }
 
-        $scope.nodes = $scope.nodes.filter(function (n) {
-            return n.NodeID !== $scope.currentNode.NodeID;
+        $scope.devices = $scope.devices.filter(function (n) {
+            return n.DeviceID !== $scope.currentDevice.DeviceID;
         });
-        $scope.config.Nodes = $scope.nodes;
+        $scope.config.Devices = $scope.devices;
 
-        for (var id in $scope.repos) {
-            $scope.repos[id].Nodes = $scope.repos[id].Nodes.filter(function (n) {
-                return n.NodeID !== $scope.currentNode.NodeID;
+        for (var id in $scope.folders) {
+            $scope.folders[id].Devices = $scope.folders[id].Devices.filter(function (n) {
+                return n.DeviceID !== $scope.currentDevice.DeviceID;
            });
        }

        $scope.saveConfig();
    };

-    $scope.saveNode = function () {
-        var nodeCfg, done, i;
+    $scope.saveDevice = function () {
+        var deviceCfg, done, i;

-        $('#editNode').modal('hide');
-        nodeCfg = $scope.currentNode;
-        nodeCfg.Addresses = nodeCfg.AddressesStr.split(',').map(function (x) {
+        $('#editDevice').modal('hide');
+        deviceCfg = $scope.currentDevice;
+        deviceCfg.Addresses = deviceCfg.AddressesStr.split(',').map(function (x) {
            return x.trim();
        });

        done = false;
-        for (i = 0; i < $scope.nodes.length; i++) {
-            if ($scope.nodes[i].NodeID === nodeCfg.NodeID) {
-                $scope.nodes[i] = nodeCfg;
+        for (i = 0; i < $scope.devices.length; i++) {
+            if ($scope.devices[i].DeviceID === deviceCfg.DeviceID) {
+                $scope.devices[i] = deviceCfg;
                done = true;
                break;
            }
        }

        if (!done) {
-            $scope.nodes.push(nodeCfg);
+            $scope.devices.push(deviceCfg);
        }

-        $scope.nodes.sort(nodeCompare);
-        $scope.config.Nodes = $scope.nodes;
+        $scope.devices.sort(deviceCompare);
+        $scope.config.Devices = $scope.devices;

        $scope.saveConfig();
    };

-    $scope.otherNodes = function () {
-        return $scope.nodes.filter(function (n) {
-            return n.NodeID !== $scope.myID;
+    $scope.otherDevices = function () {
+        return $scope.devices.filter(function (n) {
+            return n.DeviceID !== $scope.myID;
        });
    };

-    $scope.thisNode = function () {
+    $scope.thisDevice = function () {
        var i, n;

-        for (i = 0; i < $scope.nodes.length; i++) {
-            n = $scope.nodes[i];
-            if (n.NodeID === $scope.myID) {
+        for (i = 0; i < $scope.devices.length; i++) {
+            n = $scope.devices[i];
+            if (n.DeviceID === $scope.myID) {
                return n;
            }
        }
    };

-    $scope.allNodes = function () {
-        var nodes = $scope.otherNodes();
-        nodes.push($scope.thisNode());
-        return nodes;
+    $scope.allDevices = function () {
+        var devices = $scope.otherDevices();
+        devices.push($scope.thisDevice());
+        return devices;
    };

    $scope.errorList = function () {
@@ -730,134 +730,134 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http, $translate, $loca
 $http.post(urlbase + '/error/clear');
 };

-$scope.friendlyNodes = function (str) {
+$scope.friendlyDevices = function (str) {
-for (var i = 0; i < $scope.nodes.length; i++) {
+for (var i = 0; i < $scope.devices.length; i++) {
-var cfg = $scope.nodes[i];
+var cfg = $scope.devices[i];
-str = str.replace(cfg.NodeID, $scope.nodeName(cfg));
+str = str.replace(cfg.DeviceID, $scope.deviceName(cfg));
 }
 return str;
 };

-$scope.repoList = function () {
+$scope.folderList = function () {
-return repoList($scope.repos);
+return folderList($scope.folders);
 };

-$scope.editRepo = function (nodeCfg) {
+$scope.editFolder = function (deviceCfg) {
-$scope.currentRepo = angular.copy(nodeCfg);
+$scope.currentFolder = angular.copy(deviceCfg);
-$scope.currentRepo.selectedNodes = {};
+$scope.currentFolder.selectedDevices = {};
-$scope.currentRepo.Nodes.forEach(function (n) {
+$scope.currentFolder.Devices.forEach(function (n) {
-$scope.currentRepo.selectedNodes[n.NodeID] = true;
+$scope.currentFolder.selectedDevices[n.DeviceID] = true;
 });
-if ($scope.currentRepo.Versioning && $scope.currentRepo.Versioning.Type === "simple") {
+if ($scope.currentFolder.Versioning && $scope.currentFolder.Versioning.Type === "simple") {
-$scope.currentRepo.simpleFileVersioning = true;
+$scope.currentFolder.simpleFileVersioning = true;
-$scope.currentRepo.FileVersioningSelector = "simple";
+$scope.currentFolder.FileVersioningSelector = "simple";
-$scope.currentRepo.simpleKeep = +$scope.currentRepo.Versioning.Params.keep;
+$scope.currentFolder.simpleKeep = +$scope.currentFolder.Versioning.Params.keep;
-} else if ($scope.currentRepo.Versioning && $scope.currentRepo.Versioning.Type === "staggered") {
+} else if ($scope.currentFolder.Versioning && $scope.currentFolder.Versioning.Type === "staggered") {
-$scope.currentRepo.staggeredFileVersioning = true;
+$scope.currentFolder.staggeredFileVersioning = true;
-$scope.currentRepo.FileVersioningSelector = "staggered";
+$scope.currentFolder.FileVersioningSelector = "staggered";
-$scope.currentRepo.staggeredMaxAge = Math.floor(+$scope.currentRepo.Versioning.Params.maxAge / 86400);
+$scope.currentFolder.staggeredMaxAge = Math.floor(+$scope.currentFolder.Versioning.Params.maxAge / 86400);
-$scope.currentRepo.staggeredCleanInterval = +$scope.currentRepo.Versioning.Params.cleanInterval;
+$scope.currentFolder.staggeredCleanInterval = +$scope.currentFolder.Versioning.Params.cleanInterval;
-$scope.currentRepo.staggeredVersionsPath = $scope.currentRepo.Versioning.Params.versionsPath;
+$scope.currentFolder.staggeredVersionsPath = $scope.currentFolder.Versioning.Params.versionsPath;
 } else {
-$scope.currentRepo.FileVersioningSelector = "none";
+$scope.currentFolder.FileVersioningSelector = "none";
 }
-$scope.currentRepo.simpleKeep = $scope.currentRepo.simpleKeep || 5;
+$scope.currentFolder.simpleKeep = $scope.currentFolder.simpleKeep || 5;
-$scope.currentRepo.staggeredCleanInterval = $scope.currentRepo.staggeredCleanInterval || 3600;
+$scope.currentFolder.staggeredCleanInterval = $scope.currentFolder.staggeredCleanInterval || 3600;
-$scope.currentRepo.staggeredVersionsPath = $scope.currentRepo.staggeredVersionsPath || "";
+$scope.currentFolder.staggeredVersionsPath = $scope.currentFolder.staggeredVersionsPath || "";

 // staggeredMaxAge can validly be zero, which we should not replace
 // with the default value of 365. So only set the default if it's
 // actually undefined.
-if (typeof $scope.currentRepo.staggeredMaxAge === 'undefined') {
+if (typeof $scope.currentFolder.staggeredMaxAge === 'undefined') {
-$scope.currentRepo.staggeredMaxAge = 365;
+$scope.currentFolder.staggeredMaxAge = 365;
 }

 $scope.editingExisting = true;
-$scope.repoEditor.$setPristine();
+$scope.folderEditor.$setPristine();
-$('#editRepo').modal();
+$('#editFolder').modal();
 };

-$scope.addRepo = function () {
+$scope.addFolder = function () {
-$scope.currentRepo = {
+$scope.currentFolder = {
-selectedNodes: {}
+selectedDevices: {}
 };
-$scope.currentRepo.RescanIntervalS = 60;
+$scope.currentFolder.RescanIntervalS = 60;
-$scope.currentRepo.FileVersioningSelector = "none";
+$scope.currentFolder.FileVersioningSelector = "none";
-$scope.currentRepo.simpleKeep = 5;
+$scope.currentFolder.simpleKeep = 5;
-$scope.currentRepo.staggeredMaxAge = 365;
+$scope.currentFolder.staggeredMaxAge = 365;
-$scope.currentRepo.staggeredCleanInterval = 3600;
+$scope.currentFolder.staggeredCleanInterval = 3600;
-$scope.currentRepo.staggeredVersionsPath = "";
+$scope.currentFolder.staggeredVersionsPath = "";
 $scope.editingExisting = false;
-$scope.repoEditor.$setPristine();
+$scope.folderEditor.$setPristine();
-$('#editRepo').modal();
+$('#editFolder').modal();
 };

-$scope.saveRepo = function () {
+$scope.saveFolder = function () {
-var repoCfg, done, i;
+var folderCfg, done, i;

-$('#editRepo').modal('hide');
+$('#editFolder').modal('hide');
-repoCfg = $scope.currentRepo;
+folderCfg = $scope.currentFolder;
-repoCfg.Nodes = [];
+folderCfg.Devices = [];
-repoCfg.selectedNodes[$scope.myID] = true;
+folderCfg.selectedDevices[$scope.myID] = true;
-for (var nodeID in repoCfg.selectedNodes) {
+for (var deviceID in folderCfg.selectedDevices) {
-if (repoCfg.selectedNodes[nodeID] === true) {
+if (folderCfg.selectedDevices[deviceID] === true) {
-repoCfg.Nodes.push({
+folderCfg.Devices.push({
-NodeID: nodeID
+DeviceID: deviceID
 });
 }
 }
-delete repoCfg.selectedNodes;
+delete folderCfg.selectedDevices;

-if (repoCfg.FileVersioningSelector === "simple") {
+if (folderCfg.FileVersioningSelector === "simple") {
-repoCfg.Versioning = {
+folderCfg.Versioning = {
 'Type': 'simple',
 'Params': {
-'keep': '' + repoCfg.simpleKeep,
+'keep': '' + folderCfg.simpleKeep,
 }
 };
-delete repoCfg.simpleFileVersioning;
+delete folderCfg.simpleFileVersioning;
-delete repoCfg.simpleKeep;
+delete folderCfg.simpleKeep;
-} else if (repoCfg.FileVersioningSelector === "staggered") {
+} else if (folderCfg.FileVersioningSelector === "staggered") {
-repoCfg.Versioning = {
+folderCfg.Versioning = {
 'Type': 'staggered',
 'Params': {
-'maxAge': '' + (repoCfg.staggeredMaxAge * 86400),
+'maxAge': '' + (folderCfg.staggeredMaxAge * 86400),
-'cleanInterval': '' + repoCfg.staggeredCleanInterval,
+'cleanInterval': '' + folderCfg.staggeredCleanInterval,
-'versionsPath': '' + repoCfg.staggeredVersionsPath,
+'versionsPath': '' + folderCfg.staggeredVersionsPath,
 }
 };
-delete repoCfg.staggeredFileVersioning;
+delete folderCfg.staggeredFileVersioning;
-delete repoCfg.staggeredMaxAge;
+delete folderCfg.staggeredMaxAge;
-delete repoCfg.staggeredCleanInterval;
+delete folderCfg.staggeredCleanInterval;
-delete repoCfg.staggeredVersionsPath;
+delete folderCfg.staggeredVersionsPath;

 } else {
-delete repoCfg.Versioning;
+delete folderCfg.Versioning;
 }

-$scope.repos[repoCfg.ID] = repoCfg;
+$scope.folders[folderCfg.ID] = folderCfg;
-$scope.config.Repositories = repoList($scope.repos);
+$scope.config.Folders = folderList($scope.folders);

 $scope.saveConfig();
 };

-$scope.sharesRepo = function (repoCfg) {
+$scope.sharesFolder = function (folderCfg) {
 var names = [];
-repoCfg.Nodes.forEach(function (node) {
+folderCfg.Devices.forEach(function (device) {
-names.push($scope.nodeName($scope.findNode(node.NodeID)));
+names.push($scope.deviceName($scope.findDevice(device.DeviceID)));
 });
 names.sort();
 return names.join(", ");
 };

-$scope.deleteRepo = function () {
+$scope.deleteFolder = function () {
-$('#editRepo').modal('hide');
+$('#editFolder').modal('hide');
 if (!$scope.editingExisting) {
 return;
 }

-delete $scope.repos[$scope.currentRepo.ID];
+delete $scope.folders[$scope.currentFolder.ID];
-$scope.config.Repositories = repoList($scope.repos);
+$scope.config.Folders = folderList($scope.folders);

 $scope.saveConfig();
 };
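For reference, the folder editor above serializes its versioning choices into the folder's Versioning config block, with all parameter values stored as strings. A minimal sketch of the two resulting shapes, using illustrative values only (5 kept versions, a 30-day staggered maximum age):

// Simple versioning: keep N old copies per file
var simpleVersioning = { 'Type': 'simple', 'Params': { 'keep': '' + 5 } };

// Staggered versioning: maxAge is entered in days and stored in seconds
var staggeredVersioning = {
    'Type': 'staggered',
    'Params': {
        'maxAge': '' + (30 * 86400),        // 30 days -> 2592000 seconds
        'cleanInterval': '' + 3600,
        'versionsPath': ''
    }
};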
@@ -868,18 +868,18 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http, $translate, $loca
 }

 $('#editIgnoresButton').attr('disabled', 'disabled');
-$http.get(urlbase + '/ignores?repo=' + encodeURIComponent($scope.currentRepo.ID))
+$http.get(urlbase + '/ignores?folder=' + encodeURIComponent($scope.currentFolder.ID))
 .success(function (data) {
 data.ignore = data.ignore || [];

-$('#editRepo').modal('hide');
+$('#editFolder').modal('hide');
 var textArea = $('#editIgnores textarea');

 textArea.val(data.ignore.join('\n'));

 $('#editIgnores').modal()
 .on('hidden.bs.modal', function () {
-$('#editRepo').modal();
+$('#editFolder').modal();
 })
 .on('shown.bs.modal', function () {
 textArea.focus();
@@ -895,7 +895,7 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http, $translate, $loca
 return;
 }

-$http.post(urlbase + '/ignores?repo=' + encodeURIComponent($scope.currentRepo.ID), {
+$http.post(urlbase + '/ignores?folder=' + encodeURIComponent($scope.currentFolder.ID), {
 ignore: $('#editIgnores textarea').val().split('\n')
 });
 };
@@ -923,10 +923,10 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http, $translate, $loca
 $('#ur').modal('hide');
 };

-$scope.showNeed = function (repo) {
+$scope.showNeed = function (folder) {
 $scope.neededLoaded = false;
 $('#needed').modal();
-$http.get(urlbase + "/need?repo=" + encodeURIComponent(repo)).success(function (data) {
+$http.get(urlbase + "/need?folder=" + encodeURIComponent(folder)).success(function (data) {
 $scope.needed = data;
 $scope.neededLoaded = true;
 });
@@ -947,8 +947,8 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http, $translate, $loca
 }
 };

-$scope.override = function (repo) {
+$scope.override = function (folder) {
-$http.post(urlbase + "/model/override?repo=" + encodeURIComponent(repo));
+$http.post(urlbase + "/model/override?folder=" + encodeURIComponent(folder));
 };

 $scope.about = function () {
@@ -959,34 +959,34 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http, $translate, $loca
 $scope.reportPreview = true;
 };

-$scope.rescanRepo = function (repo) {
+$scope.rescanFolder = function (folder) {
-$http.post(urlbase + "/scan?repo=" + encodeURIComponent(repo));
+$http.post(urlbase + "/scan?folder=" + encodeURIComponent(folder));
 };

 $scope.init();
 setInterval($scope.refresh, 10000);
 });

-function nodeCompare(a, b) {
+function deviceCompare(a, b) {
 if (typeof a.Name !== 'undefined' && typeof b.Name !== 'undefined') {
 if (a.Name < b.Name)
 return -1;
 return a.Name > b.Name;
 }
-if (a.NodeID < b.NodeID) {
+if (a.DeviceID < b.DeviceID) {
 return -1;
 }
-return a.NodeID > b.NodeID;
+return a.DeviceID > b.DeviceID;
 }

-function repoCompare(a, b) {
+function folderCompare(a, b) {
 if (a.ID < b.ID) {
 return -1;
 }
 return a.ID > b.ID;
 }

-function repoMap(l) {
+function folderMap(l) {
 var m = {};
 l.forEach(function (r) {
 m[r.ID] = r;
@@ -994,12 +994,12 @@ function repoMap(l) {
 return m;
 }

-function repoList(m) {
+function folderList(m) {
 var l = [];
 for (var id in m) {
 l.push(m[id]);
 }
-l.sort(repoCompare);
+l.sort(folderCompare);
 return l;
 }

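For context, the renamed helpers above keep the GUI's folder collection in two forms: folderMap builds a map keyed by folder ID, and folderList flattens it back into an array sorted with folderCompare for display, while deviceCompare orders devices by Name when set and by DeviceID otherwise. A minimal usage sketch, with hypothetical folder IDs:

var folders = folderMap([{ ID: 'photos' }, { ID: 'docs' }]);  // { photos: {...}, docs: {...} }
var ordered = folderList(folders);                            // [{ ID: 'docs' }, { ID: 'photos' }]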
@@ -1137,20 +1137,20 @@ syncthing.filter('basename', function () {
 };
 });

-syncthing.directive('uniqueRepo', function () {
+syncthing.directive('uniqueFolder', function () {
 return {
 require: 'ngModel',
 link: function (scope, elm, attrs, ctrl) {
 ctrl.$parsers.unshift(function (viewValue) {
 if (scope.editingExisting) {
 // we shouldn't validate
-ctrl.$setValidity('uniqueRepo', true);
+ctrl.$setValidity('uniqueFolder', true);
-} else if (scope.repos[viewValue]) {
+} else if (scope.folders[viewValue]) {
-// the repo exists already
+// the folder exists already
-ctrl.$setValidity('uniqueRepo', false);
+ctrl.$setValidity('uniqueFolder', false);
 } else {
-// the repo is unique
+// the folder is unique
-ctrl.$setValidity('uniqueRepo', true);
+ctrl.$setValidity('uniqueFolder', true);
 }
 return viewValue;
 });
@@ -1158,20 +1158,20 @@ syncthing.directive('uniqueRepo', function () {
 };
 });

-syncthing.directive('validNodeid', function ($http) {
+syncthing.directive('validDeviceid', function ($http) {
 return {
 require: 'ngModel',
 link: function (scope, elm, attrs, ctrl) {
 ctrl.$parsers.unshift(function (viewValue) {
 if (scope.editingExisting) {
 // we shouldn't validate
-ctrl.$setValidity('validNodeid', true);
+ctrl.$setValidity('validDeviceid', true);
 } else {
-$http.get(urlbase + '/nodeid?id=' + viewValue).success(function (resp) {
+$http.get(urlbase + '/deviceid?id=' + viewValue).success(function (resp) {
 if (resp.error) {
-ctrl.$setValidity('validNodeid', false);
+ctrl.$setValidity('validDeviceid', false);
 } else {
-ctrl.$setValidity('validNodeid', true);
+ctrl.$setValidity('validDeviceid', true);
 }
 });
 }

gui/index.html | 286
@@ -13,7 +13,7 @@
 <meta name="author" content="">
 <link rel="shortcut icon" href="img/favicon.png">

-<title>Syncthing | {{thisNodeName()}}</title>
+<title>Syncthing | {{thisDeviceName()}}</title>
 <link href="bootstrap/css/bootstrap.min.css" rel="stylesheet">
 <link href="font/raleway.css" rel="stylesheet">
 <link href="overrides.css" rel="stylesheet">
@@ -27,7 +27,7 @@
 <nav class="navbar navbar-top navbar-default" role="navigation">
 <div class="container">
 <span class="navbar-brand"><img class="logo" src="img/logo-text-64.png" height="32" width="117"/></span>
-<p class="navbar-text hidden-xs">{{thisNodeName()}}</p>
+<p class="navbar-text hidden-xs">{{thisDeviceName()}}</p>
 <ul class="nav navbar-nav navbar-right">
 <li ng-if="upgradeInfo.newer">
 <button type="button" class="btn navbar-btn btn-primary btn-sm" href="" ng-click="upgrade()">
@@ -39,7 +39,7 @@
 <a href="#" class="dropdown-toggle" data-toggle="dropdown"><span class="glyphicon glyphicon-cog"></span></a>
 <ul class="dropdown-menu">
 <li><a href="" ng-click="editSettings()"><span class="glyphicon glyphicon-cog"></span> <span translate>Settings</span></a></li>
-<li><a href="" ng-click="idNode()"><span class="glyphicon glyphicon-qrcode"></span> <span translate>Show ID</span></a></li>
+<li><a href="" ng-click="idDevice()"><span class="glyphicon glyphicon-qrcode"></span> <span translate>Show ID</span></a></li>
 <li class="divider"></li>
 <li><a href="" ng-click="shutdown()"><span class="glyphicon glyphicon-off"></span> <span translate>Shutdown</span></a></li>
 <li><a href="" ng-click="restart()"><span class="glyphicon glyphicon-refresh"></span> <span translate>Restart</span></a></li>
@ -74,90 +74,90 @@
|
|||||||
|
|
||||||
<div class="row">
|
<div class="row">
|
||||||
|
|
||||||
<!-- Repository list (top left) -->
|
<!-- Folder list (top left) -->
|
||||||
|
|
||||||
<div class="col-md-6">
|
<div class="col-md-6">
|
||||||
<div class="panel-group" id="repositories">
|
<div class="panel-group" id="folders">
|
||||||
<div class="panel panel-{{repoClass(repo.ID)}}" ng-repeat="repo in repoList()">
|
<div class="panel panel-{{folderClass(folder.ID)}}" ng-repeat="folder in folderList()">
|
||||||
<div class="panel-heading" data-toggle="collapse" data-parent="#repositories" href="#repo-{{$index}}" style="cursor: pointer">
|
<div class="panel-heading" data-toggle="collapse" data-parent="#folders" href="#folder-{{$index}}" style="cursor: pointer">
|
||||||
<h3 class="panel-title">
|
<h3 class="panel-title">
|
||||||
<span class="glyphicon glyphicon-hdd"></span> {{repo.ID}}
|
<span class="glyphicon glyphicon-hdd"></span> {{folder.ID}}
|
||||||
<span class="pull-right hidden-xs" ng-switch="repoStatus(repo.ID)">
|
<span class="pull-right hidden-xs" ng-switch="folderStatus(folder.ID)">
|
||||||
<span translate ng-switch-when="unknown">Unknown</span>
|
<span translate ng-switch-when="unknown">Unknown</span>
|
||||||
<span translate ng-switch-when="stopped">Stopped</span>
|
<span translate ng-switch-when="stopped">Stopped</span>
|
||||||
<span translate ng-switch-when="scanning">Scanning</span>
|
<span translate ng-switch-when="scanning">Scanning</span>
|
||||||
<span ng-switch-when="syncing">
|
<span ng-switch-when="syncing">
|
||||||
<span translate>Syncing</span>
|
<span translate>Syncing</span>
|
||||||
({{syncPercentage(repo.ID)}}%)
|
({{syncPercentage(folder.ID)}}%)
|
||||||
</span>
|
</span>
|
||||||
<span ng-switch-when="idle">
|
<span ng-switch-when="idle">
|
||||||
<span translate>Idle</span>
|
<span translate>Idle</span>
|
||||||
({{syncPercentage(repo.ID)}}%)
|
({{syncPercentage(folder.ID)}}%)
|
||||||
</span>
|
</span>
|
||||||
</span>
|
</span>
|
||||||
</h3>
|
</h3>
|
||||||
</div>
|
</div>
|
||||||
<div id="repo-{{$index}}" class="panel-collapse collapse" ng-class="{in: $index === 0}">
|
<div id="folder-{{$index}}" class="panel-collapse collapse" ng-class="{in: $index === 0}">
|
||||||
<div class="panel-body">
|
<div class="panel-body">
|
||||||
<table class="table table-condensed table-striped">
|
<table class="table table-condensed table-striped">
|
||||||
<tbody>
|
<tbody>
|
||||||
<tr>
|
<tr>
|
||||||
<th><span class="glyphicon glyphicon-tag"></span> <span translate>Repository ID</span></th>
|
<th><span class="glyphicon glyphicon-tag"></span> <span translate>Folder ID</span></th>
|
||||||
<td class="text-right">{{repo.ID}}</td>
|
<td class="text-right">{{folder.ID}}</td>
|
||||||
</tr>
|
</tr>
|
||||||
<tr>
|
<tr>
|
||||||
<th><span class="glyphicon glyphicon-folder-open"></span> <span translate>Folder</span></th>
|
<th><span class="glyphicon glyphicon-folder-open"></span> <span translate>Folder</span></th>
|
||||||
<td class="text-right">{{repo.Directory}}</td>
|
<td class="text-right">{{folder.Directory}}</td>
|
||||||
</tr>
|
</tr>
|
||||||
<tr ng-if="model[repo.ID].invalid">
|
<tr ng-if="model[folder.ID].invalid">
|
||||||
<th><span class="glyphicon glyphicon-warning-sign"></span> <span translate>Error</span></th>
|
<th><span class="glyphicon glyphicon-warning-sign"></span> <span translate>Error</span></th>
|
||||||
<td class="text-right">{{model[repo.ID].invalid}}</td>
|
<td class="text-right">{{model[folder.ID].invalid}}</td>
|
||||||
</tr>
|
</tr>
|
||||||
<tr>
|
<tr>
|
||||||
<th><span class="glyphicon glyphicon-globe"></span> <span translate>Global Repository</span></th>
|
<th><span class="glyphicon glyphicon-globe"></span> <span translate>Global Folder</span></th>
|
||||||
<td class="text-right">{{model[repo.ID].globalFiles | alwaysNumber}} <span translate>items</span>, ~{{model[repo.ID].globalBytes | binary}}B</td>
|
<td class="text-right">{{model[folder.ID].globalFiles | alwaysNumber}} <span translate>items</span>, ~{{model[folder.ID].globalBytes | binary}}B</td>
|
||||||
</tr>
|
</tr>
|
||||||
<tr>
|
<tr>
|
||||||
<th><span class="glyphicon glyphicon-home"></span> <span translate>Local Repository</span></th>
|
<th><span class="glyphicon glyphicon-home"></span> <span translate>Local Folder</span></th>
|
||||||
<td class="text-right">{{model[repo.ID].localFiles | alwaysNumber}} <span translate>items</span>, ~{{model[repo.ID].localBytes | binary}}B</td>
|
<td class="text-right">{{model[folder.ID].localFiles | alwaysNumber}} <span translate>items</span>, ~{{model[folder.ID].localBytes | binary}}B</td>
|
||||||
</tr>
|
</tr>
|
||||||
<tr>
|
<tr>
|
||||||
<th><span class="glyphicon glyphicon-cloud-download"></span> <span translate>Out Of Sync</span></th>
|
<th><span class="glyphicon glyphicon-cloud-download"></span> <span translate>Out Of Sync</span></th>
|
||||||
<td class="text-right">
|
<td class="text-right">
|
||||||
<a ng-if="model[repo.ID].needFiles > 0" ng-click="showNeed(repo.ID)" href="">{{model[repo.ID].needFiles | alwaysNumber}} <span translate>items</span>, ~{{model[repo.ID].needBytes | binary}}B</a>
|
<a ng-if="model[folder.ID].needFiles > 0" ng-click="showNeed(folder.ID)" href="">{{model[folder.ID].needFiles | alwaysNumber}} <span translate>items</span>, ~{{model[folder.ID].needBytes | binary}}B</a>
|
||||||
<span ng-if="model[repo.ID].needFiles == 0">0 <span translate>items</span>, 0 B</span>
|
<span ng-if="model[folder.ID].needFiles == 0">0 <span translate>items</span>, 0 B</span>
|
||||||
</td>
|
</td>
|
||||||
</tr>
|
</tr>
|
||||||
<tr>
|
<tr>
|
||||||
<th><span class="glyphicon glyphicon-lock"></span> <span translate>Master Repo</span></th>
|
<th><span class="glyphicon glyphicon-lock"></span> <span translate>Master Folder</span></th>
|
||||||
<td class="text-right">
|
<td class="text-right">
|
||||||
<span translate ng-if="repo.ReadOnly">Yes</span>
|
<span translate ng-if="folder.ReadOnly">Yes</span>
|
||||||
<span translate ng-if="!repo.ReadOnly">No</span>
|
<span translate ng-if="!folder.ReadOnly">No</span>
|
||||||
</td>
|
</td>
|
||||||
</tr>
|
</tr>
|
||||||
<tr>
|
<tr>
|
||||||
<th><span class="glyphicon glyphicon-unchecked"></span> <span translate>Ignore Permissions</span></th>
|
<th><span class="glyphicon glyphicon-unchecked"></span> <span translate>Ignore Permissions</span></th>
|
||||||
<td class="text-right">
|
<td class="text-right">
|
||||||
<span translate ng-if="repo.IgnorePerms">Yes</span>
|
<span translate ng-if="folder.IgnorePerms">Yes</span>
|
||||||
<span translate ng-if="!repo.IgnorePerms">No</span>
|
<span translate ng-if="!folder.IgnorePerms">No</span>
|
||||||
</td>
|
</td>
|
||||||
</tr>
|
</tr>
|
||||||
<tr>
|
<tr>
|
||||||
<th><span class="glyphicon glyphicon-refresh"></span> <span translate>Rescan Interval</span></th>
|
<th><span class="glyphicon glyphicon-refresh"></span> <span translate>Rescan Interval</span></th>
|
||||||
<td class="text-right">{{repo.RescanIntervalS}} s</td>
|
<td class="text-right">{{folder.RescanIntervalS}} s</td>
|
||||||
</tr>
|
</tr>
|
||||||
<tr>
|
<tr>
|
||||||
<th><span class="glyphicon glyphicon-share-alt"></span> <span translate>Shared With</span></th>
|
<th><span class="glyphicon glyphicon-share-alt"></span> <span translate>Shared With</span></th>
|
||||||
<td class="text-right">{{sharesRepo(repo)}}</td>
|
<td class="text-right">{{sharesFolder(folder)}}</td>
|
||||||
</tr>
|
</tr>
|
||||||
</tbody>
|
</tbody>
|
||||||
</table>
|
</table>
|
||||||
</div>
|
</div>
|
||||||
<div class="panel-footer">
|
<div class="panel-footer">
|
||||||
<button class="btn btn-sm btn-danger" ng-if="repo.ReadOnly && model[repo.ID].needFiles > 0" ng-click="override(repo.ID)" href=""><span class="glyphicon glyphicon-upload"></span> <span translate>Override Changes</span></button>
|
<button class="btn btn-sm btn-danger" ng-if="folder.ReadOnly && model[folder.ID].needFiles > 0" ng-click="override(folder.ID)" href=""><span class="glyphicon glyphicon-upload"></span> <span translate>Override Changes</span></button>
|
||||||
<span class="pull-right">
|
<span class="pull-right">
|
||||||
<button class="btn btn-sm btn-default" href="" ng-show="repoStatus(repo.ID) == 'idle'" ng-click="rescanRepo(repo.ID)"><span class="glyphicon glyphicon-refresh"></span> <span translate>Rescan</span></button>
|
<button class="btn btn-sm btn-default" href="" ng-show="folderStatus(folder.ID) == 'idle'" ng-click="rescanFolder(folder.ID)"><span class="glyphicon glyphicon-refresh"></span> <span translate>Rescan</span></button>
|
||||||
<button class="btn btn-sm btn-default" href="" ng-click="editRepo(repo)"><span class="glyphicon glyphicon-pencil"></span> <span translate>Edit</span></button>
|
<button class="btn btn-sm btn-default" href="" ng-click="editFolder(folder)"><span class="glyphicon glyphicon-pencil"></span> <span translate>Edit</span></button>
|
||||||
</span>
|
</span>
|
||||||
<div class="clearfix"></div>
|
<div class="clearfix"></div>
|
||||||
</div>
|
</div>
|
||||||
@ -165,24 +165,24 @@
|
|||||||
</div>
|
</div>
|
||||||
</div>
|
</div>
|
||||||
<div class="form-group">
|
<div class="form-group">
|
||||||
<button class="btn btn-sm btn-default pull-right" ng-click="addRepo()"><span class="glyphicon glyphicon-plus"></span> <span translate>Add Repository</span></button>
|
<button class="btn btn-sm btn-default pull-right" ng-click="addFolder()"><span class="glyphicon glyphicon-plus"></span> <span translate>Add Folder</span></button>
|
||||||
<div class="clearfix"></div>
|
<div class="clearfix"></div>
|
||||||
</div>
|
</div>
|
||||||
<hr class="visible-sm"/>
|
<hr class="visible-sm"/>
|
||||||
</div>
|
</div>
|
||||||
|
|
||||||
<!-- Node list (top right) -->
|
<!-- Device list (top right) -->
|
||||||
|
|
||||||
<!-- This node -->
|
<!-- This device -->
|
||||||
|
|
||||||
<div class="col-md-6">
|
<div class="col-md-6">
|
||||||
<div class="panel panel-default" ng-repeat="nodeCfg in [thisNode()]">
|
<div class="panel panel-default" ng-repeat="deviceCfg in [thisDevice()]">
|
||||||
<div class="panel-heading" data-toggle="collapse" href="#node-this" style="cursor: pointer">
|
<div class="panel-heading" data-toggle="collapse" href="#device-this" style="cursor: pointer">
|
||||||
<h3 class="panel-title">
|
<h3 class="panel-title">
|
||||||
<span class="glyphicon glyphicon-home"></span> {{nodeName(nodeCfg)}}
|
<span class="glyphicon glyphicon-home"></span> {{deviceName(deviceCfg)}}
|
||||||
</h3>
|
</h3>
|
||||||
</div>
|
</div>
|
||||||
<div id="node-this" class="panel-collapse collapse in">
|
<div id="device-this" class="panel-collapse collapse in">
|
||||||
<div class="panel-body">
|
<div class="panel-body">
|
||||||
<table class="table table-condensed table-striped">
|
<table class="table table-condensed table-striped">
|
||||||
<tbody>
|
<tbody>
|
||||||
@ -219,75 +219,75 @@
|
|||||||
</div>
|
</div>
|
||||||
</div>
|
</div>
|
||||||
|
|
||||||
<!-- Remote nodes -->
|
<!-- Remote devices -->
|
||||||
|
|
||||||
<div class="panel-group" id="nodes">
|
<div class="panel-group" id="devices">
|
||||||
<div class="panel panel-{{nodeClass(nodeCfg)}}" ng-repeat="nodeCfg in otherNodes()">
|
<div class="panel panel-{{deviceClass(deviceCfg)}}" ng-repeat="deviceCfg in otherDevices()">
|
||||||
<div class="panel-heading" data-toggle="collapse" data-parent="#nodes" href="#node-{{$index}}" style="cursor: pointer">
|
<div class="panel-heading" data-toggle="collapse" data-parent="#devices" href="#device-{{$index}}" style="cursor: pointer">
|
||||||
<h3 class="panel-title">
|
<h3 class="panel-title">
|
||||||
<span class="glyphicon glyphicon-retweet"></span> {{nodeName(nodeCfg)}}
|
<span class="glyphicon glyphicon-retweet"></span> {{deviceName(deviceCfg)}}
|
||||||
<span class="pull-right hidden-xs">
|
<span class="pull-right hidden-xs">
|
||||||
<span ng-if="connections[nodeCfg.NodeID] && completion[nodeCfg.NodeID]._total == 100">
|
<span ng-if="connections[deviceCfg.DeviceID] && completion[deviceCfg.DeviceID]._total == 100">
|
||||||
<span translate>Up to Date</span> (100%)
|
<span translate>Up to Date</span> (100%)
|
||||||
</span>
|
</span>
|
||||||
<span ng-if="connections[nodeCfg.NodeID] && completion[nodeCfg.NodeID]._total < 100">
|
<span ng-if="connections[deviceCfg.DeviceID] && completion[deviceCfg.DeviceID]._total < 100">
|
||||||
<span translate>Syncing</span> ({{completion[nodeCfg.NodeID]._total | number:0}}%)
|
<span translate>Syncing</span> ({{completion[deviceCfg.DeviceID]._total | number:0}}%)
|
||||||
</span>
|
</span>
|
||||||
<span translate ng-if="!connections[nodeCfg.NodeID]">Disconnected</span>
|
<span translate ng-if="!connections[deviceCfg.DeviceID]">Disconnected</span>
|
||||||
</span>
|
</span>
|
||||||
</h3>
|
</h3>
|
||||||
</div>
|
</div>
|
||||||
<div id="node-{{$index}}" class="panel-collapse collapse">
|
<div id="device-{{$index}}" class="panel-collapse collapse">
|
||||||
<div class="panel-body">
|
<div class="panel-body">
|
||||||
<table class="table table-condensed table-striped">
|
<table class="table table-condensed table-striped">
|
||||||
<tbody>
|
<tbody>
|
||||||
<tr ng-if="connections[nodeCfg.NodeID]">
|
<tr ng-if="connections[deviceCfg.DeviceID]">
|
||||||
<th><span class="glyphicon glyphicon-cloud-download"></span> <span translate>Download Rate</span></th>
|
<th><span class="glyphicon glyphicon-cloud-download"></span> <span translate>Download Rate</span></th>
|
||||||
<td class="text-right">{{connections[nodeCfg.NodeID].inbps | metric}}bps ({{connections[nodeCfg.NodeID].InBytesTotal | binary}}B)</td>
|
<td class="text-right">{{connections[deviceCfg.DeviceID].inbps | metric}}bps ({{connections[deviceCfg.DeviceID].InBytesTotal | binary}}B)</td>
|
||||||
</tr>
|
</tr>
|
||||||
<tr ng-if="connections[nodeCfg.NodeID]">
|
<tr ng-if="connections[deviceCfg.DeviceID]">
|
||||||
<th><span class="glyphicon glyphicon-cloud-upload"></span> <span translate>Upload Rate</span></th>
|
<th><span class="glyphicon glyphicon-cloud-upload"></span> <span translate>Upload Rate</span></th>
|
||||||
<td class="text-right">{{connections[nodeCfg.NodeID].outbps | metric}}bps ({{connections[nodeCfg.NodeID].OutBytesTotal | binary}}B)</td>
|
<td class="text-right">{{connections[deviceCfg.DeviceID].outbps | metric}}bps ({{connections[deviceCfg.DeviceID].OutBytesTotal | binary}}B)</td>
|
||||||
</tr>
|
</tr>
|
||||||
<tr>
|
<tr>
|
||||||
<th><span class="glyphicon glyphicon-link"></span> <span translate>Address</span></th>
|
<th><span class="glyphicon glyphicon-link"></span> <span translate>Address</span></th>
|
||||||
<td class="text-right">{{nodeAddr(nodeCfg)}}</td>
|
<td class="text-right">{{deviceAddr(deviceCfg)}}</td>
|
||||||
</tr>
|
</tr>
|
||||||
<tr ng-if="connections[nodeCfg.NodeID]">
|
<tr ng-if="connections[deviceCfg.DeviceID]">
|
||||||
<th><span class="glyphicon glyphicon-comment"></span> <span translate>Synchronization</span></th>
|
<th><span class="glyphicon glyphicon-comment"></span> <span translate>Synchronization</span></th>
|
||||||
<td class="text-right">{{completion[nodeCfg.NodeID]._total | alwaysNumber | number:0}}%</td>
|
<td class="text-right">{{completion[deviceCfg.DeviceID]._total | alwaysNumber | number:0}}%</td>
|
||||||
</tr>
|
</tr>
|
||||||
<tr>
|
<tr>
|
||||||
<th><span class="glyphicon glyphicon-compressed"></span> <span translate>Use Compression</span></th>
|
<th><span class="glyphicon glyphicon-compressed"></span> <span translate>Use Compression</span></th>
|
||||||
<td translate ng-if="nodeCfg.Compression" class="text-right">Yes</td>
|
<td translate ng-if="deviceCfg.Compression" class="text-right">Yes</td>
|
||||||
<td translate ng-if="!nodeCfg.Compression" class="text-right">No</td>
|
<td translate ng-if="!deviceCfg.Compression" class="text-right">No</td>
|
||||||
</tr>
|
</tr>
|
||||||
<tr>
|
<tr>
|
||||||
<th><span class="glyphicon glyphicon-thumbs-up"></span> <span translate>Introducer</span></th>
|
<th><span class="glyphicon glyphicon-thumbs-up"></span> <span translate>Introducer</span></th>
|
||||||
<td translate ng-if="nodeCfg.Introducer" class="text-right">Yes</td>
|
<td translate ng-if="deviceCfg.Introducer" class="text-right">Yes</td>
|
||||||
<td translate ng-if="!nodeCfg.Introducer" class="text-right">No</td>
|
<td translate ng-if="!deviceCfg.Introducer" class="text-right">No</td>
|
||||||
</tr>
|
</tr>
|
||||||
<tr ng-if="connections[nodeCfg.NodeID]">
|
<tr ng-if="connections[deviceCfg.DeviceID]">
|
||||||
<th><span class="glyphicon glyphicon-tag"></span> <span translate>Version</span></th>
|
<th><span class="glyphicon glyphicon-tag"></span> <span translate>Version</span></th>
|
||||||
<td class="text-right">{{connections[nodeCfg.NodeID].ClientVersion}}</td>
|
<td class="text-right">{{connections[deviceCfg.DeviceID].ClientVersion}}</td>
|
||||||
</tr>
|
</tr>
|
||||||
<tr ng-if="!connections[nodeCfg.NodeID]">
|
<tr ng-if="!connections[deviceCfg.DeviceID]">
|
||||||
<th><span class="glyphicon glyphicon-eye-open"></span> <span translate>Last seen</span></th>
|
<th><span class="glyphicon glyphicon-eye-open"></span> <span translate>Last seen</span></th>
|
||||||
<td translate ng-if="!stats[nodeCfg.NodeID].LastSeenDays || stats[nodeCfg.NodeID].LastSeenDays >= 365" class="text-right">Never</td>
|
<td translate ng-if="!stats[deviceCfg.DeviceID].LastSeenDays || stats[deviceCfg.DeviceID].LastSeenDays >= 365" class="text-right">Never</td>
|
||||||
<td ng-if="stats[nodeCfg.NodeID].LastSeenDays < 365" class="text-right">{{stats[nodeCfg.NodeID].LastSeen | date:"yyyy-MM-dd HH:mm"}}</td>
|
<td ng-if="stats[deviceCfg.DeviceID].LastSeenDays < 365" class="text-right">{{stats[deviceCfg.DeviceID].LastSeen | date:"yyyy-MM-dd HH:mm"}}</td>
|
||||||
</tr>
|
</tr>
|
||||||
</tbody>
|
</tbody>
|
||||||
</table>
|
</table>
|
||||||
</div>
|
</div>
|
||||||
<div class="panel-footer">
|
<div class="panel-footer">
|
||||||
<span class="pull-right"><a class="btn btn-sm btn-default" href="" ng-click="editNode(nodeCfg)"><span class="glyphicon glyphicon-pencil"></span> <span translate>Edit</span></a></span>
|
<span class="pull-right"><a class="btn btn-sm btn-default" href="" ng-click="editDevice(deviceCfg)"><span class="glyphicon glyphicon-pencil"></span> <span translate>Edit</span></a></span>
|
||||||
<div class="clearfix"></div>
|
<div class="clearfix"></div>
|
||||||
</div>
|
</div>
|
||||||
</div>
|
</div>
|
||||||
</div>
|
</div>
|
||||||
</div>
|
</div>
|
||||||
<div class="form-group">
|
<div class="form-group">
|
||||||
<button class="btn btn-sm btn-default pull-right" ng-click="addNode()"><span class="glyphicon glyphicon-plus"></span> <span translate>Add Node</span></button>
|
<button class="btn btn-sm btn-default pull-right" ng-click="addDevice()"><span class="glyphicon glyphicon-plus"></span> <span translate>Add Device</span></button>
|
||||||
<div class="clearfix"></div>
|
<div class="clearfix"></div>
|
||||||
</div>
|
</div>
|
||||||
</div>
|
</div>
|
||||||
@ -300,7 +300,7 @@
|
|||||||
<div class="panel panel-warning">
|
<div class="panel panel-warning">
|
||||||
<div class="panel-heading"><h3 class="panel-title"><span translate>Notice</span></h3></div>
|
<div class="panel-heading"><h3 class="panel-title"><span translate>Notice</span></h3></div>
|
||||||
<div class="panel-body">
|
<div class="panel-body">
|
||||||
<p ng-repeat="err in errorList()"><small>{{err.Time | date:"H:mm:ss"}}:</small> {{friendlyNodes(err.Error)}}</p>
|
<p ng-repeat="err in errorList()"><small>{{err.Time | date:"H:mm:ss"}}:</small> {{friendlyDevices(err.Error)}}</p>
|
||||||
</div>
|
</div>
|
||||||
<div class="panel-footer">
|
<div class="panel-footer">
|
||||||
<button type="button" class="pull-right btn btn-sm btn-default" ng-click="clearErrors()"><span class="glyphicon glyphicon-ok"></span> <span translate>OK</span></button>
|
<button type="button" class="pull-right btn btn-sm btn-default" ng-click="clearErrors()"><span class="glyphicon glyphicon-ok"></span> <span translate>OK</span></button>
|
||||||
@ -354,48 +354,48 @@
|
|||||||
|
|
||||||
<!-- ID modal -->
|
<!-- ID modal -->
|
||||||
|
|
||||||
<modal id="idqr" large="yes" status="info" close="yes" icon="qrcode" title="{{'Node Identification' | translate}} — {{nodeName(thisNode())}}">
|
<modal id="idqr" large="yes" status="info" close="yes" icon="qrcode" title="{{'Device Identification' | translate}} — {{deviceName(thisDevice())}}">
|
||||||
<div class="well well-sm text-monospace text-center">{{myID}}</div>
|
<div class="well well-sm text-monospace text-center">{{myID}}</div>
|
||||||
<img ng-if="myID" class="center-block img-thumbnail" src="qr/?text={{myID}}"/>
|
<img ng-if="myID" class="center-block img-thumbnail" src="qr/?text={{myID}}"/>
|
||||||
</modal>
|
</modal>
|
||||||
|
|
||||||
<!-- Node editor modal -->
|
<!-- Device editor modal -->
|
||||||
|
|
||||||
<div id="editNode" class="modal fade" tabindex="-1">
|
<div id="editDevice" class="modal fade" tabindex="-1">
|
||||||
<div class="modal-dialog modal-lg">
|
<div class="modal-dialog modal-lg">
|
||||||
<div class="modal-content">
|
<div class="modal-content">
|
||||||
<div class="modal-header">
|
<div class="modal-header">
|
||||||
<h4 translate ng-show="!editingExisting" class="modal-title">Add Node</h4>
|
<h4 translate ng-show="!editingExisting" class="modal-title">Add Device</h4>
|
||||||
<h4 translate ng-show="editingExisting" class="modal-title">Edit Node</h4>
|
<h4 translate ng-show="editingExisting" class="modal-title">Edit Device</h4>
|
||||||
</div>
|
</div>
|
||||||
<div class="modal-body">
|
<div class="modal-body">
|
||||||
<form role="form" name="nodeEditor">
|
<form role="form" name="deviceEditor">
|
||||||
<div class="form-group" ng-class="{'has-error': nodeEditor.nodeID.$invalid && nodeEditor.nodeID.$dirty}">
|
<div class="form-group" ng-class="{'has-error': deviceEditor.deviceID.$invalid && deviceEditor.deviceID.$dirty}">
|
||||||
<label translate for="nodeID">Node ID</label>
|
<label translate for="deviceID">Device ID</label>
|
||||||
<input ng-if="!editingExisting" name="nodeID" id="nodeID" class="form-control text-monospace" type="text" ng-model="currentNode.NodeID" required valid-nodeid></input>
|
<input ng-if="!editingExisting" name="deviceID" id="deviceID" class="form-control text-monospace" type="text" ng-model="currentDevice.DeviceID" required valid-deviceid></input>
|
||||||
<div ng-if="editingExisting" class="well well-sm text-monospace">{{currentNode.NodeID}}</div>
|
<div ng-if="editingExisting" class="well well-sm text-monospace">{{currentDevice.DeviceID}}</div>
|
||||||
<p class="help-block">
|
<p class="help-block">
|
||||||
<span translate ng-if="nodeEditor.nodeID.$valid || nodeEditor.nodeID.$pristine">The node ID to enter here can be found in the "Edit > Show ID" dialog on the other node. Spaces and dashes are optional (ignored).</span>
|
<span translate ng-if="deviceEditor.deviceID.$valid || deviceEditor.deviceID.$pristine">The device ID to enter here can be found in the "Edit > Show ID" dialog on the other device. Spaces and dashes are optional (ignored).</span>
|
||||||
<span translate ng-show="!editingExisting && (nodeEditor.nodeID.$valid || nodeEditor.nodeID.$pristine)">When adding a new node, keep in mind that this node must be added on the other side too.</span>
|
<span translate ng-show="!editingExisting && (deviceEditor.deviceID.$valid || deviceEditor.deviceID.$pristine)">When adding a new device, keep in mind that this device must be added on the other side too.</span>
|
||||||
<span translate ng-if="nodeEditor.nodeID.$error.required && nodeEditor.nodeID.$dirty">The node ID cannot be blank.</span>
|
<span translate ng-if="deviceEditor.deviceID.$error.required && deviceEditor.deviceID.$dirty">The device ID cannot be blank.</span>
|
||||||
<span translate ng-if="nodeEditor.nodeID.$error.validNodeid && nodeEditor.nodeID.$dirty">The entered node ID does not look valid. It should be a 52 or 56 character string consisting of letters and numbers, with spaces and dashes being optional.</span>
|
<span translate ng-if="deviceEditor.deviceID.$error.validDeviceid && deviceEditor.deviceID.$dirty">The entered device ID does not look valid. It should be a 52 or 56 character string consisting of letters and numbers, with spaces and dashes being optional.</span>
|
||||||
</p>
|
</p>
|
||||||
</div>
|
</div>
|
||||||
<div class="form-group">
|
<div class="form-group">
|
||||||
<label translate for="name">Node Name</label>
|
<label translate for="name">Device Name</label>
|
||||||
<input id="name" class="form-control" type="text" ng-model="currentNode.Name"></input>
|
<input id="name" class="form-control" type="text" ng-model="currentDevice.Name"></input>
|
||||||
<p translate ng-if="currentNode.NodeID == myID" class="help-block">Shown instead of Node ID in the cluster status. Will be advertised to other nodes as an optional default name.</p>
|
<p translate ng-if="currentDevice.DeviceID == myID" class="help-block">Shown instead of Device ID in the cluster status. Will be advertised to other devices as an optional default name.</p>
|
||||||
<p translate ng-if="currentNode.NodeID != myID" class="help-block">Shown instead of Node ID in the cluster status. Will be updated to the name the node advertises if left empty.</p>
|
<p translate ng-if="currentDevice.DeviceID != myID" class="help-block">Shown instead of Device ID in the cluster status. Will be updated to the name the device advertises if left empty.</p>
|
||||||
</div>
|
</div>
|
||||||
<div class="form-group">
|
<div class="form-group">
|
||||||
<label translate for="addresses">Addresses</label>
|
<label translate for="addresses">Addresses</label>
|
||||||
<input ng-disabled="currentNode.NodeID == myID" id="addresses" class="form-control" type="text" ng-model="currentNode.AddressesStr"></input>
|
<input ng-disabled="currentDevice.DeviceID == myID" id="addresses" class="form-control" type="text" ng-model="currentDevice.AddressesStr"></input>
|
||||||
<p translate class="help-block">Enter comma separated "ip:port" addresses or "dynamic" to perform automatic discovery of the address.</p>
|
<p translate class="help-block">Enter comma separated "ip:port" addresses or "dynamic" to perform automatic discovery of the address.</p>
|
||||||
</div>
|
</div>
|
||||||
<div ng-if="!editingSelf" class="form-group">
|
<div ng-if="!editingSelf" class="form-group">
|
||||||
<div class="checkbox">
|
<div class="checkbox">
|
||||||
<label>
|
<label>
|
||||||
<input type="checkbox" ng-model="currentNode.Compression"> <span translate>Use Compression</span>
|
<input type="checkbox" ng-model="currentDevice.Compression"> <span translate>Use Compression</span>
|
||||||
</label>
|
</label>
|
||||||
<p translate class="help-block">Compression is recommended in most setups.</p>
|
<p translate class="help-block">Compression is recommended in most setups.</p>
|
||||||
</div>
|
</div>
|
||||||
@ -403,58 +403,58 @@
|
|||||||
<div ng-if="!editingSelf" class="form-group">
|
<div ng-if="!editingSelf" class="form-group">
|
||||||
<div class="checkbox">
|
<div class="checkbox">
|
||||||
<label>
|
<label>
|
||||||
<input type="checkbox" ng-model="currentNode.Introducer"> <span translate>Introducer</span>
|
<input type="checkbox" ng-model="currentDevice.Introducer"> <span translate>Introducer</span>
|
||||||
</label>
|
</label>
|
||||||
<p translate class="help-block">Any nodes configured on an introducer node will be added to this node as well.</p>
|
<p translate class="help-block">Any devices configured on an introducer device will be added to this device as well.</p>
|
||||||
</div>
|
</div>
|
||||||
</div>
|
</div>
|
||||||
</form>
|
</form>
|
||||||
</div>
|
</div>
|
||||||
<div class="modal-footer">
|
<div class="modal-footer">
|
||||||
<button type="button" class="btn btn-primary btn-sm" ng-click="saveNode()" ng-disabled="nodeEditor.$invalid"><span class="glyphicon glyphicon-ok"></span> <span translate>Save</span></button>
|
<button type="button" class="btn btn-primary btn-sm" ng-click="saveDevice()" ng-disabled="deviceEditor.$invalid"><span class="glyphicon glyphicon-ok"></span> <span translate>Save</span></button>
|
||||||
<button type="button" class="btn btn-default btn-sm" data-dismiss="modal"><span class="glyphicon glyphicon-remove"></span> <span translate>Close</span></button>
|
<button type="button" class="btn btn-default btn-sm" data-dismiss="modal"><span class="glyphicon glyphicon-remove"></span> <span translate>Close</span></button>
|
||||||
<button ng-if="editingExisting && !editingSelf" type="button" class="btn btn-danger pull-left btn-sm" ng-click="deleteNode()"><span class="glyphicon glyphicon-minus"></span> <span translate>Delete</span></button>
|
<button ng-if="editingExisting && !editingSelf" type="button" class="btn btn-danger pull-left btn-sm" ng-click="deleteDevice()"><span class="glyphicon glyphicon-minus"></span> <span translate>Delete</span></button>
|
||||||
</div>
|
</div>
|
||||||
</div>
|
</div>
|
||||||
</div>
|
</div>
|
||||||
</div>
|
</div>
|
||||||
|
|
||||||
<!-- Repo editor modal -->
|
<!-- Folder editor modal -->
|
||||||
|
|
||||||
<div id="editRepo" class="modal fade" tabindex="-1">
|
<div id="editFolder" class="modal fade" tabindex="-1">
|
||||||
<div class="modal-dialog modal-lg">
|
<div class="modal-dialog modal-lg">
|
||||||
<div class="modal-content">
|
<div class="modal-content">
|
||||||
<div class="modal-header">
|
<div class="modal-header">
|
||||||
<h4 ng-show="!editingExisting" class="modal-title"><span translate>Add Repository</span></h4>
|
<h4 ng-show="!editingExisting" class="modal-title"><span translate>Add Folder</span></h4>
|
||||||
<h4 ng-show="editingExisting" class="modal-title"><span translate>Edit Repository</span></h4>
|
<h4 ng-show="editingExisting" class="modal-title"><span translate>Edit Folder</span></h4>
|
||||||
</div>
|
</div>
|
||||||
<div class="modal-body">
|
<div class="modal-body">
|
||||||
<form role="form" name="repoEditor">
|
<form role="form" name="folderEditor">
|
||||||
<div class="row">
|
<div class="row">
|
||||||
<div class="col-md-12">
|
<div class="col-md-12">
|
||||||
<div class="form-group" ng-class="{'has-error': repoEditor.repoID.$invalid && repoEditor.repoID.$dirty}">
|
<div class="form-group" ng-class="{'has-error': folderEditor.folderID.$invalid && folderEditor.folderID.$dirty}">
|
||||||
<label for="repoID"><span translate>Repository ID</span></label>
|
<label for="folderID"><span translate>Folder ID</span></label>
|
||||||
<input name="repoID" ng-disabled="editingExisting" id="repoID" class="form-control" type="text" ng-model="currentRepo.ID" required unique-repo ng-pattern="/^[a-zA-Z0-9-_.]{1,64}$/"></input>
|
<input name="folderID" ng-disabled="editingExisting" id="folderID" class="form-control" type="text" ng-model="currentFolder.ID" required unique-folder ng-pattern="/^[a-zA-Z0-9-_.]{1,64}$/"></input>
|
||||||
<p class="help-block">
|
<p class="help-block">
|
||||||
<span translate ng-if="repoEditor.repoID.$valid || repoEditor.repoID.$pristine">Short identifier for the repository. Must be the same on all cluster nodes.</span>
|
<span translate ng-if="folderEditor.folderID.$valid || folderEditor.folderID.$pristine">Short identifier for the folder. Must be the same on all cluster devices.</span>
|
||||||
<span translate ng-if="repoEditor.repoID.$error.uniqueRepo">The repository ID must be unique.</span>
|
<span translate ng-if="folderEditor.folderID.$error.uniqueFolder">The folder ID must be unique.</span>
|
||||||
<span translate ng-if="repoEditor.repoID.$error.required && repoEditor.repoID.$dirty">The repository ID cannot be blank.</span>
|
<span translate ng-if="folderEditor.folderID.$error.required && folderEditor.folderID.$dirty">The folder ID cannot be blank.</span>
|
||||||
<span translate ng-if="repoEditor.repoID.$error.pattern && repoEditor.repoID.$dirty">The repository ID must be a short identifier (64 characters or less) consisting of letters, numbers and the dot (.), dash (-) and underscode (_) characters only.</span>
|
<span translate ng-if="folderEditor.folderID.$error.pattern && folderEditor.folderID.$dirty">The folder ID must be a short identifier (64 characters or less) consisting of letters, numbers and the dot (.), dash (-) and underscode (_) characters only.</span>
|
||||||
</p>
|
</p>
|
||||||
</div>
|
</div>
|
||||||
<div class="form-group" ng-class="{'has-error': repoEditor.repoPath.$invalid && repoEditor.repoPath.$dirty}">
|
<div class="form-group" ng-class="{'has-error': folderEditor.folderPath.$invalid && folderEditor.folderPath.$dirty}">
|
||||||
<label translate for="repoPath">Repository Path</label>
|
<label translate for="folderPath">Folder Path</label>
|
||||||
<input name="repoPath" ng-disabled="editingExisting" id="repoPath" class="form-control" type="text" ng-model="currentRepo.Directory" required></input>
|
<input name="folderPath" ng-disabled="editingExisting" id="folderPath" class="form-control" type="text" ng-model="currentFolder.Directory" required></input>
|
||||||
<p class="help-block">
|
<p class="help-block">
|
||||||
<span translate ng-if="repoEditor.repoPath.$valid || repoEditor.repoPath.$pristine">Path to the repository on the local computer. Will be created if it does not exist. The tilde character (~) can be used as a shortcut for</span> <code>{{system.tilde}}</code>.
|
<span translate ng-if="folderEditor.folderPath.$valid || folderEditor.folderPath.$pristine">Path to the folder on the local computer. Will be created if it does not exist. The tilde character (~) can be used as a shortcut for</span> <code>{{system.tilde}}</code>.
|
||||||
<span translate ng-if="repoEditor.repoPath.$error.required && repoEditor.repoPath.$dirty">The repository path cannot be blank.</span>
|
<span translate ng-if="folderEditor.folderPath.$error.required && folderEditor.folderPath.$dirty">The folder path cannot be blank.</span>
|
||||||
</p>
|
</p>
|
||||||
</div>
|
</div>
|
||||||
<div class="form-group" ng-class="{'has-error': repoEditor.rescanIntervalS.$invalid && repoEditor.rescanIntervalS.$dirty}">
|
<div class="form-group" ng-class="{'has-error': folderEditor.rescanIntervalS.$invalid && folderEditor.rescanIntervalS.$dirty}">
|
||||||
<label for="rescanIntervalS"><span translate>Rescan Interval</span> (s)</label>
|
<label for="rescanIntervalS"><span translate>Rescan Interval</span> (s)</label>
|
||||||
<input name="rescanIntervalS" id="rescanIntervalS" class="form-control" type="number" ng-model="currentRepo.RescanIntervalS" required min="5"></input>
|
<input name="rescanIntervalS" id="rescanIntervalS" class="form-control" type="number" ng-model="currentFolder.RescanIntervalS" required min="5"></input>
|
||||||
<p class="help-block">
|
<p class="help-block">
|
||||||
<span translate ng-if="!repoEditor.rescanIntervalS.$valid && repoEditor.rescanIntervalS.$dirty">The rescan interval must be at least 5 seconds.</span>
|
<span translate ng-if="!folderEditor.rescanIntervalS.$valid && folderEditor.rescanIntervalS.$dirty">The rescan interval must be at least 5 seconds.</span>
|
||||||
</p>
|
</p>
|
||||||
</div>
|
</div>
|
||||||
</div>
|
</div>
|
||||||
@ -464,27 +464,27 @@
|
|||||||
<div class="form-group">
|
<div class="form-group">
|
||||||
<div class="checkbox">
|
<div class="checkbox">
|
||||||
<label>
|
<label>
|
||||||
<input type="checkbox" ng-model="currentRepo.ReadOnly"> <span translate>Repository Master</span>
|
<input type="checkbox" ng-model="currentFolder.ReadOnly"> <span translate>Folder Master</span>
|
||||||
</label>
|
</label>
|
||||||
</div>
|
</div>
|
||||||
<p translate class="help-block">Files are protected from changes made on other nodes, but changes made on this node will be sent to the rest of the cluster.</p>
|
<p translate class="help-block">Files are protected from changes made on other devices, but changes made on this device will be sent to the rest of the cluster.</p>
|
||||||
</div>
|
</div>
|
||||||
<div class="form-group">
|
<div class="form-group">
|
||||||
<div class="checkbox">
|
<div class="checkbox">
|
||||||
<label>
|
<label>
|
||||||
<input type="checkbox" ng-model="currentRepo.IgnorePerms"> <span translate>Ignore Permissions</span>
|
<input type="checkbox" ng-model="currentFolder.IgnorePerms"> <span translate>Ignore Permissions</span>
|
||||||
</label>
|
</label>
|
||||||
</div>
|
</div>
|
||||||
<p translate class="help-block">File permission bits are ignored when looking for changes. Use on FAT filesystems.</p>
|
<p translate class="help-block">File permission bits are ignored when looking for changes. Use on FAT filesystems.</p>
|
||||||
</div>
|
</div>
|
||||||
<div class="form-group">
|
<div class="form-group">
|
||||||
<label translate for="nodes">Share With Nodes</label>
|
<label translate for="devices">Share With Devices</label>
|
||||||
<div class="checkbox" ng-repeat="node in otherNodes()">
|
<div class="checkbox" ng-repeat="device in otherDevices()">
|
||||||
<label>
|
<label>
|
||||||
<input type="checkbox" ng-model="currentRepo.selectedNodes[node.NodeID]"> {{nodeName(node)}}
|
<input type="checkbox" ng-model="currentFolder.selectedDevices[device.DeviceID]"> {{deviceName(device)}}
|
||||||
</label>
|
</label>
|
||||||
</div>
|
</div>
|
||||||
<p translate class="help-block">Select the nodes to share this repository with.</p>
|
<p translate class="help-block">Select the devices to share this folder with.</p>
|
||||||
</div>
|
</div>
|
||||||
</div>
|
</div>
|
||||||
<div class="col-md-6">
|
<div class="col-md-6">
|
||||||
@ -492,54 +492,54 @@
|
|||||||
<label translate>File Versioning</label>
|
<label translate>File Versioning</label>
|
||||||
<div class="radio">
|
<div class="radio">
|
||||||
<label>
|
<label>
|
||||||
<input type="radio" ng-model="currentRepo.FileVersioningSelector" value="none"> <span translate>No File Versioning</span>
|
<input type="radio" ng-model="currentFolder.FileVersioningSelector" value="none"> <span translate>No File Versioning</span>
|
||||||
</label>
|
</label>
|
||||||
</div>
|
</div>
|
||||||
<div class="radio">
|
<div class="radio">
|
||||||
<label>
|
<label>
|
||||||
<input type="radio" ng-model="currentRepo.FileVersioningSelector" value="simple"> <span translate>Simple File Versioning</span>
|
<input type="radio" ng-model="currentFolder.FileVersioningSelector" value="simple"> <span translate>Simple File Versioning</span>
|
||||||
</label>
|
</label>
|
||||||
</div>
|
</div>
|
||||||
<div class="radio">
|
<div class="radio">
|
||||||
<label>
|
<label>
|
||||||
<input type="radio" ng-model="currentRepo.FileVersioningSelector" value="staggered"> <span translate>Staggered File Versioning</span>
|
<input type="radio" ng-model="currentFolder.FileVersioningSelector" value="staggered"> <span translate>Staggered File Versioning</span>
|
||||||
</label>
|
</label>
|
||||||
</div>
|
</div>
|
||||||
</div>
|
</div>
|
||||||
<div class="form-group" ng-if="currentRepo.FileVersioningSelector=='simple'" ng-class="{'has-error': repoEditor.simpleKeep.$invalid && repoEditor.simpleKeep.$dirty}">
|
<div class="form-group" ng-if="currentFolder.FileVersioningSelector=='simple'" ng-class="{'has-error': folderEditor.simpleKeep.$invalid && folderEditor.simpleKeep.$dirty}">
|
||||||
<p translate class="help-block">Files are moved to date stamped versions in a .stversions folder when replaced or deleted by syncthing.</p>
|
<p translate class="help-block">Files are moved to date stamped versions in a .stversions folder when replaced or deleted by syncthing.</p>
|
||||||
<label translate for="simpleKeep">Keep Versions</label>
|
<label translate for="simpleKeep">Keep Versions</label>
|
||||||
<input name="simpleKeep" id="simpleKeep" class="form-control" type="number" ng-model="currentRepo.simpleKeep" required min="1"></input>
|
<input name="simpleKeep" id="simpleKeep" class="form-control" type="number" ng-model="currentFolder.simpleKeep" required min="1"></input>
|
||||||
<p class="help-block">
|
<p class="help-block">
|
||||||
<span translate ng-if="repoEditor.simpleKeep.$valid || repoEditor.simpleKeep.$pristine">The number of old versions to keep, per file.</span>
|
<span translate ng-if="folderEditor.simpleKeep.$valid || folderEditor.simpleKeep.$pristine">The number of old versions to keep, per file.</span>
|
||||||
<span translate ng-if="repoEditor.simpleKeep.$error.required && repoEditor.simpleKeep.$dirty">The number of versions must be a number and cannot be blank.</span>
|
<span translate ng-if="folderEditor.simpleKeep.$error.required && folderEditor.simpleKeep.$dirty">The number of versions must be a number and cannot be blank.</span>
|
||||||
<span translate ng-if="repoEditor.simpleKeep.$error.min && repoEditor.simpleKeep.$dirty">You must keep at least one version.</span>
|
<span translate ng-if="folderEditor.simpleKeep.$error.min && folderEditor.simpleKeep.$dirty">You must keep at least one version.</span>
|
||||||
</p>
|
</p>
|
||||||
</div>
|
</div>
|
||||||
<div class="form-group" ng-if="currentRepo.FileVersioningSelector=='staggered'" ng-class="{'has-error': repoEditor.staggeredMaxAge.$invalid && repoEditor.staggeredMaxAge.$dirty}">
|
<div class="form-group" ng-if="currentFolder.FileVersioningSelector=='staggered'" ng-class="{'has-error': folderEditor.staggeredMaxAge.$invalid && folderEditor.staggeredMaxAge.$dirty}">
|
||||||
<p class="help-block"><span translate>Files are moved to date stamped versions in a .stversions folder when replaced or deleted by syncthing.</span> <span translate>Versions are automatically deleted if they are older than the maximum age or exceed the number of files allowed in an interval.</span></p>
|
<p class="help-block"><span translate>Files are moved to date stamped versions in a .stversions folder when replaced or deleted by syncthing.</span> <span translate>Versions are automatically deleted if they are older than the maximum age or exceed the number of files allowed in an interval.</span></p>
|
||||||
<p translate class="help-block">The following intervals are used: for the first hour a version is kept every 30 seconds, for the first day a version is kept every hour, for the first 30 days a version is kept every day, until the maximum age a version is kept every week.</p>
|
<p translate class="help-block">The following intervals are used: for the first hour a version is kept every 30 seconds, for the first day a version is kept every hour, for the first 30 days a version is kept every day, until the maximum age a version is kept every week.</p>
|
||||||
<label translate for="staggeredMaxAge">Maximum Age</label>
|
<label translate for="staggeredMaxAge">Maximum Age</label>
|
||||||
<input name="staggeredMaxAge" id="staggeredMaxAge" class="form-control" type="number" ng-model="currentRepo.staggeredMaxAge" required></input>
|
<input name="staggeredMaxAge" id="staggeredMaxAge" class="form-control" type="number" ng-model="currentFolder.staggeredMaxAge" required></input>
|
||||||
<p class="help-block">
|
<p class="help-block">
|
||||||
<span translate ng-if="repoEditor.staggeredMaxAge.$valid || repoEditor.staggeredMaxAge.$pristine">The maximum time to keep a version (in days, set to 0 to keep versions forever).</span>
|
<span translate ng-if="folderEditor.staggeredMaxAge.$valid || folderEditor.staggeredMaxAge.$pristine">The maximum time to keep a version (in days, set to 0 to keep versions forever).</span>
|
||||||
<span translate ng-if="repoEditor.staggeredMaxAge.$error.required && repoEditor.staggeredMaxAge.$dirty">The maximum age must be a number and cannot be blank.</span>
|
<span translate ng-if="folderEditor.staggeredMaxAge.$error.required && folderEditor.staggeredMaxAge.$dirty">The maximum age must be a number and cannot be blank.</span>
|
||||||
</p>
|
</p>
|
||||||
</div>
|
</div>
|
||||||
<div class="form-group" ng-if="currentRepo.FileVersioningSelector == 'staggered'">
|
<div class="form-group" ng-if="currentFolder.FileVersioningSelector == 'staggered'">
|
||||||
<label translate for="staggeredVersionsPath">Versions Path</label>
|
<label translate for="staggeredVersionsPath">Versions Path</label>
|
||||||
<input name="staggeredVersionsPath" id="staggeredVersionsPath" class="form-control" type="text" ng-model="currentRepo.staggeredVersionsPath"></input>
|
<input name="staggeredVersionsPath" id="staggeredVersionsPath" class="form-control" type="text" ng-model="currentFolder.staggeredVersionsPath"></input>
|
||||||
<p translate class="help-block">Path where versions should be stored (leave empty for the default .stversions folder in the repository).</p>
|
<p translate class="help-block">Path where versions should be stored (leave empty for the default .stversions folder in the folder).</p>
|
||||||
</div>
|
</div>
|
||||||
</div>
|
</div>
|
||||||
</div>
|
</div>
|
||||||
</form>
|
</form>
|
||||||
<div translate ng-show="!editingExisting">When adding a new repository, keep in mind that the Repository ID is used to tie repositories together between nodes. They are case sensitive and must match exactly between all nodes.</div>
|
<div translate ng-show="!editingExisting">When adding a new folder, keep in mind that the Folder ID is used to tie folders together between devices. They are case sensitive and must match exactly between all devices.</div>
|
||||||
</div>
|
</div>
|
||||||
<div class="modal-footer">
|
<div class="modal-footer">
|
||||||
<button type="button" class="btn btn-primary btn-sm" ng-click="saveRepo()" ng-disabled="repoEditor.$invalid"><span class="glyphicon glyphicon-ok"></span> <span translate>Save</span></button>
|
<button type="button" class="btn btn-primary btn-sm" ng-click="saveFolder()" ng-disabled="folderEditor.$invalid"><span class="glyphicon glyphicon-ok"></span> <span translate>Save</span></button>
|
||||||
<button type="button" class="btn btn-default btn-sm" data-dismiss="modal"><span class="glyphicon glyphicon-remove"></span> <span translate>Close</span></button>
|
<button type="button" class="btn btn-default btn-sm" data-dismiss="modal"><span class="glyphicon glyphicon-remove"></span> <span translate>Close</span></button>
|
||||||
<button ng-if="editingExisting" type="button" class="btn btn-danger pull-left btn-sm" ng-click="deleteRepo()"><span class="glyphicon glyphicon-minus"></span> <span translate>Delete</span></button>
|
<button ng-if="editingExisting" type="button" class="btn btn-danger pull-left btn-sm" ng-click="deleteFolder()"><span class="glyphicon glyphicon-minus"></span> <span translate>Delete</span></button>
|
||||||
<button id="editIgnoresButton" ng-if="editingExisting" type="button" class="btn btn-default pull-left btn-sm" ng-click="editIgnores()"><span class="glyphicon glyphicon-eye-close"></span> <span translate>Ignore Patterns</span></button>
|
<button id="editIgnoresButton" ng-if="editingExisting" type="button" class="btn btn-default pull-left btn-sm" ng-click="editIgnores()"><span class="glyphicon glyphicon-eye-close"></span> <span translate>Ignore Patterns</span></button>
|
||||||
</div>
|
</div>
|
||||||
</div>
|
</div>
|
||||||
@ -569,7 +569,7 @@
|
|||||||
</dl>
|
</dl>
|
||||||
</div>
|
</div>
|
||||||
<div class="modal-footer">
|
<div class="modal-footer">
|
||||||
<div class="pull-left"><span translate>Editing</span> <code>{{currentRepo.Directory}}/.stignore</code></div>
|
<div class="pull-left"><span translate>Editing</span> <code>{{currentFolder.Directory}}/.stignore</code></div>
|
||||||
<button type="button" class="btn btn-primary btn-sm" data-dismiss="modal" ng-click="saveIgnores()"><span class="glyphicon glyphicon-ok"></span> <span translate>Save</span></button>
|
<button type="button" class="btn btn-primary btn-sm" data-dismiss="modal" ng-click="saveIgnores()"><span class="glyphicon glyphicon-ok"></span> <span translate>Save</span></button>
|
||||||
<button type="button" class="btn btn-default btn-sm" data-dismiss="modal"><span class="glyphicon glyphicon-remove"></span> <span translate>Close</span></button>
|
<button type="button" class="btn btn-default btn-sm" data-dismiss="modal"><span class="glyphicon glyphicon-remove"></span> <span translate>Close</span></button>
|
||||||
</div>
|
</div>
|
||||||
@ -591,8 +591,8 @@
|
|||||||
|
|
||||||
<div class="col-md-6">
|
<div class="col-md-6">
|
||||||
<div class="form-group">
|
<div class="form-group">
|
||||||
<label translate for="NodeName">Node Name</label>
|
<label translate for="DeviceName">Device Name</label>
|
||||||
<input id="NodeName" class="form-control" type="text" ng-model="tmpOptions.NodeName">
|
<input id="DeviceName" class="form-control" type="text" ng-model="tmpOptions.DeviceName">
|
||||||
</div>
|
</div>
|
||||||
<div class="form-group">
|
<div class="form-group">
|
||||||
<label translate for="ListenStr">Sync Protocol Listen Addresses</label>
|
<label translate for="ListenStr">Sync Protocol Listen Addresses</label>
|
||||||
@ -698,7 +698,7 @@
|
|||||||
<h4 translate class="modal-title">Allow Anonymous Usage Reporting?</h4>
|
<h4 translate class="modal-title">Allow Anonymous Usage Reporting?</h4>
|
||||||
</div>
|
</div>
|
||||||
<div class="modal-body">
|
<div class="modal-body">
|
||||||
<p translate>The encrypted usage report is sent daily. It is used to track common platforms, repo sizes and app versions. If the reported data set is changed you will be prompted with this dialog again.</p>
|
<p translate>The encrypted usage report is sent daily. It is used to track common platforms, folder sizes and app versions. If the reported data set is changed you will be prompted with this dialog again.</p>
|
||||||
<p translate translate-value-url="https://data.syncthing.net">The aggregated statistics are publicly available at {%url%}.</p>
|
<p translate translate-value-url="https://data.syncthing.net">The aggregated statistics are publicly available at {%url%}.</p>
|
||||||
<button translate type="button" class="btn btn-default btn-sm" ng-show="!reportPreview" ng-click="showReportPreview()">Preview Usage Report</button>
|
<button translate type="button" class="btn btn-default btn-sm" ng-show="!reportPreview" ng-click="showReportPreview()">Preview Usage Report</button>
|
||||||
<pre ng-if="reportPreview"><small>{{reportData | json}}</small></pre>
|
<pre ng-if="reportPreview"><small>{{reportData | json}}</small></pre>
|
||||||
@ -720,7 +720,7 @@
|
|||||||
<h4 translate class="modal-title">Anonymous Usage Reporting</h4>
|
<h4 translate class="modal-title">Anonymous Usage Reporting</h4>
|
||||||
</div>
|
</div>
|
||||||
<div class="modal-body">
|
<div class="modal-body">
|
||||||
<p translate>The encrypted usage report is sent daily. It is used to track common platforms, repo sizes and app versions. If the reported data set is changed you will be prompted with this dialog again.</p>
|
<p translate>The encrypted usage report is sent daily. It is used to track common platforms, folder sizes and app versions. If the reported data set is changed you will be prompted with this dialog again.</p>
|
||||||
<p translate translate-value-url="https://data.syncthing.net">The aggregated statistics are publicly available at {%url%}.</p>
|
<p translate translate-value-url="https://data.syncthing.net">The aggregated statistics are publicly available at {%url%}.</p>
|
||||||
<pre><small>{{reportData | json}}</small></pre>
|
<pre><small>{{reportData | json}}</small></pre>
|
||||||
</div>
|
</div>
|
||||||
|
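The staggered versioning help text above describes a fixed thinning schedule (one version per 30 seconds for the first hour, per hour for the first day, per day for the first 30 days, then per week until the maximum age). A minimal Go sketch of that schedule, purely illustrative and not the actual versioner implementation:

package main

import (
    "fmt"
    "time"
)

// keepInterval returns how densely versions are retained for a given age,
// following the staggered schedule described in the GUI help text above.
func keepInterval(age time.Duration) time.Duration {
    switch {
    case age < time.Hour:
        return 30 * time.Second
    case age < 24*time.Hour:
        return time.Hour
    case age < 30*24*time.Hour:
        return 24 * time.Hour
    default:
        return 7 * 24 * time.Hour
    }
}

func main() {
    for _, age := range []time.Duration{10 * time.Minute, 5 * time.Hour, 10 * 24 * time.Hour, 100 * 24 * time.Hour} {
        fmt.Printf("age %v -> keep one version per %v\n", age, keepInterval(age))
    }
}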
@@ -25,24 +25,24 @@ var l = logger.DefaultLogger

type Configuration struct {
    Location string `xml:"-" json:"-"`
    Version int `xml:"version,attr" default:"3"`
-    Repositories []RepositoryConfiguration `xml:"repository"`
+    Folders []FolderConfiguration `xml:"folder"`
-    Nodes []NodeConfiguration `xml:"node"`
+    Devices []DeviceConfiguration `xml:"device"`
    GUI GUIConfiguration `xml:"gui"`
    Options OptionsConfiguration `xml:"options"`
    XMLName xml.Name `xml:"configuration" json:"-"`
}

-type RepositoryConfiguration struct {
+type FolderConfiguration struct {
    ID string `xml:"id,attr"`
    Directory string `xml:"directory,attr"`
-    Nodes []RepositoryNodeConfiguration `xml:"node"`
+    Devices []FolderDeviceConfiguration `xml:"device"`
    ReadOnly bool `xml:"ro,attr"`
    RescanIntervalS int `xml:"rescanIntervalS,attr" default:"60"`
    IgnorePerms bool `xml:"ignorePerms,attr"`
    Invalid string `xml:"-"` // Set at runtime when there is an error, not saved
    Versioning VersioningConfiguration `xml:"versioning"`

-    nodeIDs []protocol.NodeID
+    deviceIDs []protocol.DeviceID
}

type VersioningConfiguration struct {
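The XML struct tags above show how the rename surfaces in the on-disk configuration: what used to be serialized as <repository> and <node> elements is now <folder> and <device>. A small, self-contained sketch using trimmed-down mirror types with the same tags (DeviceID is a plain string here rather than protocol.DeviceID, just to keep the example standalone):

package main

import (
    "encoding/xml"
    "fmt"
)

// Trimmed mirrors of the renamed configuration structs above.
type folderDevice struct {
    DeviceID string `xml:"id,attr"`
}

type folder struct {
    XMLName         xml.Name       `xml:"folder"`
    ID              string         `xml:"id,attr"`
    Directory       string         `xml:"directory,attr"`
    RescanIntervalS int            `xml:"rescanIntervalS,attr"`
    Devices         []folderDevice `xml:"device"`
}

func main() {
    f := folder{
        ID:              "default",
        Directory:       "~/Sync",
        RescanIntervalS: 60,
        Devices:         []folderDevice{{DeviceID: "EXAMPLE-DEVICE-ID"}},
    }
    out, _ := xml.MarshalIndent(f, "", "  ")
    // Prints a <folder id="default" ...><device id="..."/></folder> element,
    // where older configs used <repository> and <node>.
    fmt.Println(string(out))
}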
@@ -86,17 +86,17 @@ func (c *VersioningConfiguration) UnmarshalXML(d *xml.Decoder, start xml.StartEl
    return nil
}

-func (r *RepositoryConfiguration) NodeIDs() []protocol.NodeID {
+func (r *FolderConfiguration) DeviceIDs() []protocol.DeviceID {
-    if r.nodeIDs == nil {
+    if r.deviceIDs == nil {
-        for _, n := range r.Nodes {
+        for _, n := range r.Devices {
-            r.nodeIDs = append(r.nodeIDs, n.NodeID)
+            r.deviceIDs = append(r.deviceIDs, n.DeviceID)
        }
    }
-    return r.nodeIDs
+    return r.deviceIDs
}

-type NodeConfiguration struct {
+type DeviceConfiguration struct {
-    NodeID protocol.NodeID `xml:"id,attr"`
+    DeviceID protocol.DeviceID `xml:"id,attr"`
    Name string `xml:"name,attr,omitempty"`
    Addresses []string `xml:"address,omitempty"`
    Compression bool `xml:"compression,attr"`
@@ -104,8 +104,8 @@ type NodeConfiguration struct {
    Introducer bool `xml:"introducer,attr"`
}

-type RepositoryNodeConfiguration struct {
+type FolderDeviceConfiguration struct {
-    NodeID protocol.NodeID `xml:"id,attr"`
+    DeviceID protocol.DeviceID `xml:"id,attr"`

    Deprecated_Name string `xml:"name,attr,omitempty" json:"-"`
    Deprecated_Addresses []string `xml:"address,omitempty" json:"-"`
@@ -145,35 +145,35 @@ type GUIConfiguration struct {
    APIKey string `xml:"apikey,omitempty"`
}

-func (cfg *Configuration) NodeMap() map[protocol.NodeID]NodeConfiguration {
+func (cfg *Configuration) DeviceMap() map[protocol.DeviceID]DeviceConfiguration {
-    m := make(map[protocol.NodeID]NodeConfiguration, len(cfg.Nodes))
+    m := make(map[protocol.DeviceID]DeviceConfiguration, len(cfg.Devices))
-    for _, n := range cfg.Nodes {
+    for _, n := range cfg.Devices {
-        m[n.NodeID] = n
+        m[n.DeviceID] = n
    }
    return m
}

-func (cfg *Configuration) GetNodeConfiguration(nodeID protocol.NodeID) *NodeConfiguration {
+func (cfg *Configuration) GetDeviceConfiguration(deviceID protocol.DeviceID) *DeviceConfiguration {
-    for i, node := range cfg.Nodes {
+    for i, device := range cfg.Devices {
-        if node.NodeID == nodeID {
+        if device.DeviceID == deviceID {
-            return &cfg.Nodes[i]
+            return &cfg.Devices[i]
        }
    }
    return nil
}

-func (cfg *Configuration) GetRepoConfiguration(repoID string) *RepositoryConfiguration {
+func (cfg *Configuration) GetFolderConfiguration(folderID string) *FolderConfiguration {
-    for i, repo := range cfg.Repositories {
+    for i, folder := range cfg.Folders {
-        if repo.ID == repoID {
+        if folder.ID == folderID {
-            return &cfg.Repositories[i]
+            return &cfg.Folders[i]
        }
    }
    return nil
}

-func (cfg *Configuration) RepoMap() map[string]RepositoryConfiguration {
+func (cfg *Configuration) FolderMap() map[string]FolderConfiguration {
-    m := make(map[string]RepositoryConfiguration, len(cfg.Repositories))
+    m := make(map[string]FolderConfiguration, len(cfg.Folders))
-    for _, r := range cfg.Repositories {
+    for _, r := range cfg.Folders {
        m[r.ID] = r
    }
    return m
@@ -290,44 +290,44 @@ func uniqueStrings(ss []string) []string {
    return us
}

-func (cfg *Configuration) prepare(myID protocol.NodeID) {
+func (cfg *Configuration) prepare(myID protocol.DeviceID) {
    fillNilSlices(&cfg.Options)

    cfg.Options.ListenAddress = uniqueStrings(cfg.Options.ListenAddress)

-    // Initialize an empty slice for repositories if the config has none
+    // Initialize an empty slice for folders if the config has none
-    if cfg.Repositories == nil {
+    if cfg.Folders == nil {
-        cfg.Repositories = []RepositoryConfiguration{}
+        cfg.Folders = []FolderConfiguration{}
    }

-    // Check for missing, bad or duplicate repository ID:s
+    // Check for missing, bad or duplicate folder ID:s
-    var seenRepos = map[string]*RepositoryConfiguration{}
+    var seenFolders = map[string]*FolderConfiguration{}
    var uniqueCounter int
-    for i := range cfg.Repositories {
+    for i := range cfg.Folders {
-        repo := &cfg.Repositories[i]
+        folder := &cfg.Folders[i]

-        if len(repo.Directory) == 0 {
+        if len(folder.Directory) == 0 {
-            repo.Invalid = "no directory configured"
+            folder.Invalid = "no directory configured"
            continue
        }

-        if repo.ID == "" {
+        if folder.ID == "" {
-            repo.ID = "default"
+            folder.ID = "default"
        }

-        if seen, ok := seenRepos[repo.ID]; ok {
+        if seen, ok := seenFolders[folder.ID]; ok {
-            l.Warnf("Multiple repositories with ID %q; disabling", repo.ID)
+            l.Warnf("Multiple folders with ID %q; disabling", folder.ID)

-            seen.Invalid = "duplicate repository ID"
+            seen.Invalid = "duplicate folder ID"
-            if seen.ID == repo.ID {
+            if seen.ID == folder.ID {
                uniqueCounter++
-                seen.ID = fmt.Sprintf("%s~%d", repo.ID, uniqueCounter)
+                seen.ID = fmt.Sprintf("%s~%d", folder.ID, uniqueCounter)
            }
-            repo.Invalid = "duplicate repository ID"
+            folder.Invalid = "duplicate folder ID"
            uniqueCounter++
-            repo.ID = fmt.Sprintf("%s~%d", repo.ID, uniqueCounter)
+            folder.ID = fmt.Sprintf("%s~%d", folder.ID, uniqueCounter)
        } else {
-            seenRepos[repo.ID] = repo
+            seenFolders[folder.ID] = folder
        }
    }

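The duplicate-ID handling in prepare() above marks colliding folders invalid and gives each a unique "~n" suffix so their IDs no longer clash. A minimal, self-contained sketch of that behavior with simplified stand-in types (not the real config types):

package main

import "fmt"

type folderCfg struct {
    ID      string
    Invalid string
}

// dedupeFolderIDs mirrors the collision handling shown above: on a duplicate
// ID, both the previously seen folder and the new one are marked invalid and
// renamed with a counter suffix.
func dedupeFolderIDs(folders []folderCfg) {
    seen := map[string]*folderCfg{}
    unique := 0
    for i := range folders {
        f := &folders[i]
        if prev, ok := seen[f.ID]; ok {
            prev.Invalid = "duplicate folder ID"
            if prev.ID == f.ID {
                unique++
                prev.ID = fmt.Sprintf("%s~%d", f.ID, unique)
            }
            f.Invalid = "duplicate folder ID"
            unique++
            f.ID = fmt.Sprintf("%s~%d", f.ID, unique)
        } else {
            seen[f.ID] = f
        }
    }
}

func main() {
    folders := []folderCfg{{ID: "default"}, {ID: "default"}}
    dedupeFolderIDs(folders)
    fmt.Printf("%+v\n", folders) // both entries now carry distinct ~n suffixes
}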
@@ -362,42 +362,42 @@ func (cfg *Configuration) prepare(myID protocol.NodeID) {
        }
    }

-    // Build a list of available nodes
+    // Build a list of available devices
-    existingNodes := make(map[protocol.NodeID]bool)
+    existingDevices := make(map[protocol.DeviceID]bool)
-    existingNodes[myID] = true
+    existingDevices[myID] = true
-    for _, node := range cfg.Nodes {
+    for _, device := range cfg.Devices {
-        existingNodes[node.NodeID] = true
+        existingDevices[device.DeviceID] = true
    }

-    // Ensure this node is present in all relevant places
+    // Ensure this device is present in all relevant places
-    me := cfg.GetNodeConfiguration(myID)
+    me := cfg.GetDeviceConfiguration(myID)
    if me == nil {
        myName, _ := os.Hostname()
-        cfg.Nodes = append(cfg.Nodes, NodeConfiguration{
+        cfg.Devices = append(cfg.Devices, DeviceConfiguration{
-            NodeID: myID,
+            DeviceID: myID,
            Name: myName,
        })
    }
-    sort.Sort(NodeConfigurationList(cfg.Nodes))
+    sort.Sort(DeviceConfigurationList(cfg.Devices))
-    // Ensure that any loose nodes are not present in the wrong places
+    // Ensure that any loose devices are not present in the wrong places
-    // Ensure that there are no duplicate nodes
+    // Ensure that there are no duplicate devices
-    for i := range cfg.Repositories {
+    for i := range cfg.Folders {
-        cfg.Repositories[i].Nodes = ensureNodePresent(cfg.Repositories[i].Nodes, myID)
+        cfg.Folders[i].Devices = ensureDevicePresent(cfg.Folders[i].Devices, myID)
-        cfg.Repositories[i].Nodes = ensureExistingNodes(cfg.Repositories[i].Nodes, existingNodes)
+        cfg.Folders[i].Devices = ensureExistingDevices(cfg.Folders[i].Devices, existingDevices)
-        cfg.Repositories[i].Nodes = ensureNoDuplicates(cfg.Repositories[i].Nodes)
+        cfg.Folders[i].Devices = ensureNoDuplicates(cfg.Folders[i].Devices)
-        sort.Sort(RepositoryNodeConfigurationList(cfg.Repositories[i].Nodes))
+        sort.Sort(FolderDeviceConfigurationList(cfg.Folders[i].Devices))
    }

    // An empty address list is equivalent to a single "dynamic" entry
-    for i := range cfg.Nodes {
+    for i := range cfg.Devices {
-        n := &cfg.Nodes[i]
+        n := &cfg.Devices[i]
        if len(n.Addresses) == 0 || len(n.Addresses) == 1 && n.Addresses[0] == "" {
            n.Addresses = []string{"dynamic"}
        }
    }
}

-func New(location string, myID protocol.NodeID) Configuration {
+func New(location string, myID protocol.DeviceID) Configuration {
    var cfg Configuration

    cfg.Location = location
@@ -411,7 +411,7 @@ func New(location string, myID protocol.NodeID) Configuration {
    return cfg
}

-func Load(location string, myID protocol.NodeID) (Configuration, error) {
+func Load(location string, myID protocol.DeviceID) (Configuration, error) {
    var cfg Configuration

    cfg.Location = location
@@ -435,24 +435,24 @@ func Load(location string, myID protocol.NodeID) (Configuration, error) {
// ChangeRequiresRestart returns true if updating the configuration requires a
// complete restart.
func ChangeRequiresRestart(from, to Configuration) bool {
-    // Adding, removing or changing repos requires restart
+    // Adding, removing or changing folders requires restart
-    if len(from.Repositories) != len(to.Repositories) {
+    if len(from.Folders) != len(to.Folders) {
        return true
    }
-    fromRepos := from.RepoMap()
+    fromFolders := from.FolderMap()
-    toRepos := to.RepoMap()
+    toFolders := to.FolderMap()
-    for id := range fromRepos {
+    for id := range fromFolders {
-        if !reflect.DeepEqual(fromRepos[id], toRepos[id]) {
+        if !reflect.DeepEqual(fromFolders[id], toFolders[id]) {
            return true
        }
    }

-    // Removing a node requires a restart. Adding one does not. Changing
+    // Removing a device requires a restart. Adding one does not. Changing
    // address or name does not.
-    fromNodes := from.NodeMap()
+    fromDevices := from.DeviceMap()
-    toNodes := to.NodeMap()
+    toDevices := to.DeviceMap()
-    for nodeID := range fromNodes {
+    for deviceID := range fromDevices {
-        if _, ok := toNodes[nodeID]; !ok {
+        if _, ok := toDevices[deviceID]; !ok {
            return true
        }
    }
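The device comparison above encodes an asymmetric rule: a device that disappears from the configuration forces a restart, while a newly added one does not. A small sketch of the same rule over plain maps (stand-ins for the real DeviceMap() result), just to make the asymmetry concrete:

package main

import "fmt"

// deviceRemovalNeedsRestart returns true if any device present in the old
// map is missing from the new one; devices only present in the new map are
// ignored, matching the comment in ChangeRequiresRestart above.
func deviceRemovalNeedsRestart(from, to map[string]bool) bool {
    for id := range from {
        if !to[id] {
            return true
        }
    }
    return false
}

func main() {
    old := map[string]bool{"A": true, "B": true}
    added := map[string]bool{"A": true, "B": true, "C": true}
    removed := map[string]bool{"A": true}
    fmt.Println(deviceRemovalNeedsRestart(old, added))   // false: adding C is fine
    fmt.Println(deviceRemovalNeedsRestart(old, removed)) // true: B was removed
}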
@@ -466,22 +466,22 @@ func ChangeRequiresRestart(from, to Configuration) bool {
}

func convertV3V4(cfg *Configuration) {
-    // In previous versions, rescan interval was common for each repository.
+    // In previous versions, rescan interval was common for each folder.
    // From now, it can be set independently. We have to make sure, that after upgrade
-    // the individual rescan interval will be defined for every existing repository.
+    // the individual rescan interval will be defined for every existing folder.
-    for i := range cfg.Repositories {
+    for i := range cfg.Folders {
-        cfg.Repositories[i].RescanIntervalS = cfg.Options.Deprecated_RescanIntervalS
+        cfg.Folders[i].RescanIntervalS = cfg.Options.Deprecated_RescanIntervalS
    }

    cfg.Options.Deprecated_RescanIntervalS = 0

-    // In previous versions, repositories held full node configurations.
+    // In previous versions, folders held full device configurations.
-    // Since that's the only place where node configs were in V1, we still have
+    // Since that's the only place where device configs were in V1, we still have
    // to define the deprecated fields to be able to upgrade from V1 to V4.
-    for i, repo := range cfg.Repositories {
+    for i, folder := range cfg.Folders {

-        for j := range repo.Nodes {
+        for j := range folder.Devices {
-            rncfg := cfg.Repositories[i].Nodes[j]
+            rncfg := cfg.Folders[i].Devices[j]
            rncfg.Deprecated_Name = ""
            rncfg.Deprecated_Addresses = nil
        }
@@ -492,10 +492,10 @@ func convertV3V4(cfg *Configuration) {

func convertV2V3(cfg *Configuration) {
    // In previous versions, compression was always on. When upgrading, enable
-    // compression on all existing new. New nodes will get compression on by
+    // compression on all existing new. New devices will get compression on by
    // default by the GUI.
-    for i := range cfg.Nodes {
+    for i := range cfg.Devices {
-        cfg.Nodes[i].Compression = true
+        cfg.Devices[i].Compression = true
    }

    // The global discovery format and port number changed in v0.9. Having the
@@ -508,31 +508,31 @@ func convertV2V3(cfg *Configuration) {
}

func convertV1V2(cfg *Configuration) {
-    // Collect the list of nodes.
+    // Collect the list of devices.
-    // Replace node configs inside repositories with only a reference to the nide ID.
+    // Replace device configs inside folders with only a reference to the nide ID.
-    // Set all repositories to read only if the global read only flag is set.
+    // Set all folders to read only if the global read only flag is set.
-    var nodes = map[string]RepositoryNodeConfiguration{}
+    var devices = map[string]FolderDeviceConfiguration{}
-    for i, repo := range cfg.Repositories {
+    for i, folder := range cfg.Folders {
-        cfg.Repositories[i].ReadOnly = cfg.Options.Deprecated_ReadOnly
+        cfg.Folders[i].ReadOnly = cfg.Options.Deprecated_ReadOnly
-        for j, node := range repo.Nodes {
+        for j, device := range folder.Devices {
-            id := node.NodeID.String()
+            id := device.DeviceID.String()
-            if _, ok := nodes[id]; !ok {
+            if _, ok := devices[id]; !ok {
-                nodes[id] = node
+                devices[id] = device
            }
-            cfg.Repositories[i].Nodes[j] = RepositoryNodeConfiguration{NodeID: node.NodeID}
+            cfg.Folders[i].Devices[j] = FolderDeviceConfiguration{DeviceID: device.DeviceID}
        }
    }
    cfg.Options.Deprecated_ReadOnly = false

-    // Set and sort the list of nodes.
+    // Set and sort the list of devices.
-    for _, node := range nodes {
+    for _, device := range devices {
-        cfg.Nodes = append(cfg.Nodes, NodeConfiguration{
+        cfg.Devices = append(cfg.Devices, DeviceConfiguration{
-            NodeID: node.NodeID,
+            DeviceID: device.DeviceID,
-            Name: node.Deprecated_Name,
+            Name: device.Deprecated_Name,
-            Addresses: node.Deprecated_Addresses,
+            Addresses: device.Deprecated_Addresses,
        })
    }
-    sort.Sort(NodeConfigurationList(cfg.Nodes))
+    sort.Sort(DeviceConfigurationList(cfg.Devices))

    // GUI
    cfg.GUI.Address = cfg.Options.Deprecated_GUIAddress
@@ -543,73 +543,73 @@ func convertV1V2(cfg *Configuration) {
    cfg.Version = 2
}

-type NodeConfigurationList []NodeConfiguration
+type DeviceConfigurationList []DeviceConfiguration

-func (l NodeConfigurationList) Less(a, b int) bool {
+func (l DeviceConfigurationList) Less(a, b int) bool {
-    return l[a].NodeID.Compare(l[b].NodeID) == -1
+    return l[a].DeviceID.Compare(l[b].DeviceID) == -1
}
-func (l NodeConfigurationList) Swap(a, b int) {
+func (l DeviceConfigurationList) Swap(a, b int) {
    l[a], l[b] = l[b], l[a]
}
-func (l NodeConfigurationList) Len() int {
+func (l DeviceConfigurationList) Len() int {
    return len(l)
}

-type RepositoryNodeConfigurationList []RepositoryNodeConfiguration
+type FolderDeviceConfigurationList []FolderDeviceConfiguration

-func (l RepositoryNodeConfigurationList) Less(a, b int) bool {
+func (l FolderDeviceConfigurationList) Less(a, b int) bool {
-    return l[a].NodeID.Compare(l[b].NodeID) == -1
+    return l[a].DeviceID.Compare(l[b].DeviceID) == -1
}
-func (l RepositoryNodeConfigurationList) Swap(a, b int) {
+func (l FolderDeviceConfigurationList) Swap(a, b int) {
    l[a], l[b] = l[b], l[a]
}
-func (l RepositoryNodeConfigurationList) Len() int {
+func (l FolderDeviceConfigurationList) Len() int {
    return len(l)
}

-func ensureNodePresent(nodes []RepositoryNodeConfiguration, myID protocol.NodeID) []RepositoryNodeConfiguration {
+func ensureDevicePresent(devices []FolderDeviceConfiguration, myID protocol.DeviceID) []FolderDeviceConfiguration {
-    for _, node := range nodes {
+    for _, device := range devices {
-        if node.NodeID.Equals(myID) {
+        if device.DeviceID.Equals(myID) {
-            return nodes
+            return devices
        }
    }

-    nodes = append(nodes, RepositoryNodeConfiguration{
+    devices = append(devices, FolderDeviceConfiguration{
-        NodeID: myID,
+        DeviceID: myID,
    })

-    return nodes
+    return devices
}

-func ensureExistingNodes(nodes []RepositoryNodeConfiguration, existingNodes map[protocol.NodeID]bool) []RepositoryNodeConfiguration {
+func ensureExistingDevices(devices []FolderDeviceConfiguration, existingDevices map[protocol.DeviceID]bool) []FolderDeviceConfiguration {
-    count := len(nodes)
+    count := len(devices)
    i := 0
loop:
    for i < count {
-        if _, ok := existingNodes[nodes[i].NodeID]; !ok {
+        if _, ok := existingDevices[devices[i].DeviceID]; !ok {
-            nodes[i] = nodes[count-1]
+            devices[i] = devices[count-1]
            count--
            continue loop
        }
        i++
    }
-    return nodes[0:count]
+    return devices[0:count]
}

-func ensureNoDuplicates(nodes []RepositoryNodeConfiguration) []RepositoryNodeConfiguration {
+func ensureNoDuplicates(devices []FolderDeviceConfiguration) []FolderDeviceConfiguration {
-    count := len(nodes)
+    count := len(devices)
    i := 0
-    seenNodes := make(map[protocol.NodeID]bool)
+    seenDevices := make(map[protocol.DeviceID]bool)
loop:
    for i < count {
-        id := nodes[i].NodeID
+        id := devices[i].DeviceID
-        if _, ok := seenNodes[id]; ok {
+        if _, ok := seenDevices[id]; ok {
-            nodes[i] = nodes[count-1]
+            devices[i] = devices[count-1]
            count--
            continue loop
        }
-        seenNodes[id] = true
+        seenDevices[id] = true
        i++
    }
-    return nodes[0:count]
+    return devices[0:count]
}
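The ensureExistingDevices and ensureNoDuplicates helpers above filter a slice in place by overwriting an unwanted element with the last element and shrinking the logical length, avoiding a second allocation. A generic, self-contained sketch of the same idiom over a string slice:

package main

import "fmt"

// filterInPlace keeps only elements for which keep returns true, using the
// same swap-with-last / shrink-count pattern as the helpers above.
func filterInPlace(ss []string, keep func(string) bool) []string {
    count := len(ss)
    i := 0
    for i < count {
        if !keep(ss[i]) {
            ss[i] = ss[count-1]
            count--
            continue
        }
        i++
    }
    return ss[:count]
}

func main() {
    devices := []string{"a", "b", "a", "c"}
    seen := map[string]bool{}
    unique := filterInPlace(devices, func(s string) bool {
        if seen[s] {
            return false
        }
        seen[s] = true
        return true
    })
    // Duplicates are dropped; the swap with the last element can reorder
    // entries, just as in the original helpers.
    fmt.Println(unique)
}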
@@ -12,13 +12,13 @@ import (
    "github.com/syncthing/syncthing/internal/protocol"
)

-var node1, node2, node3, node4 protocol.NodeID
+var device1, device2, device3, device4 protocol.DeviceID

func init() {
-    node1, _ = protocol.NodeIDFromString("AIR6LPZ7K4PTTUXQSMUUCPQ5YWOEDFIIQJUG7772YQXXR5YD6AWQ")
+    device1, _ = protocol.DeviceIDFromString("AIR6LPZ7K4PTTUXQSMUUCPQ5YWOEDFIIQJUG7772YQXXR5YD6AWQ")
-    node2, _ = protocol.NodeIDFromString("GYRZZQB-IRNPV4Z-T7TC52W-EQYJ3TT-FDQW6MW-DFLMU42-SSSU6EM-FBK2VAY")
+    device2, _ = protocol.DeviceIDFromString("GYRZZQB-IRNPV4Z-T7TC52W-EQYJ3TT-FDQW6MW-DFLMU42-SSSU6EM-FBK2VAY")
-    node3, _ = protocol.NodeIDFromString("LGFPDIT-7SKNNJL-VJZA4FC-7QNCRKA-CE753K7-2BW5QDK-2FOZ7FR-FEP57QJ")
+    device3, _ = protocol.DeviceIDFromString("LGFPDIT-7SKNNJL-VJZA4FC-7QNCRKA-CE753K7-2BW5QDK-2FOZ7FR-FEP57QJ")
-    node4, _ = protocol.NodeIDFromString("P56IOI7-MZJNU2Y-IQGDREY-DM2MGTI-MGL3BXN-PQ6W5BM-TBBZ4TJ-XZWICQ2")
+    device4, _ = protocol.DeviceIDFromString("P56IOI7-MZJNU2Y-IQGDREY-DM2MGTI-MGL3BXN-PQ6W5BM-TBBZ4TJ-XZWICQ2")
}

func TestDefaultValues(t *testing.T) {
@@ -39,69 +39,69 @@ func TestDefaultValues(t *testing.T) {
        RestartOnWakeup: true,
    }

-    cfg := New("test", node1)
+    cfg := New("test", device1)

    if !reflect.DeepEqual(cfg.Options, expected) {
        t.Errorf("Default config differs;\n E: %#v\n A: %#v", expected, cfg.Options)
    }
}

-func TestNodeConfig(t *testing.T) {
+func TestDeviceConfig(t *testing.T) {
    for i, ver := range []string{"v1", "v2", "v3", "v4"} {
-        cfg, err := Load("testdata/"+ver+".xml", node1)
+        cfg, err := Load("testdata/"+ver+".xml", device1)
        if err != nil {
            t.Error(err)
        }

-        expectedRepos := []RepositoryConfiguration{
+        expectedFolders := []FolderConfiguration{
            {
                ID: "test",
                Directory: "~/Sync",
-                Nodes: []RepositoryNodeConfiguration{{NodeID: node1}, {NodeID: node4}},
+                Devices: []FolderDeviceConfiguration{{DeviceID: device1}, {DeviceID: device4}},
                ReadOnly: true,
                RescanIntervalS: 600,
            },
        }
-        expectedNodes := []NodeConfiguration{
+        expectedDevices := []DeviceConfiguration{
            {
-                NodeID: node1,
+                DeviceID: device1,
-                Name: "node one",
+                Name: "device one",
                Addresses: []string{"a"},
                Compression: true,
            },
            {
-                NodeID: node4,
+                DeviceID: device4,
-                Name: "node two",
+                Name: "device two",
                Addresses: []string{"b"},
                Compression: true,
            },
        }
-        expectedNodeIDs := []protocol.NodeID{node1, node4}
+        expectedDeviceIDs := []protocol.DeviceID{device1, device4}

        if cfg.Version != 4 {
            t.Errorf("%d: Incorrect version %d != 3", i, cfg.Version)
        }
-        if !reflect.DeepEqual(cfg.Repositories, expectedRepos) {
+        if !reflect.DeepEqual(cfg.Folders, expectedFolders) {
-            t.Errorf("%d: Incorrect Repositories\n A: %#v\n E: %#v", i, cfg.Repositories, expectedRepos)
+            t.Errorf("%d: Incorrect Folders\n A: %#v\n E: %#v", i, cfg.Folders, expectedFolders)
        }
-        if !reflect.DeepEqual(cfg.Nodes, expectedNodes) {
+        if !reflect.DeepEqual(cfg.Devices, expectedDevices) {
-            t.Errorf("%d: Incorrect Nodes\n A: %#v\n E: %#v", i, cfg.Nodes, expectedNodes)
+            t.Errorf("%d: Incorrect Devices\n A: %#v\n E: %#v", i, cfg.Devices, expectedDevices)
        }
-        if !reflect.DeepEqual(cfg.Repositories[0].NodeIDs(), expectedNodeIDs) {
+        if !reflect.DeepEqual(cfg.Folders[0].DeviceIDs(), expectedDeviceIDs) {
-            t.Errorf("%d: Incorrect NodeIDs\n A: %#v\n E: %#v", i, cfg.Repositories[0].NodeIDs(), expectedNodeIDs)
+            t.Errorf("%d: Incorrect DeviceIDs\n A: %#v\n E: %#v", i, cfg.Folders[0].DeviceIDs(), expectedDeviceIDs)
        }

-        if len(cfg.NodeMap()) != len(expectedNodes) {
+        if len(cfg.DeviceMap()) != len(expectedDevices) {
-            t.Errorf("Unexpected number of NodeMap() entries")
+            t.Errorf("Unexpected number of DeviceMap() entries")
        }
-        if len(cfg.RepoMap()) != len(expectedRepos) {
+        if len(cfg.FolderMap()) != len(expectedFolders) {
-            t.Errorf("Unexpected number of RepoMap() entries")
+            t.Errorf("Unexpected number of FolderMap() entries")
        }
    }
}

func TestNoListenAddress(t *testing.T) {
-    cfg, err := Load("testdata/nolistenaddress.xml", node1)
+    cfg, err := Load("testdata/nolistenaddress.xml", device1)
    if err != nil {
        t.Error(err)
    }
@@ -130,7 +130,7 @@ func TestOverriddenValues(t *testing.T) {
        RestartOnWakeup: false,
    }

-    cfg, err := Load("testdata/overridenvalues.xml", node1)
+    cfg, err := Load("testdata/overridenvalues.xml", device1)
    if err != nil {
        t.Error(err)
    }
@@ -140,80 +140,80 @@ func TestOverriddenValues(t *testing.T) {
    }
}

-func TestNodeAddressesDynamic(t *testing.T) {
+func TestDeviceAddressesDynamic(t *testing.T) {
    name, _ := os.Hostname()
-    expected := []NodeConfiguration{
+    expected := []DeviceConfiguration{
        {
-            NodeID: node1,
+            DeviceID: device1,
            Addresses: []string{"dynamic"},
            Compression: true,
        },
        {
-            NodeID: node2,
+            DeviceID: device2,
            Addresses: []string{"dynamic"},
            Compression: true,
        },
        {
-            NodeID: node3,
+            DeviceID: device3,
            Addresses: []string{"dynamic"},
            Compression: true,
        },
        {
-            NodeID: node4,
+            DeviceID: device4,
            Name: name, // Set when auto created
            Addresses: []string{"dynamic"},
        },
    }

-    cfg, err := Load("testdata/nodeaddressesdynamic.xml", node4)
+    cfg, err := Load("testdata/deviceaddressesdynamic.xml", device4)
    if err != nil {
        t.Error(err)
    }

-    if !reflect.DeepEqual(cfg.Nodes, expected) {
+    if !reflect.DeepEqual(cfg.Devices, expected) {
-        t.Errorf("Nodes differ;\n E: %#v\n A: %#v", expected, cfg.Nodes)
+        t.Errorf("Devices differ;\n E: %#v\n A: %#v", expected, cfg.Devices)
    }
}

-func TestNodeAddressesStatic(t *testing.T) {
+func TestDeviceAddressesStatic(t *testing.T) {
    name, _ := os.Hostname()
-    expected := []NodeConfiguration{
+    expected := []DeviceConfiguration{
        {
-            NodeID: node1,
+            DeviceID: device1,
            Addresses: []string{"192.0.2.1", "192.0.2.2"},
        },
        {
-            NodeID: node2,
+            DeviceID: device2,
            Addresses: []string{"192.0.2.3:6070", "[2001:db8::42]:4242"},
        },
        {
-            NodeID: node3,
+            DeviceID: device3,
            Addresses: []string{"[2001:db8::44]:4444", "192.0.2.4:6090"},
        },
        {
-            NodeID: node4,
+            DeviceID: device4,
            Name: name, // Set when auto created
            Addresses: []string{"dynamic"},
        },
    }

-    cfg, err := Load("testdata/nodeaddressesstatic.xml", node4)
+    cfg, err := Load("testdata/deviceaddressesstatic.xml", device4)
    if err != nil {
        t.Error(err)
    }

-    if !reflect.DeepEqual(cfg.Nodes, expected) {
+    if !reflect.DeepEqual(cfg.Devices, expected) {
-        t.Errorf("Nodes differ;\n E: %#v\n A: %#v", expected, cfg.Nodes)
+        t.Errorf("Devices differ;\n E: %#v\n A: %#v", expected, cfg.Devices)
    }
}

func TestVersioningConfig(t *testing.T) {
-    cfg, err := Load("testdata/versioningconfig.xml", node4)
+    cfg, err := Load("testdata/versioningconfig.xml", device4)
    if err != nil {
        t.Error(err)
    }

-    vc := cfg.Repositories[0].Versioning
+    vc := cfg.Folders[0].Versioning
    if vc.Type != "simple" {
        t.Errorf(`vc.Type %q != "simple"`, vc.Type)
    }
@@ -239,7 +239,7 @@ func TestNewSaveLoad(t *testing.T) {
        return err == nil
    }

-    cfg := New(path, node1)
+    cfg := New(path, device1)

    // To make the equality pass later
    cfg.XMLName.Local = "configuration"
@@ -256,7 +256,7 @@ func TestNewSaveLoad(t *testing.T) {
        t.Error(path, "does not exist")
    }

-    cfg2, err := Load(path, node1)
+    cfg2, err := Load(path, device1)
    if err != nil {
        t.Error(err)
    }
@@ -268,7 +268,7 @@ func TestNewSaveLoad(t *testing.T) {
    cfg.GUI.User = "test"
    cfg.Save()

-    cfg2, err = Load(path, node1)
+    cfg2, err = Load(path, device1)
    if err != nil {
        t.Error(err)
    }
@@ -283,13 +283,13 @@ func TestNewSaveLoad(t *testing.T) {
func TestPrepare(t *testing.T) {
    var cfg Configuration

-    if cfg.Repositories != nil || cfg.Nodes != nil || cfg.Options.ListenAddress != nil {
+    if cfg.Folders != nil || cfg.Devices != nil || cfg.Options.ListenAddress != nil {
        t.Error("Expected nil")
    }

-    cfg.prepare(node1)
+    cfg.prepare(device1)

-    if cfg.Repositories == nil || cfg.Nodes == nil || cfg.Options.ListenAddress == nil {
+    if cfg.Folders == nil || cfg.Devices == nil || cfg.Options.ListenAddress == nil {
        t.Error("Unexpected nil")
    }
}
@@ -20,7 +20,7 @@ import (
)

type Discoverer struct {
-    myID protocol.NodeID
+    myID protocol.DeviceID
    listenAddrs []string
    localBcastIntv time.Duration
    globalBcastIntv time.Duration
@@ -28,7 +28,7 @@ type Discoverer struct {
    cacheLifetime time.Duration
    broadcastBeacon beacon.Interface
    multicastBeacon beacon.Interface
-    registry map[protocol.NodeID][]cacheEntry
+    registry map[protocol.DeviceID][]cacheEntry
    registryLock sync.RWMutex
    extServer string
    extPort uint16
@@ -49,7 +49,7 @@ var (
    ErrIncorrectMagic = errors.New("incorrect magic number")
)

-func NewDiscoverer(id protocol.NodeID, addresses []string) *Discoverer {
+func NewDiscoverer(id protocol.DeviceID, addresses []string) *Discoverer {
    return &Discoverer{
        myID: id,
        listenAddrs: addresses,
@@ -57,7 +57,7 @@ func NewDiscoverer(id protocol.NodeID, addresses []string) *Discoverer {
        globalBcastIntv: 1800 * time.Second,
        errorRetryIntv: 60 * time.Second,
        cacheLifetime: 5 * time.Minute,
-        registry: make(map[protocol.NodeID][]cacheEntry),
+        registry: make(map[protocol.DeviceID][]cacheEntry),
    }
}

@@ -120,9 +120,9 @@ func (d *Discoverer) ExtAnnounceOK() bool {
    return d.extAnnounceOK
}

-func (d *Discoverer) Lookup(node protocol.NodeID) []string {
+func (d *Discoverer) Lookup(device protocol.DeviceID) []string {
    d.registryLock.Lock()
-    cached := d.filterCached(d.registry[node])
+    cached := d.filterCached(d.registry[device])
    d.registryLock.Unlock()

    if len(cached) > 0 {
@@ -132,7 +132,7 @@ func (d *Discoverer) Lookup(node protocol.NodeID) []string {
        }
        return addrs
    } else if len(d.extServer) != 0 {
-        addrs := d.externalLookup(node)
+        addrs := d.externalLookup(device)
        cached = make([]cacheEntry, len(addrs))
        for i := range addrs {
            cached[i] = cacheEntry{
@@ -142,32 +142,32 @@ func (d *Discoverer) Lookup(node protocol.NodeID) []string {
        }

        d.registryLock.Lock()
-        d.registry[node] = cached
+        d.registry[device] = cached
        d.registryLock.Unlock()
    }
    return nil
}

-func (d *Discoverer) Hint(node string, addrs []string) {
+func (d *Discoverer) Hint(device string, addrs []string) {
    resAddrs := resolveAddrs(addrs)
-    var id protocol.NodeID
+    var id protocol.DeviceID
-    id.UnmarshalText([]byte(node))
+    id.UnmarshalText([]byte(device))
-    d.registerNode(nil, Node{
+    d.registerDevice(nil, Device{
        Addresses: resAddrs,
        ID: id[:],
    })
}

-func (d *Discoverer) All() map[protocol.NodeID][]cacheEntry {
+func (d *Discoverer) All() map[protocol.DeviceID][]cacheEntry {
    d.registryLock.RLock()
-    nodes := make(map[protocol.NodeID][]cacheEntry, len(d.registry))
+    devices := make(map[protocol.DeviceID][]cacheEntry, len(d.registry))
-    for node, addrs := range d.registry {
+    for device, addrs := range d.registry {
        addrsCopy := make([]cacheEntry, len(addrs))
        copy(addrsCopy, addrs)
-        nodes[node] = addrsCopy
+        devices[device] = addrsCopy
    }
    d.registryLock.RUnlock()
-    return nodes
+    return devices
}

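A minimal usage sketch of the renamed Discoverer API above (NewDiscoverer and Lookup). The import path for the discover package is assumed by analogy with the protocol import that appears in the config tests, and the device IDs are the sample IDs used there:

package main

import (
    "fmt"
    "log"

    // Path assumed by analogy with the internal/protocol import above;
    // adjust to wherever the discover package actually lives.
    "github.com/syncthing/syncthing/internal/discover"
    "github.com/syncthing/syncthing/internal/protocol"
)

func main() {
    myID, err := protocol.DeviceIDFromString("P56IOI7-MZJNU2Y-IQGDREY-DM2MGTI-MGL3BXN-PQ6W5BM-TBBZ4TJ-XZWICQ2")
    if err != nil {
        log.Fatal(err)
    }

    // NewDiscoverer only records our device ID and listen addresses; the
    // actual broadcasts are started elsewhere in the package. Lookup returns
    // cached addresses for a device, or consults the external announce
    // server once one is configured; with no announcements received yet it
    // simply returns nil.
    d := discover.NewDiscoverer(myID, []string{":22000"})
    peer, _ := protocol.DeviceIDFromString("AIR6LPZ7K4PTTUXQSMUUCPQ5YWOEDFIIQJUG7772YQXXR5YD6AWQ")
    fmt.Println(d.Lookup(peer))
}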
 func (d *Discoverer) announcementPkt() []byte {
@@ -190,7 +190,7 @@ func (d *Discoverer) announcementPkt() []byte {
 	}
 	var pkt = Announce{
 		Magic: AnnouncementMagic,
-		This:  Node{d.myID[:], addrs},
+		This:  Device{d.myID[:], addrs},
 	}
 	return pkt.MarshalXDR()
 }
@@ -200,7 +200,7 @@ func (d *Discoverer) sendLocalAnnouncements() {

 	var pkt = Announce{
 		Magic: AnnouncementMagic,
-		This:  Node{d.myID[:], addrs},
+		This:  Device{d.myID[:], addrs},
 	}
 	msg := pkt.MarshalXDR()

@@ -240,7 +240,7 @@ func (d *Discoverer) sendExternalAnnouncements() {
 	if d.extPort != 0 {
 		var pkt = Announce{
 			Magic: AnnouncementMagic,
-			This:  Node{d.myID[:], []Address{{Port: d.extPort}}},
+			This:  Device{d.myID[:], []Address{{Port: d.extPort}}},
 		}
 		buf = pkt.MarshalXDR()
 	} else {
@@ -264,7 +264,7 @@ func (d *Discoverer) sendExternalAnnouncements() {
 			}
 			ok = false
 		} else {
-			// Verify that the announce server responds positively for our node ID
+			// Verify that the announce server responds positively for our device ID

 			time.Sleep(1 * time.Second)
 			res := d.externalLookup(d.myID)
@@ -321,12 +321,12 @@ func (d *Discoverer) recvAnnouncements(b beacon.Interface) {
 			continue
 		}

-		var newNode bool
+		var newDevice bool
 		if bytes.Compare(pkt.This.ID, d.myID[:]) != 0 {
-			newNode = d.registerNode(addr, pkt.This)
+			newDevice = d.registerDevice(addr, pkt.This)
 		}

-		if newNode {
+		if newDevice {
 			select {
 			case d.forcedBcastTick <- time.Now():
 			}
@@ -334,9 +334,9 @@ func (d *Discoverer) recvAnnouncements(b beacon.Interface) {
 	}
 }

-func (d *Discoverer) registerNode(addr net.Addr, node Node) bool {
-	var id protocol.NodeID
-	copy(id[:], node.ID)
+func (d *Discoverer) registerDevice(addr net.Addr, device Device) bool {
+	var id protocol.DeviceID
+	copy(id[:], device.ID)

 	d.registryLock.RLock()
 	current := d.filterCached(d.registry[id])
@@ -344,23 +344,23 @@ func (d *Discoverer) registerNode(addr net.Addr, node Node) bool {

 	orig := current

-	for _, a := range node.Addresses {
-		var nodeAddr string
+	for _, a := range device.Addresses {
+		var deviceAddr string
 		if len(a.IP) > 0 {
-			nodeAddr = net.JoinHostPort(net.IP(a.IP).String(), strconv.Itoa(int(a.Port)))
+			deviceAddr = net.JoinHostPort(net.IP(a.IP).String(), strconv.Itoa(int(a.Port)))
 		} else if addr != nil {
 			ua := addr.(*net.UDPAddr)
 			ua.Port = int(a.Port)
-			nodeAddr = ua.String()
+			deviceAddr = ua.String()
 		}
 		for i := range current {
-			if current[i].addr == nodeAddr {
+			if current[i].addr == deviceAddr {
 				current[i].seen = time.Now()
 				goto done
 			}
 		}
 		current = append(current, cacheEntry{
-			addr: nodeAddr,
+			addr: deviceAddr,
 			seen: time.Now(),
 		})
 	done:
@@ -379,8 +379,8 @@ func (d *Discoverer) registerNode(addr net.Addr, node Node) bool {
 		for i := range current {
 			addrs[i] = current[i].addr
 		}
-		events.Default.Log(events.NodeDiscovered, map[string]interface{}{
-			"node": id.String(),
+		events.Default.Log(events.DeviceDiscovered, map[string]interface{}{
+			"device": id.String(),
 			"addrs": addrs,
 		})
 	}
@@ -388,7 +388,7 @@ func (d *Discoverer) registerNode(addr net.Addr, node Node) bool {
 	return len(current) > len(orig)
 }

-func (d *Discoverer) externalLookup(node protocol.NodeID) []string {
+func (d *Discoverer) externalLookup(device protocol.DeviceID) []string {
 	extIP, err := net.ResolveUDPAddr("udp", d.extServer)
 	if err != nil {
 		if debug {
@@ -414,7 +414,7 @@ func (d *Discoverer) externalLookup(node protocol.NodeID) []string {
 		return nil
 	}

-	buf := Query{QueryMagic, node[:]}.MarshalXDR()
+	buf := Query{QueryMagic, device[:]}.MarshalXDR()
 	_, err = conn.Write(buf)
 	if err != nil {
 		if debug {
@@ -427,7 +427,7 @@ func (d *Discoverer) externalLookup(node protocol.NodeID) []string {
 	n, err := conn.Read(buf)
 	if err != nil {
 		if err, ok := err.(net.Error); ok && err.Timeout() {
-			// Expected if the server doesn't know about requested node ID
+			// Expected if the server doesn't know about requested device ID
 			return nil
 		}
 		if debug {
@@ -451,8 +451,8 @@ func (d *Discoverer) externalLookup(node protocol.NodeID) []string {

 	var addrs []string
 	for _, a := range pkt.This.Addresses {
-		nodeAddr := net.JoinHostPort(net.IP(a.IP).String(), strconv.Itoa(int(a.Port)))
-		addrs = append(addrs, nodeAddr)
+		deviceAddr := net.JoinHostPort(net.IP(a.IP).String(), strconv.Itoa(int(a.Port)))
+		addrs = append(addrs, deviceAddr)
 	}
 	return addrs
 }
@@ -2,5 +2,5 @@
 // All rights reserved. Use of this source code is governed by an MIT-style
 // license that can be found in the LICENSE file.

-// Package discover implements the node discovery protocol.
+// Package discover implements the device discovery protocol.
 package discover
@@ -11,16 +11,16 @@ const (

 type Query struct {
 	Magic uint32
-	NodeID []byte // max:32
+	DeviceID []byte // max:32
 }

 type Announce struct {
 	Magic uint32
-	This Node
-	Extra []Node // max:16
+	This Device
+	Extra []Device // max:16
 }

-type Node struct {
+type Device struct {
 	ID []byte // max:32
 	Addresses []Address // max:16
 }
@@ -20,17 +20,17 @@ Query Structure:
 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 |                             Magic                             |
 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-|                       Length of Node ID                       |
+|                      Length of Device ID                      |
 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 /                                                               /
-\                   Node ID (variable length)                   \
+\                  Device ID (variable length)                  \
 /                                                               /
 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+


 struct Query {
 	unsigned int Magic;
-	opaque NodeID<32>;
+	opaque DeviceID<32>;
 }

 */
@@ -53,10 +53,10 @@ func (o Query) AppendXDR(bs []byte) []byte {

 func (o Query) encodeXDR(xw *xdr.Writer) (int, error) {
 	xw.WriteUint32(o.Magic)
-	if len(o.NodeID) > 32 {
+	if len(o.DeviceID) > 32 {
 		return xw.Tot(), xdr.ErrElementSizeExceeded
 	}
-	xw.WriteBytes(o.NodeID)
+	xw.WriteBytes(o.DeviceID)
 	return xw.Tot(), xw.Error()
 }

@@ -73,7 +73,7 @@ func (o *Query) UnmarshalXDR(bs []byte) error {

 func (o *Query) decodeXDR(xr *xdr.Reader) error {
 	o.Magic = xr.ReadUint32()
-	o.NodeID = xr.ReadBytesMax(32)
+	o.DeviceID = xr.ReadBytesMax(32)
 	return xr.Error()
 }

@@ -86,20 +86,20 @@ Announce Structure:
 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 |                             Magic                             |
 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-|                             Node                              |
+|                            Device                             |
 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 |                        Number of Extra                        |
 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 /                                                               /
-\                  Zero or more Node Structures                 \
+\                 Zero or more Device Structures                \
 /                                                               /
 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+


 struct Announce {
 	unsigned int Magic;
-	Node This;
-	Node Extra<16>;
+	Device This;
+	Device Extra<16>;
 }

 */
@@ -157,7 +157,7 @@ func (o *Announce) decodeXDR(xr *xdr.Reader) error {
 	if _ExtraSize > 16 {
 		return xdr.ErrElementSizeExceeded
 	}
-	o.Extra = make([]Node, _ExtraSize)
+	o.Extra = make([]Device, _ExtraSize)
 	for i := range o.Extra {
 		(&o.Extra[i]).decodeXDR(xr)
 	}
@@ -166,7 +166,7 @@ func (o *Announce) decodeXDR(xr *xdr.Reader) error {

 /*

-Node Structure:
+Device Structure:

  0                   1                   2                   3
  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
@@ -185,30 +185,30 @@ Node Structure:
 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+


-struct Node {
+struct Device {
 	opaque ID<32>;
 	Address Addresses<16>;
 }

 */

-func (o Node) EncodeXDR(w io.Writer) (int, error) {
+func (o Device) EncodeXDR(w io.Writer) (int, error) {
 	var xw = xdr.NewWriter(w)
 	return o.encodeXDR(xw)
 }

-func (o Node) MarshalXDR() []byte {
+func (o Device) MarshalXDR() []byte {
 	return o.AppendXDR(make([]byte, 0, 128))
 }

-func (o Node) AppendXDR(bs []byte) []byte {
+func (o Device) AppendXDR(bs []byte) []byte {
 	var aw = xdr.AppendWriter(bs)
 	var xw = xdr.NewWriter(&aw)
 	o.encodeXDR(xw)
 	return []byte(aw)
 }

-func (o Node) encodeXDR(xw *xdr.Writer) (int, error) {
+func (o Device) encodeXDR(xw *xdr.Writer) (int, error) {
 	if len(o.ID) > 32 {
 		return xw.Tot(), xdr.ErrElementSizeExceeded
 	}
@@ -226,18 +226,18 @@ func (o Node) encodeXDR(xw *xdr.Writer) (int, error) {
 	return xw.Tot(), xw.Error()
 }

-func (o *Node) DecodeXDR(r io.Reader) error {
+func (o *Device) DecodeXDR(r io.Reader) error {
 	xr := xdr.NewReader(r)
 	return o.decodeXDR(xr)
 }

-func (o *Node) UnmarshalXDR(bs []byte) error {
+func (o *Device) UnmarshalXDR(bs []byte) error {
 	var br = bytes.NewReader(bs)
 	var xr = xdr.NewReader(br)
 	return o.decodeXDR(xr)
 }

-func (o *Node) decodeXDR(xr *xdr.Reader) error {
+func (o *Device) decodeXDR(xr *xdr.Reader) error {
 	o.ID = xr.ReadBytesMax(32)
 	_AddressesSize := int(xr.ReadUint32())
 	if _AddressesSize > 16 {
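The diagrams above describe the XDR wire format of the discovery packets. As a minimal, self-contained sketch (not the project's own xdr package), the Query framing can be reproduced with encoding/binary: a 32-bit magic word, then the device ID as an XDR opaque, i.e. a 32-bit length followed by the bytes padded to a multiple of four. The magic constant used below is a placeholder, not the real QueryMagic value.

package main

import (
    "encoding/binary"
    "fmt"
)

// encodeQuery mirrors the Query diagram above: 32-bit magic, 32-bit length
// of the device ID, then the device ID bytes padded to 4-byte alignment.
func encodeQuery(magic uint32, deviceID []byte) []byte {
    buf := make([]byte, 8, 8+len(deviceID)+3)
    binary.BigEndian.PutUint32(buf[0:4], magic)
    binary.BigEndian.PutUint32(buf[4:8], uint32(len(deviceID)))
    buf = append(buf, deviceID...)
    for len(buf)%4 != 0 { // XDR pads opaque data to a multiple of four bytes
        buf = append(buf, 0)
    }
    return buf
}

func main() {
    id := make([]byte, 32)             // device IDs are at most 32 bytes (max:32 above)
    pkt := encodeQuery(0x12345678, id) // placeholder magic, not the real QueryMagic
    fmt.Printf("packet is %d bytes, header % x\n", len(pkt), pkt[:8])
}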
@@ -17,15 +17,15 @@ const (
 	Ping = 1 << iota
 	Starting
 	StartupComplete
-	NodeDiscovered
-	NodeConnected
-	NodeDisconnected
-	NodeRejected
+	DeviceDiscovered
+	DeviceConnected
+	DeviceDisconnected
+	DeviceRejected
 	LocalIndexUpdated
 	RemoteIndexUpdated
 	ItemStarted
 	StateChanged
-	RepoRejected
+	FolderRejected
 	ConfigSaved

 	AllEvents = ^EventType(0)
@@ -39,14 +39,14 @@ func (t EventType) String() string {
 		return "Starting"
 	case StartupComplete:
 		return "StartupComplete"
-	case NodeDiscovered:
-		return "NodeDiscovered"
-	case NodeConnected:
-		return "NodeConnected"
-	case NodeDisconnected:
-		return "NodeDisconnected"
-	case NodeRejected:
-		return "NodeRejected"
+	case DeviceDiscovered:
+		return "DeviceDiscovered"
+	case DeviceConnected:
+		return "DeviceConnected"
+	case DeviceDisconnected:
+		return "DeviceDisconnected"
+	case DeviceRejected:
+		return "DeviceRejected"
 	case LocalIndexUpdated:
 		return "LocalIndexUpdated"
 	case RemoteIndexUpdated:
@@ -55,8 +55,8 @@ func (t EventType) String() string {
 		return "ItemStarted"
 	case StateChanged:
 		return "StateChanged"
-	case RepoRejected:
-		return "RepoRejected"
+	case FolderRejected:
+		return "FolderRejected"
 	case ConfigSaved:
 		return "ConfigSaved"
 	default:
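Because the event types are bit flags (1 << iota above), a subscriber can combine the renamed constants into a single mask. A small sketch of that usage, assuming the events import path from this repository layout; the Default logger, Subscribe, Log and Poll calls are the ones exercised by the tests that follow.

package main

import (
    "fmt"
    "time"

    "github.com/syncthing/syncthing/events" // assumed import path
)

func main() {
    // Subscribe to the device connectivity events only.
    sub := events.Default.Subscribe(events.DeviceConnected | events.DeviceDisconnected)
    defer events.Default.Unsubscribe(sub)

    events.Default.Log(events.DeviceConnected, map[string]string{"id": "example"})

    ev, err := sub.Poll(time.Second)
    if err != nil {
        fmt.Println("no event:", err)
        return
    }
    fmt.Println(ev.Type, ev.Data) // should print: DeviceConnected map[id:example]
}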
@@ -41,7 +41,7 @@ func TestTimeout(t *testing.T) {
 func TestEventBeforeSubscribe(t *testing.T) {
 	l := events.NewLogger()

-	l.Log(events.NodeConnected, "foo")
+	l.Log(events.DeviceConnected, "foo")
 	s := l.Subscribe(0)

 	_, err := s.Poll(timeout)
@@ -54,14 +54,14 @@ func TestEventAfterSubscribe(t *testing.T) {
 	l := events.NewLogger()

 	s := l.Subscribe(events.AllEvents)
-	l.Log(events.NodeConnected, "foo")
+	l.Log(events.DeviceConnected, "foo")

 	ev, err := s.Poll(timeout)

 	if err != nil {
 		t.Fatal("Unexpected error:", err)
 	}
-	if ev.Type != events.NodeConnected {
+	if ev.Type != events.DeviceConnected {
 		t.Error("Incorrect event type", ev.Type)
 	}
 	switch v := ev.Data.(type) {
@@ -77,8 +77,8 @@ func TestEventAfterSubscribe(t *testing.T) {
 func TestEventAfterSubscribeIgnoreMask(t *testing.T) {
 	l := events.NewLogger()

-	s := l.Subscribe(events.NodeDisconnected)
-	l.Log(events.NodeConnected, "foo")
+	s := l.Subscribe(events.DeviceDisconnected)
+	l.Log(events.DeviceConnected, "foo")

 	_, err := s.Poll(timeout)
 	if err != events.ErrTimeout {
@@ -93,7 +93,7 @@ func TestBufferOverflow(t *testing.T) {

 	t0 := time.Now()
 	for i := 0; i < events.BufferSize*2; i++ {
-		l.Log(events.NodeConnected, "foo")
+		l.Log(events.DeviceConnected, "foo")
 	}
 	if time.Since(t0) > timeout {
 		t.Fatalf("Logging took too long")
@@ -104,7 +104,7 @@ func TestUnsubscribe(t *testing.T) {
 	l := events.NewLogger()

 	s := l.Subscribe(events.AllEvents)
-	l.Log(events.NodeConnected, "foo")
+	l.Log(events.DeviceConnected, "foo")

 	_, err := s.Poll(timeout)
 	if err != nil {
@@ -112,7 +112,7 @@ func TestUnsubscribe(t *testing.T) {
 	}

 	l.Unsubscribe(s)
-	l.Log(events.NodeConnected, "foo")
+	l.Log(events.DeviceConnected, "foo")

 	_, err = s.Poll(timeout)
 	if err != events.ErrClosed {
@@ -124,8 +124,8 @@ func TestIDs(t *testing.T) {
 	l := events.NewLogger()

 	s := l.Subscribe(events.AllEvents)
-	l.Log(events.NodeConnected, "foo")
-	l.Log(events.NodeConnected, "bar")
+	l.Log(events.DeviceConnected, "foo")
+	l.Log(events.DeviceConnected, "bar")

 	ev, err := s.Poll(timeout)
 	if err != nil {
@@ -156,7 +156,7 @@ func TestBufferedSub(t *testing.T) {

 	go func() {
 		for i := 0; i < 10*events.BufferSize; i++ {
-			l.Log(events.NodeConnected, fmt.Sprintf("event-%d", i))
+			l.Log(events.DeviceConnected, fmt.Sprintf("event-%d", i))
 			if i%30 == 0 {
 				// Give the buffer routine time to pick up the events
 				time.Sleep(20 * time.Millisecond)
@@ -35,13 +35,13 @@ func clock(v uint64) uint64 {
 }

 const (
-	keyTypeNode = iota
+	keyTypeDevice = iota
 	keyTypeGlobal
 )

 type fileVersion struct {
 	version uint64
-	node []byte
+	device []byte
 }

 type versionList struct {
@@ -73,47 +73,47 @@ type dbWriter interface {

 /*

-keyTypeNode (1 byte)
-    repository (64 bytes)
-    node (32 bytes)
+keyTypeDevice (1 byte)
+    folder (64 bytes)
+    device (32 bytes)
     name (variable size)
     |
     scanner.File

 keyTypeGlobal (1 byte)
-    repository (64 bytes)
+    folder (64 bytes)
     name (variable size)
     |
     []fileVersion (sorted)

 */

-func nodeKey(repo, node, file []byte) []byte {
+func deviceKey(folder, device, file []byte) []byte {
 	k := make([]byte, 1+64+32+len(file))
-	k[0] = keyTypeNode
-	copy(k[1:], []byte(repo))
-	copy(k[1+64:], node[:])
+	k[0] = keyTypeDevice
+	copy(k[1:], []byte(folder))
+	copy(k[1+64:], device[:])
 	copy(k[1+64+32:], []byte(file))
 	return k
 }

-func globalKey(repo, file []byte) []byte {
+func globalKey(folder, file []byte) []byte {
 	k := make([]byte, 1+64+len(file))
 	k[0] = keyTypeGlobal
-	copy(k[1:], []byte(repo))
+	copy(k[1:], []byte(folder))
 	copy(k[1+64:], []byte(file))
 	return k
 }

-func nodeKeyName(key []byte) []byte {
+func deviceKeyName(key []byte) []byte {
 	return key[1+64+32:]
 }
-func nodeKeyRepo(key []byte) []byte {
-	repo := key[1 : 1+64]
-	izero := bytes.IndexByte(repo, 0)
-	return repo[:izero]
+func deviceKeyFolder(key []byte) []byte {
+	folder := key[1 : 1+64]
+	izero := bytes.IndexByte(folder, 0)
+	return folder[:izero]
 }
-func nodeKeyNode(key []byte) []byte {
+func deviceKeyDevice(key []byte) []byte {
 	return key[1+64 : 1+64+32]
 }

@@ -121,23 +121,23 @@ func globalKeyName(key []byte) []byte {
 	return key[1+64:]
 }

-func globalKeyRepo(key []byte) []byte {
-	repo := key[1 : 1+64]
-	izero := bytes.IndexByte(repo, 0)
-	return repo[:izero]
+func globalKeyFolder(key []byte) []byte {
+	folder := key[1 : 1+64]
+	izero := bytes.IndexByte(folder, 0)
+	return folder[:izero]
 }

-type deletionHandler func(db dbReader, batch dbWriter, repo, node, name []byte, dbi iterator.Iterator) uint64
+type deletionHandler func(db dbReader, batch dbWriter, folder, device, name []byte, dbi iterator.Iterator) uint64

 type fileIterator func(f protocol.FileIntf) bool

-func ldbGenericReplace(db *leveldb.DB, repo, node []byte, fs []protocol.FileInfo, deleteFn deletionHandler) uint64 {
+func ldbGenericReplace(db *leveldb.DB, folder, device []byte, fs []protocol.FileInfo, deleteFn deletionHandler) uint64 {
 	runtime.GC()

 	sort.Sort(fileList(fs)) // sort list on name, same as on disk

-	start := nodeKey(repo, node, nil)                            // before all repo/node files
-	limit := nodeKey(repo, node, []byte{0xff, 0xff, 0xff, 0xff}) // after all repo/node files
+	start := deviceKey(folder, device, nil)                            // before all folder/device files
+	limit := deviceKey(folder, device, []byte{0xff, 0xff, 0xff, 0xff}) // after all folder/device files

 	batch := new(leveldb.Batch)
 	snap, err := db.GetSnapshot()
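The comment block and helpers above define the database key layout. A standalone sketch that mirrors deviceKey and shows how the segments line up (the folder ID is zero-padded to 64 bytes, the device ID occupies the next 32, and the remainder is the file name):

package main

import (
    "bytes"
    "fmt"
)

const keyTypeDevice = 0 // first value of the iota block above

// deviceKey mirrors the layout described above: one type byte, a zero-padded
// 64-byte folder ID, a 32-byte device ID, then the file name.
func deviceKey(folder, device, file []byte) []byte {
    k := make([]byte, 1+64+32+len(file))
    k[0] = keyTypeDevice
    copy(k[1:], folder)
    copy(k[1+64:], device)
    copy(k[1+64+32:], file)
    return k
}

func main() {
    k := deviceKey([]byte("default"), bytes.Repeat([]byte{0xAB}, 32), []byte("docs/readme.txt"))
    fmt.Println(len(k)) // 1 + 64 + 32 + 15 = 112
    fmt.Printf("folder: %q\n", bytes.TrimRight(k[1:1+64], "\x00"))
    fmt.Printf("name:   %q\n", k[1+64+32:])
}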
@@ -171,42 +171,42 @@ func ldbGenericReplace(db *leveldb.DB, repo, node []byte, fs []protocol.FileInfo
 		}

 		if moreDb {
-			oldName = nodeKeyName(dbi.Key())
+			oldName = deviceKeyName(dbi.Key())
 		}

 		cmp := bytes.Compare(newName, oldName)

 		if debug {
-			l.Debugf("generic replace; repo=%q node=%v moreFs=%v moreDb=%v cmp=%d newName=%q oldName=%q", repo, protocol.NodeIDFromBytes(node), moreFs, moreDb, cmp, newName, oldName)
+			l.Debugf("generic replace; folder=%q device=%v moreFs=%v moreDb=%v cmp=%d newName=%q oldName=%q", folder, protocol.DeviceIDFromBytes(device), moreFs, moreDb, cmp, newName, oldName)
 		}

 		switch {
 		case moreFs && (!moreDb || cmp == -1):
 			// Disk is missing this file. Insert it.
-			if lv := ldbInsert(batch, repo, node, newName, fs[fsi]); lv > maxLocalVer {
+			if lv := ldbInsert(batch, folder, device, newName, fs[fsi]); lv > maxLocalVer {
 				maxLocalVer = lv
 			}
 			if fs[fsi].IsInvalid() {
-				ldbRemoveFromGlobal(snap, batch, repo, node, newName)
+				ldbRemoveFromGlobal(snap, batch, folder, device, newName)
 			} else {
-				ldbUpdateGlobal(snap, batch, repo, node, newName, fs[fsi].Version)
+				ldbUpdateGlobal(snap, batch, folder, device, newName, fs[fsi].Version)
 			}
 			fsi++

 		case moreFs && moreDb && cmp == 0:
 			// File exists on both sides - compare versions. We might get an
-			// update with the same version and different flags if a node has
+			// update with the same version and different flags if a device has
 			// marked a file as invalid, so handle that too.
 			var ef protocol.FileInfoTruncated
 			ef.UnmarshalXDR(dbi.Value())
 			if fs[fsi].Version > ef.Version || fs[fsi].Version != ef.Version {
-				if lv := ldbInsert(batch, repo, node, newName, fs[fsi]); lv > maxLocalVer {
+				if lv := ldbInsert(batch, folder, device, newName, fs[fsi]); lv > maxLocalVer {
 					maxLocalVer = lv
 				}
 				if fs[fsi].IsInvalid() {
-					ldbRemoveFromGlobal(snap, batch, repo, node, newName)
+					ldbRemoveFromGlobal(snap, batch, folder, device, newName)
 				} else {
-					ldbUpdateGlobal(snap, batch, repo, node, newName, fs[fsi].Version)
+					ldbUpdateGlobal(snap, batch, folder, device, newName, fs[fsi].Version)
 				}
 			}
 			// Iterate both sides.
@@ -215,7 +215,7 @@ func ldbGenericReplace(db *leveldb.DB, repo, node []byte, fs []protocol.FileInfo

 		case moreDb && (!moreFs || cmp == 1):
 			if deleteFn != nil {
-				if lv := deleteFn(snap, batch, repo, node, oldName, dbi); lv > maxLocalVer {
+				if lv := deleteFn(snap, batch, folder, device, oldName, dbi); lv > maxLocalVer {
 					maxLocalVer = lv
 				}
 			}
@@ -231,21 +231,21 @@ func ldbGenericReplace(db *leveldb.DB, repo, node []byte, fs []protocol.FileInfo
 	return maxLocalVer
 }

-func ldbReplace(db *leveldb.DB, repo, node []byte, fs []protocol.FileInfo) uint64 {
+func ldbReplace(db *leveldb.DB, folder, device []byte, fs []protocol.FileInfo) uint64 {
 	// TODO: Return the remaining maxLocalVer?
-	return ldbGenericReplace(db, repo, node, fs, func(db dbReader, batch dbWriter, repo, node, name []byte, dbi iterator.Iterator) uint64 {
+	return ldbGenericReplace(db, folder, device, fs, func(db dbReader, batch dbWriter, folder, device, name []byte, dbi iterator.Iterator) uint64 {
 		// Disk has files that we are missing. Remove it.
 		if debug {
-			l.Debugf("delete; repo=%q node=%v name=%q", repo, protocol.NodeIDFromBytes(node), name)
+			l.Debugf("delete; folder=%q device=%v name=%q", folder, protocol.DeviceIDFromBytes(device), name)
 		}
-		ldbRemoveFromGlobal(db, batch, repo, node, name)
+		ldbRemoveFromGlobal(db, batch, folder, device, name)
 		batch.Delete(dbi.Key())
 		return 0
 	})
 }

-func ldbReplaceWithDelete(db *leveldb.DB, repo, node []byte, fs []protocol.FileInfo) uint64 {
-	return ldbGenericReplace(db, repo, node, fs, func(db dbReader, batch dbWriter, repo, node, name []byte, dbi iterator.Iterator) uint64 {
+func ldbReplaceWithDelete(db *leveldb.DB, folder, device []byte, fs []protocol.FileInfo) uint64 {
+	return ldbGenericReplace(db, folder, device, fs, func(db dbReader, batch dbWriter, folder, device, name []byte, dbi iterator.Iterator) uint64 {
 		var tf protocol.FileInfoTruncated
 		err := tf.UnmarshalXDR(dbi.Value())
 		if err != nil {
@@ -253,7 +253,7 @@ func ldbReplaceWithDelete(db *leveldb.DB, repo, node []byte, fs []protocol.FileI
 		}
 		if !tf.IsDeleted() {
 			if debug {
-				l.Debugf("mark deleted; repo=%q node=%v name=%q", repo, protocol.NodeIDFromBytes(node), name)
+				l.Debugf("mark deleted; folder=%q device=%v name=%q", folder, protocol.DeviceIDFromBytes(device), name)
 			}
 			ts := clock(tf.LocalVersion)
 			f := protocol.FileInfo{
@@ -264,14 +264,14 @@ func ldbReplaceWithDelete(db *leveldb.DB, repo, node []byte, fs []protocol.FileI
 				Modified: tf.Modified,
 			}
 			batch.Put(dbi.Key(), f.MarshalXDR())
-			ldbUpdateGlobal(db, batch, repo, node, nodeKeyName(dbi.Key()), f.Version)
+			ldbUpdateGlobal(db, batch, folder, device, deviceKeyName(dbi.Key()), f.Version)
 			return ts
 		}
 		return 0
 	})
 }

-func ldbUpdate(db *leveldb.DB, repo, node []byte, fs []protocol.FileInfo) uint64 {
+func ldbUpdate(db *leveldb.DB, folder, device []byte, fs []protocol.FileInfo) uint64 {
 	runtime.GC()

 	batch := new(leveldb.Batch)
@@ -284,16 +284,16 @@ func ldbUpdate(db *leveldb.DB, repo, node []byte, fs []protocol.FileInfo) uint64
 	var maxLocalVer uint64
 	for _, f := range fs {
 		name := []byte(f.Name)
-		fk := nodeKey(repo, node, name)
+		fk := deviceKey(folder, device, name)
 		bs, err := snap.Get(fk, nil)
 		if err == leveldb.ErrNotFound {
-			if lv := ldbInsert(batch, repo, node, name, f); lv > maxLocalVer {
+			if lv := ldbInsert(batch, folder, device, name, f); lv > maxLocalVer {
 				maxLocalVer = lv
 			}
 			if f.IsInvalid() {
-				ldbRemoveFromGlobal(snap, batch, repo, node, name)
+				ldbRemoveFromGlobal(snap, batch, folder, device, name)
 			} else {
-				ldbUpdateGlobal(snap, batch, repo, node, name, f.Version)
+				ldbUpdateGlobal(snap, batch, folder, device, name, f.Version)
 			}
 			continue
 		}
@@ -306,13 +306,13 @@ func ldbUpdate(db *leveldb.DB, repo, node []byte, fs []protocol.FileInfo) uint64
 		// Flags might change without the version being bumped when we set the
 		// invalid flag on an existing file.
 		if ef.Version != f.Version || ef.Flags != f.Flags {
-			if lv := ldbInsert(batch, repo, node, name, f); lv > maxLocalVer {
+			if lv := ldbInsert(batch, folder, device, name, f); lv > maxLocalVer {
 				maxLocalVer = lv
 			}
 			if f.IsInvalid() {
-				ldbRemoveFromGlobal(snap, batch, repo, node, name)
+				ldbRemoveFromGlobal(snap, batch, folder, device, name)
 			} else {
-				ldbUpdateGlobal(snap, batch, repo, node, name, f.Version)
+				ldbUpdateGlobal(snap, batch, folder, device, name, f.Version)
 			}
 		}
 	}
@@ -325,29 +325,29 @@ func ldbUpdate(db *leveldb.DB, repo, node []byte, fs []protocol.FileInfo) uint64
 	return maxLocalVer
 }

-func ldbInsert(batch dbWriter, repo, node, name []byte, file protocol.FileInfo) uint64 {
+func ldbInsert(batch dbWriter, folder, device, name []byte, file protocol.FileInfo) uint64 {
 	if debug {
-		l.Debugf("insert; repo=%q node=%v %v", repo, protocol.NodeIDFromBytes(node), file)
+		l.Debugf("insert; folder=%q device=%v %v", folder, protocol.DeviceIDFromBytes(device), file)
 	}

 	if file.LocalVersion == 0 {
 		file.LocalVersion = clock(0)
 	}

-	nk := nodeKey(repo, node, name)
+	nk := deviceKey(folder, device, name)
 	batch.Put(nk, file.MarshalXDR())

 	return file.LocalVersion
 }

-// ldbUpdateGlobal adds this node+version to the version list for the given
-// file. If the node is already present in the list, the version is updated.
+// ldbUpdateGlobal adds this device+version to the version list for the given
+// file. If the device is already present in the list, the version is updated.
 // If the file does not have an entry in the global list, it is created.
-func ldbUpdateGlobal(db dbReader, batch dbWriter, repo, node, file []byte, version uint64) bool {
+func ldbUpdateGlobal(db dbReader, batch dbWriter, folder, device, file []byte, version uint64) bool {
 	if debug {
-		l.Debugf("update global; repo=%q node=%v file=%q version=%d", repo, protocol.NodeIDFromBytes(node), file, version)
+		l.Debugf("update global; folder=%q device=%v file=%q version=%d", folder, protocol.DeviceIDFromBytes(device), file, version)
 	}
-	gk := globalKey(repo, file)
+	gk := globalKey(folder, file)
 	svl, err := db.Get(gk, nil)
 	if err != nil && err != leveldb.ErrNotFound {
 		panic(err)
@@ -355,7 +355,7 @@ func ldbUpdateGlobal(db dbReader, batch dbWriter, repo, node, file []byte, versi

 	var fl versionList
 	nv := fileVersion{
-		node: node,
+		device: device,
 		version: version,
 	}
 	if svl != nil {
@@ -365,7 +365,7 @@ func ldbUpdateGlobal(db dbReader, batch dbWriter, repo, node, file []byte, versi
 		}

 		for i := range fl.versions {
-			if bytes.Compare(fl.versions[i].node, node) == 0 {
+			if bytes.Compare(fl.versions[i].device, device) == 0 {
 				if fl.versions[i].version == version {
 					// No need to do anything
 					return false
@@ -394,15 +394,15 @@ done:
 	return true
 }

-// ldbRemoveFromGlobal removes the node from the global version list for the
+// ldbRemoveFromGlobal removes the device from the global version list for the
 // given file. If the version list is empty after this, the file entry is
 // removed entirely.
-func ldbRemoveFromGlobal(db dbReader, batch dbWriter, repo, node, file []byte) {
+func ldbRemoveFromGlobal(db dbReader, batch dbWriter, folder, device, file []byte) {
 	if debug {
-		l.Debugf("remove from global; repo=%q node=%v file=%q", repo, protocol.NodeIDFromBytes(node), file)
+		l.Debugf("remove from global; folder=%q device=%v file=%q", folder, protocol.DeviceIDFromBytes(device), file)
 	}

-	gk := globalKey(repo, file)
+	gk := globalKey(folder, file)
 	svl, err := db.Get(gk, nil)
 	if err != nil {
 		// We might be called to "remove" a global version that doesn't exist
@@ -417,7 +417,7 @@ func ldbRemoveFromGlobal(db dbReader, batch dbWriter, repo, node, file []byte) {
 	}

 	for i := range fl.versions {
-		if bytes.Compare(fl.versions[i].node, node) == 0 {
+		if bytes.Compare(fl.versions[i].device, device) == 0 {
 			fl.versions = append(fl.versions[:i], fl.versions[i+1:]...)
 			break
 		}
@@ -430,9 +430,9 @@ func ldbRemoveFromGlobal(db dbReader, batch dbWriter, repo, node, file []byte) {
 	}
 }

-func ldbWithHave(db *leveldb.DB, repo, node []byte, truncate bool, fn fileIterator) {
-	start := nodeKey(repo, node, nil)                            // before all repo/node files
-	limit := nodeKey(repo, node, []byte{0xff, 0xff, 0xff, 0xff}) // after all repo/node files
+func ldbWithHave(db *leveldb.DB, folder, device []byte, truncate bool, fn fileIterator) {
+	start := deviceKey(folder, device, nil)                            // before all folder/device files
+	limit := deviceKey(folder, device, []byte{0xff, 0xff, 0xff, 0xff}) // after all folder/device files
 	snap, err := db.GetSnapshot()
 	if err != nil {
 		panic(err)
@@ -452,11 +452,11 @@ func ldbWithHave(db *leveldb.DB, repo, node []byte, truncate bool, fn fileIterat
 	}
 }

-func ldbWithAllRepoTruncated(db *leveldb.DB, repo []byte, fn func(node []byte, f protocol.FileInfoTruncated) bool) {
+func ldbWithAllFolderTruncated(db *leveldb.DB, folder []byte, fn func(device []byte, f protocol.FileInfoTruncated) bool) {
 	runtime.GC()

-	start := nodeKey(repo, nil, nil)                                                // before all repo/node files
-	limit := nodeKey(repo, protocol.LocalNodeID[:], []byte{0xff, 0xff, 0xff, 0xff}) // after all repo/node files
+	start := deviceKey(folder, nil, nil)                                                  // before all folder/device files
+	limit := deviceKey(folder, protocol.LocalDeviceID[:], []byte{0xff, 0xff, 0xff, 0xff}) // after all folder/device files
 	snap, err := db.GetSnapshot()
 	if err != nil {
 		panic(err)
@@ -466,20 +466,20 @@ func ldbWithAllRepoTruncated(db *leveldb.DB, repo []byte, fn func(node []byte, f
 	defer dbi.Release()

 	for dbi.Next() {
-		node := nodeKeyNode(dbi.Key())
+		device := deviceKeyDevice(dbi.Key())
 		var f protocol.FileInfoTruncated
 		err := f.UnmarshalXDR(dbi.Value())
 		if err != nil {
 			panic(err)
 		}
-		if cont := fn(node, f); !cont {
+		if cont := fn(device, f); !cont {
 			return
 		}
 	}
 }

-func ldbGet(db *leveldb.DB, repo, node, file []byte) protocol.FileInfo {
-	nk := nodeKey(repo, node, file)
+func ldbGet(db *leveldb.DB, folder, device, file []byte) protocol.FileInfo {
+	nk := deviceKey(folder, device, file)
 	bs, err := db.Get(nk, nil)
 	if err == leveldb.ErrNotFound {
 		return protocol.FileInfo{}
@@ -496,8 +496,8 @@ func ldbGet(db *leveldb.DB, repo, node, file []byte) protocol.FileInfo {
 	return f
 }

-func ldbGetGlobal(db *leveldb.DB, repo, file []byte) protocol.FileInfo {
-	k := globalKey(repo, file)
+func ldbGetGlobal(db *leveldb.DB, folder, file []byte) protocol.FileInfo {
+	k := globalKey(folder, file)
 	snap, err := db.GetSnapshot()
 	if err != nil {
 		panic(err)
@@ -522,7 +522,7 @@ func ldbGetGlobal(db *leveldb.DB, repo, file []byte) protocol.FileInfo {
 		panic("no versions?")
 	}

-	k = nodeKey(repo, vl.versions[0].node, file)
+	k = deviceKey(folder, vl.versions[0].device, file)
 	bs, err = snap.Get(k, nil)
 	if err != nil {
 		panic(err)
@@ -536,11 +536,11 @@ func ldbGetGlobal(db *leveldb.DB, repo, file []byte) protocol.FileInfo {
 	return f
 }

-func ldbWithGlobal(db *leveldb.DB, repo []byte, truncate bool, fn fileIterator) {
+func ldbWithGlobal(db *leveldb.DB, folder []byte, truncate bool, fn fileIterator) {
 	runtime.GC()

-	start := globalKey(repo, nil)
-	limit := globalKey(repo, []byte{0xff, 0xff, 0xff, 0xff})
+	start := globalKey(folder, nil)
+	limit := globalKey(folder, []byte{0xff, 0xff, 0xff, 0xff})
 	snap, err := db.GetSnapshot()
 	if err != nil {
 		panic(err)
@@ -559,7 +559,7 @@ func ldbWithGlobal(db *leveldb.DB, repo []byte, truncate bool, fn fileIterator)
 			l.Debugln(dbi.Key())
 			panic("no versions?")
 		}
-		fk := nodeKey(repo, vl.versions[0].node, globalKeyName(dbi.Key()))
+		fk := deviceKey(folder, vl.versions[0].device, globalKeyName(dbi.Key()))
 		bs, err := snap.Get(fk, nil)
 		if err != nil {
 			panic(err)
@@ -576,8 +576,8 @@ func ldbWithGlobal(db *leveldb.DB, repo []byte, truncate bool, fn fileIterator)
 	}
 }

-func ldbAvailability(db *leveldb.DB, repo, file []byte) []protocol.NodeID {
-	k := globalKey(repo, file)
+func ldbAvailability(db *leveldb.DB, folder, file []byte) []protocol.DeviceID {
+	k := globalKey(folder, file)
 	bs, err := db.Get(k, nil)
 	if err == leveldb.ErrNotFound {
 		return nil
@@ -592,23 +592,23 @@ func ldbAvailability(db *leveldb.DB, repo, file []byte) []protocol.NodeID {
 		panic(err)
 	}

-	var nodes []protocol.NodeID
+	var devices []protocol.DeviceID
 	for _, v := range vl.versions {
 		if v.version != vl.versions[0].version {
 			break
 		}
-		n := protocol.NodeIDFromBytes(v.node)
-		nodes = append(nodes, n)
+		n := protocol.DeviceIDFromBytes(v.device)
+		devices = append(devices, n)
 	}

-	return nodes
+	return devices
 }

-func ldbWithNeed(db *leveldb.DB, repo, node []byte, truncate bool, fn fileIterator) {
+func ldbWithNeed(db *leveldb.DB, folder, device []byte, truncate bool, fn fileIterator) {
 	runtime.GC()

-	start := globalKey(repo, nil)
-	limit := globalKey(repo, []byte{0xff, 0xff, 0xff, 0xff})
+	start := globalKey(folder, nil)
+	limit := globalKey(folder, []byte{0xff, 0xff, 0xff, 0xff})
 	snap, err := db.GetSnapshot()
 	if err != nil {
 		panic(err)
@@ -633,7 +633,7 @@ outer:
 		need := false // If we have a lower version of the file
 		var haveVersion uint64
 		for _, v := range vl.versions {
-			if bytes.Compare(v.node, node) == 0 {
+			if bytes.Compare(v.device, device) == 0 {
 				have = true
 				haveVersion = v.version
 				need = v.version < vl.versions[0].version
@@ -650,7 +650,7 @@ outer:
 					// We haven't found a valid copy of the file with the needed version.
 					continue outer
 				}
-				fk := nodeKey(repo, vl.versions[i].node, name)
+				fk := deviceKey(folder, vl.versions[i].device, name)
 				bs, err := snap.Get(fk, nil)
 				if err != nil {
 					panic(err)
@@ -672,7 +672,7 @@ outer:
 			}

 			if debug {
-				l.Debugf("need repo=%q node=%v name=%q need=%v have=%v haveV=%d globalV=%d", repo, protocol.NodeIDFromBytes(node), name, need, have, haveVersion, vl.versions[0].version)
+				l.Debugf("need folder=%q device=%v name=%q need=%v have=%v haveV=%d globalV=%d", folder, protocol.DeviceIDFromBytes(device), name, need, have, haveVersion, vl.versions[0].version)
 			}

 			if cont := fn(gf); !cont {
@@ -686,7 +686,7 @@ outer:
 	}
 }

-func ldbListRepos(db *leveldb.DB) []string {
+func ldbListFolders(db *leveldb.DB) []string {
 	runtime.GC()

 	start := []byte{keyTypeGlobal}
@@ -699,24 +699,24 @@ func ldbListRepos(db *leveldb.DB) []string {
 	dbi := snap.NewIterator(&util.Range{Start: start, Limit: limit}, nil)
 	defer dbi.Release()

-	repoExists := make(map[string]bool)
+	folderExists := make(map[string]bool)
 	for dbi.Next() {
-		repo := string(globalKeyRepo(dbi.Key()))
-		if !repoExists[repo] {
-			repoExists[repo] = true
+		folder := string(globalKeyFolder(dbi.Key()))
+		if !folderExists[folder] {
+			folderExists[folder] = true
 		}
 	}

-	repos := make([]string, 0, len(repoExists))
-	for k := range repoExists {
-		repos = append(repos, k)
+	folders := make([]string, 0, len(folderExists))
+	for k := range folderExists {
+		folders = append(folders, k)
 	}

-	sort.Strings(repos)
-	return repos
+	sort.Strings(folders)
+	return folders
 }

-func ldbDropRepo(db *leveldb.DB, repo []byte) {
+func ldbDropFolder(db *leveldb.DB, folder []byte) {
 	runtime.GC()

 	snap, err := db.GetSnapshot()
@@ -725,25 +725,25 @@ func ldbDropRepo(db *leveldb.DB, repo []byte) {
 	}
 	defer snap.Release()

-	// Remove all items related to the given repo from the node->file bucket
-	start := []byte{keyTypeNode}
-	limit := []byte{keyTypeNode + 1}
+	// Remove all items related to the given folder from the device->file bucket
+	start := []byte{keyTypeDevice}
+	limit := []byte{keyTypeDevice + 1}
 	dbi := snap.NewIterator(&util.Range{Start: start, Limit: limit}, nil)
 	for dbi.Next() {
-		itemRepo := nodeKeyRepo(dbi.Key())
-		if bytes.Compare(repo, itemRepo) == 0 {
+		itemFolder := deviceKeyFolder(dbi.Key())
+		if bytes.Compare(folder, itemFolder) == 0 {
 			db.Delete(dbi.Key(), nil)
 		}
 	}
 	dbi.Release()

-	// Remove all items related to the given repo from the global bucket
+	// Remove all items related to the given folder from the global bucket
 	start = []byte{keyTypeGlobal}
 	limit = []byte{keyTypeGlobal + 1}
 	dbi = snap.NewIterator(&util.Range{Start: start, Limit: limit}, nil)
 	for dbi.Next() {
-		itemRepo := globalKeyRepo(dbi.Key())
-		if bytes.Compare(repo, itemRepo) == 0 {
+		itemFolder := globalKeyFolder(dbi.Key())
+		if bytes.Compare(folder, itemFolder) == 0 {
 			db.Delete(dbi.Key(), nil)
 		}
 	}
@@ -22,17 +22,17 @@ fileVersion Structure:
 +                       version (64 bits)                       +
 |                                                               |
 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-|                         Length of node                        |
+|                        Length of device                       |
 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 /                                                               /
-\                     node (variable length)                    \
+\                    device (variable length)                   \
 /                                                               /
 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+


 struct fileVersion {
 	unsigned hyper version;
-	opaque node<>;
+	opaque device<>;
 }

 */
@@ -55,7 +55,7 @@ func (o fileVersion) AppendXDR(bs []byte) []byte {

 func (o fileVersion) encodeXDR(xw *xdr.Writer) (int, error) {
 	xw.WriteUint64(o.version)
-	xw.WriteBytes(o.node)
+	xw.WriteBytes(o.device)
 	return xw.Tot(), xw.Error()
 }

@@ -72,7 +72,7 @@ func (o *fileVersion) UnmarshalXDR(bs []byte) error {

 func (o *fileVersion) decodeXDR(xr *xdr.Reader) error {
 	o.version = xr.ReadUint64()
-	o.node = xr.ReadBytes()
+	o.device = xr.ReadBytes()
 	return xr.Error()
 }

@@ -27,147 +27,147 @@ type fileRecord struct {
 type bitset uint64

 type Set struct {
-	localVersion map[protocol.NodeID]uint64
+	localVersion map[protocol.DeviceID]uint64
 	mutex sync.Mutex
-	repo string
+	folder string
 	db *leveldb.DB
 }

-func NewSet(repo string, db *leveldb.DB) *Set {
+func NewSet(folder string, db *leveldb.DB) *Set {
 	var s = Set{
-		localVersion: make(map[protocol.NodeID]uint64),
-		repo: repo,
+		localVersion: make(map[protocol.DeviceID]uint64),
+		folder: folder,
 		db: db,
 	}

-	var nodeID protocol.NodeID
-	ldbWithAllRepoTruncated(db, []byte(repo), func(node []byte, f protocol.FileInfoTruncated) bool {
-		copy(nodeID[:], node)
-		if f.LocalVersion > s.localVersion[nodeID] {
-			s.localVersion[nodeID] = f.LocalVersion
+	var deviceID protocol.DeviceID
+	ldbWithAllFolderTruncated(db, []byte(folder), func(device []byte, f protocol.FileInfoTruncated) bool {
+		copy(deviceID[:], device)
+		if f.LocalVersion > s.localVersion[deviceID] {
+			s.localVersion[deviceID] = f.LocalVersion
 		}
 		lamport.Default.Tick(f.Version)
 		return true
 	})
 	if debug {
-		l.Debugf("loaded localVersion for %q: %#v", repo, s.localVersion)
+		l.Debugf("loaded localVersion for %q: %#v", folder, s.localVersion)
 	}
-	clock(s.localVersion[protocol.LocalNodeID])
+	clock(s.localVersion[protocol.LocalDeviceID])

 	return &s
 }

-func (s *Set) Replace(node protocol.NodeID, fs []protocol.FileInfo) {
+func (s *Set) Replace(device protocol.DeviceID, fs []protocol.FileInfo) {
 	if debug {
-		l.Debugf("%s Replace(%v, [%d])", s.repo, node, len(fs))
+		l.Debugf("%s Replace(%v, [%d])", s.folder, device, len(fs))
 	}
 	normalizeFilenames(fs)
 	s.mutex.Lock()
 	defer s.mutex.Unlock()
-	s.localVersion[node] = ldbReplace(s.db, []byte(s.repo), node[:], fs)
+	s.localVersion[device] = ldbReplace(s.db, []byte(s.folder), device[:], fs)
 	if len(fs) == 0 {
 		// Reset the local version if all files were removed.
-		s.localVersion[node] = 0
+		s.localVersion[device] = 0
 	}
 }

-func (s *Set) ReplaceWithDelete(node protocol.NodeID, fs []protocol.FileInfo) {
+func (s *Set) ReplaceWithDelete(device protocol.DeviceID, fs []protocol.FileInfo) {
 	if debug {
-		l.Debugf("%s ReplaceWithDelete(%v, [%d])", s.repo, node, len(fs))
+		l.Debugf("%s ReplaceWithDelete(%v, [%d])", s.folder, device, len(fs))
 	}
 	normalizeFilenames(fs)
 	s.mutex.Lock()
 	defer s.mutex.Unlock()
-	if lv := ldbReplaceWithDelete(s.db, []byte(s.repo), node[:], fs); lv > s.localVersion[node] {
-		s.localVersion[node] = lv
+	if lv := ldbReplaceWithDelete(s.db, []byte(s.folder), device[:], fs); lv > s.localVersion[device] {
+		s.localVersion[device] = lv
 	}
 }

-func (s *Set) Update(node protocol.NodeID, fs []protocol.FileInfo) {
+func (s *Set) Update(device protocol.DeviceID, fs []protocol.FileInfo) {
 	if debug {
-		l.Debugf("%s Update(%v, [%d])", s.repo, node, len(fs))
+		l.Debugf("%s Update(%v, [%d])", s.folder, device, len(fs))
 	}
 	normalizeFilenames(fs)
 	s.mutex.Lock()
 	defer s.mutex.Unlock()
-	if lv := ldbUpdate(s.db, []byte(s.repo), node[:], fs); lv > s.localVersion[node] {
-		s.localVersion[node] = lv
+	if lv := ldbUpdate(s.db, []byte(s.folder), device[:], fs); lv > s.localVersion[device] {
+		s.localVersion[device] = lv
 	}
 }

-func (s *Set) WithNeed(node protocol.NodeID, fn fileIterator) {
+func (s *Set) WithNeed(device protocol.DeviceID, fn fileIterator) {
 	if debug {
-		l.Debugf("%s WithNeed(%v)", s.repo, node)
+		l.Debugf("%s WithNeed(%v)", s.folder, device)
 	}
-	ldbWithNeed(s.db, []byte(s.repo), node[:], false, nativeFileIterator(fn))
+	ldbWithNeed(s.db, []byte(s.folder), device[:], false, nativeFileIterator(fn))
 }

-func (s *Set) WithNeedTruncated(node protocol.NodeID, fn fileIterator) {
+func (s *Set) WithNeedTruncated(device protocol.DeviceID, fn fileIterator) {
 	if debug {
-		l.Debugf("%s WithNeedTruncated(%v)", s.repo, node)
+		l.Debugf("%s WithNeedTruncated(%v)", s.folder, device)
 	}
-	ldbWithNeed(s.db, []byte(s.repo), node[:], true, nativeFileIterator(fn))
+	ldbWithNeed(s.db, []byte(s.folder), device[:], true, nativeFileIterator(fn))
 }

-func (s *Set) WithHave(node protocol.NodeID, fn fileIterator) {
+func (s *Set) WithHave(device protocol.DeviceID, fn fileIterator) {
 	if debug {
-		l.Debugf("%s WithHave(%v)", s.repo, node)
+		l.Debugf("%s WithHave(%v)", s.folder, device)
 	}
-	ldbWithHave(s.db, []byte(s.repo), node[:], false, nativeFileIterator(fn))
+	ldbWithHave(s.db, []byte(s.folder), device[:], false, nativeFileIterator(fn))
 }

-func (s *Set) WithHaveTruncated(node protocol.NodeID, fn fileIterator) {
+func (s *Set) WithHaveTruncated(device protocol.DeviceID, fn fileIterator) {
 	if debug {
-		l.Debugf("%s WithHaveTruncated(%v)", s.repo, node)
+		l.Debugf("%s WithHaveTruncated(%v)", s.folder, device)
 	}
-	ldbWithHave(s.db, []byte(s.repo), node[:], true, nativeFileIterator(fn))
+	ldbWithHave(s.db, []byte(s.folder), device[:], true, nativeFileIterator(fn))
 }

 func (s *Set) WithGlobal(fn fileIterator) {
 	if debug {
-		l.Debugf("%s WithGlobal()", s.repo)
+		l.Debugf("%s WithGlobal()", s.folder)
 	}
-	ldbWithGlobal(s.db, []byte(s.repo), false, nativeFileIterator(fn))
+	ldbWithGlobal(s.db, []byte(s.folder), false, nativeFileIterator(fn))
 }

 func (s *Set) WithGlobalTruncated(fn fileIterator) {
 	if debug {
-		l.Debugf("%s WithGlobalTruncated()", s.repo)
+		l.Debugf("%s WithGlobalTruncated()", s.folder)
 	}
-	ldbWithGlobal(s.db, []byte(s.repo), true, nativeFileIterator(fn))
+	ldbWithGlobal(s.db, []byte(s.folder), true, nativeFileIterator(fn))
 }

-func (s *Set) Get(node protocol.NodeID, file string) protocol.FileInfo {
-	f := ldbGet(s.db, []byte(s.repo), node[:], []byte(normalizedFilename(file)))
+func (s *Set) Get(device protocol.DeviceID, file string) protocol.FileInfo {
+	f := ldbGet(s.db, []byte(s.folder), device[:], []byte(normalizedFilename(file)))
 	f.Name = nativeFilename(f.Name)
 	return f
 }

 func (s *Set) GetGlobal(file string) protocol.FileInfo {
-	f := ldbGetGlobal(s.db, []byte(s.repo), []byte(normalizedFilename(file)))
+	f := ldbGetGlobal(s.db, []byte(s.folder), []byte(normalizedFilename(file)))
 	f.Name = nativeFilename(f.Name)
 	return f
 }

-func (s *Set) Availability(file string) []protocol.NodeID {
-	return ldbAvailability(s.db, []byte(s.repo), []byte(normalizedFilename(file)))
+func (s *Set) Availability(file string) []protocol.DeviceID {
+	return ldbAvailability(s.db, []byte(s.folder), []byte(normalizedFilename(file)))
 }

-func (s *Set) LocalVersion(node protocol.NodeID) uint64 {
+func (s *Set) LocalVersion(device protocol.DeviceID) uint64 {
 	s.mutex.Lock()
 	defer s.mutex.Unlock()
-	return s.localVersion[node]
+	return s.localVersion[device]
 }

-// ListRepos returns the repository IDs seen in the database.
-func ListRepos(db *leveldb.DB) []string {
-	return ldbListRepos(db)
+// ListFolders returns the folder IDs seen in the database.
+func ListFolders(db *leveldb.DB) []string {
+	return ldbListFolders(db)
 }

-// DropRepo clears out all information related to the given repo from the
-// database.
-func DropRepo(db *leveldb.DB, repo string) {
-	ldbDropRepo(db, []byte(repo))
+// DropFolder clears out all information related to the given folder from the
+// database.
+func DropFolder(db *leveldb.DB, folder string) {
+	ldbDropFolder(db, []byte(folder))
 }

 func normalizeFilenames(fs []protocol.FileInfo) {
|
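For orientation, the renamed Set API above is what callers see after this commit. The following is an illustrative sketch only, not part of the diff: the import path for the files package is assumed, the device ID is the example ID used in the tests further down, and the file data is made up.

package main

import (
	"fmt"
	"log"

	"github.com/syncthing/syncthing/internal/files"    // assumed import path
	"github.com/syncthing/syncthing/internal/protocol" // assumed import path
	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/storage"
)

func main() {
	// An in-memory database keeps the sketch self-contained.
	db, err := leveldb.Open(storage.NewMemStorage(), nil)
	if err != nil {
		log.Fatal(err)
	}

	// One Set per folder, keyed by the folder ID.
	fs := files.NewSet("default", db)

	// Record the local view of the folder for this device.
	fs.ReplaceWithDelete(protocol.LocalDeviceID, []protocol.FileInfo{
		{Name: "a", Version: 1000},
	})

	// Ask which files a given device still needs, via the truncated iterator.
	device, _ := protocol.DeviceIDFromString("AIR6LPZ-7K4PTTV-UXQSMUU-CPQ5YWH-OEDFIIQ-JUG777G-2YQXXR5-YD6AWQR")
	fs.WithNeedTruncated(device, func(fi protocol.FileIntf) bool {
		fmt.Println(fi.(protocol.FileInfoTruncated))
		return true // keep iterating
	})

	// Which devices currently announce a valid copy of "a"?
	fmt.Println(fs.Availability("a"))
}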
@@ -18,11 +18,11 @@ import (
 	"github.com/syndtr/goleveldb/leveldb/storage"
 )
 
-var remoteNode0, remoteNode1 protocol.NodeID
+var remoteDevice0, remoteDevice1 protocol.DeviceID
 
 func init() {
-	remoteNode0, _ = protocol.NodeIDFromString("AIR6LPZ-7K4PTTV-UXQSMUU-CPQ5YWH-OEDFIIQ-JUG777G-2YQXXR5-YD6AWQR")
+	remoteDevice0, _ = protocol.DeviceIDFromString("AIR6LPZ-7K4PTTV-UXQSMUU-CPQ5YWH-OEDFIIQ-JUG777G-2YQXXR5-YD6AWQR")
-	remoteNode1, _ = protocol.NodeIDFromString("I6KAH76-66SLLLB-5PFXSOA-UFJCDZC-YAOMLEK-CP2GB32-BV5RQST-3PSROAU")
+	remoteDevice1, _ = protocol.DeviceIDFromString("I6KAH76-66SLLLB-5PFXSOA-UFJCDZC-YAOMLEK-CP2GB32-BV5RQST-3PSROAU")
 }
 
 func genBlocks(n int) []protocol.BlockInfo {
@@ -48,7 +48,7 @@ func globalList(s *files.Set) []protocol.FileInfo {
 	return fs
 }
 
-func haveList(s *files.Set, n protocol.NodeID) []protocol.FileInfo {
+func haveList(s *files.Set, n protocol.DeviceID) []protocol.FileInfo {
 	var fs []protocol.FileInfo
 	s.WithHave(n, func(fi protocol.FileIntf) bool {
 		f := fi.(protocol.FileInfo)
@@ -58,7 +58,7 @@ func haveList(s *files.Set, n protocol.NodeID) []protocol.FileInfo {
 	return fs
 }
 
-func needList(s *files.Set, n protocol.NodeID) []protocol.FileInfo {
+func needList(s *files.Set, n protocol.DeviceID) []protocol.FileInfo {
 	var fs []protocol.FileInfo
 	s.WithNeed(n, func(fi protocol.FileIntf) bool {
 		f := fi.(protocol.FileInfo)
@@ -158,10 +158,10 @@ func TestGlobalSet(t *testing.T) {
 		local0[3],
 	}
 
-	m.ReplaceWithDelete(protocol.LocalNodeID, local0)
+	m.ReplaceWithDelete(protocol.LocalDeviceID, local0)
-	m.ReplaceWithDelete(protocol.LocalNodeID, local1)
+	m.ReplaceWithDelete(protocol.LocalDeviceID, local1)
-	m.Replace(remoteNode0, remote0)
+	m.Replace(remoteDevice0, remote0)
-	m.Update(remoteNode0, remote1)
+	m.Update(remoteDevice0, remote1)
 
 	g := fileList(globalList(m))
 	sort.Sort(g)
@@ -170,40 +170,40 @@ func TestGlobalSet(t *testing.T) {
 		t.Errorf("Global incorrect;\n A: %v !=\n E: %v", g, expectedGlobal)
 	}
 
-	h := fileList(haveList(m, protocol.LocalNodeID))
+	h := fileList(haveList(m, protocol.LocalDeviceID))
 	sort.Sort(h)
 
 	if fmt.Sprint(h) != fmt.Sprint(localTot) {
 		t.Errorf("Have incorrect;\n A: %v !=\n E: %v", h, localTot)
 	}
 
-	h = fileList(haveList(m, remoteNode0))
+	h = fileList(haveList(m, remoteDevice0))
 	sort.Sort(h)
 
 	if fmt.Sprint(h) != fmt.Sprint(remoteTot) {
 		t.Errorf("Have incorrect;\n A: %v !=\n E: %v", h, remoteTot)
 	}
 
-	n := fileList(needList(m, protocol.LocalNodeID))
+	n := fileList(needList(m, protocol.LocalDeviceID))
 	sort.Sort(n)
 
 	if fmt.Sprint(n) != fmt.Sprint(expectedLocalNeed) {
 		t.Errorf("Need incorrect;\n A: %v !=\n E: %v", n, expectedLocalNeed)
 	}
 
-	n = fileList(needList(m, remoteNode0))
+	n = fileList(needList(m, remoteDevice0))
 	sort.Sort(n)
 
 	if fmt.Sprint(n) != fmt.Sprint(expectedRemoteNeed) {
 		t.Errorf("Need incorrect;\n A: %v !=\n E: %v", n, expectedRemoteNeed)
 	}
 
-	f := m.Get(protocol.LocalNodeID, "b")
+	f := m.Get(protocol.LocalDeviceID, "b")
 	if fmt.Sprint(f) != fmt.Sprint(localTot[1]) {
 		t.Errorf("Get incorrect;\n A: %v !=\n E: %v", f, localTot[1])
 	}
 
-	f = m.Get(remoteNode0, "b")
+	f = m.Get(remoteDevice0, "b")
 	if fmt.Sprint(f) != fmt.Sprint(remote1[0]) {
 		t.Errorf("Get incorrect;\n A: %v !=\n E: %v", f, remote1[0])
 	}
@@ -213,7 +213,7 @@ func TestGlobalSet(t *testing.T) {
 		t.Errorf("GetGlobal incorrect;\n A: %v !=\n E: %v", f, remote1[0])
 	}
 
-	f = m.Get(protocol.LocalNodeID, "zz")
+	f = m.Get(protocol.LocalDeviceID, "zz")
 	if f.Name != "" {
 		t.Errorf("Get incorrect;\n A: %v !=\n E: %v", f, protocol.FileInfo{})
 	}
@@ -223,18 +223,18 @@ func TestGlobalSet(t *testing.T) {
 		t.Errorf("GetGlobal incorrect;\n A: %v !=\n E: %v", f, protocol.FileInfo{})
 	}
 
-	av := []protocol.NodeID{protocol.LocalNodeID, remoteNode0}
+	av := []protocol.DeviceID{protocol.LocalDeviceID, remoteDevice0}
 	a := m.Availability("a")
 	if !(len(a) == 2 && (a[0] == av[0] && a[1] == av[1] || a[0] == av[1] && a[1] == av[0])) {
 		t.Errorf("Availability incorrect;\n A: %v !=\n E: %v", a, av)
 	}
 	a = m.Availability("b")
-	if len(a) != 1 || a[0] != remoteNode0 {
+	if len(a) != 1 || a[0] != remoteDevice0 {
-		t.Errorf("Availability incorrect;\n A: %v !=\n E: %v", a, remoteNode0)
+		t.Errorf("Availability incorrect;\n A: %v !=\n E: %v", a, remoteDevice0)
 	}
 	a = m.Availability("d")
-	if len(a) != 1 || a[0] != protocol.LocalNodeID {
+	if len(a) != 1 || a[0] != protocol.LocalDeviceID {
-		t.Errorf("Availability incorrect;\n A: %v !=\n E: %v", a, protocol.LocalNodeID)
+		t.Errorf("Availability incorrect;\n A: %v !=\n E: %v", a, protocol.LocalDeviceID)
 	}
 }
 
@@ -268,11 +268,11 @@ func TestNeedWithInvalid(t *testing.T) {
 		protocol.FileInfo{Name: "d", Version: 1003, Blocks: genBlocks(7)},
 	}
 
-	s.ReplaceWithDelete(protocol.LocalNodeID, localHave)
+	s.ReplaceWithDelete(protocol.LocalDeviceID, localHave)
-	s.Replace(remoteNode0, remote0Have)
+	s.Replace(remoteDevice0, remote0Have)
-	s.Replace(remoteNode1, remote1Have)
+	s.Replace(remoteDevice1, remote1Have)
 
-	need := fileList(needList(s, protocol.LocalNodeID))
+	need := fileList(needList(s, protocol.LocalDeviceID))
 	sort.Sort(need)
 
 	if fmt.Sprint(need) != fmt.Sprint(expectedNeed) {
@@ -297,9 +297,9 @@ func TestUpdateToInvalid(t *testing.T) {
 		protocol.FileInfo{Name: "d", Version: 1003, Blocks: genBlocks(7)},
 	}
 
-	s.ReplaceWithDelete(protocol.LocalNodeID, localHave)
+	s.ReplaceWithDelete(protocol.LocalDeviceID, localHave)
 
-	have := fileList(haveList(s, protocol.LocalNodeID))
+	have := fileList(haveList(s, protocol.LocalDeviceID))
 	sort.Sort(have)
 
 	if fmt.Sprint(have) != fmt.Sprint(localHave) {
@@ -307,9 +307,9 @@ func TestUpdateToInvalid(t *testing.T) {
 	}
 
 	localHave[1] = protocol.FileInfo{Name: "b", Version: 1001, Flags: protocol.FlagInvalid}
-	s.Update(protocol.LocalNodeID, localHave[1:2])
+	s.Update(protocol.LocalDeviceID, localHave[1:2])
 
-	have = fileList(haveList(s, protocol.LocalNodeID))
+	have = fileList(haveList(s, protocol.LocalDeviceID))
 	sort.Sort(have)
 
 	if fmt.Sprint(have) != fmt.Sprint(localHave) {
@@ -340,18 +340,18 @@ func TestInvalidAvailability(t *testing.T) {
 		protocol.FileInfo{Name: "none", Version: 1004, Blocks: genBlocks(5), Flags: protocol.FlagInvalid},
 	}
 
-	s.Replace(remoteNode0, remote0Have)
+	s.Replace(remoteDevice0, remote0Have)
-	s.Replace(remoteNode1, remote1Have)
+	s.Replace(remoteDevice1, remote1Have)
 
 	if av := s.Availability("both"); len(av) != 2 {
 		t.Error("Incorrect availability for 'both':", av)
 	}
 
-	if av := s.Availability("r0only"); len(av) != 1 || av[0] != remoteNode0 {
+	if av := s.Availability("r0only"); len(av) != 1 || av[0] != remoteDevice0 {
 		t.Error("Incorrect availability for 'r0only':", av)
 	}
 
-	if av := s.Availability("r1only"); len(av) != 1 || av[0] != remoteNode1 {
+	if av := s.Availability("r1only"); len(av) != 1 || av[0] != remoteDevice1 {
 		t.Error("Incorrect availability for 'r1only':", av)
 	}
 
@@ -376,22 +376,22 @@ func TestLocalDeleted(t *testing.T) {
 		protocol.FileInfo{Name: "z", Version: 1000, Flags: protocol.FlagDirectory},
 	}
 
-	m.ReplaceWithDelete(protocol.LocalNodeID, local1)
+	m.ReplaceWithDelete(protocol.LocalDeviceID, local1)
 
-	m.ReplaceWithDelete(protocol.LocalNodeID, []protocol.FileInfo{
+	m.ReplaceWithDelete(protocol.LocalDeviceID, []protocol.FileInfo{
 		local1[0],
 		// [1] removed
 		local1[2],
 		local1[3],
 		local1[4],
 	})
-	m.ReplaceWithDelete(protocol.LocalNodeID, []protocol.FileInfo{
+	m.ReplaceWithDelete(protocol.LocalDeviceID, []protocol.FileInfo{
 		local1[0],
 		local1[2],
 		// [3] removed
 		local1[4],
 	})
-	m.ReplaceWithDelete(protocol.LocalNodeID, []protocol.FileInfo{
+	m.ReplaceWithDelete(protocol.LocalDeviceID, []protocol.FileInfo{
 		local1[0],
 		local1[2],
 		// [4] removed
@@ -413,7 +413,7 @@ func TestLocalDeleted(t *testing.T) {
 		t.Errorf("Global incorrect;\n A: %v !=\n E: %v", g, expectedGlobal1)
 	}
 
-	m.ReplaceWithDelete(protocol.LocalNodeID, []protocol.FileInfo{
+	m.ReplaceWithDelete(protocol.LocalDeviceID, []protocol.FileInfo{
 		local1[0],
 		// [2] removed
 	})
@@ -449,7 +449,7 @@ func Benchmark10kReplace(b *testing.B) {
 	b.ResetTimer()
 	for i := 0; i < b.N; i++ {
 		m := files.NewSet("test", db)
-		m.ReplaceWithDelete(protocol.LocalNodeID, local)
+		m.ReplaceWithDelete(protocol.LocalDeviceID, local)
 	}
 }
 
@@ -465,14 +465,14 @@ func Benchmark10kUpdateChg(b *testing.B) {
 	}
 
 	m := files.NewSet("test", db)
-	m.Replace(remoteNode0, remote)
+	m.Replace(remoteDevice0, remote)
 
 	var local []protocol.FileInfo
 	for i := 0; i < 10000; i++ {
 		local = append(local, protocol.FileInfo{Name: fmt.Sprintf("file%d", i), Version: 1000})
 	}
 
-	m.ReplaceWithDelete(protocol.LocalNodeID, local)
+	m.ReplaceWithDelete(protocol.LocalDeviceID, local)
 
 	b.ResetTimer()
 	for i := 0; i < b.N; i++ {
@@ -481,7 +481,7 @@ func Benchmark10kUpdateChg(b *testing.B) {
 			local[j].Version++
 		}
 		b.StartTimer()
-		m.Update(protocol.LocalNodeID, local)
+		m.Update(protocol.LocalDeviceID, local)
 	}
 }
 
@@ -496,18 +496,18 @@ func Benchmark10kUpdateSme(b *testing.B) {
 		b.Fatal(err)
 	}
 	m := files.NewSet("test", db)
-	m.Replace(remoteNode0, remote)
+	m.Replace(remoteDevice0, remote)
 
 	var local []protocol.FileInfo
 	for i := 0; i < 10000; i++ {
 		local = append(local, protocol.FileInfo{Name: fmt.Sprintf("file%d", i), Version: 1000})
 	}
 
-	m.ReplaceWithDelete(protocol.LocalNodeID, local)
+	m.ReplaceWithDelete(protocol.LocalDeviceID, local)
 
 	b.ResetTimer()
 	for i := 0; i < b.N; i++ {
-		m.Update(protocol.LocalNodeID, local)
+		m.Update(protocol.LocalDeviceID, local)
 	}
 }
 
@@ -523,7 +523,7 @@ func Benchmark10kNeed2k(b *testing.B) {
 	}
 
 	m := files.NewSet("test", db)
-	m.Replace(remoteNode0, remote)
+	m.Replace(remoteDevice0, remote)
 
 	var local []protocol.FileInfo
 	for i := 0; i < 8000; i++ {
@@ -533,11 +533,11 @@ func Benchmark10kNeed2k(b *testing.B) {
 		local = append(local, protocol.FileInfo{Name: fmt.Sprintf("file%d", i), Version: 980})
 	}
 
-	m.ReplaceWithDelete(protocol.LocalNodeID, local)
+	m.ReplaceWithDelete(protocol.LocalDeviceID, local)
 
 	b.ResetTimer()
 	for i := 0; i < b.N; i++ {
-		fs := needList(m, protocol.LocalNodeID)
+		fs := needList(m, protocol.LocalDeviceID)
 		if l := len(fs); l != 2000 {
 			b.Errorf("wrong length %d != 2k", l)
 		}
@@ -556,7 +556,7 @@ func Benchmark10kHaveFullList(b *testing.B) {
 	}
 
 	m := files.NewSet("test", db)
-	m.Replace(remoteNode0, remote)
+	m.Replace(remoteDevice0, remote)
 
 	var local []protocol.FileInfo
 	for i := 0; i < 2000; i++ {
@@ -566,11 +566,11 @@ func Benchmark10kHaveFullList(b *testing.B) {
 		local = append(local, protocol.FileInfo{Name: fmt.Sprintf("file%d", i), Version: 980})
 	}
 
-	m.ReplaceWithDelete(protocol.LocalNodeID, local)
+	m.ReplaceWithDelete(protocol.LocalDeviceID, local)
 
 	b.ResetTimer()
 	for i := 0; i < b.N; i++ {
-		fs := haveList(m, protocol.LocalNodeID)
+		fs := haveList(m, protocol.LocalDeviceID)
 		if l := len(fs); l != 10000 {
 			b.Errorf("wrong length %d != 10k", l)
 		}
@@ -589,7 +589,7 @@ func Benchmark10kGlobal(b *testing.B) {
 	}
 
 	m := files.NewSet("test", db)
-	m.Replace(remoteNode0, remote)
+	m.Replace(remoteDevice0, remote)
 
 	var local []protocol.FileInfo
 	for i := 0; i < 2000; i++ {
@@ -599,7 +599,7 @@ func Benchmark10kGlobal(b *testing.B) {
 		local = append(local, protocol.FileInfo{Name: fmt.Sprintf("file%d", i), Version: 980})
 	}
 
-	m.ReplaceWithDelete(protocol.LocalNodeID, local)
+	m.ReplaceWithDelete(protocol.LocalDeviceID, local)
 
 	b.ResetTimer()
 	for i := 0; i < b.N; i++ {
@@ -632,7 +632,7 @@ func TestGlobalReset(t *testing.T) {
 		protocol.FileInfo{Name: "e", Version: 1000},
 	}
 
-	m.ReplaceWithDelete(protocol.LocalNodeID, local)
+	m.ReplaceWithDelete(protocol.LocalDeviceID, local)
 	g := globalList(m)
 	sort.Sort(fileList(g))
 
@@ -640,8 +640,8 @@ func TestGlobalReset(t *testing.T) {
 		t.Errorf("Global incorrect;\n%v !=\n%v", g, local)
 	}
 
-	m.Replace(remoteNode0, remote)
+	m.Replace(remoteDevice0, remote)
-	m.Replace(remoteNode0, nil)
+	m.Replace(remoteDevice0, nil)
 
 	g = globalList(m)
 	sort.Sort(fileList(g))
@@ -679,10 +679,10 @@ func TestNeed(t *testing.T) {
 		protocol.FileInfo{Name: "e", Version: 1000},
 	}
 
-	m.ReplaceWithDelete(protocol.LocalNodeID, local)
+	m.ReplaceWithDelete(protocol.LocalDeviceID, local)
-	m.Replace(remoteNode0, remote)
+	m.Replace(remoteDevice0, remote)
 
-	need := needList(m, protocol.LocalNodeID)
+	need := needList(m, protocol.LocalDeviceID)
 
 	sort.Sort(fileList(need))
 	sort.Sort(fileList(shouldNeed))
@@ -715,23 +715,23 @@ func TestLocalVersion(t *testing.T) {
 		protocol.FileInfo{Name: "e", Version: 1000},
 	}
 
-	m.ReplaceWithDelete(protocol.LocalNodeID, local1)
+	m.ReplaceWithDelete(protocol.LocalDeviceID, local1)
-	c0 := m.LocalVersion(protocol.LocalNodeID)
+	c0 := m.LocalVersion(protocol.LocalDeviceID)
 
-	m.ReplaceWithDelete(protocol.LocalNodeID, local2)
+	m.ReplaceWithDelete(protocol.LocalDeviceID, local2)
-	c1 := m.LocalVersion(protocol.LocalNodeID)
+	c1 := m.LocalVersion(protocol.LocalDeviceID)
 	if !(c1 > c0) {
 		t.Fatal("Local version number should have incremented")
 	}
 
-	m.ReplaceWithDelete(protocol.LocalNodeID, local2)
+	m.ReplaceWithDelete(protocol.LocalDeviceID, local2)
-	c2 := m.LocalVersion(protocol.LocalNodeID)
+	c2 := m.LocalVersion(protocol.LocalDeviceID)
 	if c2 != c1 {
 		t.Fatal("Local version number should be unchanged")
 	}
 }
 
-func TestListDropRepo(t *testing.T) {
+func TestListDropFolder(t *testing.T) {
 	db, err := leveldb.Open(storage.NewMemStorage(), nil)
 	if err != nil {
 		t.Fatal(err)
@@ -743,7 +743,7 @@ func TestListDropRepo(t *testing.T) {
 		protocol.FileInfo{Name: "b", Version: 1000},
 		protocol.FileInfo{Name: "c", Version: 1000},
 	}
-	s0.Replace(protocol.LocalNodeID, local1)
+	s0.Replace(protocol.LocalDeviceID, local1)
 
 	s1 := files.NewSet("test1", db)
 	local2 := []protocol.FileInfo{
@@ -751,13 +751,13 @@ func TestListDropRepo(t *testing.T) {
 		protocol.FileInfo{Name: "e", Version: 1002},
 		protocol.FileInfo{Name: "f", Version: 1002},
 	}
-	s1.Replace(remoteNode0, local2)
+	s1.Replace(remoteDevice0, local2)
 
-	// Check that we have both repos and their data is in the global list
+	// Check that we have both folders and their data is in the global list
 
-	expectedRepoList := []string{"test0", "test1"}
+	expectedFolderList := []string{"test0", "test1"}
-	if actualRepoList := files.ListRepos(db); !reflect.DeepEqual(actualRepoList, expectedRepoList) {
+	if actualFolderList := files.ListFolders(db); !reflect.DeepEqual(actualFolderList, expectedFolderList) {
-		t.Fatalf("RepoList mismatch\nE: %v\nA: %v", expectedRepoList, actualRepoList)
+		t.Fatalf("FolderList mismatch\nE: %v\nA: %v", expectedFolderList, actualFolderList)
 	}
 	if l := len(globalList(s0)); l != 3 {
 		t.Errorf("Incorrect global length %d != 3 for s0", l)
@@ -768,11 +768,11 @@ func TestListDropRepo(t *testing.T) {
 
 	// Drop one of them and check that it's gone.
 
-	files.DropRepo(db, "test1")
+	files.DropFolder(db, "test1")
 
-	expectedRepoList = []string{"test0"}
+	expectedFolderList = []string{"test0"}
-	if actualRepoList := files.ListRepos(db); !reflect.DeepEqual(actualRepoList, expectedRepoList) {
+	if actualFolderList := files.ListFolders(db); !reflect.DeepEqual(actualFolderList, expectedFolderList) {
-		t.Fatalf("RepoList mismatch\nE: %v\nA: %v", expectedRepoList, actualRepoList)
+		t.Fatalf("FolderList mismatch\nE: %v\nA: %v", expectedFolderList, actualFolderList)
 	}
 	if l := len(globalList(s0)); l != 3 {
 		t.Errorf("Incorrect global length %d != 3 for s0", l)
@@ -795,14 +795,14 @@ func TestGlobalNeedWithInvalid(t *testing.T) {
 		protocol.FileInfo{Name: "b", Version: 1002, Flags: protocol.FlagInvalid},
 		protocol.FileInfo{Name: "c", Version: 1002, Blocks: genBlocks(4)},
 	}
-	s.Replace(remoteNode0, rem0)
+	s.Replace(remoteDevice0, rem0)
 
 	rem1 := fileList{
 		protocol.FileInfo{Name: "a", Version: 1002, Blocks: genBlocks(4)},
 		protocol.FileInfo{Name: "b", Version: 1002, Blocks: genBlocks(4)},
 		protocol.FileInfo{Name: "c", Version: 1002, Flags: protocol.FlagInvalid},
 	}
-	s.Replace(remoteNode1, rem1)
+	s.Replace(remoteDevice1, rem1)
 
 	total := fileList{
 		// There's a valid copy of each file, so it should be merged
@@ -811,7 +811,7 @@ func TestGlobalNeedWithInvalid(t *testing.T) {
 		protocol.FileInfo{Name: "c", Version: 1002, Blocks: genBlocks(4)},
 	}
 
-	need := fileList(needList(s, protocol.LocalNodeID))
+	need := fileList(needList(s, protocol.LocalDeviceID))
 	if fmt.Sprint(need) != fmt.Sprint(total) {
 		t.Errorf("Need incorrect;\n A: %v !=\n E: %v", need, total)
 	}
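The test above exercises the renamed package-level helpers for folder bookkeeping. As a minimal, illustrative sketch only (not part of the commit; the import paths for the internal packages are assumptions), listing and dropping folders looks like this:

package main

import (
	"fmt"
	"log"

	"github.com/syncthing/syncthing/internal/files"    // assumed import path
	"github.com/syncthing/syncthing/internal/protocol" // assumed import path
	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/storage"
)

func main() {
	db, err := leveldb.Open(storage.NewMemStorage(), nil)
	if err != nil {
		log.Fatal(err)
	}

	// Populate two folders, as the test above does.
	s0 := files.NewSet("test0", db)
	s0.Replace(protocol.LocalDeviceID, []protocol.FileInfo{{Name: "a", Version: 1000}})
	s1 := files.NewSet("test1", db)
	s1.Replace(protocol.LocalDeviceID, []protocol.FileInfo{{Name: "d", Version: 1002}})

	fmt.Println(files.ListFolders(db)) // expected to contain test0 and test1

	// Dropping a folder removes all of its index data from the database.
	files.DropFolder(db, "test1")
	fmt.Println(files.ListFolders(db)) // expected to contain only test0
}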
@@ -840,7 +840,7 @@ func TestLongPath(t *testing.T) {
 		protocol.FileInfo{Name: string(name), Version: 1000},
 	}
 
-	s.ReplaceWithDelete(protocol.LocalNodeID, local)
+	s.ReplaceWithDelete(protocol.LocalDeviceID, local)
 
 	gf := globalList(s)
 	if l := len(gf); l != 1 {
@@ -877,8 +877,8 @@ func TestStressGlobalVersion(t *testing.T) {
 	m := files.NewSet("test", db)
 
 	done := make(chan struct{})
-	go stressWriter(m, remoteNode0, set1, nil, done)
+	go stressWriter(m, remoteDevice0, set1, nil, done)
-	go stressWriter(m, protocol.LocalNodeID, set2, nil, done)
+	go stressWriter(m, protocol.LocalDeviceID, set2, nil, done)
 
 	t0 := time.Now()
 	for time.Since(t0) < dur {
@@ -891,7 +891,7 @@ func TestStressGlobalVersion(t *testing.T) {
 	close(done)
 }
 
-func stressWriter(s *files.Set, id protocol.NodeID, set1, set2 []protocol.FileInfo, done chan struct{}) {
+func stressWriter(s *files.Set, id protocol.DeviceID, set1, set2 []protocol.FileInfo, done chan struct{}) {
 	one := true
 	i := 0
 	for {
internal/model/deviceactivity.go (new file, 51 lines)
@@ -0,0 +1,51 @@
+// Copyright (C) 2014 Jakob Borg and Contributors (see the CONTRIBUTORS file).
+// All rights reserved. Use of this source code is governed by an MIT-style
+// license that can be found in the LICENSE file.
+
+package model
+
+import (
+	"sync"
+
+	"github.com/syncthing/syncthing/internal/protocol"
+)
+
+// deviceActivity tracks the number of outstanding requests per device and can
+// answer which device is least busy. It is safe for use from multiple
+// goroutines.
+type deviceActivity struct {
+	act map[protocol.DeviceID]int
+	mut sync.Mutex
+}
+
+func newDeviceActivity() *deviceActivity {
+	return &deviceActivity{
+		act: make(map[protocol.DeviceID]int),
+	}
+}
+
+func (m deviceActivity) leastBusy(availability []protocol.DeviceID) protocol.DeviceID {
+	m.mut.Lock()
+	var low int = 2<<30 - 1
+	var selected protocol.DeviceID
+	for _, device := range availability {
+		if usage := m.act[device]; usage < low {
+			low = usage
+			selected = device
+		}
+	}
+	m.mut.Unlock()
+	return selected
+}
+
+func (m deviceActivity) using(device protocol.DeviceID) {
+	m.mut.Lock()
+	defer m.mut.Unlock()
+	m.act[device]++
+}
+
+func (m deviceActivity) done(device protocol.DeviceID) {
+	m.mut.Lock()
+	defer m.mut.Unlock()
+	m.act[device]--
+}

internal/model/deviceactivity_test.go (new file, 56 lines)
@@ -0,0 +1,56 @@
+// Copyright (C) 2014 Jakob Borg and Contributors (see the CONTRIBUTORS file).
+// All rights reserved. Use of this source code is governed by an MIT-style
+// license that can be found in the LICENSE file.
+
+package model
+
+import (
+	"testing"
+
+	"github.com/syncthing/syncthing/internal/protocol"
+)
+
+func TestDeviceActivity(t *testing.T) {
+	n0 := protocol.DeviceID{1, 2, 3, 4}
+	n1 := protocol.DeviceID{5, 6, 7, 8}
+	n2 := protocol.DeviceID{9, 10, 11, 12}
+	devices := []protocol.DeviceID{n0, n1, n2}
+	na := newDeviceActivity()
+
+	if lb := na.leastBusy(devices); lb != n0 {
+		t.Errorf("Least busy device should be n0 (%v) not %v", n0, lb)
+	}
+	if lb := na.leastBusy(devices); lb != n0 {
+		t.Errorf("Least busy device should still be n0 (%v) not %v", n0, lb)
+	}
+
+	na.using(na.leastBusy(devices))
+	if lb := na.leastBusy(devices); lb != n1 {
+		t.Errorf("Least busy device should be n1 (%v) not %v", n1, lb)
+	}
+
+	na.using(na.leastBusy(devices))
+	if lb := na.leastBusy(devices); lb != n2 {
+		t.Errorf("Least busy device should be n2 (%v) not %v", n2, lb)
+	}
+
+	na.using(na.leastBusy(devices))
+	if lb := na.leastBusy(devices); lb != n0 {
+		t.Errorf("Least busy device should be n0 (%v) not %v", n0, lb)
+	}
+
+	na.done(n1)
+	if lb := na.leastBusy(devices); lb != n1 {
+		t.Errorf("Least busy device should be n1 (%v) not %v", n1, lb)
+	}
+
+	na.done(n2)
+	if lb := na.leastBusy(devices); lb != n1 {
+		t.Errorf("Least busy device should still be n1 (%v) not %v", n1, lb)
+	}
+
+	na.done(n0)
+	if lb := na.leastBusy(devices); lb != n0 {
+		t.Errorf("Least busy device should be n0 (%v) not %v", n0, lb)
+	}
+}
@@ -2,5 +2,5 @@
 // All rights reserved. Use of this source code is governed by an MIT-style
 // license that can be found in the LICENSE file.
 
-// Package model implements repository abstraction and file pulling mechanisms
+// Package model implements folder abstraction and file pulling mechanisms
 package model
File diff suppressed because it is too large
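The deviceActivity type added above (replacing nodeActivity, whose removal appears further down) is what the puller uses to spread block requests across connected devices. For illustration only, and assuming the helper lives inside package model so it can reach the unexported type, the intended call pattern is leastBusy to pick a source, using before the request, and done afterwards; pickSource itself and its signature are not code from this commit, while errNoDevice is the sentinel error introduced in puller.go later in this diff.

package model

import "github.com/syncthing/syncthing/internal/protocol"

// pickSource is an illustrative helper, not part of this commit. Given the
// devices that advertise a block, it routes the request to the least busy one
// and keeps the per-device counters balanced.
func pickSource(activity *deviceActivity, availability []protocol.DeviceID, request func(protocol.DeviceID) error) error {
	if len(availability) == 0 {
		return errNoDevice // sentinel defined in puller.go in this commit
	}
	device := activity.leastBusy(availability)
	activity.using(device)      // one more outstanding request on this device
	defer activity.done(device) // balanced again when the request returns
	return request(device)
}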
@@ -17,11 +17,11 @@ import (
 	"github.com/syndtr/goleveldb/leveldb/storage"
 )
 
-var node1, node2 protocol.NodeID
+var device1, device2 protocol.DeviceID
 
 func init() {
-	node1, _ = protocol.NodeIDFromString("AIR6LPZ-7K4PTTV-UXQSMUU-CPQ5YWH-OEDFIIQ-JUG777G-2YQXXR5-YD6AWQR")
+	device1, _ = protocol.DeviceIDFromString("AIR6LPZ-7K4PTTV-UXQSMUU-CPQ5YWH-OEDFIIQ-JUG777G-2YQXXR5-YD6AWQR")
-	node2, _ = protocol.NodeIDFromString("GYRZZQB-IRNPV4Z-T7TC52W-EQYJ3TT-FDQW6MW-DFLMU42-SSSU6EM-FBK2VAY")
+	device2, _ = protocol.DeviceIDFromString("GYRZZQB-IRNPV4Z-T7TC52W-EQYJ3TT-FDQW6MW-DFLMU42-SSSU6EM-FBK2VAY")
 }
 
 var testDataExpected = map[string]protocol.FileInfo{
@@ -57,11 +57,11 @@ func init() {
 
 func TestRequest(t *testing.T) {
 	db, _ := leveldb.Open(storage.NewMemStorage(), nil)
-	m := NewModel("/tmp", &config.Configuration{}, "node", "syncthing", "dev", db)
+	m := NewModel("/tmp", &config.Configuration{}, "device", "syncthing", "dev", db)
-	m.AddRepo(config.RepositoryConfiguration{ID: "default", Directory: "testdata"})
+	m.AddFolder(config.FolderConfiguration{ID: "default", Directory: "testdata"})
-	m.ScanRepo("default")
+	m.ScanFolder("default")
 
-	bs, err := m.Request(node1, "default", "foo", 0, 6)
+	bs, err := m.Request(device1, "default", "foo", 0, 6)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -69,7 +69,7 @@ func TestRequest(t *testing.T) {
 		t.Errorf("Incorrect data from request: %q", string(bs))
 	}
 
-	bs, err = m.Request(node1, "default", "../walk.go", 0, 6)
+	bs, err = m.Request(device1, "default", "../walk.go", 0, 6)
 	if err == nil {
 		t.Error("Unexpected nil error on insecure file read")
 	}
@@ -94,76 +94,76 @@ func genFiles(n int) []protocol.FileInfo {
 
 func BenchmarkIndex10000(b *testing.B) {
 	db, _ := leveldb.Open(storage.NewMemStorage(), nil)
-	m := NewModel("/tmp", nil, "node", "syncthing", "dev", db)
+	m := NewModel("/tmp", nil, "device", "syncthing", "dev", db)
-	m.AddRepo(config.RepositoryConfiguration{ID: "default", Directory: "testdata"})
+	m.AddFolder(config.FolderConfiguration{ID: "default", Directory: "testdata"})
-	m.ScanRepo("default")
+	m.ScanFolder("default")
 	files := genFiles(10000)
 
 	b.ResetTimer()
 	for i := 0; i < b.N; i++ {
-		m.Index(node1, "default", files)
+		m.Index(device1, "default", files)
 	}
 }
 
 func BenchmarkIndex00100(b *testing.B) {
 	db, _ := leveldb.Open(storage.NewMemStorage(), nil)
-	m := NewModel("/tmp", nil, "node", "syncthing", "dev", db)
+	m := NewModel("/tmp", nil, "device", "syncthing", "dev", db)
-	m.AddRepo(config.RepositoryConfiguration{ID: "default", Directory: "testdata"})
+	m.AddFolder(config.FolderConfiguration{ID: "default", Directory: "testdata"})
-	m.ScanRepo("default")
+	m.ScanFolder("default")
 	files := genFiles(100)
 
 	b.ResetTimer()
 	for i := 0; i < b.N; i++ {
-		m.Index(node1, "default", files)
+		m.Index(device1, "default", files)
 	}
 }
 
 func BenchmarkIndexUpdate10000f10000(b *testing.B) {
 	db, _ := leveldb.Open(storage.NewMemStorage(), nil)
-	m := NewModel("/tmp", nil, "node", "syncthing", "dev", db)
+	m := NewModel("/tmp", nil, "device", "syncthing", "dev", db)
-	m.AddRepo(config.RepositoryConfiguration{ID: "default", Directory: "testdata"})
+	m.AddFolder(config.FolderConfiguration{ID: "default", Directory: "testdata"})
-	m.ScanRepo("default")
+	m.ScanFolder("default")
 	files := genFiles(10000)
-	m.Index(node1, "default", files)
+	m.Index(device1, "default", files)
 
 	b.ResetTimer()
 	for i := 0; i < b.N; i++ {
-		m.IndexUpdate(node1, "default", files)
+		m.IndexUpdate(device1, "default", files)
 	}
 }
 
 func BenchmarkIndexUpdate10000f00100(b *testing.B) {
 	db, _ := leveldb.Open(storage.NewMemStorage(), nil)
-	m := NewModel("/tmp", nil, "node", "syncthing", "dev", db)
+	m := NewModel("/tmp", nil, "device", "syncthing", "dev", db)
-	m.AddRepo(config.RepositoryConfiguration{ID: "default", Directory: "testdata"})
+	m.AddFolder(config.FolderConfiguration{ID: "default", Directory: "testdata"})
-	m.ScanRepo("default")
+	m.ScanFolder("default")
 	files := genFiles(10000)
-	m.Index(node1, "default", files)
+	m.Index(device1, "default", files)
 
 	ufiles := genFiles(100)
 	b.ResetTimer()
 	for i := 0; i < b.N; i++ {
-		m.IndexUpdate(node1, "default", ufiles)
+		m.IndexUpdate(device1, "default", ufiles)
 	}
 }
 
 func BenchmarkIndexUpdate10000f00001(b *testing.B) {
 	db, _ := leveldb.Open(storage.NewMemStorage(), nil)
-	m := NewModel("/tmp", nil, "node", "syncthing", "dev", db)
+	m := NewModel("/tmp", nil, "device", "syncthing", "dev", db)
-	m.AddRepo(config.RepositoryConfiguration{ID: "default", Directory: "testdata"})
+	m.AddFolder(config.FolderConfiguration{ID: "default", Directory: "testdata"})
-	m.ScanRepo("default")
+	m.ScanFolder("default")
 	files := genFiles(10000)
-	m.Index(node1, "default", files)
+	m.Index(device1, "default", files)
 
 	ufiles := genFiles(1)
 	b.ResetTimer()
 	for i := 0; i < b.N; i++ {
-		m.IndexUpdate(node1, "default", ufiles)
+		m.IndexUpdate(device1, "default", ufiles)
 	}
 }
 
 type FakeConnection struct {
-	id protocol.NodeID
+	id protocol.DeviceID
 	requestData []byte
 }
 
@@ -171,7 +171,7 @@ func (FakeConnection) Close() error {
 	return nil
 }
 
-func (f FakeConnection) ID() protocol.NodeID {
+func (f FakeConnection) ID() protocol.DeviceID {
 	return f.id
 }
 
@@ -191,7 +191,7 @@ func (FakeConnection) IndexUpdate(string, []protocol.FileInfo) error {
 	return nil
 }
 
-func (f FakeConnection) Request(repo, name string, offset int64, size int) ([]byte, error) {
+func (f FakeConnection) Request(folder, name string, offset int64, size int) ([]byte, error) {
 	return f.requestData, nil
 }
 
@@ -207,9 +207,9 @@ func (FakeConnection) Statistics() protocol.Statistics {
 
 func BenchmarkRequest(b *testing.B) {
 	db, _ := leveldb.Open(storage.NewMemStorage(), nil)
-	m := NewModel("/tmp", nil, "node", "syncthing", "dev", db)
+	m := NewModel("/tmp", nil, "device", "syncthing", "dev", db)
-	m.AddRepo(config.RepositoryConfiguration{ID: "default", Directory: "testdata"})
+	m.AddFolder(config.FolderConfiguration{ID: "default", Directory: "testdata"})
-	m.ScanRepo("default")
+	m.ScanFolder("default")
 
 	const n = 1000
 	files := make([]protocol.FileInfo, n)
@@ -223,15 +223,15 @@ func BenchmarkRequest(b *testing.B) {
 	}
 
 	fc := FakeConnection{
-		id: node1,
+		id: device1,
 		requestData: []byte("some data to return"),
 	}
 	m.AddConnection(fc, fc)
-	m.Index(node1, "default", files)
+	m.Index(device1, "default", files)
 
 	b.ResetTimer()
 	for i := 0; i < b.N; i++ {
-		data, err := m.requestGlobal(node1, "default", files[i%n].Name, 0, 32, nil)
+		data, err := m.requestGlobal(device1, "default", files[i%n].Name, 0, 32, nil)
 		if err != nil {
 			b.Error(err)
 		}
@@ -241,28 +241,28 @@ func BenchmarkRequest(b *testing.B) {
 	}
 }
 
-func TestNodeRename(t *testing.T) {
+func TestDeviceRename(t *testing.T) {
 	ccm := protocol.ClusterConfigMessage{
 		ClientName: "syncthing",
 		ClientVersion: "v0.9.4",
 	}
 
-	cfg := config.New("/tmp/test", node1)
+	cfg := config.New("/tmp/test", device1)
-	cfg.Nodes = []config.NodeConfiguration{
+	cfg.Devices = []config.DeviceConfiguration{
 		{
-			NodeID: node1,
+			DeviceID: device1,
 		},
 	}
 
 	db, _ := leveldb.Open(storage.NewMemStorage(), nil)
-	m := NewModel("/tmp", &cfg, "node", "syncthing", "dev", db)
+	m := NewModel("/tmp", &cfg, "device", "syncthing", "dev", db)
-	if cfg.Nodes[0].Name != "" {
+	if cfg.Devices[0].Name != "" {
-		t.Errorf("Node already has a name")
+		t.Errorf("Device already has a name")
 	}
 
-	m.ClusterConfig(node1, ccm)
+	m.ClusterConfig(device1, ccm)
-	if cfg.Nodes[0].Name != "" {
+	if cfg.Devices[0].Name != "" {
-		t.Errorf("Node already has a name")
+		t.Errorf("Device already has a name")
 	}
 
 	ccm.Options = []protocol.Option{
@@ -271,96 +271,96 @@ func TestNodeRename(t *testing.T) {
 			Value: "tester",
 		},
 	}
-	m.ClusterConfig(node1, ccm)
+	m.ClusterConfig(device1, ccm)
-	if cfg.Nodes[0].Name != "tester" {
+	if cfg.Devices[0].Name != "tester" {
-		t.Errorf("Node did not get a name")
+		t.Errorf("Device did not get a name")
 	}
 
 	ccm.Options[0].Value = "tester2"
-	m.ClusterConfig(node1, ccm)
+	m.ClusterConfig(device1, ccm)
-	if cfg.Nodes[0].Name != "tester" {
+	if cfg.Devices[0].Name != "tester" {
-		t.Errorf("Node name got overwritten")
+		t.Errorf("Device name got overwritten")
 	}
 }
 
 func TestClusterConfig(t *testing.T) {
-	cfg := config.New("/tmp/test", node1)
+	cfg := config.New("/tmp/test", device1)
-	cfg.Nodes = []config.NodeConfiguration{
+	cfg.Devices = []config.DeviceConfiguration{
 		{
-			NodeID: node1,
+			DeviceID: device1,
 			Introducer: true,
 		},
 		{
-			NodeID: node2,
+			DeviceID: device2,
 		},
 	}
-	cfg.Repositories = []config.RepositoryConfiguration{
+	cfg.Folders = []config.FolderConfiguration{
 		{
-			ID: "repo1",
+			ID: "folder1",
-			Nodes: []config.RepositoryNodeConfiguration{
+			Devices: []config.FolderDeviceConfiguration{
-				{NodeID: node1},
+				{DeviceID: device1},
-				{NodeID: node2},
+				{DeviceID: device2},
 			},
 		},
 		{
-			ID: "repo2",
+			ID: "folder2",
-			Nodes: []config.RepositoryNodeConfiguration{
+			Devices: []config.FolderDeviceConfiguration{
-				{NodeID: node1},
+				{DeviceID: device1},
-				{NodeID: node2},
+				{DeviceID: device2},
 			},
 		},
 	}
 
 	db, _ := leveldb.Open(storage.NewMemStorage(), nil)
 
-	m := NewModel("/tmp", &cfg, "node", "syncthing", "dev", db)
+	m := NewModel("/tmp", &cfg, "device", "syncthing", "dev", db)
-	m.AddRepo(cfg.Repositories[0])
+	m.AddFolder(cfg.Folders[0])
-	m.AddRepo(cfg.Repositories[1])
+	m.AddFolder(cfg.Folders[1])
 
-	cm := m.clusterConfig(node2)
+	cm := m.clusterConfig(device2)
 
-	if l := len(cm.Repositories); l != 2 {
+	if l := len(cm.Folders); l != 2 {
-		t.Fatalf("Incorrect number of repos %d != 2", l)
+		t.Fatalf("Incorrect number of folders %d != 2", l)
 	}
 
-	r := cm.Repositories[0]
+	r := cm.Folders[0]
-	if r.ID != "repo1" {
+	if r.ID != "folder1" {
-		t.Errorf("Incorrect repo %q != repo1", r.ID)
+		t.Errorf("Incorrect folder %q != folder1", r.ID)
 	}
-	if l := len(r.Nodes); l != 2 {
+	if l := len(r.Devices); l != 2 {
-		t.Errorf("Incorrect number of nodes %d != 2", l)
+		t.Errorf("Incorrect number of devices %d != 2", l)
 	}
-	if id := r.Nodes[0].ID; bytes.Compare(id, node1[:]) != 0 {
+	if id := r.Devices[0].ID; bytes.Compare(id, device1[:]) != 0 {
-		t.Errorf("Incorrect node ID %x != %x", id, node1)
+		t.Errorf("Incorrect device ID %x != %x", id, device1)
 	}
-	if r.Nodes[0].Flags&protocol.FlagIntroducer == 0 {
+	if r.Devices[0].Flags&protocol.FlagIntroducer == 0 {
-		t.Error("Node1 should be flagged as Introducer")
+		t.Error("Device1 should be flagged as Introducer")
 	}
-	if id := r.Nodes[1].ID; bytes.Compare(id, node2[:]) != 0 {
+	if id := r.Devices[1].ID; bytes.Compare(id, device2[:]) != 0 {
-		t.Errorf("Incorrect node ID %x != %x", id, node2)
+		t.Errorf("Incorrect device ID %x != %x", id, device2)
 	}
-	if r.Nodes[1].Flags&protocol.FlagIntroducer != 0 {
+	if r.Devices[1].Flags&protocol.FlagIntroducer != 0 {
-		t.Error("Node2 should not be flagged as Introducer")
+		t.Error("Device2 should not be flagged as Introducer")
 	}
 
-	r = cm.Repositories[1]
+	r = cm.Folders[1]
-	if r.ID != "repo2" {
+	if r.ID != "folder2" {
-		t.Errorf("Incorrect repo %q != repo2", r.ID)
+		t.Errorf("Incorrect folder %q != folder2", r.ID)
 	}
-	if l := len(r.Nodes); l != 2 {
+	if l := len(r.Devices); l != 2 {
-		t.Errorf("Incorrect number of nodes %d != 2", l)
+		t.Errorf("Incorrect number of devices %d != 2", l)
 	}
-	if id := r.Nodes[0].ID; bytes.Compare(id, node1[:]) != 0 {
+	if id := r.Devices[0].ID; bytes.Compare(id, device1[:]) != 0 {
-		t.Errorf("Incorrect node ID %x != %x", id, node1)
+		t.Errorf("Incorrect device ID %x != %x", id, device1)
 	}
-	if r.Nodes[0].Flags&protocol.FlagIntroducer == 0 {
+	if r.Devices[0].Flags&protocol.FlagIntroducer == 0 {
-		t.Error("Node1 should be flagged as Introducer")
+		t.Error("Device1 should be flagged as Introducer")
 	}
-	if id := r.Nodes[1].ID; bytes.Compare(id, node2[:]) != 0 {
+	if id := r.Devices[1].ID; bytes.Compare(id, device2[:]) != 0 {
-		t.Errorf("Incorrect node ID %x != %x", id, node2)
+		t.Errorf("Incorrect device ID %x != %x", id, device2)
 	}
-	if r.Nodes[1].Flags&protocol.FlagIntroducer != 0 {
+	if r.Devices[1].Flags&protocol.FlagIntroducer != 0 {
-		t.Error("Node2 should not be flagged as Introducer")
+		t.Error("Device2 should not be flagged as Introducer")
 	}
 }
 
@@ -379,8 +379,8 @@ func TestIgnores(t *testing.T) {
 	}
 
 	db, _ := leveldb.Open(storage.NewMemStorage(), nil)
-	m := NewModel("/tmp", nil, "node", "syncthing", "dev", db)
+	m := NewModel("/tmp", nil, "device", "syncthing", "dev", db)
-	m.AddRepo(config.RepositoryConfiguration{ID: "default", Directory: "testdata"})
+	m.AddFolder(config.FolderConfiguration{ID: "default", Directory: "testdata"})
 
 	expected := []string{
 		".*",
@@ -440,7 +440,7 @@ func TestIgnores(t *testing.T) {
 		t.Error("No error")
 	}
 
-	m.AddRepo(config.RepositoryConfiguration{ID: "fresh", Directory: "XXX"})
+	m.AddFolder(config.FolderConfiguration{ID: "fresh", Directory: "XXX"})
 	ignores, err = m.GetIgnores("fresh")
 	if err != nil {
 		t.Error(err)

@@ -1,51 +0,0 @@
-// Copyright (C) 2014 Jakob Borg and Contributors (see the CONTRIBUTORS file).
-// All rights reserved. Use of this source code is governed by an MIT-style
-// license that can be found in the LICENSE file.
-
-package model
-
-import (
-	"sync"
-
-	"github.com/syncthing/syncthing/internal/protocol"
-)
-
-// nodeActivity tracks the number of outstanding requests per node and can
-// answer which node is least busy. It is safe for use from multiple
-// goroutines.
-type nodeActivity struct {
-	act map[protocol.NodeID]int
-	mut sync.Mutex
-}
-
-func newNodeActivity() *nodeActivity {
-	return &nodeActivity{
-		act: make(map[protocol.NodeID]int),
-	}
-}
-
-func (m nodeActivity) leastBusy(availability []protocol.NodeID) protocol.NodeID {
-	m.mut.Lock()
-	var low int = 2<<30 - 1
-	var selected protocol.NodeID
-	for _, node := range availability {
-		if usage := m.act[node]; usage < low {
-			low = usage
-			selected = node
-		}
-	}
-	m.mut.Unlock()
-	return selected
-}
-
-func (m nodeActivity) using(node protocol.NodeID) {
-	m.mut.Lock()
-	defer m.mut.Unlock()
-	m.act[node]++
-}
-
-func (m nodeActivity) done(node protocol.NodeID) {
-	m.mut.Lock()
-	defer m.mut.Unlock()
-	m.act[node]--
-}

@@ -1,56 +0,0 @@
-// Copyright (C) 2014 Jakob Borg and Contributors (see the CONTRIBUTORS file).
-// All rights reserved. Use of this source code is governed by an MIT-style
-// license that can be found in the LICENSE file.
-
-package model
-
-import (
-	"testing"
-
-	"github.com/syncthing/syncthing/internal/protocol"
-)
-
-func TestNodeActivity(t *testing.T) {
-	n0 := protocol.NodeID{1, 2, 3, 4}
-	n1 := protocol.NodeID{5, 6, 7, 8}
-	n2 := protocol.NodeID{9, 10, 11, 12}
-	nodes := []protocol.NodeID{n0, n1, n2}
-	na := newNodeActivity()
-
-	if lb := na.leastBusy(nodes); lb != n0 {
-		t.Errorf("Least busy node should be n0 (%v) not %v", n0, lb)
-	}
-	if lb := na.leastBusy(nodes); lb != n0 {
-		t.Errorf("Least busy node should still be n0 (%v) not %v", n0, lb)
-	}
-
-	na.using(na.leastBusy(nodes))
-	if lb := na.leastBusy(nodes); lb != n1 {
-		t.Errorf("Least busy node should be n1 (%v) not %v", n1, lb)
-	}
-
-	na.using(na.leastBusy(nodes))
-	if lb := na.leastBusy(nodes); lb != n2 {
-		t.Errorf("Least busy node should be n2 (%v) not %v", n2, lb)
-	}
-
-	na.using(na.leastBusy(nodes))
-	if lb := na.leastBusy(nodes); lb != n0 {
-		t.Errorf("Least busy node should be n0 (%v) not %v", n0, lb)
-	}
-
-	na.done(n1)
-	if lb := na.leastBusy(nodes); lb != n1 {
-		t.Errorf("Least busy node should be n1 (%v) not %v", n1, lb)
-	}
-
-	na.done(n2)
-	if lb := na.leastBusy(nodes); lb != n1 {
-		t.Errorf("Least busy node should still be n1 (%v) not %v", n1, lb)
-	}
-
-	na.done(n0)
-	if lb := na.leastBusy(nodes); lb != n0 {
-		t.Errorf("Least busy node should be n0 (%v) not %v", n0, lb)
-	}
-}
@ -23,9 +23,9 @@ import (
|
|||||||
// TODO: Stop on errors
|
// TODO: Stop on errors
|
||||||
|
|
||||||
const (
|
const (
|
||||||
copiersPerRepo = 1
|
copiersPerFolder = 1
|
||||||
pullersPerRepo = 16
|
pullersPerFolder = 16
|
||||||
finishersPerRepo = 2
|
finishersPerFolder = 2
|
||||||
pauseIntv = 60 * time.Second
|
pauseIntv = 60 * time.Second
|
||||||
nextPullIntv = 10 * time.Second
|
nextPullIntv = 10 * time.Second
|
||||||
checkPullIntv = 1 * time.Second
|
checkPullIntv = 1 * time.Second
|
||||||
@ -46,12 +46,12 @@ type copyBlocksState struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
var (
|
var (
|
||||||
activity = newNodeActivity()
|
activity = newDeviceActivity()
|
||||||
errNoNode = errors.New("no available source node")
|
errNoDevice = errors.New("no available source device")
|
||||||
)
|
)
|
||||||
|
|
||||||
type Puller struct {
|
type Puller struct {
|
||||||
repo string
|
folder string
|
||||||
dir string
|
dir string
|
||||||
scanIntv time.Duration
|
scanIntv time.Duration
|
||||||
model *Model
|
model *Model
|
||||||
@@ -75,8 +75,8 @@ func (p *Puller) Serve() {
 	defer func() {
 		pullTimer.Stop()
 		scanTimer.Stop()
-		// TODO: Should there be an actual RepoStopped state?
-		p.model.setState(p.repo, RepoIdle)
+		// TODO: Should there be an actual FolderStopped state?
+		p.model.setState(p.folder, FolderIdle)
 	}()

 	var prevVer uint64
@@ -94,10 +94,10 @@ loop:
 		// Index(), so that we immediately start a pull when new index
 		// information is available. Before that though, I'd like to build a
 		// repeatable benchmark of how long it takes to sync a change from
-		// node A to node B, so we have something to work against.
+		// device A to device B, so we have something to work against.
 		case <-pullTimer.C:
 			// RemoteLocalVersion() is a fast call, doesn't touch the database.
-			curVer := p.model.RemoteLocalVersion(p.repo)
+			curVer := p.model.RemoteLocalVersion(p.folder)
 			if curVer == prevVer {
 				pullTimer.Reset(checkPullIntv)
 				continue
@@ -106,11 +106,11 @@ loop:
 			if debug {
 				l.Debugln(p, "pulling", prevVer, curVer)
 			}
-			p.model.setState(p.repo, RepoSyncing)
+			p.model.setState(p.folder, FolderSyncing)
 			tries := 0
 			for {
 				tries++
-				changed := p.pullerIteration(copiersPerRepo, pullersPerRepo, finishersPerRepo)
+				changed := p.pullerIteration(copiersPerFolder, pullersPerFolder, finishersPerFolder)
 				if debug {
 					l.Debugln(p, "changed", changed)
 				}
@@ -120,8 +120,8 @@ loop:
 					// sync. Remember the local version number and
 					// schedule a resync a little bit into the future.

-					if lv := p.model.RemoteLocalVersion(p.repo); lv < curVer {
-						// There's a corner case where the node we needed
+					if lv := p.model.RemoteLocalVersion(p.folder); lv < curVer {
+						// There's a corner case where the device we needed
 						// files from disconnected during the puller
 						// iteration. The files will have been removed from
 						// the index, so we've concluded that we don't need
@@ -142,12 +142,12 @@ loop:
 					// we're not making it. Probably there are write
 					// errors preventing us. Flag this with a warning and
 					// wait a bit longer before retrying.
-					l.Warnf("Repo %q isn't making progress - check logs for possible root cause. Pausing puller for %v.", p.repo, pauseIntv)
+					l.Warnf("Folder %q isn't making progress - check logs for possible root cause. Pausing puller for %v.", p.folder, pauseIntv)
 					pullTimer.Reset(pauseIntv)
 					break
 				}
 			}
-			p.model.setState(p.repo, RepoIdle)
+			p.model.setState(p.folder, FolderIdle)

 			// The reason for running the scanner from within the puller is that
 			// this is the easiest way to make sure we are not doing both at the
@@ -156,12 +156,12 @@ loop:
 			if debug {
 				l.Debugln(p, "rescan")
 			}
-			p.model.setState(p.repo, RepoScanning)
-			if err := p.model.ScanRepo(p.repo); err != nil {
-				invalidateRepo(p.model.cfg, p.repo, err)
+			p.model.setState(p.folder, FolderScanning)
+			if err := p.model.ScanFolder(p.folder); err != nil {
+				invalidateFolder(p.model.cfg, p.folder, err)
 				break loop
 			}
-			p.model.setState(p.repo, RepoIdle)
+			p.model.setState(p.folder, FolderIdle)
 			scanTimer.Reset(p.scanIntv)
 		}
 	}
@@ -172,13 +172,13 @@ func (p *Puller) Stop() {
 }

 func (p *Puller) String() string {
-	return fmt.Sprintf("puller/%s@%p", p.repo, p)
+	return fmt.Sprintf("puller/%s@%p", p.folder, p)
 }

-// pullerIteration runs a single puller iteration for the given repo and
+// pullerIteration runs a single puller iteration for the given folder and
 // returns the number items that should have been synced (even those that
 // might have failed). One puller iteration handles all files currently
-// flagged as needed in the repo. The specified number of copier, puller and
+// flagged as needed in the folder. The specified number of copier, puller and
 // finisher routines are used. It's seldom efficient to use more than one
 // copier routine, while multiple pullers are essential and multiple finishers
 // may be useful (they are primarily CPU bound due to hashing).
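The comment above describes the copier/puller/finisher pipeline: a few pools of goroutines connected by channels, sized by the constants renamed earlier in this diff. The following is a generic, self-contained illustration of that fan-out wiring, not the actual puller code; the job type and the startWorkers helper are invented for the example.

package main

import (
	"fmt"
	"sync"
)

type job struct{ name string }

// startWorkers launches n goroutines that drain in, "process" each job, and
// optionally forward it to the next stage via out.
func startWorkers(n int, in <-chan job, out chan<- job, stage string) *sync.WaitGroup {
	var wg sync.WaitGroup
	for i := 0; i < n; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for j := range in {
				fmt.Println(stage, j.name)
				if out != nil {
					out <- j
				}
			}
		}()
	}
	return &wg
}

func main() {
	copyChan := make(chan job)
	pullChan := make(chan job)

	copiers := startWorkers(1, copyChan, pullChan, "copy") // cf. copiersPerFolder
	pullers := startWorkers(16, pullChan, nil, "pull")     // cf. pullersPerFolder

	for _, name := range []string{"a", "b", "c"} {
		copyChan <- job{name}
	}
	close(copyChan)
	copiers.Wait()
	close(pullChan)
	pullers.Wait()
}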
@@ -218,7 +218,7 @@ func (p *Puller) pullerIteration(ncopiers, npullers, nfinishers int) int {
 	}

 	p.model.rmut.RLock()
-	files := p.model.repoFiles[p.repo]
+	files := p.model.folderFiles[p.folder]
 	p.model.rmut.RUnlock()

 	// !!!
@@ -228,7 +228,7 @@ func (p *Puller) pullerIteration(ncopiers, npullers, nfinishers int) int {
 	// !!!

 	changed := 0
-	files.WithNeed(protocol.LocalNodeID, func(intf protocol.FileIntf) bool {
+	files.WithNeed(protocol.LocalDeviceID, func(intf protocol.FileIntf) bool {

 		// Needed items are delivered sorted lexicographically. This isn't
 		// really optimal from a performance point of view - it would be
@@ -240,7 +240,7 @@ func (p *Puller) pullerIteration(ncopiers, npullers, nfinishers int) int {
 		file := intf.(protocol.FileInfo)

 		events.Default.Log(events.ItemStarted, map[string]string{
-			"repo": p.repo,
+			"folder": p.folder,
 			"item": file.Name,
 		})

@ -290,7 +290,7 @@ func (p *Puller) handleDir(file protocol.FileInfo) {
|
|||||||
mode := os.FileMode(file.Flags & 0777)
|
mode := os.FileMode(file.Flags & 0777)
|
||||||
|
|
||||||
if debug {
|
if debug {
|
||||||
curFile := p.model.CurrentRepoFile(p.repo, file.Name)
|
curFile := p.model.CurrentFolderFile(p.folder, file.Name)
|
||||||
l.Debugf("need dir\n\t%v\n\t%v", file, curFile)
|
l.Debugf("need dir\n\t%v\n\t%v", file, curFile)
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -307,19 +307,19 @@ func (p *Puller) handleDir(file protocol.FileInfo) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
if err = osutil.InWritableDir(mkdir, realName); err == nil {
|
if err = osutil.InWritableDir(mkdir, realName); err == nil {
|
||||||
p.model.updateLocal(p.repo, file)
|
p.model.updateLocal(p.folder, file)
|
||||||
} else {
|
} else {
|
||||||
l.Infof("Puller (repo %q, file %q): %v", p.repo, file.Name, err)
|
l.Infof("Puller (folder %q, file %q): %v", p.folder, file.Name, err)
|
||||||
}
|
}
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
// Weird error when stat()'ing the dir. Probably won't work to do
|
// Weird error when stat()'ing the dir. Probably won't work to do
|
||||||
// anything else with it if we can't even stat() it.
|
// anything else with it if we can't even stat() it.
|
||||||
l.Infof("Puller (repo %q, file %q): %v", p.repo, file.Name, err)
|
l.Infof("Puller (folder %q, file %q): %v", p.folder, file.Name, err)
|
||||||
return
|
return
|
||||||
} else if !info.IsDir() {
|
} else if !info.IsDir() {
|
||||||
l.Infof("Puller (repo %q, file %q): should be dir, but is not", p.repo, file.Name)
|
l.Infof("Puller (folder %q, file %q): should be dir, but is not", p.folder, file.Name)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -328,9 +328,9 @@ func (p *Puller) handleDir(file protocol.FileInfo) {
|
|||||||
// It's OK to change mode bits on stuff within non-writable directories.
|
// It's OK to change mode bits on stuff within non-writable directories.
|
||||||
|
|
||||||
if err := os.Chmod(realName, mode); err == nil {
|
if err := os.Chmod(realName, mode); err == nil {
|
||||||
p.model.updateLocal(p.repo, file)
|
p.model.updateLocal(p.folder, file)
|
||||||
} else {
|
} else {
|
||||||
l.Infof("Puller (repo %q, file %q): %v", p.repo, file.Name, err)
|
l.Infof("Puller (folder %q, file %q): %v", p.folder, file.Name, err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -339,7 +339,7 @@ func (p *Puller) deleteDir(file protocol.FileInfo) {
|
|||||||
realName := filepath.Join(p.dir, file.Name)
|
realName := filepath.Join(p.dir, file.Name)
|
||||||
err := osutil.InWritableDir(os.Remove, realName)
|
err := osutil.InWritableDir(os.Remove, realName)
|
||||||
if err == nil || os.IsNotExist(err) {
|
if err == nil || os.IsNotExist(err) {
|
||||||
p.model.updateLocal(p.repo, file)
|
p.model.updateLocal(p.folder, file)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -355,16 +355,16 @@ func (p *Puller) deleteFile(file protocol.FileInfo) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
l.Infof("Puller (repo %q, file %q): delete: %v", p.repo, file.Name, err)
|
l.Infof("Puller (folder %q, file %q): delete: %v", p.folder, file.Name, err)
|
||||||
} else {
|
} else {
|
||||||
p.model.updateLocal(p.repo, file)
|
p.model.updateLocal(p.folder, file)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// handleFile queues the copies and pulls as necessary for a single new or
|
// handleFile queues the copies and pulls as necessary for a single new or
|
||||||
// changed file.
|
// changed file.
|
||||||
func (p *Puller) handleFile(file protocol.FileInfo, copyChan chan<- copyBlocksState, pullChan chan<- pullBlockState) {
|
func (p *Puller) handleFile(file protocol.FileInfo, copyChan chan<- copyBlocksState, pullChan chan<- pullBlockState) {
|
||||||
curFile := p.model.CurrentRepoFile(p.repo, file.Name)
|
curFile := p.model.CurrentFolderFile(p.folder, file.Name)
|
||||||
copyBlocks, pullBlocks := scanner.BlockDiff(curFile.Blocks, file.Blocks)
|
copyBlocks, pullBlocks := scanner.BlockDiff(curFile.Blocks, file.Blocks)
|
||||||
|
|
||||||
if len(copyBlocks) == len(curFile.Blocks) && len(pullBlocks) == 0 {
|
if len(copyBlocks) == len(curFile.Blocks) && len(pullBlocks) == 0 {
|
||||||
@ -384,7 +384,7 @@ func (p *Puller) handleFile(file protocol.FileInfo, copyChan chan<- copyBlocksSt
|
|||||||
|
|
||||||
s := sharedPullerState{
|
s := sharedPullerState{
|
||||||
file: file,
|
file: file,
|
||||||
repo: p.repo,
|
folder: p.folder,
|
||||||
tempName: tempName,
|
tempName: tempName,
|
||||||
realName: realName,
|
realName: realName,
|
||||||
pullNeeded: len(pullBlocks),
|
pullNeeded: len(pullBlocks),
|
||||||
@ -422,18 +422,18 @@ func (p *Puller) shortcutFile(file protocol.FileInfo) {
|
|||||||
realName := filepath.Join(p.dir, file.Name)
|
realName := filepath.Join(p.dir, file.Name)
|
||||||
err := os.Chmod(realName, os.FileMode(file.Flags&0777))
|
err := os.Chmod(realName, os.FileMode(file.Flags&0777))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
l.Infof("Puller (repo %q, file %q): shortcut: %v", p.repo, file.Name, err)
|
l.Infof("Puller (folder %q, file %q): shortcut: %v", p.folder, file.Name, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
t := time.Unix(file.Modified, 0)
|
t := time.Unix(file.Modified, 0)
|
||||||
err = os.Chtimes(realName, t, t)
|
err = os.Chtimes(realName, t, t)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
l.Infof("Puller (repo %q, file %q): shortcut: %v", p.repo, file.Name, err)
|
l.Infof("Puller (folder %q, file %q): shortcut: %v", p.folder, file.Name, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
p.model.updateLocal(p.repo, file)
|
p.model.updateLocal(p.folder, file)
|
||||||
}
|
}
|
||||||
|
|
||||||
// copierRoutine reads pullerStates until the in channel closes and performs
|
// copierRoutine reads pullerStates until the in channel closes and performs
|
||||||
@ -487,13 +487,13 @@ nextBlock:
|
|||||||
continue nextBlock
|
continue nextBlock
|
||||||
}
|
}
|
||||||
|
|
||||||
// Select the least busy node to pull the block frop.model. If we found no
|
// Select the least busy device to pull the block frop.model. If we found no
|
||||||
// feasible node at all, fail the block (and in the long run, the
|
// feasible device at all, fail the block (and in the long run, the
|
||||||
// file).
|
// file).
|
||||||
potentialNodes := p.model.availability(p.repo, state.file.Name)
|
potentialDevices := p.model.availability(p.folder, state.file.Name)
|
||||||
selected := activity.leastBusy(potentialNodes)
|
selected := activity.leastBusy(potentialDevices)
|
||||||
if selected == (protocol.NodeID{}) {
|
if selected == (protocol.DeviceID{}) {
|
||||||
state.earlyClose("pull", errNoNode)
|
state.earlyClose("pull", errNoDevice)
|
||||||
continue nextBlock
|
continue nextBlock
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -505,10 +505,10 @@ nextBlock:
|
|||||||
continue nextBlock
|
continue nextBlock
|
||||||
}
|
}
|
||||||
|
|
||||||
// Fetch the block, while marking the selected node as in use so that
|
// Fetch the block, while marking the selected device as in use so that
|
||||||
// leastBusy can select another node when someone else asks.
|
// leastBusy can select another device when someone else asks.
|
||||||
activity.using(selected)
|
activity.using(selected)
|
||||||
buf, err := p.model.requestGlobal(selected, p.repo, state.file.Name, state.block.Offset, int(state.block.Size), state.block.Hash)
|
buf, err := p.model.requestGlobal(selected, p.folder, state.file.Name, state.block.Offset, int(state.block.Size), state.block.Hash)
|
||||||
activity.done(selected)
|
activity.done(selected)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
state.earlyClose("pull", err)
|
state.earlyClose("pull", err)
|
||||||
@ -589,7 +589,7 @@ func (p *Puller) finisherRoutine(in <-chan *sharedPullerState) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Record the updated file in the index
|
// Record the updated file in the index
|
||||||
p.model.updateLocal(p.repo, state.file)
|
p.model.updateLocal(p.folder, state.file)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -609,11 +609,11 @@ func (p *Puller) clean() {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func invalidateRepo(cfg *config.Configuration, repoID string, err error) {
|
func invalidateFolder(cfg *config.Configuration, folderID string, err error) {
|
||||||
for i := range cfg.Repositories {
|
for i := range cfg.Folders {
|
||||||
repo := &cfg.Repositories[i]
|
folder := &cfg.Folders[i]
|
||||||
if repo.ID == repoID {
|
if folder.ID == folderID {
|
||||||
repo.Invalid = err.Error()
|
folder.Invalid = err.Error()
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -17,7 +17,7 @@ import (
|
|||||||
type sharedPullerState struct {
|
type sharedPullerState struct {
|
||||||
// Immutable, does not require locking
|
// Immutable, does not require locking
|
||||||
file protocol.FileInfo
|
file protocol.FileInfo
|
||||||
repo string
|
folder string
|
||||||
tempName string
|
tempName string
|
||||||
realName string
|
realName string
|
||||||
|
|
||||||
@ -113,7 +113,7 @@ func (s *sharedPullerState) earlyCloseLocked(context string, err error) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
l.Infof("Puller (repo %q, file %q): %s: %v", s.repo, s.file.Name, context, err)
|
l.Infof("Puller (folder %q, file %q): %s: %v", s.folder, s.file.Name, context, err)
|
||||||
s.err = err
|
s.err = err
|
||||||
if s.fd != nil {
|
if s.fd != nil {
|
||||||
s.fd.Close()
|
s.fd.Close()
|
||||||
@ -133,7 +133,7 @@ func (s *sharedPullerState) copyDone() {
|
|||||||
s.mut.Lock()
|
s.mut.Lock()
|
||||||
s.copyNeeded--
|
s.copyNeeded--
|
||||||
if debug {
|
if debug {
|
||||||
l.Debugln("sharedPullerState", s.repo, s.file.Name, "copyNeeded ->", s.pullNeeded)
|
l.Debugln("sharedPullerState", s.folder, s.file.Name, "copyNeeded ->", s.pullNeeded)
|
||||||
}
|
}
|
||||||
s.mut.Unlock()
|
s.mut.Unlock()
|
||||||
}
|
}
|
||||||
@ -142,7 +142,7 @@ func (s *sharedPullerState) pullDone() {
|
|||||||
s.mut.Lock()
|
s.mut.Lock()
|
||||||
s.pullNeeded--
|
s.pullNeeded--
|
||||||
if debug {
|
if debug {
|
||||||
l.Debugln("sharedPullerState", s.repo, s.file.Name, "pullNeeded ->", s.pullNeeded)
|
l.Debugln("sharedPullerState", s.folder, s.file.Name, "pullNeeded ->", s.pullNeeded)
|
||||||
}
|
}
|
||||||
s.mut.Unlock()
|
s.mut.Unlock()
|
||||||
}
|
}
|
||||||
|
internal/model/testdata/.stignore (vendored): mode changed, Normal file → Executable file, 0 lines changed
@ -11,7 +11,7 @@ import (
|
|||||||
|
|
||||||
type TestModel struct {
|
type TestModel struct {
|
||||||
data []byte
|
data []byte
|
||||||
repo string
|
folder string
|
||||||
name string
|
name string
|
||||||
offset int64
|
offset int64
|
||||||
size int
|
size int
|
||||||
@ -24,25 +24,25 @@ func newTestModel() *TestModel {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (t *TestModel) Index(nodeID NodeID, repo string, files []FileInfo) {
|
func (t *TestModel) Index(deviceID DeviceID, folder string, files []FileInfo) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (t *TestModel) IndexUpdate(nodeID NodeID, repo string, files []FileInfo) {
|
func (t *TestModel) IndexUpdate(deviceID DeviceID, folder string, files []FileInfo) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (t *TestModel) Request(nodeID NodeID, repo, name string, offset int64, size int) ([]byte, error) {
|
func (t *TestModel) Request(deviceID DeviceID, folder, name string, offset int64, size int) ([]byte, error) {
|
||||||
t.repo = repo
|
t.folder = folder
|
||||||
t.name = name
|
t.name = name
|
||||||
t.offset = offset
|
t.offset = offset
|
||||||
t.size = size
|
t.size = size
|
||||||
return t.data, nil
|
return t.data, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (t *TestModel) Close(nodeID NodeID, err error) {
|
func (t *TestModel) Close(deviceID DeviceID, err error) {
|
||||||
close(t.closedCh)
|
close(t.closedCh)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (t *TestModel) ClusterConfig(nodeID NodeID, config ClusterConfigMessage) {
|
func (t *TestModel) ClusterConfig(deviceID DeviceID, config ClusterConfigMessage) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (t *TestModel) isClosed() bool {
|
func (t *TestModel) isClosed() bool {
|
||||||
|
@@ -16,36 +16,36 @@ import (
 	"github.com/syncthing/syncthing/internal/luhn"
 )

-type NodeID [32]byte
+type DeviceID [32]byte

-var LocalNodeID = NodeID{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}
+var LocalDeviceID = DeviceID{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}

-// NewNodeID generates a new node ID from the raw bytes of a certificate
-func NewNodeID(rawCert []byte) NodeID {
-	var n NodeID
+// NewDeviceID generates a new device ID from the raw bytes of a certificate
+func NewDeviceID(rawCert []byte) DeviceID {
+	var n DeviceID
 	hf := sha256.New()
 	hf.Write(rawCert)
 	hf.Sum(n[:0])
 	return n
 }

-func NodeIDFromString(s string) (NodeID, error) {
-	var n NodeID
+func DeviceIDFromString(s string) (DeviceID, error) {
+	var n DeviceID
 	err := n.UnmarshalText([]byte(s))
 	return n, err
 }

-func NodeIDFromBytes(bs []byte) NodeID {
-	var n NodeID
+func DeviceIDFromBytes(bs []byte) DeviceID {
+	var n DeviceID
 	if len(bs) != len(n) {
-		panic("incorrect length of byte slice representing node ID")
+		panic("incorrect length of byte slice representing device ID")
 	}
 	copy(n[:], bs)
 	return n
 }

-// String returns the canonical string representation of the node ID
-func (n NodeID) String() string {
+// String returns the canonical string representation of the device ID
+func (n DeviceID) String() string {
 	id := base32.StdEncoding.EncodeToString(n[:])
 	id = strings.Trim(id, "=")
 	id, err := luhnify(id)
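For reference, NewDeviceID above hashes the certificate with SHA-256, and String() then base32-encodes the 32-byte digest, strips the '=' padding and (further down) inserts Luhn check characters via luhnify(). A standalone sketch of the first two steps, using only standard-library calls; the certificate bytes are a placeholder.

package main

import (
	"crypto/sha256"
	"encoding/base32"
	"fmt"
	"strings"
)

func main() {
	rawCert := []byte("certificate DER bytes would go here")

	// Same shape as NewDeviceID: a 32-byte SHA-256 digest of the raw certificate.
	sum := sha256.Sum256(rawCert)

	// Same shape as DeviceID.String() before luhnification: unpadded base32.
	id := base32.StdEncoding.EncodeToString(sum[:])
	id = strings.Trim(id, "=")

	fmt.Println(id) // 52 base32 characters; the real String() also adds check digits
}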
@ -57,23 +57,23 @@ func (n NodeID) String() string {
|
|||||||
return id
|
return id
|
||||||
}
|
}
|
||||||
|
|
||||||
func (n NodeID) GoString() string {
|
func (n DeviceID) GoString() string {
|
||||||
return n.String()
|
return n.String()
|
||||||
}
|
}
|
||||||
|
|
||||||
func (n NodeID) Compare(other NodeID) int {
|
func (n DeviceID) Compare(other DeviceID) int {
|
||||||
return bytes.Compare(n[:], other[:])
|
return bytes.Compare(n[:], other[:])
|
||||||
}
|
}
|
||||||
|
|
||||||
func (n NodeID) Equals(other NodeID) bool {
|
func (n DeviceID) Equals(other DeviceID) bool {
|
||||||
return bytes.Compare(n[:], other[:]) == 0
|
return bytes.Compare(n[:], other[:]) == 0
|
||||||
}
|
}
|
||||||
|
|
||||||
func (n *NodeID) MarshalText() ([]byte, error) {
|
func (n *DeviceID) MarshalText() ([]byte, error) {
|
||||||
return []byte(n.String()), nil
|
return []byte(n.String()), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (n *NodeID) UnmarshalText(bs []byte) error {
|
func (n *DeviceID) UnmarshalText(bs []byte) error {
|
||||||
id := string(bs)
|
id := string(bs)
|
||||||
id = strings.Trim(id, "=")
|
id = strings.Trim(id, "=")
|
||||||
id = strings.ToUpper(id)
|
id = strings.ToUpper(id)
|
||||||
@ -98,7 +98,7 @@ func (n *NodeID) UnmarshalText(bs []byte) error {
|
|||||||
copy(n[:], dec)
|
copy(n[:], dec)
|
||||||
return nil
|
return nil
|
||||||
default:
|
default:
|
||||||
return errors.New("node ID invalid: incorrect length")
|
return errors.New("device ID invalid: incorrect length")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
@ -20,14 +20,14 @@ var formatCases = []string{
|
|||||||
"p561017mzjnu2yiqgdreydm2mgtimgl3bxnpq6w5bmt88z4tjxzwicq2",
|
"p561017mzjnu2yiqgdreydm2mgtimgl3bxnpq6w5bmt88z4tjxzwicq2",
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestFormatNodeID(t *testing.T) {
|
func TestFormatDeviceID(t *testing.T) {
|
||||||
for i, tc := range formatCases {
|
for i, tc := range formatCases {
|
||||||
var id NodeID
|
var id DeviceID
|
||||||
err := id.UnmarshalText([]byte(tc))
|
err := id.UnmarshalText([]byte(tc))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Errorf("#%d UnmarshalText(%q); %v", i, tc, err)
|
t.Errorf("#%d UnmarshalText(%q); %v", i, tc, err)
|
||||||
} else if f := id.String(); f != formatted {
|
} else if f := id.String(); f != formatted {
|
||||||
t.Errorf("#%d FormatNodeID(%q)\n\t%q !=\n\t%q", i, tc, f, formatted)
|
t.Errorf("#%d FormatDeviceID(%q)\n\t%q !=\n\t%q", i, tc, f, formatted)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -46,20 +46,20 @@ var validateCases = []struct {
|
|||||||
{"p56ioi7mzjnu2iqgdreydm2mgtmgl3bxnpq6w5btbbz4tjxzwicqCCCC", false},
|
{"p56ioi7mzjnu2iqgdreydm2mgtmgl3bxnpq6w5btbbz4tjxzwicqCCCC", false},
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestValidateNodeID(t *testing.T) {
|
func TestValidateDeviceID(t *testing.T) {
|
||||||
for _, tc := range validateCases {
|
for _, tc := range validateCases {
|
||||||
var id NodeID
|
var id DeviceID
|
||||||
err := id.UnmarshalText([]byte(tc.s))
|
err := id.UnmarshalText([]byte(tc.s))
|
||||||
if (err == nil && !tc.ok) || (err != nil && tc.ok) {
|
if (err == nil && !tc.ok) || (err != nil && tc.ok) {
|
||||||
t.Errorf("ValidateNodeID(%q); %v != %v", tc.s, err, tc.ok)
|
t.Errorf("ValidateDeviceID(%q); %v != %v", tc.s, err, tc.ok)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestMarshallingNodeID(t *testing.T) {
|
func TestMarshallingDeviceID(t *testing.T) {
|
||||||
n0 := NodeID{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 10, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32}
|
n0 := DeviceID{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 10, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32}
|
||||||
n1 := NodeID{}
|
n1 := DeviceID{}
|
||||||
n2 := NodeID{}
|
n2 := DeviceID{}
|
||||||
|
|
||||||
bs, _ := n0.MarshalText()
|
bs, _ := n0.MarshalText()
|
||||||
n1.UnmarshalText(bs)
|
n1.UnmarshalText(bs)
|
@@ -7,7 +7,7 @@ package protocol
 import "fmt"

 type IndexMessage struct {
-	Repository string // max:64
+	Folder     string // max:64
 	Files      []FileInfo
 }

@@ -90,7 +90,7 @@ func (b BlockInfo) String() string {
 }

 type RequestMessage struct {
-	Repository string // max:64
+	Folder     string // max:64
 	Name       string // max:8192
 	Offset     uint64
 	Size       uint32
@@ -103,7 +103,7 @@ type ResponseMessage struct {
 type ClusterConfigMessage struct {
 	ClientName    string // max:64
 	ClientVersion string // max:64
-	Repositories  []Repository // max:64
+	Folders       []Folder // max:64
 	Options       []Option // max:64
 }

@@ -116,12 +116,12 @@ func (o *ClusterConfigMessage) GetOption(key string) string {
 	return ""
 }

-type Repository struct {
-	ID    string // max:64
-	Nodes []Node // max:64
+type Folder struct {
+	ID      string // max:64
+	Devices []Device // max:64
 }

-type Node struct {
+type Device struct {
 	ID              []byte // max:32
 	Flags           uint32
 	MaxLocalVersion uint64
@@ -18,10 +18,10 @@ IndexMessage Structure:
  0                   1                   2                   3
  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-|                     Length of Repository                     |
+|                       Length of Folder                       |
 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 /                                                               /
-\                 Repository (variable length)                 \
+\                   Folder (variable length)                   \
 /                                                               /
 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 |                        Number of Files                        |
@@ -33,7 +33,7 @@ IndexMessage Structure:


 struct IndexMessage {
-	string Repository<64>;
+	string Folder<64>;
 	FileInfo Files<>;
 }

@@ -56,10 +56,10 @@ func (o IndexMessage) AppendXDR(bs []byte) []byte {
 }

 func (o IndexMessage) encodeXDR(xw *xdr.Writer) (int, error) {
-	if len(o.Repository) > 64 {
+	if len(o.Folder) > 64 {
 		return xw.Tot(), xdr.ErrElementSizeExceeded
 	}
-	xw.WriteString(o.Repository)
+	xw.WriteString(o.Folder)
 	xw.WriteUint32(uint32(len(o.Files)))
 	for i := range o.Files {
 		_, err := o.Files[i].encodeXDR(xw)
@@ -82,7 +82,7 @@ func (o *IndexMessage) UnmarshalXDR(bs []byte) error {
 }

 func (o *IndexMessage) decodeXDR(xr *xdr.Reader) error {
-	o.Repository = xr.ReadStringMax(64)
+	o.Folder = xr.ReadStringMax(64)
 	_FilesSize := int(xr.ReadUint32())
 	o.Files = make([]FileInfo, _FilesSize)
 	for i := range o.Files {
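The encode/decode pair above writes the folder name (capped at 64 bytes) followed by the file list. Assuming the AppendXDR and UnmarshalXDR wrappers shown in this diff, a round trip inside package protocol would look roughly like the sketch below; it is illustrative only and not part of the source.

// roundTrip (sketch): serialise an IndexMessage to its XDR form and read it back.
func roundTrip() error {
	msg := IndexMessage{
		Folder: "default",
		Files:  []FileInfo{{Name: "some/file"}},
	}

	// AppendXDR appends the encoded message to the given slice; a Folder longer
	// than 64 bytes would be rejected by encodeXDR with ErrElementSizeExceeded.
	bs := msg.AppendXDR(nil)

	var decoded IndexMessage
	if err := decoded.UnmarshalXDR(bs); err != nil {
		return err
	}
	_ = decoded.Folder // "default" again after the round trip
	return nil
}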
@ -362,10 +362,10 @@ RequestMessage Structure:
|
|||||||
0 1 2 3
|
0 1 2 3
|
||||||
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
|
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
|
||||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||||
| Length of Repository |
|
| Length of Folder |
|
||||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||||
/ /
|
/ /
|
||||||
\ Repository (variable length) \
|
\ Folder (variable length) \
|
||||||
/ /
|
/ /
|
||||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||||
| Length of Name |
|
| Length of Name |
|
||||||
@ -383,7 +383,7 @@ RequestMessage Structure:
|
|||||||
|
|
||||||
|
|
||||||
struct RequestMessage {
|
struct RequestMessage {
|
||||||
string Repository<64>;
|
string Folder<64>;
|
||||||
string Name<8192>;
|
string Name<8192>;
|
||||||
unsigned hyper Offset;
|
unsigned hyper Offset;
|
||||||
unsigned int Size;
|
unsigned int Size;
|
||||||
@ -408,10 +408,10 @@ func (o RequestMessage) AppendXDR(bs []byte) []byte {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (o RequestMessage) encodeXDR(xw *xdr.Writer) (int, error) {
|
func (o RequestMessage) encodeXDR(xw *xdr.Writer) (int, error) {
|
||||||
if len(o.Repository) > 64 {
|
if len(o.Folder) > 64 {
|
||||||
return xw.Tot(), xdr.ErrElementSizeExceeded
|
return xw.Tot(), xdr.ErrElementSizeExceeded
|
||||||
}
|
}
|
||||||
xw.WriteString(o.Repository)
|
xw.WriteString(o.Folder)
|
||||||
if len(o.Name) > 8192 {
|
if len(o.Name) > 8192 {
|
||||||
return xw.Tot(), xdr.ErrElementSizeExceeded
|
return xw.Tot(), xdr.ErrElementSizeExceeded
|
||||||
}
|
}
|
||||||
@ -433,7 +433,7 @@ func (o *RequestMessage) UnmarshalXDR(bs []byte) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (o *RequestMessage) decodeXDR(xr *xdr.Reader) error {
|
func (o *RequestMessage) decodeXDR(xr *xdr.Reader) error {
|
||||||
o.Repository = xr.ReadStringMax(64)
|
o.Folder = xr.ReadStringMax(64)
|
||||||
o.Name = xr.ReadStringMax(8192)
|
o.Name = xr.ReadStringMax(8192)
|
||||||
o.Offset = xr.ReadUint64()
|
o.Offset = xr.ReadUint64()
|
||||||
o.Size = xr.ReadUint32()
|
o.Size = xr.ReadUint32()
|
||||||
@ -517,10 +517,10 @@ ClusterConfigMessage Structure:
|
|||||||
\ Client Version (variable length) \
|
\ Client Version (variable length) \
|
||||||
/ /
|
/ /
|
||||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||||
| Number of Repositories |
|
| Number of Folders |
|
||||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||||
/ /
|
/ /
|
||||||
\ Zero or more Repository Structures \
|
\ Zero or more Folder Structures \
|
||||||
/ /
|
/ /
|
||||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||||
| Number of Options |
|
| Number of Options |
|
||||||
@ -534,7 +534,7 @@ ClusterConfigMessage Structure:
|
|||||||
struct ClusterConfigMessage {
|
struct ClusterConfigMessage {
|
||||||
string ClientName<64>;
|
string ClientName<64>;
|
||||||
string ClientVersion<64>;
|
string ClientVersion<64>;
|
||||||
Repository Repositories<64>;
|
Folder Folders<64>;
|
||||||
Option Options<64>;
|
Option Options<64>;
|
||||||
}
|
}
|
||||||
|
|
||||||
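With the renamed types, a cluster config announcement for one shared folder between two devices could be assembled as in the sketch below. The field values are invented for illustration, and localID/remoteID stand for DeviceID values obtained elsewhere; this is not code from the source tree.

// buildClusterConfig (sketch): one folder shared with two devices.
func buildClusterConfig(localID, remoteID DeviceID) ClusterConfigMessage {
	return ClusterConfigMessage{
		ClientName:    "syncthing",
		ClientVersion: "v0.9.x",
		Folders: []Folder{
			{
				ID: "default",
				Devices: []Device{
					{ID: localID[:], Flags: 0, MaxLocalVersion: 42},
					{ID: remoteID[:], Flags: 0, MaxLocalVersion: 40},
				},
			},
		},
	}
}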
@ -565,12 +565,12 @@ func (o ClusterConfigMessage) encodeXDR(xw *xdr.Writer) (int, error) {
|
|||||||
return xw.Tot(), xdr.ErrElementSizeExceeded
|
return xw.Tot(), xdr.ErrElementSizeExceeded
|
||||||
}
|
}
|
||||||
xw.WriteString(o.ClientVersion)
|
xw.WriteString(o.ClientVersion)
|
||||||
if len(o.Repositories) > 64 {
|
if len(o.Folders) > 64 {
|
||||||
return xw.Tot(), xdr.ErrElementSizeExceeded
|
return xw.Tot(), xdr.ErrElementSizeExceeded
|
||||||
}
|
}
|
||||||
xw.WriteUint32(uint32(len(o.Repositories)))
|
xw.WriteUint32(uint32(len(o.Folders)))
|
||||||
for i := range o.Repositories {
|
for i := range o.Folders {
|
||||||
_, err := o.Repositories[i].encodeXDR(xw)
|
_, err := o.Folders[i].encodeXDR(xw)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return xw.Tot(), err
|
return xw.Tot(), err
|
||||||
}
|
}
|
||||||
@ -602,13 +602,13 @@ func (o *ClusterConfigMessage) UnmarshalXDR(bs []byte) error {
|
|||||||
func (o *ClusterConfigMessage) decodeXDR(xr *xdr.Reader) error {
|
func (o *ClusterConfigMessage) decodeXDR(xr *xdr.Reader) error {
|
||||||
o.ClientName = xr.ReadStringMax(64)
|
o.ClientName = xr.ReadStringMax(64)
|
||||||
o.ClientVersion = xr.ReadStringMax(64)
|
o.ClientVersion = xr.ReadStringMax(64)
|
||||||
_RepositoriesSize := int(xr.ReadUint32())
|
_FoldersSize := int(xr.ReadUint32())
|
||||||
if _RepositoriesSize > 64 {
|
if _FoldersSize > 64 {
|
||||||
return xdr.ErrElementSizeExceeded
|
return xdr.ErrElementSizeExceeded
|
||||||
}
|
}
|
||||||
o.Repositories = make([]Repository, _RepositoriesSize)
|
o.Folders = make([]Folder, _FoldersSize)
|
||||||
for i := range o.Repositories {
|
for i := range o.Folders {
|
||||||
(&o.Repositories[i]).decodeXDR(xr)
|
(&o.Folders[i]).decodeXDR(xr)
|
||||||
}
|
}
|
||||||
_OptionsSize := int(xr.ReadUint32())
|
_OptionsSize := int(xr.ReadUint32())
|
||||||
if _OptionsSize > 64 {
|
if _OptionsSize > 64 {
|
||||||
@ -623,7 +623,7 @@ func (o *ClusterConfigMessage) decodeXDR(xr *xdr.Reader) error {
|
|||||||
|
|
||||||
/*
|
/*
|
||||||
|
|
||||||
Repository Structure:
|
Folder Structure:
|
||||||
|
|
||||||
0 1 2 3
|
0 1 2 3
|
||||||
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
|
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
|
||||||
@ -634,48 +634,48 @@ Repository Structure:
|
|||||||
\ ID (variable length) \
|
\ ID (variable length) \
|
||||||
/ /
|
/ /
|
||||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||||
| Number of Nodes |
|
| Number of Devices |
|
||||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||||
/ /
|
/ /
|
||||||
\ Zero or more Node Structures \
|
\ Zero or more Device Structures \
|
||||||
/ /
|
/ /
|
||||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||||
|
|
||||||
|
|
||||||
struct Repository {
|
struct Folder {
|
||||||
string ID<64>;
|
string ID<64>;
|
||||||
Node Nodes<64>;
|
Device Devices<64>;
|
||||||
}
|
}
|
||||||
|
|
||||||
*/
|
*/
|
||||||
|
|
||||||
func (o Repository) EncodeXDR(w io.Writer) (int, error) {
|
func (o Folder) EncodeXDR(w io.Writer) (int, error) {
|
||||||
var xw = xdr.NewWriter(w)
|
var xw = xdr.NewWriter(w)
|
||||||
return o.encodeXDR(xw)
|
return o.encodeXDR(xw)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (o Repository) MarshalXDR() []byte {
|
func (o Folder) MarshalXDR() []byte {
|
||||||
return o.AppendXDR(make([]byte, 0, 128))
|
return o.AppendXDR(make([]byte, 0, 128))
|
||||||
}
|
}
|
||||||
|
|
||||||
func (o Repository) AppendXDR(bs []byte) []byte {
|
func (o Folder) AppendXDR(bs []byte) []byte {
|
||||||
var aw = xdr.AppendWriter(bs)
|
var aw = xdr.AppendWriter(bs)
|
||||||
var xw = xdr.NewWriter(&aw)
|
var xw = xdr.NewWriter(&aw)
|
||||||
o.encodeXDR(xw)
|
o.encodeXDR(xw)
|
||||||
return []byte(aw)
|
return []byte(aw)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (o Repository) encodeXDR(xw *xdr.Writer) (int, error) {
|
func (o Folder) encodeXDR(xw *xdr.Writer) (int, error) {
|
||||||
if len(o.ID) > 64 {
|
if len(o.ID) > 64 {
|
||||||
return xw.Tot(), xdr.ErrElementSizeExceeded
|
return xw.Tot(), xdr.ErrElementSizeExceeded
|
||||||
}
|
}
|
||||||
xw.WriteString(o.ID)
|
xw.WriteString(o.ID)
|
||||||
if len(o.Nodes) > 64 {
|
if len(o.Devices) > 64 {
|
||||||
return xw.Tot(), xdr.ErrElementSizeExceeded
|
return xw.Tot(), xdr.ErrElementSizeExceeded
|
||||||
}
|
}
|
||||||
xw.WriteUint32(uint32(len(o.Nodes)))
|
xw.WriteUint32(uint32(len(o.Devices)))
|
||||||
for i := range o.Nodes {
|
for i := range o.Devices {
|
||||||
_, err := o.Nodes[i].encodeXDR(xw)
|
_, err := o.Devices[i].encodeXDR(xw)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return xw.Tot(), err
|
return xw.Tot(), err
|
||||||
}
|
}
|
||||||
@ -683,33 +683,33 @@ func (o Repository) encodeXDR(xw *xdr.Writer) (int, error) {
|
|||||||
return xw.Tot(), xw.Error()
|
return xw.Tot(), xw.Error()
|
||||||
}
|
}
|
||||||
|
|
||||||
func (o *Repository) DecodeXDR(r io.Reader) error {
|
func (o *Folder) DecodeXDR(r io.Reader) error {
|
||||||
xr := xdr.NewReader(r)
|
xr := xdr.NewReader(r)
|
||||||
return o.decodeXDR(xr)
|
return o.decodeXDR(xr)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (o *Repository) UnmarshalXDR(bs []byte) error {
|
func (o *Folder) UnmarshalXDR(bs []byte) error {
|
||||||
var br = bytes.NewReader(bs)
|
var br = bytes.NewReader(bs)
|
||||||
var xr = xdr.NewReader(br)
|
var xr = xdr.NewReader(br)
|
||||||
return o.decodeXDR(xr)
|
return o.decodeXDR(xr)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (o *Repository) decodeXDR(xr *xdr.Reader) error {
|
func (o *Folder) decodeXDR(xr *xdr.Reader) error {
|
||||||
o.ID = xr.ReadStringMax(64)
|
o.ID = xr.ReadStringMax(64)
|
||||||
_NodesSize := int(xr.ReadUint32())
|
_DevicesSize := int(xr.ReadUint32())
|
||||||
if _NodesSize > 64 {
|
if _DevicesSize > 64 {
|
||||||
return xdr.ErrElementSizeExceeded
|
return xdr.ErrElementSizeExceeded
|
||||||
}
|
}
|
||||||
o.Nodes = make([]Node, _NodesSize)
|
o.Devices = make([]Device, _DevicesSize)
|
||||||
for i := range o.Nodes {
|
for i := range o.Devices {
|
||||||
(&o.Nodes[i]).decodeXDR(xr)
|
(&o.Devices[i]).decodeXDR(xr)
|
||||||
}
|
}
|
||||||
return xr.Error()
|
return xr.Error()
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
|
||||||
Node Structure:
|
Device Structure:
|
||||||
|
|
||||||
0 1 2 3
|
0 1 2 3
|
||||||
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
|
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
|
||||||
@ -728,7 +728,7 @@ Node Structure:
|
|||||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||||
|
|
||||||
|
|
||||||
struct Node {
|
struct Device {
|
||||||
opaque ID<32>;
|
opaque ID<32>;
|
||||||
unsigned int Flags;
|
unsigned int Flags;
|
||||||
unsigned hyper MaxLocalVersion;
|
unsigned hyper MaxLocalVersion;
|
||||||
@ -736,23 +736,23 @@ struct Node {
|
|||||||
|
|
||||||
*/
|
*/
|
||||||
|
|
||||||
func (o Node) EncodeXDR(w io.Writer) (int, error) {
|
func (o Device) EncodeXDR(w io.Writer) (int, error) {
|
||||||
var xw = xdr.NewWriter(w)
|
var xw = xdr.NewWriter(w)
|
||||||
return o.encodeXDR(xw)
|
return o.encodeXDR(xw)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (o Node) MarshalXDR() []byte {
|
func (o Device) MarshalXDR() []byte {
|
||||||
return o.AppendXDR(make([]byte, 0, 128))
|
return o.AppendXDR(make([]byte, 0, 128))
|
||||||
}
|
}
|
||||||
|
|
||||||
func (o Node) AppendXDR(bs []byte) []byte {
|
func (o Device) AppendXDR(bs []byte) []byte {
|
||||||
var aw = xdr.AppendWriter(bs)
|
var aw = xdr.AppendWriter(bs)
|
||||||
var xw = xdr.NewWriter(&aw)
|
var xw = xdr.NewWriter(&aw)
|
||||||
o.encodeXDR(xw)
|
o.encodeXDR(xw)
|
||||||
return []byte(aw)
|
return []byte(aw)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (o Node) encodeXDR(xw *xdr.Writer) (int, error) {
|
func (o Device) encodeXDR(xw *xdr.Writer) (int, error) {
|
||||||
if len(o.ID) > 32 {
|
if len(o.ID) > 32 {
|
||||||
return xw.Tot(), xdr.ErrElementSizeExceeded
|
return xw.Tot(), xdr.ErrElementSizeExceeded
|
||||||
}
|
}
|
||||||
@ -762,18 +762,18 @@ func (o Node) encodeXDR(xw *xdr.Writer) (int, error) {
|
|||||||
return xw.Tot(), xw.Error()
|
return xw.Tot(), xw.Error()
|
||||||
}
|
}
|
||||||
|
|
||||||
func (o *Node) DecodeXDR(r io.Reader) error {
|
func (o *Device) DecodeXDR(r io.Reader) error {
|
||||||
xr := xdr.NewReader(r)
|
xr := xdr.NewReader(r)
|
||||||
return o.decodeXDR(xr)
|
return o.decodeXDR(xr)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (o *Node) UnmarshalXDR(bs []byte) error {
|
func (o *Device) UnmarshalXDR(bs []byte) error {
|
||||||
var br = bytes.NewReader(bs)
|
var br = bytes.NewReader(bs)
|
||||||
var xr = xdr.NewReader(br)
|
var xr = xdr.NewReader(br)
|
||||||
return o.decodeXDR(xr)
|
return o.decodeXDR(xr)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (o *Node) decodeXDR(xr *xdr.Reader) error {
|
func (o *Device) decodeXDR(xr *xdr.Reader) error {
|
||||||
o.ID = xr.ReadBytesMax(32)
|
o.ID = xr.ReadBytesMax(32)
|
||||||
o.Flags = xr.ReadUint32()
|
o.Flags = xr.ReadUint32()
|
||||||
o.MaxLocalVersion = xr.ReadUint64()
|
o.MaxLocalVersion = xr.ReadUint64()
|
||||||
|
@ -14,29 +14,29 @@ type nativeModel struct {
|
|||||||
next Model
|
next Model
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m nativeModel) Index(nodeID NodeID, repo string, files []FileInfo) {
|
func (m nativeModel) Index(deviceID DeviceID, folder string, files []FileInfo) {
|
||||||
for i := range files {
|
for i := range files {
|
||||||
files[i].Name = norm.NFD.String(files[i].Name)
|
files[i].Name = norm.NFD.String(files[i].Name)
|
||||||
}
|
}
|
||||||
m.next.Index(nodeID, repo, files)
|
m.next.Index(deviceID, folder, files)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m nativeModel) IndexUpdate(nodeID NodeID, repo string, files []FileInfo) {
|
func (m nativeModel) IndexUpdate(deviceID DeviceID, folder string, files []FileInfo) {
|
||||||
for i := range files {
|
for i := range files {
|
||||||
files[i].Name = norm.NFD.String(files[i].Name)
|
files[i].Name = norm.NFD.String(files[i].Name)
|
||||||
}
|
}
|
||||||
m.next.IndexUpdate(nodeID, repo, files)
|
m.next.IndexUpdate(deviceID, folder, files)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m nativeModel) Request(nodeID NodeID, repo string, name string, offset int64, size int) ([]byte, error) {
|
func (m nativeModel) Request(deviceID DeviceID, folder string, name string, offset int64, size int) ([]byte, error) {
|
||||||
name = norm.NFD.String(name)
|
name = norm.NFD.String(name)
|
||||||
return m.next.Request(nodeID, repo, name, offset, size)
|
return m.next.Request(deviceID, folder, name, offset, size)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m nativeModel) ClusterConfig(nodeID NodeID, config ClusterConfigMessage) {
|
func (m nativeModel) ClusterConfig(deviceID DeviceID, config ClusterConfigMessage) {
|
||||||
m.next.ClusterConfig(nodeID, config)
|
m.next.ClusterConfig(deviceID, config)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m nativeModel) Close(nodeID NodeID, err error) {
|
func (m nativeModel) Close(deviceID DeviceID, err error) {
|
||||||
m.next.Close(nodeID, err)
|
m.next.Close(deviceID, err)
|
||||||
}
|
}
|
||||||
|
@ -12,22 +12,22 @@ type nativeModel struct {
|
|||||||
next Model
|
next Model
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m nativeModel) Index(nodeID NodeID, repo string, files []FileInfo) {
|
func (m nativeModel) Index(deviceID DeviceID, folder string, files []FileInfo) {
|
||||||
m.next.Index(nodeID, repo, files)
|
m.next.Index(deviceID, folder, files)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m nativeModel) IndexUpdate(nodeID NodeID, repo string, files []FileInfo) {
|
func (m nativeModel) IndexUpdate(deviceID DeviceID, folder string, files []FileInfo) {
|
||||||
m.next.IndexUpdate(nodeID, repo, files)
|
m.next.IndexUpdate(deviceID, folder, files)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m nativeModel) Request(nodeID NodeID, repo string, name string, offset int64, size int) ([]byte, error) {
|
func (m nativeModel) Request(deviceID DeviceID, folder string, name string, offset int64, size int) ([]byte, error) {
|
||||||
return m.next.Request(nodeID, repo, name, offset, size)
|
return m.next.Request(deviceID, folder, name, offset, size)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m nativeModel) ClusterConfig(nodeID NodeID, config ClusterConfigMessage) {
|
func (m nativeModel) ClusterConfig(deviceID DeviceID, config ClusterConfigMessage) {
|
||||||
m.next.ClusterConfig(nodeID, config)
|
m.next.ClusterConfig(deviceID, config)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m nativeModel) Close(nodeID NodeID, err error) {
|
func (m nativeModel) Close(deviceID DeviceID, err error) {
|
||||||
m.next.Close(nodeID, err)
|
m.next.Close(deviceID, err)
|
||||||
}
|
}
|
||||||
|
@ -26,7 +26,7 @@ type nativeModel struct {
|
|||||||
next Model
|
next Model
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m nativeModel) Index(nodeID NodeID, repo string, files []FileInfo) {
|
func (m nativeModel) Index(deviceID DeviceID, folder string, files []FileInfo) {
|
||||||
for i, f := range files {
|
for i, f := range files {
|
||||||
if strings.ContainsAny(f.Name, disallowedCharacters) {
|
if strings.ContainsAny(f.Name, disallowedCharacters) {
|
||||||
if f.IsDeleted() {
|
if f.IsDeleted() {
|
||||||
@ -39,10 +39,10 @@ func (m nativeModel) Index(nodeID NodeID, repo string, files []FileInfo) {
|
|||||||
}
|
}
|
||||||
files[i].Name = filepath.FromSlash(f.Name)
|
files[i].Name = filepath.FromSlash(f.Name)
|
||||||
}
|
}
|
||||||
m.next.Index(nodeID, repo, files)
|
m.next.Index(deviceID, folder, files)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m nativeModel) IndexUpdate(nodeID NodeID, repo string, files []FileInfo) {
|
func (m nativeModel) IndexUpdate(deviceID DeviceID, folder string, files []FileInfo) {
|
||||||
for i, f := range files {
|
for i, f := range files {
|
||||||
if strings.ContainsAny(f.Name, disallowedCharacters) {
|
if strings.ContainsAny(f.Name, disallowedCharacters) {
|
||||||
if f.IsDeleted() {
|
if f.IsDeleted() {
|
||||||
@ -55,18 +55,18 @@ func (m nativeModel) IndexUpdate(nodeID NodeID, repo string, files []FileInfo) {
|
|||||||
}
|
}
|
||||||
files[i].Name = filepath.FromSlash(files[i].Name)
|
files[i].Name = filepath.FromSlash(files[i].Name)
|
||||||
}
|
}
|
||||||
m.next.IndexUpdate(nodeID, repo, files)
|
m.next.IndexUpdate(deviceID, folder, files)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m nativeModel) Request(nodeID NodeID, repo string, name string, offset int64, size int) ([]byte, error) {
|
func (m nativeModel) Request(deviceID DeviceID, folder string, name string, offset int64, size int) ([]byte, error) {
|
||||||
name = filepath.FromSlash(name)
|
name = filepath.FromSlash(name)
|
||||||
return m.next.Request(nodeID, repo, name, offset, size)
|
return m.next.Request(deviceID, folder, name, offset, size)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m nativeModel) ClusterConfig(nodeID NodeID, config ClusterConfigMessage) {
|
func (m nativeModel) ClusterConfig(deviceID DeviceID, config ClusterConfigMessage) {
|
||||||
m.next.ClusterConfig(nodeID, config)
|
m.next.ClusterConfig(deviceID, config)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m nativeModel) Close(nodeID NodeID, err error) {
|
func (m nativeModel) Close(deviceID DeviceID, err error) {
|
||||||
m.next.Close(nodeID, err)
|
m.next.Close(deviceID, err)
|
||||||
}
|
}
|
||||||
|
@@ -57,30 +57,30 @@ var (
 )

 type Model interface {
-	// An index was received from the peer node
-	Index(nodeID NodeID, repo string, files []FileInfo)
-	// An index update was received from the peer node
-	IndexUpdate(nodeID NodeID, repo string, files []FileInfo)
-	// A request was made by the peer node
-	Request(nodeID NodeID, repo string, name string, offset int64, size int) ([]byte, error)
+	// An index was received from the peer device
+	Index(deviceID DeviceID, folder string, files []FileInfo)
+	// An index update was received from the peer device
+	IndexUpdate(deviceID DeviceID, folder string, files []FileInfo)
+	// A request was made by the peer device
+	Request(deviceID DeviceID, folder string, name string, offset int64, size int) ([]byte, error)
 	// A cluster configuration message was received
-	ClusterConfig(nodeID NodeID, config ClusterConfigMessage)
-	// The peer node closed the connection
-	Close(nodeID NodeID, err error)
+	ClusterConfig(deviceID DeviceID, config ClusterConfigMessage)
+	// The peer device closed the connection
+	Close(deviceID DeviceID, err error)
 }

 type Connection interface {
-	ID() NodeID
+	ID() DeviceID
 	Name() string
-	Index(repo string, files []FileInfo) error
-	IndexUpdate(repo string, files []FileInfo) error
-	Request(repo string, name string, offset int64, size int) ([]byte, error)
+	Index(folder string, files []FileInfo) error
+	IndexUpdate(folder string, files []FileInfo) error
+	Request(folder string, name string, offset int64, size int) ([]byte, error)
 	ClusterConfig(config ClusterConfigMessage)
 	Statistics() Statistics
 }

 type rawConnection struct {
-	id       NodeID
+	id       DeviceID
 	name     string
 	receiver Model
 	state    int
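The Model interface above is the callback surface that a connection drives with data received from the remote device, keyed by its DeviceID. A minimal implementation, similar in spirit to the TestModel used by the protocol tests, might look like this sketch inside package protocol; it is an assumption for illustration, not code from this commit.

// nopModel (sketch): the smallest thing that satisfies Model. A real model
// would store the received index and serve block data from disk.
type nopModel struct{}

func (nopModel) Index(deviceID DeviceID, folder string, files []FileInfo)       {}
func (nopModel) IndexUpdate(deviceID DeviceID, folder string, files []FileInfo) {}
func (nopModel) Request(deviceID DeviceID, folder string, name string, offset int64, size int) ([]byte, error) {
	return make([]byte, size), nil // placeholder data instead of a real block read
}
func (nopModel) ClusterConfig(deviceID DeviceID, config ClusterConfigMessage) {}
func (nopModel) Close(deviceID DeviceID, err error)                           {}

Passing such a receiver to NewConnection (whose signature appears further down in this diff) would then have incoming Index, IndexUpdate, Request, ClusterConfig and Close events dispatched into it.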
@ -123,7 +123,7 @@ const (
|
|||||||
pingIdleTime = 60 * time.Second
|
pingIdleTime = 60 * time.Second
|
||||||
)
|
)
|
||||||
|
|
||||||
func NewConnection(nodeID NodeID, reader io.Reader, writer io.Writer, receiver Model, name string, compress bool) Connection {
|
func NewConnection(deviceID DeviceID, reader io.Reader, writer io.Writer, receiver Model, name string, compress bool) Connection {
|
||||||
cr := &countingReader{Reader: reader}
|
cr := &countingReader{Reader: reader}
|
||||||
cw := &countingWriter{Writer: writer}
|
cw := &countingWriter{Writer: writer}
|
||||||
|
|
||||||
@ -132,7 +132,7 @@ func NewConnection(nodeID NodeID, reader io.Reader, writer io.Writer, receiver M
|
|||||||
compThres = 128 // compress messages that are 128 bytes long or larger
|
compThres = 128 // compress messages that are 128 bytes long or larger
|
||||||
}
|
}
|
||||||
c := rawConnection{
|
c := rawConnection{
|
||||||
id: nodeID,
|
id: deviceID,
|
||||||
name: name,
|
name: name,
|
||||||
receiver: nativeModel{receiver},
|
receiver: nativeModel{receiver},
|
||||||
state: stateInitial,
|
state: stateInitial,
|
||||||
@ -152,7 +152,7 @@ func NewConnection(nodeID NodeID, reader io.Reader, writer io.Writer, receiver M
|
|||||||
return wireFormatConnection{&c}
|
return wireFormatConnection{&c}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *rawConnection) ID() NodeID {
|
func (c *rawConnection) ID() DeviceID {
|
||||||
return c.id
|
return c.id
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -160,34 +160,34 @@ func (c *rawConnection) Name() string {
|
|||||||
return c.name
|
return c.name
|
||||||
}
|
}
|
||||||
|
|
||||||
// Index writes the list of file information to the connected peer node
|
// Index writes the list of file information to the connected peer device
|
||||||
func (c *rawConnection) Index(repo string, idx []FileInfo) error {
|
func (c *rawConnection) Index(folder string, idx []FileInfo) error {
|
||||||
select {
|
select {
|
||||||
case <-c.closed:
|
case <-c.closed:
|
||||||
return ErrClosed
|
return ErrClosed
|
||||||
default:
|
default:
|
||||||
}
|
}
|
||||||
c.idxMut.Lock()
|
c.idxMut.Lock()
|
||||||
c.send(-1, messageTypeIndex, IndexMessage{repo, idx})
|
c.send(-1, messageTypeIndex, IndexMessage{folder, idx})
|
||||||
c.idxMut.Unlock()
|
c.idxMut.Unlock()
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// IndexUpdate writes the list of file information to the connected peer node as an update
|
// IndexUpdate writes the list of file information to the connected peer device as an update
|
||||||
func (c *rawConnection) IndexUpdate(repo string, idx []FileInfo) error {
|
func (c *rawConnection) IndexUpdate(folder string, idx []FileInfo) error {
|
||||||
select {
|
select {
|
||||||
case <-c.closed:
|
case <-c.closed:
|
||||||
return ErrClosed
|
return ErrClosed
|
||||||
default:
|
default:
|
||||||
}
|
}
|
||||||
c.idxMut.Lock()
|
c.idxMut.Lock()
|
||||||
c.send(-1, messageTypeIndexUpdate, IndexMessage{repo, idx})
|
c.send(-1, messageTypeIndexUpdate, IndexMessage{folder, idx})
|
||||||
c.idxMut.Unlock()
|
c.idxMut.Unlock()
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Request returns the bytes for the specified block after fetching them from the connected peer.
|
// Request returns the bytes for the specified block after fetching them from the connected peer.
|
||||||
func (c *rawConnection) Request(repo string, name string, offset int64, size int) ([]byte, error) {
|
func (c *rawConnection) Request(folder string, name string, offset int64, size int) ([]byte, error) {
|
||||||
var id int
|
var id int
|
||||||
select {
|
select {
|
||||||
case id = <-c.nextID:
|
case id = <-c.nextID:
|
||||||
@ -203,7 +203,7 @@ func (c *rawConnection) Request(repo string, name string, offset int64, size int
|
|||||||
c.awaiting[id] = rc
|
c.awaiting[id] = rc
|
||||||
c.awaitingMut.Unlock()
|
c.awaitingMut.Unlock()
|
||||||
|
|
||||||
ok := c.send(id, messageTypeRequest, RequestMessage{repo, name, uint64(offset), uint32(size)})
|
ok := c.send(id, messageTypeRequest, RequestMessage{folder, name, uint64(offset), uint32(size)})
|
||||||
if !ok {
|
if !ok {
|
||||||
return nil, ErrClosed
|
return nil, ErrClosed
|
||||||
}
|
}
|
||||||
@ -399,20 +399,20 @@ func (c *rawConnection) readMessage() (hdr header, msg encodable, err error) {
|
|||||||
|
|
||||||
func (c *rawConnection) handleIndex(im IndexMessage) {
|
func (c *rawConnection) handleIndex(im IndexMessage) {
|
||||||
if debug {
|
if debug {
|
||||||
l.Debugf("Index(%v, %v, %d files)", c.id, im.Repository, len(im.Files))
|
l.Debugf("Index(%v, %v, %d files)", c.id, im.Folder, len(im.Files))
|
||||||
}
|
}
|
||||||
c.receiver.Index(c.id, im.Repository, im.Files)
|
c.receiver.Index(c.id, im.Folder, im.Files)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *rawConnection) handleIndexUpdate(im IndexMessage) {
|
func (c *rawConnection) handleIndexUpdate(im IndexMessage) {
|
||||||
if debug {
|
if debug {
|
||||||
l.Debugf("queueing IndexUpdate(%v, %v, %d files)", c.id, im.Repository, len(im.Files))
|
l.Debugf("queueing IndexUpdate(%v, %v, %d files)", c.id, im.Folder, len(im.Files))
|
||||||
}
|
}
|
||||||
c.receiver.IndexUpdate(c.id, im.Repository, im.Files)
|
c.receiver.IndexUpdate(c.id, im.Folder, im.Files)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *rawConnection) handleRequest(msgID int, req RequestMessage) {
|
func (c *rawConnection) handleRequest(msgID int, req RequestMessage) {
|
||||||
data, _ := c.receiver.Request(c.id, req.Repository, req.Name, int64(req.Offset), int(req.Size))
|
data, _ := c.receiver.Request(c.id, req.Folder, req.Name, int64(req.Offset), int(req.Size))
|
||||||
|
|
||||||
c.send(msgID, messageTypeResponse, ResponseMessage{data})
|
c.send(msgID, messageTypeResponse, ResponseMessage{data})
|
||||||
}
|
}
|
||||||
|
@ -21,8 +21,8 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
c0ID = NewNodeID([]byte{1})
|
c0ID = NewDeviceID([]byte{1})
|
||||||
c1ID = NewNodeID([]byte{2})
|
c1ID = NewDeviceID([]byte{2})
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestHeaderFunctions(t *testing.T) {
|
func TestHeaderFunctions(t *testing.T) {
|
||||||
@ -140,8 +140,8 @@ func TestPingErr(t *testing.T) {
|
|||||||
// if string(d) != "response data" {
|
// if string(d) != "response data" {
|
||||||
// t.Fatalf("Incorrect response data %q", string(d))
|
// t.Fatalf("Incorrect response data %q", string(d))
|
||||||
// }
|
// }
|
||||||
// if m0.repo != "default" {
|
// if m0.folder != "default" {
|
||||||
// t.Fatalf("Incorrect repo %q", m0.repo)
|
// t.Fatalf("Incorrect folder %q", m0.folder)
|
||||||
// }
|
// }
|
||||||
// if m0.name != "tn" {
|
// if m0.name != "tn" {
|
||||||
// t.Fatalf("Incorrect name %q", m0.name)
|
// t.Fatalf("Incorrect name %q", m0.name)
|
||||||
@ -240,13 +240,13 @@ func TestClose(t *testing.T) {
|
|||||||
|
|
||||||
func TestElementSizeExceededNested(t *testing.T) {
|
func TestElementSizeExceededNested(t *testing.T) {
|
||||||
m := ClusterConfigMessage{
|
m := ClusterConfigMessage{
|
||||||
Repositories: []Repository{
|
Folders: []Folder{
|
||||||
{ID: "longstringlongstringlongstringinglongstringlongstringlonlongstringlongstringlon"},
|
{ID: "longstringlongstringlongstringinglongstringlongstringlonlongstringlongstringlon"},
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
_, err := m.EncodeXDR(ioutil.Discard)
|
_, err := m.EncodeXDR(ioutil.Discard)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
t.Errorf("ID length %d > max 64, but no error", len(m.Repositories[0].ID))
|
t.Errorf("ID length %d > max 64, but no error", len(m.Folders[0].ID))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -14,7 +14,7 @@ type wireFormatConnection struct {
|
|||||||
next Connection
|
next Connection
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c wireFormatConnection) ID() NodeID {
|
func (c wireFormatConnection) ID() DeviceID {
|
||||||
return c.next.ID()
|
return c.next.ID()
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -22,7 +22,7 @@ func (c wireFormatConnection) Name() string {
|
|||||||
return c.next.Name()
|
return c.next.Name()
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c wireFormatConnection) Index(repo string, fs []FileInfo) error {
|
func (c wireFormatConnection) Index(folder string, fs []FileInfo) error {
|
||||||
var myFs = make([]FileInfo, len(fs))
|
var myFs = make([]FileInfo, len(fs))
|
||||||
copy(myFs, fs)
|
copy(myFs, fs)
|
||||||
|
|
||||||
@ -30,10 +30,10 @@ func (c wireFormatConnection) Index(repo string, fs []FileInfo) error {
|
|||||||
myFs[i].Name = norm.NFC.String(filepath.ToSlash(myFs[i].Name))
|
myFs[i].Name = norm.NFC.String(filepath.ToSlash(myFs[i].Name))
|
||||||
}
|
}
|
||||||
|
|
||||||
return c.next.Index(repo, myFs)
|
return c.next.Index(folder, myFs)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c wireFormatConnection) IndexUpdate(repo string, fs []FileInfo) error {
|
func (c wireFormatConnection) IndexUpdate(folder string, fs []FileInfo) error {
|
||||||
var myFs = make([]FileInfo, len(fs))
|
var myFs = make([]FileInfo, len(fs))
|
||||||
copy(myFs, fs)
|
copy(myFs, fs)
|
||||||
|
|
||||||
@ -41,12 +41,12 @@ func (c wireFormatConnection) IndexUpdate(repo string, fs []FileInfo) error {
|
|||||||
myFs[i].Name = norm.NFC.String(filepath.ToSlash(myFs[i].Name))
|
myFs[i].Name = norm.NFC.String(filepath.ToSlash(myFs[i].Name))
|
||||||
}
|
}
|
||||||
|
|
||||||
return c.next.IndexUpdate(repo, myFs)
|
return c.next.IndexUpdate(folder, myFs)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c wireFormatConnection) Request(repo, name string, offset int64, size int) ([]byte, error) {
|
func (c wireFormatConnection) Request(folder, name string, offset int64, size int) ([]byte, error) {
|
||||||
name = norm.NFC.String(filepath.ToSlash(name))
|
name = norm.NFC.String(filepath.ToSlash(name))
|
||||||
return c.next.Request(repo, name, offset, size)
|
return c.next.Request(folder, name, offset, size)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c wireFormatConnection) ClusterConfig(config ClusterConfigMessage) {
|
func (c wireFormatConnection) ClusterConfig(config ClusterConfigMessage) {
|
||||||
|
@ -48,7 +48,7 @@ type CurrentFiler interface {
|
|||||||
CurrentFile(name string) protocol.FileInfo
|
CurrentFile(name string) protocol.FileInfo
|
||||||
}
|
}
|
||||||
|
|
||||||
// Walk returns the list of files found in the local repository by scanning the
|
// Walk returns the list of files found in the local folder by scanning the
|
||||||
// file system. Files are blockwise hashed.
|
// file system. Files are blockwise hashed.
|
||||||
func (w *Walker) Walk() (chan protocol.FileInfo, error) {
|
func (w *Walker) Walk() (chan protocol.FileInfo, error) {
|
||||||
if debug {
|
if debug {
|
||||||
|
102
internal/stats/device.go
Executable file
@ -0,0 +1,102 @@
|
|||||||
|
// Copyright (C) 2014 Jakob Borg and Contributors (see the CONTRIBUTORS file).
|
||||||
|
// All rights reserved. Use of this source code is governed by an MIT-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package stats
|
||||||
|
|
||||||
|
import (
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/syncthing/syncthing/internal/protocol"
|
||||||
|
"github.com/syndtr/goleveldb/leveldb"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
deviceStatisticTypeLastSeen = iota
|
||||||
|
)
|
||||||
|
|
||||||
|
var deviceStatisticsTypes = []byte{
|
||||||
|
deviceStatisticTypeLastSeen,
|
||||||
|
}
|
||||||
|
|
||||||
|
type DeviceStatistics struct {
|
||||||
|
LastSeen time.Time
|
||||||
|
}
|
||||||
|
|
||||||
|
type DeviceStatisticsReference struct {
|
||||||
|
db *leveldb.DB
|
||||||
|
device protocol.DeviceID
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewDeviceStatisticsReference(db *leveldb.DB, device protocol.DeviceID) *DeviceStatisticsReference {
|
||||||
|
return &DeviceStatisticsReference{
|
||||||
|
db: db,
|
||||||
|
device: device,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *DeviceStatisticsReference) key(stat byte) []byte {
|
||||||
|
k := make([]byte, 1+1+32)
|
||||||
|
k[0] = keyTypeDeviceStatistic
|
||||||
|
k[1] = stat
|
||||||
|
copy(k[1+1:], s.device[:])
|
||||||
|
return k
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *DeviceStatisticsReference) GetLastSeen() time.Time {
|
||||||
|
value, err := s.db.Get(s.key(deviceStatisticTypeLastSeen), nil)
|
||||||
|
if err != nil {
|
||||||
|
if err != leveldb.ErrNotFound {
|
||||||
|
l.Warnln("DeviceStatisticsReference: Failed loading last seen value for", s.device, ":", err)
|
||||||
|
}
|
||||||
|
return time.Unix(0, 0)
|
||||||
|
}
|
||||||
|
|
||||||
|
rtime := time.Time{}
|
||||||
|
err = rtime.UnmarshalBinary(value)
|
||||||
|
if err != nil {
|
||||||
|
l.Warnln("DeviceStatisticsReference: Failed parsing last seen value for", s.device, ":", err)
|
||||||
|
return time.Unix(0, 0)
|
||||||
|
}
|
||||||
|
if debug {
|
||||||
|
l.Debugln("stats.DeviceStatisticsReference.GetLastSeen:", s.device, rtime)
|
||||||
|
}
|
||||||
|
return rtime
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *DeviceStatisticsReference) WasSeen() {
|
||||||
|
if debug {
|
||||||
|
l.Debugln("stats.DeviceStatisticsReference.WasSeen:", s.device)
|
||||||
|
}
|
||||||
|
value, err := time.Now().MarshalBinary()
|
||||||
|
if err != nil {
|
||||||
|
l.Warnln("DeviceStatisticsReference: Failed serializing last seen value for", s.device, ":", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
err = s.db.Put(s.key(deviceStatisticTypeLastSeen), value, nil)
|
||||||
|
if err != nil {
|
||||||
|
l.Warnln("Failed serializing last seen value for", s.device, ":", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Never called, maybe because it's worthwhile to keep the data
|
||||||
|
// or maybe because we have no easy way of knowing that a device has been removed.
|
||||||
|
func (s *DeviceStatisticsReference) Delete() error {
|
||||||
|
for _, stype := range deviceStatisticsTypes {
|
||||||
|
err := s.db.Delete(s.key(stype), nil)
|
||||||
|
if debug && err == nil {
|
||||||
|
l.Debugln("stats.DeviceStatisticsReference.Delete:", s.device, stype)
|
||||||
|
}
|
||||||
|
if err != nil && err != leveldb.ErrNotFound {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *DeviceStatisticsReference) GetStatistics() DeviceStatistics {
|
||||||
|
return DeviceStatistics{
|
||||||
|
LastSeen: s.GetLastSeen(),
|
||||||
|
}
|
||||||
|
}
|
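A short usage sketch for the new statistics reference (illustrative only; the database path is made up, the device ID is taken from the test configs later in this document, and imports of log, protocol, stats and leveldb are assumed):

    // Sketch: record that a device was just seen and read the value back.
    func recordSighting(dbPath, deviceID string) {
        db, err := leveldb.OpenFile(dbPath, nil)
        if err != nil {
            log.Fatal(err)
        }
        defer db.Close()

        id, err := protocol.DeviceIDFromString(deviceID)
        if err != nil {
            log.Fatal(err)
        }

        ref := stats.NewDeviceStatisticsReference(db, id)
        ref.WasSeen()
        log.Println("last seen:", ref.GetStatistics().LastSeen)
    }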
@ -6,5 +6,5 @@ package stats
|
|||||||
|
|
||||||
// Same key space as files/leveldb.go keyType* constants
|
// Same key space as files/leveldb.go keyType* constants
|
||||||
const (
|
const (
|
||||||
keyTypeNodeStatistic = iota + 30
|
keyTypeDeviceStatistic = iota + 30
|
||||||
)
|
)
|
||||||
|
@ -1,102 +0,0 @@
|
|||||||
// Copyright (C) 2014 Jakob Borg and Contributors (see the CONTRIBUTORS file).
|
|
||||||
// All rights reserved. Use of this source code is governed by an MIT-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package stats
|
|
||||||
|
|
||||||
import (
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/syncthing/syncthing/internal/protocol"
|
|
||||||
"github.com/syndtr/goleveldb/leveldb"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
nodeStatisticTypeLastSeen = iota
|
|
||||||
)
|
|
||||||
|
|
||||||
var nodeStatisticsTypes = []byte{
|
|
||||||
nodeStatisticTypeLastSeen,
|
|
||||||
}
|
|
||||||
|
|
||||||
type NodeStatistics struct {
|
|
||||||
LastSeen time.Time
|
|
||||||
}
|
|
||||||
|
|
||||||
type NodeStatisticsReference struct {
|
|
||||||
db *leveldb.DB
|
|
||||||
node protocol.NodeID
|
|
||||||
}
|
|
||||||
|
|
||||||
func NewNodeStatisticsReference(db *leveldb.DB, node protocol.NodeID) *NodeStatisticsReference {
|
|
||||||
return &NodeStatisticsReference{
|
|
||||||
db: db,
|
|
||||||
node: node,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *NodeStatisticsReference) key(stat byte) []byte {
|
|
||||||
k := make([]byte, 1+1+32)
|
|
||||||
k[0] = keyTypeNodeStatistic
|
|
||||||
k[1] = stat
|
|
||||||
copy(k[1+1:], s.node[:])
|
|
||||||
return k
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *NodeStatisticsReference) GetLastSeen() time.Time {
|
|
||||||
value, err := s.db.Get(s.key(nodeStatisticTypeLastSeen), nil)
|
|
||||||
if err != nil {
|
|
||||||
if err != leveldb.ErrNotFound {
|
|
||||||
l.Warnln("NodeStatisticsReference: Failed loading last seen value for", s.node, ":", err)
|
|
||||||
}
|
|
||||||
return time.Unix(0, 0)
|
|
||||||
}
|
|
||||||
|
|
||||||
rtime := time.Time{}
|
|
||||||
err = rtime.UnmarshalBinary(value)
|
|
||||||
if err != nil {
|
|
||||||
l.Warnln("NodeStatisticsReference: Failed parsing last seen value for", s.node, ":", err)
|
|
||||||
return time.Unix(0, 0)
|
|
||||||
}
|
|
||||||
if debug {
|
|
||||||
l.Debugln("stats.NodeStatisticsReference.GetLastSeen:", s.node, rtime)
|
|
||||||
}
|
|
||||||
return rtime
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *NodeStatisticsReference) WasSeen() {
|
|
||||||
if debug {
|
|
||||||
l.Debugln("stats.NodeStatisticsReference.WasSeen:", s.node)
|
|
||||||
}
|
|
||||||
value, err := time.Now().MarshalBinary()
|
|
||||||
if err != nil {
|
|
||||||
l.Warnln("NodeStatisticsReference: Failed serializing last seen value for", s.node, ":", err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
err = s.db.Put(s.key(nodeStatisticTypeLastSeen), value, nil)
|
|
||||||
if err != nil {
|
|
||||||
l.Warnln("Failed serializing last seen value for", s.node, ":", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Never called, maybe because it's worthwhile to keep the data
|
|
||||||
// or maybe because we have no easy way of knowing that a node has been removed.
|
|
||||||
func (s *NodeStatisticsReference) Delete() error {
|
|
||||||
for _, stype := range nodeStatisticsTypes {
|
|
||||||
err := s.db.Delete(s.key(stype), nil)
|
|
||||||
if debug && err == nil {
|
|
||||||
l.Debugln("stats.NodeStatisticsReference.Delete:", s.node, stype)
|
|
||||||
}
|
|
||||||
if err != nil && err != leveldb.ErrNotFound {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *NodeStatisticsReference) GetStatistics() NodeStatistics {
|
|
||||||
return NodeStatistics{
|
|
||||||
LastSeen: s.GetLastSeen(),
|
|
||||||
}
|
|
||||||
}
|
|
@ -21,11 +21,11 @@ func init() {
|
|||||||
// The type holds our configuration
|
// The type holds our configuration
|
||||||
type Simple struct {
|
type Simple struct {
|
||||||
keep int
|
keep int
|
||||||
repoPath string
|
folderPath string
|
||||||
}
|
}
|
||||||
|
|
||||||
// The constructor function takes a map of parameters and creates the type.
|
// The constructor function takes a map of parameters and creates the type.
|
||||||
func NewSimple(repoID, repoPath string, params map[string]string) Versioner {
|
func NewSimple(folderID, folderPath string, params map[string]string) Versioner {
|
||||||
keep, err := strconv.Atoi(params["keep"])
|
keep, err := strconv.Atoi(params["keep"])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
keep = 5 // A reasonable default
|
keep = 5 // A reasonable default
|
||||||
@ -33,7 +33,7 @@ func NewSimple(repoID, repoPath string, params map[string]string) Versioner {
|
|||||||
|
|
||||||
s := Simple{
|
s := Simple{
|
||||||
keep: keep,
|
keep: keep,
|
||||||
repoPath: repoPath,
|
folderPath: folderPath,
|
||||||
}
|
}
|
||||||
|
|
||||||
if debug {
|
if debug {
|
||||||
@ -57,7 +57,7 @@ func (v Simple) Archive(filePath string) error {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
versionsDir := filepath.Join(v.repoPath, ".stversions")
|
versionsDir := filepath.Join(v.folderPath, ".stversions")
|
||||||
_, err = os.Stat(versionsDir)
|
_, err = os.Stat(versionsDir)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if os.IsNotExist(err) {
|
if os.IsNotExist(err) {
|
||||||
@ -76,12 +76,12 @@ func (v Simple) Archive(filePath string) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
file := filepath.Base(filePath)
|
file := filepath.Base(filePath)
|
||||||
inRepoPath, err := filepath.Rel(v.repoPath, filepath.Dir(filePath))
|
inFolderPath, err := filepath.Rel(v.folderPath, filepath.Dir(filePath))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
dir := filepath.Join(versionsDir, inRepoPath)
|
dir := filepath.Join(versionsDir, inFolderPath)
|
||||||
err = os.MkdirAll(dir, 0755)
|
err = os.MkdirAll(dir, 0755)
|
||||||
if err != nil && !os.IsExist(err) {
|
if err != nil && !os.IsExist(err) {
|
||||||
return err
|
return err
|
||||||
|
@ -30,7 +30,7 @@ type Interval struct {
|
|||||||
type Staggered struct {
|
type Staggered struct {
|
||||||
versionsPath string
|
versionsPath string
|
||||||
cleanInterval int64
|
cleanInterval int64
|
||||||
repoPath string
|
folderPath string
|
||||||
interval [4]Interval
|
interval [4]Interval
|
||||||
mutex *sync.Mutex
|
mutex *sync.Mutex
|
||||||
}
|
}
|
||||||
@ -83,7 +83,7 @@ func (v Staggered) renameOld() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// The constructor function takes a map of parameters and creates the type.
|
// The constructor function takes a map of parameters and creates the type.
|
||||||
func NewStaggered(repoID, repoPath string, params map[string]string) Versioner {
|
func NewStaggered(folderID, folderPath string, params map[string]string) Versioner {
|
||||||
maxAge, err := strconv.ParseInt(params["maxAge"], 10, 0)
|
maxAge, err := strconv.ParseInt(params["maxAge"], 10, 0)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
maxAge = 31536000 // Default: ~1 year
|
maxAge = 31536000 // Default: ~1 year
|
||||||
@ -93,13 +93,13 @@ func NewStaggered(repoID, repoPath string, params map[string]string) Versioner {
|
|||||||
cleanInterval = 3600 // Default: clean once per hour
|
cleanInterval = 3600 // Default: clean once per hour
|
||||||
}
|
}
|
||||||
|
|
||||||
// Use custom path if set, otherwise .stversions in repoPath
|
// Use custom path if set, otherwise .stversions in folderPath
|
||||||
var versionsDir string
|
var versionsDir string
|
||||||
if params["versionsPath"] == "" {
|
if params["versionsPath"] == "" {
|
||||||
if debug {
|
if debug {
|
||||||
l.Debugln("using default dir .stversions")
|
l.Debugln("using default dir .stversions")
|
||||||
}
|
}
|
||||||
versionsDir = filepath.Join(repoPath, ".stversions")
|
versionsDir = filepath.Join(folderPath, ".stversions")
|
||||||
} else {
|
} else {
|
||||||
if debug {
|
if debug {
|
||||||
l.Debugln("using dir", params["versionsPath"])
|
l.Debugln("using dir", params["versionsPath"])
|
||||||
@ -111,7 +111,7 @@ func NewStaggered(repoID, repoPath string, params map[string]string) Versioner {
|
|||||||
s := Staggered{
|
s := Staggered{
|
||||||
versionsPath: versionsDir,
|
versionsPath: versionsDir,
|
||||||
cleanInterval: cleanInterval,
|
cleanInterval: cleanInterval,
|
||||||
repoPath: repoPath,
|
folderPath: folderPath,
|
||||||
interval: [4]Interval{
|
interval: [4]Interval{
|
||||||
Interval{30, 3600}, // first hour -> 30 sec between versions
|
Interval{30, 3600}, // first hour -> 30 sec between versions
|
||||||
Interval{3600, 86400}, // next day -> 1 h between versions
|
Interval{3600, 86400}, // next day -> 1 h between versions
|
||||||
@ -320,12 +320,12 @@ func (v Staggered) Archive(filePath string) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
file := filepath.Base(filePath)
|
file := filepath.Base(filePath)
|
||||||
inRepoPath, err := filepath.Rel(v.repoPath, filepath.Dir(filePath))
|
inFolderPath, err := filepath.Rel(v.folderPath, filepath.Dir(filePath))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
dir := filepath.Join(v.versionsPath, inRepoPath)
|
dir := filepath.Join(v.versionsPath, inFolderPath)
|
||||||
err = os.MkdirAll(dir, 0755)
|
err = os.MkdirAll(dir, 0755)
|
||||||
if err != nil && !os.IsExist(err) {
|
if err != nil && !os.IsExist(err) {
|
||||||
return err
|
return err
|
||||||
|
@ -10,4 +10,4 @@ type Versioner interface {
|
|||||||
Archive(filePath string) error
|
Archive(filePath string) error
|
||||||
}
|
}
|
||||||
|
|
||||||
var Factories = map[string]func(repoID string, repoDir string, params map[string]string) Versioner{}
|
var Factories = map[string]func(folderID string, folderDir string, params map[string]string) Versioner{}
|
||||||
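A sketch of how the factory map is meant to be used (the lookup code below is illustrative, not a quote of the caller; the "simple" key matches the versioning type="simple" entries in the test configs later in this document, and the folder path and params are made up, with an errors import assumed):

    // Sketch: look up a registered versioner and archive a file with it.
    func archiveWithSimple(path string) error {
        factory, ok := Factories["simple"]
        if !ok {
            return errors.New("no such versioner")
        }
        v := factory("default", "/home/user/Sync", map[string]string{"keep": "5"})
        return v.Archive(path)
    }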
|
@ -1,4 +1,4 @@
|
|||||||
Node Discovery Protocol v2
|
Device Discovery Protocol v2
|
||||||
==========================
|
==========================
|
||||||
|
|
||||||
Mode of Operation
|
Mode of Operation
|
||||||
@ -9,19 +9,19 @@ segment (broadcast domain) and "global discovery" performed over the
|
|||||||
Internet in general with the support of a well known server.
|
Internet in general with the support of a well known server.
|
||||||
|
|
||||||
Local discovery does not use Query packets. Instead Announcement packets
|
Local discovery does not use Query packets. Instead Announcement packets
|
||||||
are sent periodically and each participating node keeps a table of the
|
are sent periodically and each participating device keeps a table of the
|
||||||
announcements it has seen. On multihomed hosts the announcement packets
|
announcements it has seen. On multihomed hosts the announcement packets
|
||||||
should be sent on each interface on which syncthing will accept connections.
|
should be sent on each interface on which syncthing will accept connections.
|
||||||
|
|
||||||
It is recommended that local discovery Announcement packets are sent on
|
It is recommended that local discovery Announcement packets are sent on
|
||||||
a 30 to 60 second interval, possibly with forced transmissions when a
|
a 30 to 60 second interval, possibly with forced transmissions when a
|
||||||
previously unknown node is discovered.
|
previously unknown device is discovered.
|
||||||
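A sketch of that cadence in Go (only the 30 to 60 second interval comes from the text above; the connection, packet contents and helper name are placeholders):

    // Sketch: periodically broadcast a local discovery Announcement.
    func announceLoop(conn *net.UDPConn, packet []byte) {
        for {
            if _, err := conn.Write(packet); err != nil {
                log.Println("announce:", err)
            }
            // Wait between 30 and 60 seconds, per the recommendation above.
            time.Sleep(time.Duration(30+rand.Intn(31)) * time.Second)
        }
    }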
|
|
||||||
Global discovery is made possible by periodically updating a global server
|
Global discovery is made possible by periodically updating a global server
|
||||||
using Announcement packets identical to those transmitted for local
|
using Announcement packets identical to those transmitted for local
|
||||||
discovery. The node performing discovery will transmit a Query packet to
|
discovery. The device performing discovery will transmit a Query packet to
|
||||||
the global server and expect an Announcement packet in response. In case
|
the global server and expect an Announcement packet in response. In case
|
||||||
the global server has no knowledge of the queried node ID, there will be
|
the global server has no knowledge of the queried device ID, there will be
|
||||||
no response. A timeout is to be used to determine lookup failure.
|
no response. A timeout is to be used to determine lookup failure.
|
||||||
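Since an unknown device ID simply produces no reply, a lookup must bound its wait. A sketch, with the server address, timeout value and packet encoding all illustrative:

    // Sketch: query the global discovery server; silence means "not found".
    func lookup(server string, query []byte) ([]byte, error) {
        conn, err := net.Dial("udp", server)
        if err != nil {
            return nil, err
        }
        defer conn.Close()

        if _, err := conn.Write(query); err != nil {
            return nil, err
        }
        _ = conn.SetReadDeadline(time.Now().Add(5 * time.Second))
        buf := make([]byte, 1500)
        n, err := conn.Read(buf)
        if err != nil {
            return nil, err // includes the timeout case, i.e. lookup failure
        }
        return buf[:n], nil
    }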
|
|
||||||
There is no message to unregister from the global server; instead
|
There is no message to unregister from the global server; instead
|
||||||
@ -39,17 +39,17 @@ The Announcement packet has the following structure:
|
|||||||
| Magic (0x9D79BC39) |
|
| Magic (0x9D79BC39) |
|
||||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||||
/ /
|
/ /
|
||||||
\ Node Structure \
|
\ Device Structure \
|
||||||
/ /
|
/ /
|
||||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||||
| Number of Extra Nodes |
|
| Number of Extra Devices |
|
||||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||||
/ /
|
/ /
|
||||||
\ Zero or more Node Structures \
|
\ Zero or more Device Structures \
|
||||||
/ /
|
/ /
|
||||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||||
|
|
||||||
Node Structure:
|
Device Structure:
|
||||||
|
|
||||||
0 1 2 3
|
0 1 2 3
|
||||||
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
|
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
|
||||||
@ -85,11 +85,11 @@ This is the XDR encoding of:
|
|||||||
|
|
||||||
struct Announcement {
|
struct Announcement {
|
||||||
unsigned int Magic;
|
unsigned int Magic;
|
||||||
Node This;
|
Device This;
|
||||||
Node Extra<>;
|
Device Extra<>;
|
||||||
}
|
}
|
||||||
|
|
||||||
struct Node {
|
struct Device {
|
||||||
string ID<>;
|
string ID<>;
|
||||||
Address Addresses<>;
|
Address Addresses<>;
|
||||||
}
|
}
|
||||||
@ -99,16 +99,16 @@ This is the XDR encoding of:
|
|||||||
unsigned short Port;
|
unsigned short Port;
|
||||||
}
|
}
|
||||||
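The same structures expressed as Go types, as a reading aid (the declarations are a sketch following the XDR above, not the reference implementation's actual types):

    // Sketch of the Announcement packet in Go terms.
    type Announcement struct {
        Magic uint32
        This  Device
        Extra []Device // zero or more devices known to the sender
    }

    type Device struct {
        ID        string
        Addresses []Address
    }

    type Address struct {
        IP   []byte // empty (use the packet's source address) or an explicit address
        Port uint16
    }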
|
|
||||||
The first Node structure contains information about the sending node.
|
The first Device structure contains information about the sending device.
|
||||||
The following zero or more Extra nodes contain information about other
|
The following zero or more Extra devices contain information about other
|
||||||
nodes known to the sending node.
|
devices known to the sending device.
|
||||||
|
|
||||||
In the Address structure, the IP field can be of three different kinds:
|
In the Address structure, the IP field can be of three different kinds:
|
||||||
|
|
||||||
- A zero length indicates that the IP address should be taken from the
|
- A zero length indicates that the IP address should be taken from the
|
||||||
source address of the announcement packet, be it IPv4 or IPv6. The
|
source address of the announcement packet, be it IPv4 or IPv6. The
|
||||||
source address must be a valid unicast address. This is only valid
|
source address must be a valid unicast address. This is only valid
|
||||||
in the first node structure, not in the list of extras.
|
in the first device structure, not in the list of extras.
|
||||||
|
|
||||||
- A four byte length indicates that the address is an IPv4 unicast
|
- A four byte length indicates that the address is an IPv4 unicast
|
||||||
address.
|
address.
|
||||||
@ -123,10 +123,10 @@ The Query packet has the following structure:
|
|||||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||||
| Magic Number (0x2CA856F5) |
|
| Magic Number (0x2CA856F5) |
|
||||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||||
| Length of Node ID |
|
| Length of Device ID |
|
||||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||||
/ /
|
/ /
|
||||||
\ Node ID (variable length) \
|
\ Device ID (variable length) \
|
||||||
/ /
|
/ /
|
||||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||||
|
|
||||||
@ -134,5 +134,5 @@ This is the XDR encoding of:
|
|||||||
|
|
||||||
struct Query {
|
struct Query {
|
||||||
unsigned int MagicNumber;
|
unsigned int MagicNumber;
|
||||||
string NodeID<>;
|
string DeviceID<>;
|
||||||
}
|
}
|
||||||
|
@ -4,14 +4,14 @@ Block Exchange Protocol v1
|
|||||||
Introduction and Definitions
|
Introduction and Definitions
|
||||||
----------------------------
|
----------------------------
|
||||||
|
|
||||||
BEP is used between two or more _nodes_ thus forming a _cluster_. Each
|
BEP is used between two or more _devices_ thus forming a _cluster_. Each
|
||||||
node has one or more _repositories_ of files described by the _local
|
device has one or more _folders_ of files described by the _local
|
||||||
model_, containing metadata and block hashes. The local model is sent to
|
model_, containing metadata and block hashes. The local model is sent to
|
||||||
the other nodes in the cluster. The union of all files in the local
|
the other devices in the cluster. The union of all files in the local
|
||||||
models, with files selected for highest change version, forms the
|
models, with files selected for highest change version, forms the
|
||||||
_global model_. Each node strives to get its repositories in sync with
|
_global model_. Each device strives to get its folders in sync with
|
||||||
the global model by requesting missing or outdated blocks from the other
|
the global model by requesting missing or outdated blocks from the other
|
||||||
nodes in the cluster.
|
devices in the cluster.
|
||||||
|
|
||||||
File data is described and transferred in units of _blocks_, each being
|
File data is described and transferred in units of _blocks_, each being
|
||||||
128 KiB (131072 bytes) in size.
|
128 KiB (131072 bytes) in size.
|
||||||
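The block count of a file therefore follows directly from the fixed block size; a small sketch of the arithmetic (the final block may be shorter, as noted later in the Fields discussion):

    const BlockSize = 128 << 10 // 131072 bytes

    // numBlocks returns how many blocks a file of the given size occupies.
    func numBlocks(fileSize int64) int64 {
        if fileSize == 0 {
            return 0
        }
        return (fileSize + BlockSize - 1) / BlockSize // e.g. 1 MiB -> 8 blocks
    }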
@ -50,7 +50,7 @@ connection. Possibilities include certificates signed by a common
|
|||||||
trusted CA, preshared certificates, preshared certificate fingerprints
|
trusted CA, preshared certificates, preshared certificate fingerprints
|
||||||
or certificate pinning combined with some out of band first
|
or certificate pinning combined with some out of band first
|
||||||
verification. The reference implementation uses preshared certificate
|
verification. The reference implementation uses preshared certificate
|
||||||
fingerprints (SHA-256) referred to as "Node IDs".
|
fingerprints (SHA-256) referred to as "Device IDs".
|
||||||
|
|
||||||
There is no required order or synchronization among BEP messages except
|
There is no required order or synchronization among BEP messages except
|
||||||
as noted per message type - any message type may be sent at any time and
|
as noted per message type - any message type may be sent at any time and
|
||||||
@ -158,10 +158,10 @@ Cluster Config messages MUST NOT be sent after the initial exchange.
|
|||||||
\ ClientVersion (variable length) \
|
\ ClientVersion (variable length) \
|
||||||
/ /
|
/ /
|
||||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||||
| Number of Repositories |
|
| Number of Folders |
|
||||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||||
/ /
|
/ /
|
||||||
\ Zero or more Repository Structures \
|
\ Zero or more Folder Structures \
|
||||||
/ /
|
/ /
|
||||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||||
| Number of Options |
|
| Number of Options |
|
||||||
@ -172,7 +172,7 @@ Cluster Config messages MUST NOT be sent after the initial exchange.
|
|||||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||||
|
|
||||||
|
|
||||||
Repository Structure:
|
Folder Structure:
|
||||||
|
|
||||||
0 1 2 3
|
0 1 2 3
|
||||||
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
|
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
|
||||||
@ -183,15 +183,15 @@ Cluster Config messages MUST NOT be sent after the initial exchange.
|
|||||||
\ ID (variable length) \
|
\ ID (variable length) \
|
||||||
/ /
|
/ /
|
||||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||||
| Number of Nodes |
|
| Number of Devices |
|
||||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||||
/ /
|
/ /
|
||||||
\ Zero or more Node Structures \
|
\ Zero or more Device Structures \
|
||||||
/ /
|
/ /
|
||||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||||
|
|
||||||
|
|
||||||
Node Structure:
|
Device Structure:
|
||||||
|
|
||||||
0 1 2 3
|
0 1 2 3
|
||||||
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
|
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
|
||||||
@ -238,13 +238,13 @@ ClientVersion is "v0.7.2". The ClientVersion field SHOULD follow the
|
|||||||
patterns laid out in the [Semantic Versioning](http://semver.org/)
|
patterns laid out in the [Semantic Versioning](http://semver.org/)
|
||||||
standard.
|
standard.
|
||||||
|
|
||||||
The Repositories field lists all repositories that will be synchronized
|
The Folders field lists all folders that will be synchronized
|
||||||
over the current connection. Each repository has a list of participating
|
over the current connection. Each folder has a list of participating
|
||||||
Nodes. Each node has an associated Flags field to indicate the sharing
|
Devices. Each device has an associated Flags field to indicate the sharing
|
||||||
mode of that node for the repository in question. See the discussion on
|
mode of that device for the folder in question. See the discussion on
|
||||||
Sharing Modes.
|
Sharing Modes.
|
||||||
|
|
||||||
The Node Flags field contains the following single bit flags:
|
The Device Flags field contains the following single bit flags:
|
||||||
|
|
||||||
0 1 2 3
|
0 1 2 3
|
||||||
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
|
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
|
||||||
@ -252,40 +252,40 @@ The Node Flags field contains the following single bit flags:
|
|||||||
| Reserved |Pri| Reserved |I|R|T|
|
| Reserved |Pri| Reserved |I|R|T|
|
||||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||||
|
|
||||||
- Bit 31 ("T", Trusted) is set for nodes that participate in trusted
|
- Bit 31 ("T", Trusted) is set for devices that participate in trusted
|
||||||
mode.
|
mode.
|
||||||
|
|
||||||
- Bit 30 ("R", Read Only) is set for nodes that participate in read
|
- Bit 30 ("R", Read Only) is set for devices that participate in read
|
||||||
only mode.
|
only mode.
|
||||||
|
|
||||||
- Bit 29 ("I", Introducer) is set for nodes that are trusted as cluster
|
- Bit 29 ("I", Introducer) is set for devices that are trusted as cluster
|
||||||
introducers.
|
introducers.
|
||||||
|
|
||||||
- Bits 16 through 28 are reserved and MUST be set to zero.
|
- Bits 16 through 28 are reserved and MUST be set to zero.
|
||||||
|
|
||||||
- Bits 14-15 ("Pri) indicate the node's upload priority for this
|
- Bits 14-15 ("Pri) indicate the device's upload priority for this
|
||||||
repository. Possible values are:
|
folder. Possible values are:
|
||||||
|
|
||||||
- 00: The default. Normal priority.
|
- 00: The default. Normal priority.
|
||||||
|
|
||||||
- 01: High priority. Other nodes SHOULD favour requesting files from
|
- 01: High priority. Other devices SHOULD favour requesting files from
|
||||||
this node over nodes with normal or low priority.
|
this device over devices with normal or low priority.
|
||||||
|
|
||||||
- 10: Low priority. Other nodes SHOULD avoid requesting files from
|
- 10: Low priority. Other devices SHOULD avoid requesting files from
|
||||||
this node when they are available from other nodes.
|
this device when they are available from other devices.
|
||||||
|
|
||||||
- 11: Sharing disabled. Other nodes SHOULD NOT request files from
|
- 11: Sharing disabled. Other devices SHOULD NOT request files from
|
||||||
this node.
|
this device.
|
||||||
|
|
||||||
- Bits 0 through 13 are reserved and MUST be set to zero.
|
- Bits 0 through 13 are reserved and MUST be set to zero.
|
||||||
|
|
||||||
Exactly one of the T and R bits MUST be set.
|
Exactly one of the T and R bits MUST be set.
|
||||||
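Read as Go constants, the single bit flags above might look like the following sketch (the names are illustrative; bit 31 in the diagram is the least significant bit of the 32-bit Flags word):

    const (
        FlagTrusted    uint32 = 1 << 0 // "T": trusted mode
        FlagReadOnly   uint32 = 1 << 1 // "R": read only mode
        FlagIntroducer uint32 = 1 << 2 // "I": cluster introducer
    )

    // Exactly one of T and R must be set for a valid device entry.
    func flagsValid(flags uint32) bool {
        return (flags&FlagTrusted != 0) != (flags&FlagReadOnly != 0)
    }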
|
|
||||||
The per node Max Local Version field contains the highest local file
|
The per device Max Local Version field contains the highest local file
|
||||||
version number of the files already known to be in the index sent by
|
version number of the files already known to be in the index sent by
|
||||||
this node. If nothing is known about the index of a given node, this
|
this device. If nothing is known about the index of a given device, this
|
||||||
field MUST be set to zero. When receiving a Cluster Config message with
|
field MUST be set to zero. When receiving a Cluster Config message with
|
||||||
a non-zero Max Local Version for the local node ID, a node MAY elect to
|
a non-zero Max Local Version for the local device ID, a device MAY elect to
|
||||||
send an Index Update message containing only files with higher local
|
send an Index Update message containing only files with higher local
|
||||||
version numbers in place of the initial Index message.
|
version numbers in place of the initial Index message.
|
||||||
|
|
||||||
@ -295,10 +295,10 @@ items, although it is transmitted in the form of a list of (Key, Value)
|
|||||||
pairs, both of string type. Key IDs are implementation specific. An
|
pairs, both of string type. Key IDs are implementation specific. An
|
||||||
implementation MUST ignore unknown keys. An implementation MAY impose
|
implementation MUST ignore unknown keys. An implementation MAY impose
|
||||||
limits on the length of keys and values. The options list may be used to
|
limits on the length of keys and values. The options list may be used to
|
||||||
inform nodes of relevant local configuration options such as rate
|
inform devices of relevant local configuration options such as rate
|
||||||
limiting or make recommendations about request parallelism, node
|
limiting or make recommendations about request parallelism, device
|
||||||
priorities, etc. An empty options list is valid for nodes not having any
|
priorities, etc. An empty options list is valid for devices not having any
|
||||||
such information to share. Nodes MAY NOT make any assumptions about
|
such information to share. Devices MAY NOT make any assumptions about
|
||||||
peers acting in a specific manner as a result of sent options.
|
peers acting in a specific manner as a result of sent options.
|
||||||
|
|
||||||
#### XDR
|
#### XDR
|
||||||
@ -306,16 +306,16 @@ peers acting in a specific manner as a result of sent options.
|
|||||||
struct ClusterConfigMessage {
|
struct ClusterConfigMessage {
|
||||||
string ClientName<>;
|
string ClientName<>;
|
||||||
string ClientVersion<>;
|
string ClientVersion<>;
|
||||||
Repository Repositories<>;
|
Folder Folders<>;
|
||||||
Option Options<>;
|
Option Options<>;
|
||||||
}
|
}
|
||||||
|
|
||||||
struct Repository {
|
struct Folder {
|
||||||
string ID<>;
|
string ID<>;
|
||||||
Node Nodes<>;
|
Device Devices<>;
|
||||||
}
|
}
|
||||||
|
|
||||||
struct Node {
|
struct Device {
|
||||||
string ID<>;
|
string ID<>;
|
||||||
unsigned int Flags;
|
unsigned int Flags;
|
||||||
unsigned hyper MaxLocalVersion;
|
unsigned hyper MaxLocalVersion;
|
||||||
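Put together, a Cluster Config message for one shared folder might be built as in this sketch (field names mirror the renamed XDR above; the device ID is taken from the test configs later in this document and the flag value is the "T" bit described earlier):

    // Sketch: advertise one folder, shared with one trusted device.
    func exampleClusterConfig() ClusterConfigMessage {
        return ClusterConfigMessage{
            ClientName:    "syncthing",
            ClientVersion: "v0.7.2",
            Folders: []Folder{{
                ID: "default",
                Devices: []Device{{
                    ID:              "I6KAH7666SLLL5PFXSOAUFJCDZYAOMLEKCP2GB3BV5RQST3PSROA",
                    Flags:           1 << 0, // the "T" (trusted) bit
                    MaxLocalVersion: 0,      // nothing known of this device's index yet
                }},
            }},
        }
    }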
@ -329,18 +329,18 @@ peers acting in a specific manner as a result of sent options.
|
|||||||
### Index (Type = 1) and Index Update (Type = 6)
|
### Index (Type = 1) and Index Update (Type = 6)
|
||||||
|
|
||||||
The Index and Index Update messages define the contents of the sender's
|
The Index and Index Update messages define the contents of the sender's
|
||||||
repository. An Index message represents the full contents of the
|
folder. An Index message represents the full contents of the
|
||||||
repository and thus supersedes any previous index. An Index Update
|
folder and thus supersedes any previous index. An Index Update
|
||||||
amends an existing index with new information, not affecting any entries
|
amends an existing index with new information, not affecting any entries
|
||||||
not included in the message. An Index Update MAY NOT be sent unless
|
not included in the message. An Index Update MAY NOT be sent unless
|
||||||
preceded by an Index, unless a non-zero Max Local Version has been
|
preceded by an Index, unless a non-zero Max Local Version has been
|
||||||
announced for the given repository by the peer node.
|
announced for the given folder by the peer device.
|
||||||
|
|
||||||
An Index or Index Update message MUST be sent for each repository
|
An Index or Index Update message MUST be sent for each folder
|
||||||
included in the Cluster Config message, and MUST be sent before any
|
included in the Cluster Config message, and MUST be sent before any
|
||||||
other message referring to that repository. A node with no data to
|
other message referring to that folder. A device with no data to
|
||||||
advertise MUST send an empty Index message (a file list of zero length).
|
advertise MUST send an empty Index message (a file list of zero length).
|
||||||
If the repository contents change from non-empty to empty, an empty
|
If the folder contents change from non-empty to empty, an empty
|
||||||
Index message MUST be sent. There is no response to the Index message.
|
Index message MUST be sent. There is no response to the Index message.
|
||||||
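In terms of the connection API changed earlier in this commit, a device with nothing to advertise still calls Index with an empty file list; a brief sketch (folder ID illustrative, conn assumed to be a protocol.Connection):

    // Sketch: announce an empty folder, per the requirement above.
    func announceEmpty(conn protocol.Connection) error {
        return conn.Index("default", []protocol.FileInfo{})
    }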
|
|
||||||
#### Graphical Representation
|
#### Graphical Representation
|
||||||
@ -350,10 +350,10 @@ Index message MUST be sent. There is no response to the Index message.
|
|||||||
0 1 2 3
|
0 1 2 3
|
||||||
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
|
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
|
||||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||||
| Length of Repository |
|
| Length of Folder |
|
||||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||||
/ /
|
/ /
|
||||||
\ Repository (variable length) \
|
\ Folder (variable length) \
|
||||||
/ /
|
/ /
|
||||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||||
| Number of Files |
|
| Number of Files |
|
||||||
@ -413,24 +413,24 @@ Index message MUST be sent. There is no response to the Index message.
|
|||||||
|
|
||||||
#### Fields
|
#### Fields
|
||||||
|
|
||||||
The Repository field identifies the repository that the index message
|
The Folder field identifies the folder that the index message
|
||||||
pertains to. For single repository implementations the node MAY send an
|
pertains to. For single folder implementations the device MAY send an
|
||||||
empty repository ID or use the string "default".
|
empty folder ID or use the string "default".
|
||||||
|
|
||||||
The Name is the file name path relative to the repository root. Like all
|
The Name is the file name path relative to the folder root. Like all
|
||||||
strings in BEP, the Name is always in UTF-8 NFC regardless of operating
|
strings in BEP, the Name is always in UTF-8 NFC regardless of operating
|
||||||
system or file system specific conventions. The Name field uses the
|
system or file system specific conventions. The Name field uses the
|
||||||
slash character ("/") as path separator, regardless of the
|
slash character ("/") as path separator, regardless of the
|
||||||
implementation's operating system conventions. The combination of
|
implementation's operating system conventions. The combination of
|
||||||
Repository and Name uniquely identifies each file in a cluster.
|
Folder and Name uniquely identifies each file in a cluster.
|
||||||
|
|
||||||
The Version field is the value of a cluster wide Lamport clock
|
The Version field is the value of a cluster wide Lamport clock
|
||||||
indicating when the change was detected. The clock ticks on every
|
indicating when the change was detected. The clock ticks on every
|
||||||
detected and received change. The combination of Repository, Name and
|
detected and received change. The combination of Folder, Name and
|
||||||
Version uniquely identifies the contents of a file at a given point in
|
Version uniquely identifies the contents of a file at a given point in
|
||||||
time.
|
time.
|
||||||
|
|
||||||
The Local Version field is the value of a node local monotonic clock at
|
The Local Version field is the value of a device local monotonic clock at
|
||||||
the time of last local database update to a file. The clock ticks on
|
the time of last local database update to a file. The clock ticks on
|
||||||
every local database update.
|
every local database update.
|
||||||
|
|
||||||
@ -471,7 +471,7 @@ The Modified time is expressed as the number of seconds since the Unix
|
|||||||
Epoch (1970-01-01 00:00:00 UTC).
|
Epoch (1970-01-01 00:00:00 UTC).
|
||||||
|
|
||||||
On the rare occasion that a file is simultaneously and independently
|
On the rare occasion that a file is simultaneously and independently
|
||||||
modified by two nodes in the same cluster and thus ends up on the same
|
modified by two devices in the same cluster and thus ends up on the same
|
||||||
Version number after modification, the Modified field is used as a tie
|
Version number after modification, the Modified field is used as a tie
|
||||||
breaker (higher being better), followed by the hash values of the file
|
breaker (higher being better), followed by the hash values of the file
|
||||||
blocks (lower being better).
|
blocks (lower being better).
|
||||||
@ -483,7 +483,7 @@ block which may represent a smaller amount of data.
|
|||||||
#### XDR
|
#### XDR
|
||||||
|
|
||||||
struct IndexMessage {
|
struct IndexMessage {
|
||||||
string Repository<>;
|
string Folder<>;
|
||||||
FileInfo Files<>;
|
FileInfo Files<>;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -504,7 +504,7 @@ block which may represent a smaller amount of data.
|
|||||||
### Request (Type = 2)
|
### Request (Type = 2)
|
||||||
|
|
||||||
The Request message expresses the desire to receive a data block
|
The Request message expresses the desire to receive a data block
|
||||||
corresponding to a part of a certain file in the peer's repository.
|
corresponding to a part of a certain file in the peer's folder.
|
||||||
|
|
||||||
#### Graphical Representation
|
#### Graphical Representation
|
||||||
|
|
||||||
@ -513,10 +513,10 @@ corresponding to a part of a certain file in the peer's repository.
|
|||||||
0 1 2 3
|
0 1 2 3
|
||||||
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
|
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
|
||||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||||
| Length of Repository |
|
| Length of Folder |
|
||||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||||
/ /
|
/ /
|
||||||
\ Repository (variable length) \
|
\ Folder (variable length) \
|
||||||
/ /
|
/ /
|
||||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||||
| Length of Name |
|
| Length of Name |
|
||||||
@ -534,7 +534,7 @@ corresponding to a part of a certain file in the peer's repository.
|
|||||||
|
|
||||||
#### Fields
|
#### Fields
|
||||||
|
|
||||||
The Repository and Name fields are as documented for the Index message.
|
The Folder and Name fields are as documented for the Index message.
|
||||||
The Offset and Size fields specify the region of the file to be
|
The Offset and Size fields specify the region of the file to be
|
||||||
transferred. This SHOULD equate to exactly one block as seen in an Index
|
transferred. This SHOULD equate to exactly one block as seen in an Index
|
||||||
message.
|
message.
|
||||||
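A request for block i of a file is then simply offset i * 131072 with that block's own size; a sketch using the connection API from this commit (the helper name is illustrative):

    // Sketch: request block i of a file from the peer.
    func requestBlock(conn protocol.Connection, folder, name string, i int, blockSize int) ([]byte, error) {
        offset := int64(i) * 131072 // one full block per request, per the SHOULD above
        return conn.Request(folder, name, offset, blockSize)
    }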
@ -542,7 +542,7 @@ message.
|
|||||||
#### XDR
|
#### XDR
|
||||||
|
|
||||||
struct RequestMessage {
|
struct RequestMessage {
|
||||||
string Repository<>;
|
string Folder<>;
|
||||||
string Name<>;
|
string Name<>;
|
||||||
unsigned hyper Offset;
|
unsigned hyper Offset;
|
||||||
unsigned int Size;
|
unsigned int Size;
|
||||||
@ -624,20 +624,20 @@ directions.
|
|||||||
|
|
||||||
+------------+ Updates /---------\
|
+------------+ Updates /---------\
|
||||||
| | -----------> / \
|
| | -----------> / \
|
||||||
| Node | | Cluster |
|
| Device | | Cluster |
|
||||||
| | <----------- \ /
|
| | <----------- \ /
|
||||||
+------------+ Updates \---------/
|
+------------+ Updates \---------/
|
||||||
|
|
||||||
### Read Only
|
### Read Only
|
||||||
|
|
||||||
In read only mode, a node does not synchronize the local repository to
|
In read only mode, a device does not synchronize the local folder to
|
||||||
the cluster, but publishes changes to its local repository contents as
|
the cluster, but publishes changes to its local folder contents as
|
||||||
usual. The local repository can be seen as a "master copy" that is never
|
usual. The local folder can be seen as a "master copy" that is never
|
||||||
affected by the actions of other cluster nodes.
|
affected by the actions of other cluster devices.
|
||||||
|
|
||||||
+------------+ Updates /---------\
|
+------------+ Updates /---------\
|
||||||
| | -----------> / \
|
| | -----------> / \
|
||||||
| Node | | Cluster |
|
| Device | | Cluster |
|
||||||
| | \ /
|
| | \ /
|
||||||
+------------+ \---------/
|
+------------+ \---------/
|
||||||
|
|
||||||
@ -651,7 +651,7 @@ restrictive than the following:
|
|||||||
|
|
||||||
### Index and Index Update Messages
|
### Index and Index Update Messages
|
||||||
|
|
||||||
- Repository: 64 bytes
|
- Folder: 64 bytes
|
||||||
- Number of Files: 10.000.000
|
- Number of Files: 10.000.000
|
||||||
- Name: 1024 bytes
|
- Name: 1024 bytes
|
||||||
- Number of Blocks: 1.000.000
|
- Number of Blocks: 1.000.000
|
||||||
@ -659,7 +659,7 @@ restrictive than the following:
|
|||||||
|
|
||||||
### Request Messages
|
### Request Messages
|
||||||
|
|
||||||
- Repository: 64 bytes
|
- Folder: 64 bytes
|
||||||
- Name: 1024 bytes
|
- Name: 1024 bytes
|
||||||
|
|
||||||
### Response Messages
|
### Response Messages
|
||||||
@ -695,8 +695,8 @@ The Index records are received and both peers recompute their knowledge
|
|||||||
of the data in the cluster. In this example, peer A has four missing or
|
of the data in the cluster. In this example, peer A has four missing or
|
||||||
outdated blocks. At 2 through 5 peer A sends requests for these blocks.
|
outdated blocks. At 2 through 5 peer A sends requests for these blocks.
|
||||||
The requests are received by peer B, who retrieves the data from the
|
The requests are received by peer B, who retrieves the data from the
|
||||||
repository and transmits Response records (6 through 9). Node A updates
|
folder and transmits Response records (6 through 9). Device A updates
|
||||||
its repository contents and transmits an Index Update message (10).
|
its folder contents and transmits an Index Update message (10).
|
||||||
Both peers enter idle state after 10. At some later time 11, peer A
|
Both peers enter idle state after 10. At some later time 11, peer A
|
||||||
determines that it has not seen data from B for some time and sends a
|
determines that it has not seen data from B for some time and sends a
|
||||||
Ping request. A response is sent at 12.
|
Ping request. A response is sent at 12.
|
||||||
|
@ -1,15 +1,15 @@
|
|||||||
<configuration version="2">
|
<configuration version="2">
|
||||||
<repository id="default" directory="s1" ro="true" ignorePerms="false">
|
<folder id="default" directory="s1" ro="true" ignorePerms="false">
|
||||||
<node id="I6KAH7666SLLL5PFXSOAUFJCDZYAOMLEKCP2GB3BV5RQST3PSROA"></node>
|
<device id="I6KAH7666SLLL5PFXSOAUFJCDZYAOMLEKCP2GB3BV5RQST3PSROA"></device>
|
||||||
<node id="JMFJCXBGZDE4BOCJE3VF65GYZNAIVJRET3J6HMRAUQIGJOFKNHMQ"></node>
|
<device id="JMFJCXBGZDE4BOCJE3VF65GYZNAIVJRET3J6HMRAUQIGJOFKNHMQ"></device>
|
||||||
<versioning></versioning>
|
<versioning></versioning>
|
||||||
</repository>
|
</folder>
|
||||||
<node id="I6KAH7666SLLL5PFXSOAUFJCDZYAOMLEKCP2GB3BV5RQST3PSROA" name="f1">
|
<device id="I6KAH7666SLLL5PFXSOAUFJCDZYAOMLEKCP2GB3BV5RQST3PSROA" name="f1">
|
||||||
<address>127.0.0.1:22001</address>
|
<address>127.0.0.1:22001</address>
|
||||||
</node>
|
</device>
|
||||||
<node id="JMFJCXBGZDE4BOCJE3VF65GYZNAIVJRET3J6HMRAUQIGJOFKNHMQ" name="f2">
|
<device id="JMFJCXBGZDE4BOCJE3VF65GYZNAIVJRET3J6HMRAUQIGJOFKNHMQ" name="f2">
|
||||||
<address>127.0.0.1:22002</address>
|
<address>127.0.0.1:22002</address>
|
||||||
</node>
|
</device>
|
||||||
<gui enabled="true" tls="false">
|
<gui enabled="true" tls="false">
|
||||||
<address>127.0.0.1:8081</address>
|
<address>127.0.0.1:8081</address>
|
||||||
<apikey>abc123</apikey>
|
<apikey>abc123</apikey>
|
||||||
|
@ -1,17 +1,17 @@
|
|||||||
<configuration version="2">
|
<configuration version="2">
|
||||||
<repository id="default" directory="s2" ro="false" ignorePerms="false">
|
<folder id="default" directory="s2" ro="false" ignorePerms="false">
|
||||||
<node id="I6KAH76-66SLLLB-5PFXSOA-UFJCDZC-YAOMLEK-CP2GB32-BV5RQST-3PSROAU"></node>
|
<device id="I6KAH76-66SLLLB-5PFXSOA-UFJCDZC-YAOMLEK-CP2GB32-BV5RQST-3PSROAU"></device>
|
||||||
<node id="JMFJCXB-GZDE4BN-OCJE3VF-65GYZNU-AIVJRET-3J6HMRQ-AUQIGJO-FKNHMQU"></node>
|
<device id="JMFJCXB-GZDE4BN-OCJE3VF-65GYZNU-AIVJRET-3J6HMRQ-AUQIGJO-FKNHMQU"></device>
|
||||||
<versioning type="simple">
|
<versioning type="simple">
|
||||||
<param key="keep" val="5"></param>
|
<param key="keep" val="5"></param>
|
||||||
</versioning>
|
</versioning>
|
||||||
</repository>
|
</folder>
|
||||||
<node id="I6KAH76-66SLLLB-5PFXSOA-UFJCDZC-YAOMLEK-CP2GB32-BV5RQST-3PSROAU" name="f1">
|
<device id="I6KAH76-66SLLLB-5PFXSOA-UFJCDZC-YAOMLEK-CP2GB32-BV5RQST-3PSROAU" name="f1">
|
||||||
<address>127.0.0.1:22001</address>
|
<address>127.0.0.1:22001</address>
|
||||||
</node>
|
</device>
|
||||||
<node id="JMFJCXB-GZDE4BN-OCJE3VF-65GYZNU-AIVJRET-3J6HMRQ-AUQIGJO-FKNHMQU" name="f2">
|
<device id="JMFJCXB-GZDE4BN-OCJE3VF-65GYZNU-AIVJRET-3J6HMRQ-AUQIGJO-FKNHMQU" name="f2">
|
||||||
<address>127.0.0.1:22002</address>
|
<address>127.0.0.1:22002</address>
|
||||||
</node>
|
</device>
|
||||||
<gui enabled="true" tls="false">
|
<gui enabled="true" tls="false">
|
||||||
<address>127.0.0.1:8082</address>
|
<address>127.0.0.1:8082</address>
|
||||||
<apikey>abc123</apikey>
|
<apikey>abc123</apikey>
|
||||||
|
@ -1,27 +1,27 @@
|
|||||||
<configuration version="4">
|
<configuration version="4">
|
||||||
<repository id="default" directory="s1" ro="false" rescanIntervalS="10" ignorePerms="false">
|
<folder id="default" directory="s1" ro="false" rescanIntervalS="10" ignorePerms="false">
|
||||||
<node id="I6KAH76-66SLLLB-5PFXSOA-UFJCDZC-YAOMLEK-CP2GB32-BV5RQST-3PSROAU"></node>
|
<device id="I6KAH76-66SLLLB-5PFXSOA-UFJCDZC-YAOMLEK-CP2GB32-BV5RQST-3PSROAU"></device>
|
||||||
<node id="JMFJCXB-GZDE4BN-OCJE3VF-65GYZNU-AIVJRET-3J6HMRQ-AUQIGJO-FKNHMQU"></node>
|
<device id="JMFJCXB-GZDE4BN-OCJE3VF-65GYZNU-AIVJRET-3J6HMRQ-AUQIGJO-FKNHMQU"></device>
|
||||||
<node id="373HSRP-QLPNLIE-JYKZVQF-P4PKZ63-R2ZE6K3-YD442U2-JHBGBQG-WWXAHAU"></node>
|
<device id="373HSRP-QLPNLIE-JYKZVQF-P4PKZ63-R2ZE6K3-YD442U2-JHBGBQG-WWXAHAU"></device>
|
||||||
<versioning></versioning>
|
<versioning></versioning>
|
||||||
</repository>
|
</folder>
|
||||||
<repository id="s12" directory="s12-1" ro="false" rescanIntervalS="10" ignorePerms="false">
|
<folder id="s12" directory="s12-1" ro="false" rescanIntervalS="10" ignorePerms="false">
|
||||||
<node id="I6KAH76-66SLLLB-5PFXSOA-UFJCDZC-YAOMLEK-CP2GB32-BV5RQST-3PSROAU"></node>
|
<device id="I6KAH76-66SLLLB-5PFXSOA-UFJCDZC-YAOMLEK-CP2GB32-BV5RQST-3PSROAU"></device>
|
||||||
<node id="JMFJCXB-GZDE4BN-OCJE3VF-65GYZNU-AIVJRET-3J6HMRQ-AUQIGJO-FKNHMQU"></node>
|
<device id="JMFJCXB-GZDE4BN-OCJE3VF-65GYZNU-AIVJRET-3J6HMRQ-AUQIGJO-FKNHMQU"></device>
|
||||||
<versioning></versioning>
|
<versioning></versioning>
|
||||||
</repository>
|
</folder>
|
||||||
<node id="EJHMPAQ-OGCVORE-ISB4IS3-SYYVJXF-TKJGLTU-66DIQPF-GJ5D2GX-GQ3OWQK" name="s4" compression="true">
|
<device id="EJHMPAQ-OGCVORE-ISB4IS3-SYYVJXF-TKJGLTU-66DIQPF-GJ5D2GX-GQ3OWQK" name="s4" compression="true">
|
||||||
<address>127.0.0.1:22004</address>
|
<address>127.0.0.1:22004</address>
|
||||||
</node>
|
</device>
|
||||||
<node id="I6KAH76-66SLLLB-5PFXSOA-UFJCDZC-YAOMLEK-CP2GB32-BV5RQST-3PSROAU" name="s1" compression="true">
|
<device id="I6KAH76-66SLLLB-5PFXSOA-UFJCDZC-YAOMLEK-CP2GB32-BV5RQST-3PSROAU" name="s1" compression="true">
|
||||||
<address>127.0.0.1:22001</address>
|
<address>127.0.0.1:22001</address>
|
||||||
</node>
|
</device>
|
||||||
<node id="JMFJCXB-GZDE4BN-OCJE3VF-65GYZNU-AIVJRET-3J6HMRQ-AUQIGJO-FKNHMQU" name="s2" compression="true">
|
<device id="JMFJCXB-GZDE4BN-OCJE3VF-65GYZNU-AIVJRET-3J6HMRQ-AUQIGJO-FKNHMQU" name="s2" compression="true">
|
||||||
<address>127.0.0.1:22002</address>
|
<address>127.0.0.1:22002</address>
|
||||||
</node>
|
</device>
|
||||||
<node id="373HSRP-QLPNLIE-JYKZVQF-P4PKZ63-R2ZE6K3-YD442U2-JHBGBQG-WWXAHAU" name="s3" compression="true">
|
<device id="373HSRP-QLPNLIE-JYKZVQF-P4PKZ63-R2ZE6K3-YD442U2-JHBGBQG-WWXAHAU" name="s3" compression="true">
|
||||||
<address>127.0.0.1:22003</address>
|
<address>127.0.0.1:22003</address>
|
||||||
</node>
|
</device>
|
||||||
<gui enabled="true" tls="false">
|
<gui enabled="true" tls="false">
|
||||||
<address>127.0.0.1:8081</address>
|
<address>127.0.0.1:8081</address>
|
||||||
<user>testuser</user>
|
<user>testuser</user>
|
||||||
|
@@ -1,29 +1,29 @@
 <configuration version="4">
-<repository id="default" directory="s2" ro="false" rescanIntervalS="15" ignorePerms="false">
+<folder id="default" directory="s2" ro="false" rescanIntervalS="15" ignorePerms="false">
-<node id="I6KAH76-66SLLLB-5PFXSOA-UFJCDZC-YAOMLEK-CP2GB32-BV5RQST-3PSROAU"></node>
+<device id="I6KAH76-66SLLLB-5PFXSOA-UFJCDZC-YAOMLEK-CP2GB32-BV5RQST-3PSROAU"></device>
-<node id="JMFJCXB-GZDE4BN-OCJE3VF-65GYZNU-AIVJRET-3J6HMRQ-AUQIGJO-FKNHMQU"></node>
+<device id="JMFJCXB-GZDE4BN-OCJE3VF-65GYZNU-AIVJRET-3J6HMRQ-AUQIGJO-FKNHMQU"></device>
-<node id="373HSRP-QLPNLIE-JYKZVQF-P4PKZ63-R2ZE6K3-YD442U2-JHBGBQG-WWXAHAU"></node>
+<device id="373HSRP-QLPNLIE-JYKZVQF-P4PKZ63-R2ZE6K3-YD442U2-JHBGBQG-WWXAHAU"></device>
 <versioning></versioning>
-</repository>
+</folder>
-<repository id="s12" directory="s12-2" ro="false" rescanIntervalS="15" ignorePerms="false">
+<folder id="s12" directory="s12-2" ro="false" rescanIntervalS="15" ignorePerms="false">
-<node id="I6KAH76-66SLLLB-5PFXSOA-UFJCDZC-YAOMLEK-CP2GB32-BV5RQST-3PSROAU"></node>
+<device id="I6KAH76-66SLLLB-5PFXSOA-UFJCDZC-YAOMLEK-CP2GB32-BV5RQST-3PSROAU"></device>
-<node id="JMFJCXB-GZDE4BN-OCJE3VF-65GYZNU-AIVJRET-3J6HMRQ-AUQIGJO-FKNHMQU"></node>
+<device id="JMFJCXB-GZDE4BN-OCJE3VF-65GYZNU-AIVJRET-3J6HMRQ-AUQIGJO-FKNHMQU"></device>
 <versioning></versioning>
-</repository>
+</folder>
-<repository id="s23" directory="s23-2" ro="false" rescanIntervalS="15" ignorePerms="false">
+<folder id="s23" directory="s23-2" ro="false" rescanIntervalS="15" ignorePerms="false">
-<node id="JMFJCXB-GZDE4BN-OCJE3VF-65GYZNU-AIVJRET-3J6HMRQ-AUQIGJO-FKNHMQU"></node>
+<device id="JMFJCXB-GZDE4BN-OCJE3VF-65GYZNU-AIVJRET-3J6HMRQ-AUQIGJO-FKNHMQU"></device>
-<node id="373HSRP-QLPNLIE-JYKZVQF-P4PKZ63-R2ZE6K3-YD442U2-JHBGBQG-WWXAHAU"></node>
+<device id="373HSRP-QLPNLIE-JYKZVQF-P4PKZ63-R2ZE6K3-YD442U2-JHBGBQG-WWXAHAU"></device>
 <versioning></versioning>
-</repository>
+</folder>
-<node id="I6KAH76-66SLLLB-5PFXSOA-UFJCDZC-YAOMLEK-CP2GB32-BV5RQST-3PSROAU" name="s1" compression="true">
+<device id="I6KAH76-66SLLLB-5PFXSOA-UFJCDZC-YAOMLEK-CP2GB32-BV5RQST-3PSROAU" name="s1" compression="true">
 <address>127.0.0.1:22001</address>
-</node>
+</device>
-<node id="JMFJCXB-GZDE4BN-OCJE3VF-65GYZNU-AIVJRET-3J6HMRQ-AUQIGJO-FKNHMQU" name="s2" compression="true">
+<device id="JMFJCXB-GZDE4BN-OCJE3VF-65GYZNU-AIVJRET-3J6HMRQ-AUQIGJO-FKNHMQU" name="s2" compression="true">
 <address>127.0.0.1:22002</address>
-</node>
+</device>
-<node id="373HSRP-QLPNLIE-JYKZVQF-P4PKZ63-R2ZE6K3-YD442U2-JHBGBQG-WWXAHAU" name="s3" compression="true">
+<device id="373HSRP-QLPNLIE-JYKZVQF-P4PKZ63-R2ZE6K3-YD442U2-JHBGBQG-WWXAHAU" name="s3" compression="true">
 <address>127.0.0.1:22003</address>
-</node>
+</device>
 <gui enabled="true" tls="false">
 <address>127.0.0.1:8082</address>
 <apikey>abc123</apikey>
@@ -1,26 +1,26 @@
 <configuration version="4">
-<repository id="s23" directory="s23-3" ro="false" rescanIntervalS="20" ignorePerms="false">
+<folder id="s23" directory="s23-3" ro="false" rescanIntervalS="20" ignorePerms="false">
-<node id="JMFJCXB-GZDE4BN-OCJE3VF-65GYZNU-AIVJRET-3J6HMRQ-AUQIGJO-FKNHMQU"></node>
+<device id="JMFJCXB-GZDE4BN-OCJE3VF-65GYZNU-AIVJRET-3J6HMRQ-AUQIGJO-FKNHMQU"></device>
-<node id="373HSRP-QLPNLIE-JYKZVQF-P4PKZ63-R2ZE6K3-YD442U2-JHBGBQG-WWXAHAU"></node>
+<device id="373HSRP-QLPNLIE-JYKZVQF-P4PKZ63-R2ZE6K3-YD442U2-JHBGBQG-WWXAHAU"></device>
 <versioning></versioning>
-</repository>
+</folder>
-<repository id="default" directory="s3" ro="false" rescanIntervalS="20" ignorePerms="false">
+<folder id="default" directory="s3" ro="false" rescanIntervalS="20" ignorePerms="false">
-<node id="I6KAH76-66SLLLB-5PFXSOA-UFJCDZC-YAOMLEK-CP2GB32-BV5RQST-3PSROAU"></node>
+<device id="I6KAH76-66SLLLB-5PFXSOA-UFJCDZC-YAOMLEK-CP2GB32-BV5RQST-3PSROAU"></device>
-<node id="JMFJCXB-GZDE4BN-OCJE3VF-65GYZNU-AIVJRET-3J6HMRQ-AUQIGJO-FKNHMQU"></node>
+<device id="JMFJCXB-GZDE4BN-OCJE3VF-65GYZNU-AIVJRET-3J6HMRQ-AUQIGJO-FKNHMQU"></device>
-<node id="373HSRP-QLPNLIE-JYKZVQF-P4PKZ63-R2ZE6K3-YD442U2-JHBGBQG-WWXAHAU"></node>
+<device id="373HSRP-QLPNLIE-JYKZVQF-P4PKZ63-R2ZE6K3-YD442U2-JHBGBQG-WWXAHAU"></device>
 <versioning type="simple">
 <param key="keep" val="5"></param>
 </versioning>
-</repository>
+</folder>
-<node id="I6KAH76-66SLLLB-5PFXSOA-UFJCDZC-YAOMLEK-CP2GB32-BV5RQST-3PSROAU" name="s1" compression="true">
+<device id="I6KAH76-66SLLLB-5PFXSOA-UFJCDZC-YAOMLEK-CP2GB32-BV5RQST-3PSROAU" name="s1" compression="true">
 <address>127.0.0.1:22001</address>
-</node>
+</device>
-<node id="JMFJCXB-GZDE4BN-OCJE3VF-65GYZNU-AIVJRET-3J6HMRQ-AUQIGJO-FKNHMQU" name="s2" compression="true">
+<device id="JMFJCXB-GZDE4BN-OCJE3VF-65GYZNU-AIVJRET-3J6HMRQ-AUQIGJO-FKNHMQU" name="s2" compression="true">
 <address>127.0.0.1:22002</address>
-</node>
+</device>
-<node id="373HSRP-QLPNLIE-JYKZVQF-P4PKZ63-R2ZE6K3-YD442U2-JHBGBQG-WWXAHAU" name="s3" compression="true">
+<device id="373HSRP-QLPNLIE-JYKZVQF-P4PKZ63-R2ZE6K3-YD442U2-JHBGBQG-WWXAHAU" name="s3" compression="true">
 <address>127.0.0.1:22003</address>
-</node>
+</device>
 <gui enabled="true" tls="false">
 <address>127.0.0.1:8083</address>
 <apikey>abc123</apikey>
@@ -1,28 +1,28 @@
 <configuration version="4">
-<repository id="unique" directory="s4" ro="false" rescanIntervalS="60" ignorePerms="false">
+<folder id="unique" directory="s4" ro="false" rescanIntervalS="60" ignorePerms="false">
-<node id="EJHMPAQ-OGCVORE-ISB4IS3-SYYVJXF-TKJGLTU-66DIQPF-GJ5D2GX-GQ3OWQK"></node>
+<device id="EJHMPAQ-OGCVORE-ISB4IS3-SYYVJXF-TKJGLTU-66DIQPF-GJ5D2GX-GQ3OWQK"></device>
-<node id="I6KAH76-66SLLLB-5PFXSOA-UFJCDZC-YAOMLEK-CP2GB32-BV5RQST-3PSROAU"></node>
+<device id="I6KAH76-66SLLLB-5PFXSOA-UFJCDZC-YAOMLEK-CP2GB32-BV5RQST-3PSROAU"></device>
-<node id="JMFJCXB-GZDE4BN-OCJE3VF-65GYZNU-AIVJRET-3J6HMRQ-AUQIGJO-FKNHMQU"></node>
+<device id="JMFJCXB-GZDE4BN-OCJE3VF-65GYZNU-AIVJRET-3J6HMRQ-AUQIGJO-FKNHMQU"></device>
-<node id="373HSRP-QLPNLIE-JYKZVQF-P4PKZ63-R2ZE6K3-YD442U2-JHBGBQG-WWXAHAU"></node>
+<device id="373HSRP-QLPNLIE-JYKZVQF-P4PKZ63-R2ZE6K3-YD442U2-JHBGBQG-WWXAHAU"></device>
 <versioning></versioning>
-</repository>
+</folder>
-<repository id="default" directory="s4d" ro="false" rescanIntervalS="60" ignorePerms="false">
+<folder id="default" directory="s4d" ro="false" rescanIntervalS="60" ignorePerms="false">
-<node id="EJHMPAQ-OGCVORE-ISB4IS3-SYYVJXF-TKJGLTU-66DIQPF-GJ5D2GX-GQ3OWQK"></node>
+<device id="EJHMPAQ-OGCVORE-ISB4IS3-SYYVJXF-TKJGLTU-66DIQPF-GJ5D2GX-GQ3OWQK"></device>
-<node id="I6KAH76-66SLLLB-5PFXSOA-UFJCDZC-YAOMLEK-CP2GB32-BV5RQST-3PSROAU"></node>
+<device id="I6KAH76-66SLLLB-5PFXSOA-UFJCDZC-YAOMLEK-CP2GB32-BV5RQST-3PSROAU"></device>
 <versioning></versioning>
-</repository>
+</folder>
-<node id="I6KAH76-66SLLLB-5PFXSOA-UFJCDZC-YAOMLEK-CP2GB32-BV5RQST-3PSROAU" name="s1" compression="true">
+<device id="I6KAH76-66SLLLB-5PFXSOA-UFJCDZC-YAOMLEK-CP2GB32-BV5RQST-3PSROAU" name="s1" compression="true">
 <address>127.0.0.1:22001</address>
-</node>
+</device>
-<node id="JMFJCXB-GZDE4BN-OCJE3VF-65GYZNU-AIVJRET-3J6HMRQ-AUQIGJO-FKNHMQU" name="s2" compression="true">
+<device id="JMFJCXB-GZDE4BN-OCJE3VF-65GYZNU-AIVJRET-3J6HMRQ-AUQIGJO-FKNHMQU" name="s2" compression="true">
 <address>127.0.0.1:22002</address>
-</node>
+</device>
-<node id="373HSRP-QLPNLIE-JYKZVQF-P4PKZ63-R2ZE6K3-YD442U2-JHBGBQG-WWXAHAU" name="s3" compression="true">
+<device id="373HSRP-QLPNLIE-JYKZVQF-P4PKZ63-R2ZE6K3-YD442U2-JHBGBQG-WWXAHAU" name="s3" compression="true">
 <address>127.0.0.1:22003</address>
-</node>
+</device>
-<node id="EJHMPAQ-OGCVORE-ISB4IS3-SYYVJXF-TKJGLTU-66DIQPF-GJ5D2GX-GQ3OWQK" name="s4" compression="true">
+<device id="EJHMPAQ-OGCVORE-ISB4IS3-SYYVJXF-TKJGLTU-66DIQPF-GJ5D2GX-GQ3OWQK" name="s4" compression="true">
 <address>dynamic</address>
-</node>
+</device>
 <gui enabled="true" tls="false">
 <address>127.0.0.1:8084</address>
 <apikey>abc123</apikey>
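The renamed configuration elements map directly onto struct tags when the XML is decoded. Below is a minimal, hypothetical sketch for illustration only: the struct and field names are not syncthing's actual config types, but the element and attribute names are taken from the test configurations above.

// Hypothetical sketch: decode the renamed <folder>/<device> config elements.
package main

import (
	"encoding/xml"
	"fmt"
	"os"
)

type Configuration struct {
	Version int      `xml:"version,attr"`
	Folders []Folder `xml:"folder"` // was <repository>
	Devices []Device `xml:"device"` // was <node>
}

type Folder struct {
	ID              string         `xml:"id,attr"`
	Directory       string         `xml:"directory,attr"`
	ReadOnly        bool           `xml:"ro,attr"`
	RescanIntervalS int            `xml:"rescanIntervalS,attr"`
	IgnorePerms     bool           `xml:"ignorePerms,attr"`
	Devices         []FolderDevice `xml:"device"` // devices sharing this folder
}

type FolderDevice struct {
	ID string `xml:"id,attr"`
}

type Device struct {
	ID          string   `xml:"id,attr"`
	Name        string   `xml:"name,attr"`
	Compression bool     `xml:"compression,attr"`
	Addresses   []string `xml:"address"`
}

func main() {
	fd, err := os.Open("config.xml") // path is illustrative
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	defer fd.Close()

	var cfg Configuration
	if err := xml.NewDecoder(fd).Decode(&cfg); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	for _, f := range cfg.Folders {
		fmt.Printf("folder %q in %s, shared with %d devices\n", f.ID, f.Directory, len(f.Devices))
	}
	for _, d := range cfg.Devices {
		fmt.Printf("device %s (%s) at %v\n", d.ID, d.Name, d.Addresses)
	}
}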
@@ -29,16 +29,16 @@ var (
 )

 var jsonEndpoints = []string{
-"/rest/completion?node=I6KAH76-66SLLLB-5PFXSOA-UFJCDZC-YAOMLEK-CP2GB32-BV5RQST-3PSROAU&repo=default",
+"/rest/completion?device=I6KAH76-66SLLLB-5PFXSOA-UFJCDZC-YAOMLEK-CP2GB32-BV5RQST-3PSROAU&folder=default",
 "/rest/config",
 "/rest/config/sync",
 "/rest/connections",
 "/rest/errors",
 "/rest/events",
 "/rest/lang",
-"/rest/model?repo=default",
+"/rest/model?folder=default",
 "/rest/need",
-"/rest/nodeid?id=I6KAH7666SLLLB5PFXSOAUFJCDZCYAOMLEKCP2GB32BV5RQST3PSROAU",
+"/rest/deviceid?id=I6KAH7666SLLLB5PFXSOAUFJCDZCYAOMLEKCP2GB32BV5RQST3PSROAU",
 "/rest/report",
 "/rest/system",
 }
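The renamed query parameters can be exercised against one of the test instances above. A rough sketch in Go, assuming the s2 instance is listening on 127.0.0.1:8082 with the abc123 API key from its config, passed in the X-API-Key header syncthing uses for REST authentication:

// Rough sketch: call a renamed REST endpoint on the s2 test instance.
package main

import (
	"fmt"
	"io"
	"net/http"
	"os"
)

func main() {
	// /rest/model now takes ?folder= instead of ?repo=.
	req, err := http.NewRequest("GET", "http://127.0.0.1:8082/rest/model?folder=default", nil)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	// API key taken from the s2 test config above.
	req.Header.Set("X-API-Key", "abc123")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println(resp.Status)
	fmt.Println(string(body))
}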
@@ -55,7 +55,7 @@ testConvergence() {
 tot=$(($s1comp + $s2comp))
 echo $tot / 200
 if [[ $tot == 200 ]] ; then
-# when fixing up directories, a node will announce completion
+# when fixing up directories, a device will announce completion
 # slightly before it's actually complete. this is arguably a bug,
 # but we let it slide for the moment as long as it gets there
 # eventually.
@@ -71,7 +71,7 @@ testConvergence() {
 popd >/dev/null

 if ! cmp dirs-1 dirs-2 ; then
-echo Repos differ
+echo Folders differ
 stop
 exit 1
 fi
@@ -74,7 +74,7 @@ loop:
 for _, ev := range evs {
 if ev.Type == "StateChanged" {
 data := ev.Data.(map[string]interface{})
-if data["repo"].(string) != "default" {
+if data["folder"].(string) != "default" {
 continue
 }
 log.Println(ev)
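The same rename reaches event consumers: a StateChanged event now carries a "folder" key where it previously carried "repo". A minimal polling sketch, again assuming the 127.0.0.1:8082 instance and the abc123 key; the Event shape here is an assumption modelled on the test code in this hunk, not a definitive API description.

// Minimal sketch: poll /rest/events and filter StateChanged by the renamed "folder" field.
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
	"os"
)

// Assumed event shape, following the test code in this diff.
type Event struct {
	ID   int                    `json:"id"`
	Type string                 `json:"type"`
	Data map[string]interface{} `json:"data"`
}

func main() {
	req, err := http.NewRequest("GET", "http://127.0.0.1:8082/rest/events", nil)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	req.Header.Set("X-API-Key", "abc123")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	defer resp.Body.Close()

	var evs []Event
	if err := json.NewDecoder(resp.Body).Decode(&evs); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}

	for _, ev := range evs {
		if ev.Type != "StateChanged" {
			continue
		}
		// "folder" replaces the old "repo" key in the event data.
		if folder, ok := ev.Data["folder"].(string); ok && folder == "default" {
			fmt.Println(ev.ID, ev.Type, ev.Data)
		}
	}
}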