Rename Repository -> Folder, Node -> Device (fixes #739)

Audrius Butkevicius 2014-09-28 12:00:38 +01:00
parent 9d816694ba
commit 6c352dca74
61 changed files with 2118 additions and 2118 deletions
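To make the scope of the rename concrete before the per-file hunks, here is a minimal Go sketch using the new names as they appear in the diff below. The import paths and example values are assumptions for illustration, not part of the commit.

```go
package main

import (
	"fmt"

	"github.com/syncthing/syncthing/config"   // assumed import path
	"github.com/syncthing/syncthing/protocol" // assumed import path
)

func main() {
	// protocol.NodeID is now protocol.DeviceID; a real ID would come from
	// protocol.NewDeviceID(cert) or protocol.DeviceIDFromString(s).
	var myID protocol.DeviceID

	// config.RepositoryConfiguration -> config.FolderConfiguration,
	// with its Nodes list renamed to Devices.
	folder := config.FolderConfiguration{
		ID:              "default",
		Directory:       "/home/user/Sync", // example path
		RescanIntervalS: 60,
		Devices:         []config.FolderDeviceConfiguration{{DeviceID: myID}},
	}

	// config.NodeConfiguration -> config.DeviceConfiguration.
	device := config.DeviceConfiguration{
		DeviceID:  myID,
		Name:      "example",
		Addresses: []string{"dynamic"},
	}

	fmt.Println(folder.ID, device.Name)
}
```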

View File

@ -12,7 +12,7 @@ least the following:
- What operating system, operating system version and version of
Syncthing you are running
- The same for other connected nodes, where relevant
- The same for other connected devices, where relevant
- Screenshot if the issue concerns something visible in the GUI

View File

@ -7,8 +7,8 @@ syncthing
This is the `syncthing` project. The following are the project goals:
1. Define a protocol for synchronization of a file repository between a
number of collaborating nodes. The protocol should be well defined,
1. Define a protocol for synchronization of a file folder between a
number of collaborating devices. The protocol should be well defined,
unambiguous, easily understood, free to use, efficient, secure and
language neutral. This is the [Block Exchange
Protocol](https://github.com/syncthing/syncthing/blob/master/protocol/PROTOCOL.md).

View File

@ -19,8 +19,8 @@ func main() {
log.SetFlags(0)
log.SetOutput(os.Stdout)
repo := flag.String("repo", "default", "Repository ID")
node := flag.String("node", "", "Node ID (blank for global)")
folder := flag.String("folder", "default", "Folder ID")
device := flag.String("device", "", "Device ID (blank for global)")
flag.Parse()
db, err := leveldb.OpenFile(flag.Arg(0), nil)
@ -28,10 +28,10 @@ func main() {
log.Fatal(err)
}
fs := files.NewSet(*repo, db)
fs := files.NewSet(*folder, db)
if *node == "" {
log.Printf("*** Global index for repo %q", *repo)
if *device == "" {
log.Printf("*** Global index for folder %q", *folder)
fs.WithGlobalTruncated(func(fi protocol.FileIntf) bool {
f := fi.(protocol.FileInfoTruncated)
fmt.Println(f)
@ -39,11 +39,11 @@ func main() {
return true
})
} else {
n, err := protocol.NodeIDFromString(*node)
n, err := protocol.DeviceIDFromString(*device)
if err != nil {
log.Fatal(err)
}
log.Printf("*** Have index for repo %q node %q", *repo, n)
log.Printf("*** Have index for folder %q device %q", *folder, n)
fs.WithHaveTruncated(n, func(fi protocol.FileIntf) bool {
f := fi.(protocol.FileInfoTruncated)
fmt.Println(f)
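A minimal standalone sketch of the renamed index API this tool uses, counting entries instead of printing them; the import paths, database path and device ID are placeholders and assumptions, not part of the commit:

```go
package main

import (
	"fmt"
	"log"

	"github.com/syncthing/syncthing/files"    // assumed import path
	"github.com/syncthing/syncthing/protocol" // assumed import path
	"github.com/syndtr/goleveldb/leveldb"
)

func main() {
	db, err := leveldb.OpenFile("/path/to/index.db", nil) // placeholder path
	if err != nil {
		log.Fatal(err)
	}

	// files.NewSet now takes a folder ID where it previously took a repo ID.
	fs := files.NewSet("default", db)

	var global int
	fs.WithGlobalTruncated(func(fi protocol.FileIntf) bool {
		global++
		return true // keep iterating
	})

	// NodeIDFromString became DeviceIDFromString.
	device, err := protocol.DeviceIDFromString("<DEVICE-ID>") // placeholder ID
	if err != nil {
		log.Fatal(err)
	}

	var have int
	fs.WithHaveTruncated(device, func(fi protocol.FileIntf) bool {
		have++
		return true
	})

	fmt.Printf("folder %q: %d files in global index, %d for device %v\n",
		"default", global, have, device)
}
```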

View File

@ -88,12 +88,12 @@ func startGUI(cfg config.GUIConfiguration, assetDir string, m *model.Model) erro
getRestMux.HandleFunc("/rest/lang", restGetLang)
getRestMux.HandleFunc("/rest/model", withModel(m, restGetModel))
getRestMux.HandleFunc("/rest/need", withModel(m, restGetNeed))
getRestMux.HandleFunc("/rest/nodeid", restGetNodeID)
getRestMux.HandleFunc("/rest/deviceid", restGetDeviceID)
getRestMux.HandleFunc("/rest/report", withModel(m, restGetReport))
getRestMux.HandleFunc("/rest/system", restGetSystem)
getRestMux.HandleFunc("/rest/upgrade", restGetUpgrade)
getRestMux.HandleFunc("/rest/version", restGetVersion)
getRestMux.HandleFunc("/rest/stats/node", withModel(m, restGetNodeStats))
getRestMux.HandleFunc("/rest/stats/device", withModel(m, restGetDeviceStats))
// Debug endpoints, not for general use
getRestMux.HandleFunc("/rest/debug/peerCompletion", withModel(m, restGetPeerCompletion))
@ -220,17 +220,17 @@ func restGetVersion(w http.ResponseWriter, r *http.Request) {
func restGetCompletion(m *model.Model, w http.ResponseWriter, r *http.Request) {
var qs = r.URL.Query()
var repo = qs.Get("repo")
var nodeStr = qs.Get("node")
var folder = qs.Get("folder")
var deviceStr = qs.Get("device")
node, err := protocol.NodeIDFromString(nodeStr)
device, err := protocol.DeviceIDFromString(deviceStr)
if err != nil {
http.Error(w, err.Error(), 500)
return
}
res := map[string]float64{
"completion": m.Completion(node, repo),
"completion": m.Completion(device, folder),
}
w.Header().Set("Content-Type", "application/json; charset=utf-8")
@ -239,29 +239,29 @@ func restGetCompletion(m *model.Model, w http.ResponseWriter, r *http.Request) {
func restGetModel(m *model.Model, w http.ResponseWriter, r *http.Request) {
var qs = r.URL.Query()
var repo = qs.Get("repo")
var folder = qs.Get("folder")
var res = make(map[string]interface{})
for _, cr := range cfg.Repositories {
if cr.ID == repo {
for _, cr := range cfg.Folders {
if cr.ID == folder {
res["invalid"] = cr.Invalid
break
}
}
globalFiles, globalDeleted, globalBytes := m.GlobalSize(repo)
globalFiles, globalDeleted, globalBytes := m.GlobalSize(folder)
res["globalFiles"], res["globalDeleted"], res["globalBytes"] = globalFiles, globalDeleted, globalBytes
localFiles, localDeleted, localBytes := m.LocalSize(repo)
localFiles, localDeleted, localBytes := m.LocalSize(folder)
res["localFiles"], res["localDeleted"], res["localBytes"] = localFiles, localDeleted, localBytes
needFiles, needBytes := m.NeedSize(repo)
needFiles, needBytes := m.NeedSize(folder)
res["needFiles"], res["needBytes"] = needFiles, needBytes
res["inSyncFiles"], res["inSyncBytes"] = globalFiles-needFiles, globalBytes-needBytes
res["state"], res["stateChanged"] = m.State(repo)
res["version"] = m.CurrentLocalVersion(repo) + m.RemoteLocalVersion(repo)
res["state"], res["stateChanged"] = m.State(folder)
res["version"] = m.CurrentLocalVersion(folder) + m.RemoteLocalVersion(folder)
w.Header().Set("Content-Type", "application/json; charset=utf-8")
json.NewEncoder(w).Encode(res)
@ -269,15 +269,15 @@ func restGetModel(m *model.Model, w http.ResponseWriter, r *http.Request) {
func restPostOverride(m *model.Model, w http.ResponseWriter, r *http.Request) {
var qs = r.URL.Query()
var repo = qs.Get("repo")
go m.Override(repo)
var folder = qs.Get("folder")
go m.Override(folder)
}
func restGetNeed(m *model.Model, w http.ResponseWriter, r *http.Request) {
var qs = r.URL.Query()
var repo = qs.Get("repo")
var folder = qs.Get("folder")
files := m.NeedFilesRepoLimited(repo, 100, 2500) // max 100 files or 2500 blocks
files := m.NeedFilesFolderLimited(folder, 100, 2500) // max 100 files or 2500 blocks
w.Header().Set("Content-Type", "application/json; charset=utf-8")
json.NewEncoder(w).Encode(files)
@ -289,8 +289,8 @@ func restGetConnections(m *model.Model, w http.ResponseWriter, r *http.Request)
json.NewEncoder(w).Encode(res)
}
func restGetNodeStats(m *model.Model, w http.ResponseWriter, r *http.Request) {
var res = m.NodeStatistics()
func restGetDeviceStats(m *model.Model, w http.ResponseWriter, r *http.Request) {
var res = m.DeviceStatistics()
w.Header().Set("Content-Type", "application/json; charset=utf-8")
json.NewEncoder(w).Encode(res)
}
@ -357,8 +357,8 @@ func restPostRestart(w http.ResponseWriter, r *http.Request) {
}
func restPostReset(w http.ResponseWriter, r *http.Request) {
flushResponse(`{"ok": "resetting repos"}`, w)
resetRepositories()
flushResponse(`{"ok": "resetting folders"}`, w)
resetFolders()
go restart()
}
@ -431,10 +431,10 @@ func showGuiError(l logger.LogLevel, err string) {
func restPostDiscoveryHint(w http.ResponseWriter, r *http.Request) {
var qs = r.URL.Query()
var node = qs.Get("node")
var device = qs.Get("device")
var addr = qs.Get("addr")
if len(node) != 0 && len(addr) != 0 && discoverer != nil {
discoverer.Hint(node, []string{addr})
if len(device) != 0 && len(addr) != 0 && discoverer != nil {
discoverer.Hint(device, []string{addr})
}
}
@ -451,7 +451,7 @@ func restGetIgnores(m *model.Model, w http.ResponseWriter, r *http.Request) {
qs := r.URL.Query()
w.Header().Set("Content-Type", "application/json; charset=utf-8")
ignores, err := m.GetIgnores(qs.Get("repo"))
ignores, err := m.GetIgnores(qs.Get("folder"))
if err != nil {
http.Error(w, err.Error(), 500)
return
@ -473,7 +473,7 @@ func restPostIgnores(m *model.Model, w http.ResponseWriter, r *http.Request) {
return
}
err = m.SetIgnores(qs.Get("repo"), data["ignore"])
err = m.SetIgnores(qs.Get("folder"), data["ignore"])
if err != nil {
http.Error(w, err.Error(), 500)
return
@ -519,10 +519,10 @@ func restGetUpgrade(w http.ResponseWriter, r *http.Request) {
json.NewEncoder(w).Encode(res)
}
func restGetNodeID(w http.ResponseWriter, r *http.Request) {
func restGetDeviceID(w http.ResponseWriter, r *http.Request) {
qs := r.URL.Query()
idStr := qs.Get("id")
id, err := protocol.NodeIDFromString(idStr)
id, err := protocol.DeviceIDFromString(idStr)
w.Header().Set("Content-Type", "application/json; charset=utf-8")
if err == nil {
json.NewEncoder(w).Encode(map[string]string{
@ -570,9 +570,9 @@ func restPostUpgrade(w http.ResponseWriter, r *http.Request) {
func restPostScan(m *model.Model, w http.ResponseWriter, r *http.Request) {
qs := r.URL.Query()
repo := qs.Get("repo")
folder := qs.Get("folder")
sub := qs.Get("sub")
err := m.ScanRepoSub(repo, sub)
err := m.ScanFolderSub(folder, sub)
if err != nil {
http.Error(w, err.Error(), 500)
}
@ -595,21 +595,21 @@ func restGetPeerCompletion(m *model.Model, w http.ResponseWriter, r *http.Reques
tot := map[string]float64{}
count := map[string]float64{}
for _, repo := range cfg.Repositories {
for _, node := range repo.NodeIDs() {
nodeStr := node.String()
if m.ConnectedTo(node) {
tot[nodeStr] += m.Completion(node, repo.ID)
for _, folder := range cfg.Folders {
for _, device := range folder.DeviceIDs() {
deviceStr := device.String()
if m.ConnectedTo(device) {
tot[deviceStr] += m.Completion(device, folder.ID)
} else {
tot[nodeStr] = 0
tot[deviceStr] = 0
}
count[nodeStr]++
count[deviceStr]++
}
}
comp := map[string]int{}
for node := range tot {
comp[node] = int(tot[node] / count[node])
for device := range tot {
comp[device] = int(tot[device] / count[device])
}
w.Header().Set("Content-Type", "application/json; charset=utf-8")
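Taken together, these handler renames change the HTTP surface: /rest/nodeid becomes /rest/deviceid, /rest/stats/node becomes /rest/stats/device, and the repo/node query parameters become folder/device. A minimal client sketch against the renamed endpoints; the GUI address, the absence of authentication, and the placeholder device ID are assumptions:

```go
package main

import (
	"fmt"
	"io"
	"log"
	"net/http"
	"net/url"
)

// get fetches a REST endpoint and prints the raw JSON body.
func get(base, path string, params url.Values) {
	u := base + path
	if len(params) > 0 {
		u += "?" + params.Encode()
	}
	resp, err := http.Get(u)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	fmt.Printf("%s\n%s\n", u, body)
}

func main() {
	base := "http://127.0.0.1:8080" // assumed GUI address

	// Formerly /rest/nodeid?id=...
	get(base, "/rest/deviceid", url.Values{"id": {"<DEVICE-ID>"}})

	// Formerly /rest/stats/node
	get(base, "/rest/stats/device", nil)

	// Formerly /rest/completion?repo=...&node=...
	get(base, "/rest/completion", url.Values{"folder": {"default"}, "device": {"<DEVICE-ID>"}})

	// Formerly /rest/model?repo=...
	get(base, "/rest/model", url.Values{"folder": {"default"}})
}
```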

View File

@ -84,7 +84,7 @@ func init() {
var (
cfg config.Configuration
myID protocol.NodeID
myID protocol.DeviceID
confDir string
logFlags int = log.Ltime
writeRateLimit *ratelimit.Bucket
@ -208,7 +208,7 @@ func main() {
cert, err := loadCert(dir, "")
if err == nil {
l.Warnln("Key exists; will not overwrite.")
l.Infoln("Node ID:", protocol.NewNodeID(cert.Certificate[0]))
l.Infoln("Device ID:", protocol.NewDeviceID(cert.Certificate[0]))
return
}
@ -218,7 +218,7 @@ func main() {
l.Fatalln("load cert:", err)
}
if err == nil {
l.Infoln("Node ID:", protocol.NewNodeID(cert.Certificate[0]))
l.Infoln("Device ID:", protocol.NewDeviceID(cert.Certificate[0]))
}
return
}
@ -264,7 +264,7 @@ func main() {
}
if reset {
resetRepositories()
resetFolders()
return
}
@ -319,7 +319,7 @@ func syncthingMain() {
}
}
myID = protocol.NewNodeID(cert.Certificate[0])
myID = protocol.NewDeviceID(cert.Certificate[0])
l.SetPrefix(fmt.Sprintf("[%s] ", myID.String()[:5]))
l.Infoln(LongVersion)
@ -336,7 +336,7 @@ func syncthingMain() {
cfg, err = config.Load(cfgFile, myID)
if err == nil {
myCfg := cfg.GetNodeConfiguration(myID)
myCfg := cfg.GetDeviceConfiguration(myID)
if myCfg == nil || myCfg.Name == "" {
myName, _ = os.Hostname()
} else {
@ -345,20 +345,20 @@ func syncthingMain() {
} else {
l.Infoln("No config file; starting with empty defaults")
myName, _ = os.Hostname()
defaultRepo := filepath.Join(getHomeDir(), "Sync")
defaultFolder := filepath.Join(getHomeDir(), "Sync")
cfg = config.New(cfgFile, myID)
cfg.Repositories = []config.RepositoryConfiguration{
cfg.Folders = []config.FolderConfiguration{
{
ID: "default",
Directory: defaultRepo,
Directory: defaultFolder,
RescanIntervalS: 60,
Nodes: []config.RepositoryNodeConfiguration{{NodeID: myID}},
Devices: []config.FolderDeviceConfiguration{{DeviceID: myID}},
},
}
cfg.Nodes = []config.NodeConfiguration{
cfg.Devices = []config.DeviceConfiguration{
{
NodeID: myID,
DeviceID: myID,
Addresses: []string{"dynamic"},
Name: myName,
},
@ -422,48 +422,48 @@ func syncthingMain() {
l.Fatalln("Cannot open database:", err, "- Is another copy of Syncthing already running?")
}
// Remove database entries for repos that no longer exist in the config
repoMap := cfg.RepoMap()
for _, repo := range files.ListRepos(db) {
if _, ok := repoMap[repo]; !ok {
l.Infof("Cleaning data for dropped repo %q", repo)
files.DropRepo(db, repo)
// Remove database entries for folders that no longer exist in the config
folderMap := cfg.FolderMap()
for _, folder := range files.ListFolders(db) {
if _, ok := folderMap[folder]; !ok {
l.Infof("Cleaning data for dropped folder %q", folder)
files.DropFolder(db, folder)
}
}
m := model.NewModel(confDir, &cfg, myName, "syncthing", Version, db)
nextRepo:
for i, repo := range cfg.Repositories {
if repo.Invalid != "" {
nextFolder:
for i, folder := range cfg.Folders {
if folder.Invalid != "" {
continue
}
repo.Directory = expandTilde(repo.Directory)
m.AddRepo(repo)
folder.Directory = expandTilde(folder.Directory)
m.AddFolder(folder)
fi, err := os.Stat(repo.Directory)
if m.CurrentLocalVersion(repo.ID) > 0 {
fi, err := os.Stat(folder.Directory)
if m.CurrentLocalVersion(folder.ID) > 0 {
// Safety check. If the cached index contains files but the
// repository doesn't exist, we have a problem. We would assume
// folder doesn't exist, we have a problem. We would assume
// that all files have been deleted which might not be the case,
// so mark it as invalid instead.
if err != nil || !fi.IsDir() {
l.Warnf("Stopping repository %q - directory missing, but has files in index", repo.ID)
cfg.Repositories[i].Invalid = "repo directory missing"
continue nextRepo
l.Warnf("Stopping folder %q - directory missing, but has files in index", folder.ID)
cfg.Folders[i].Invalid = "folder directory missing"
continue nextFolder
}
} else if os.IsNotExist(err) {
// If we don't have any files in the index, and the directory
// doesn't exist, try creating it.
err = os.MkdirAll(repo.Directory, 0700)
err = os.MkdirAll(folder.Directory, 0700)
}
if err != nil {
// If there was another error or we could not create the
// directory, the repository is invalid.
l.Warnf("Stopping repository %q - %v", err)
cfg.Repositories[i].Invalid = err.Error()
continue nextRepo
// directory, the folder is invalid.
l.Warnf("Stopping folder %q - %v", err)
cfg.Folders[i].Invalid = err.Error()
continue nextFolder
}
}
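The startup check above has three outcomes per folder. A condensed sketch of that decision as a hypothetical helper (not in the commit), separating the policy from the model and filesystem calls:

```go
package main

import "fmt"

// validateFolder mirrors the per-folder check above: a folder whose cached
// index already has files but whose directory is missing is stopped (marked
// invalid) rather than treated as fully deleted; a folder with nothing in the
// index and a missing directory simply gets its directory created.
func validateFolder(hasIndexedFiles, dirExists bool) (invalid string, createDir bool) {
	switch {
	case hasIndexedFiles && !dirExists:
		return "folder directory missing", false
	case !hasIndexedFiles && !dirExists:
		return "", true // os.MkdirAll on the folder directory
	default:
		return "", false // directory present; nothing to do
	}
}

func main() {
	invalid, create := validateFolder(true, false)
	fmt.Println(invalid, create) // "folder directory missing false"
}
```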
@ -507,33 +507,33 @@ nextRepo:
}
}
// Clear out old indexes for other nodes. Otherwise we'll start up and
// Clear out old indexes for other devices. Otherwise we'll start up and
// start needing a bunch of files which are nowhere to be found. This
// needs to be changed when we correctly do persistent indexes.
for _, repoCfg := range cfg.Repositories {
if repoCfg.Invalid != "" {
for _, folderCfg := range cfg.Folders {
if folderCfg.Invalid != "" {
continue
}
for _, node := range repoCfg.NodeIDs() {
if node == myID {
for _, device := range folderCfg.DeviceIDs() {
if device == myID {
continue
}
m.Index(node, repoCfg.ID, nil)
m.Index(device, folderCfg.ID, nil)
}
}
// Walk the repository and update the local model before establishing any
// connections to other nodes.
// Walk the folder and update the local model before establishing any
// connections to other devices.
m.CleanRepos()
l.Infoln("Performing initial repository scan")
m.ScanRepos()
m.CleanFolders()
l.Infoln("Performing initial folder scan")
m.ScanFolders()
// Remove all .idx* files that don't belong to an active repo.
// Remove all .idx* files that don't belong to an active folder.
validIndexes := make(map[string]bool)
for _, repo := range cfg.Repositories {
dir := expandTilde(repo.Directory)
for _, folder := range cfg.Folders {
dir := expandTilde(folder.Directory)
id := fmt.Sprintf("%x", sha1.Sum([]byte(dir)))
validIndexes[id] = true
}
@ -566,23 +566,23 @@ nextRepo:
setupUPnP()
}
// Routine to connect out to configured nodes
// Routine to connect out to configured devices
discoverer = discovery(externalPort)
go listenConnect(myID, m, tlsCfg)
for _, repo := range cfg.Repositories {
if repo.Invalid != "" {
for _, folder := range cfg.Folders {
if folder.Invalid != "" {
continue
}
// Routine to pull blocks from other nodes to synchronize the local
// repository. Does not run when we are in read only (publish only) mode.
if repo.ReadOnly {
l.Okf("Ready to synchronize %s (read only; no external updates accepted)", repo.ID)
m.StartRepoRO(repo.ID)
// Routine to pull blocks from other devices to synchronize the local
// folder. Does not run when we are in read only (publish only) mode.
if folder.ReadOnly {
l.Okf("Ready to synchronize %s (read only; no external updates accepted)", folder.ID)
m.StartFolderRO(folder.ID)
} else {
l.Okf("Ready to synchronize %s (read-write)", repo.ID)
m.StartRepoRW(repo.ID)
l.Okf("Ready to synchronize %s (read-write)", folder.ID)
m.StartFolderRW(folder.ID)
}
}
@ -595,9 +595,9 @@ nextRepo:
defer pprof.StopCPUProfile()
}
for _, node := range cfg.Nodes {
if len(node.Name) > 0 {
l.Infof("Node %s is %q at %v", node.NodeID, node.Name, node.Addresses)
for _, device := range cfg.Devices {
if len(device.Name) > 0 {
l.Infof("Device %s is %q at %v", device.DeviceID, device.Name, device.Addresses)
}
}
@ -668,7 +668,7 @@ func setupUPnP() {
}
func setupExternalPort(igd *upnp.IGD, port int) int {
// We seed the random number generator with the node ID to get a
// We seed the random number generator with the device ID to get a
// repeatable sequence of random external ports.
rnd := rand.NewSource(certSeed(cert.Certificate[0]))
for i := 0; i < 10; i++ {
@ -714,12 +714,12 @@ func renewUPnP(port int) {
}
}
func resetRepositories() {
func resetFolders() {
suffix := fmt.Sprintf(".syncthing-reset-%d", time.Now().UnixNano())
for _, repo := range cfg.Repositories {
if _, err := os.Stat(repo.Directory); err == nil {
l.Infof("Reset: Moving %s -> %s", repo.Directory, repo.Directory+suffix)
os.Rename(repo.Directory, repo.Directory+suffix)
for _, folder := range cfg.Folders {
if _, err := os.Stat(folder.Directory); err == nil {
l.Infof("Reset: Moving %s -> %s", folder.Directory, folder.Directory+suffix)
os.Rename(folder.Directory, folder.Directory+suffix)
}
}
@ -773,7 +773,7 @@ func shutdown() {
stop <- exitSuccess
}
func listenConnect(myID protocol.NodeID, m *model.Model, tlsCfg *tls.Config) {
func listenConnect(myID protocol.DeviceID, m *model.Model, tlsCfg *tls.Config) {
var conns = make(chan *tls.Conn)
// Listen
@ -793,7 +793,7 @@ next:
continue
}
remoteCert := certs[0]
remoteID := protocol.NewNodeID(remoteCert.Raw)
remoteID := protocol.NewDeviceID(remoteCert.Raw)
if remoteID == myID {
l.Infof("Connected to myself (%s) - should not happen", remoteID)
@ -802,17 +802,17 @@ next:
}
if m.ConnectedTo(remoteID) {
l.Infof("Connected to already connected node (%s)", remoteID)
l.Infof("Connected to already connected device (%s)", remoteID)
conn.Close()
continue
}
for _, nodeCfg := range cfg.Nodes {
if nodeCfg.NodeID == remoteID {
for _, deviceCfg := range cfg.Devices {
if deviceCfg.DeviceID == remoteID {
// Verify the name on the certificate. By default we set it to
// "syncthing" when generating, but the user may have replaced
// the certificate and used another name.
certName := nodeCfg.CertName
certName := deviceCfg.CertName
if certName == "" {
certName = "syncthing"
}
@ -839,13 +839,13 @@ next:
}
name := fmt.Sprintf("%s-%s", conn.LocalAddr(), conn.RemoteAddr())
protoConn := protocol.NewConnection(remoteID, rd, wr, m, name, nodeCfg.Compression)
protoConn := protocol.NewConnection(remoteID, rd, wr, m, name, deviceCfg.Compression)
l.Infof("Established secure connection to %s at %s", remoteID, name)
if debugNet {
l.Debugf("cipher suite %04X", conn.ConnectionState().CipherSuite)
}
events.Default.Log(events.NodeConnected, map[string]string{
events.Default.Log(events.DeviceConnected, map[string]string{
"id": remoteID.String(),
"addr": conn.RemoteAddr().String(),
})
@ -855,11 +855,11 @@ next:
}
}
events.Default.Log(events.NodeRejected, map[string]string{
"node": remoteID.String(),
events.Default.Log(events.DeviceRejected, map[string]string{
"device": remoteID.String(),
"address": conn.RemoteAddr().String(),
})
l.Infof("Connection from %s with unknown node ID %s; ignoring", conn.RemoteAddr(), remoteID)
l.Infof("Connection from %s with unknown device ID %s; ignoring", conn.RemoteAddr(), remoteID)
conn.Close()
}
}
@ -908,21 +908,21 @@ func listenTLS(conns chan *tls.Conn, addr string, tlsCfg *tls.Config) {
func dialTLS(m *model.Model, conns chan *tls.Conn, tlsCfg *tls.Config) {
var delay time.Duration = 1 * time.Second
for {
nextNode:
for _, nodeCfg := range cfg.Nodes {
if nodeCfg.NodeID == myID {
nextDevice:
for _, deviceCfg := range cfg.Devices {
if deviceCfg.DeviceID == myID {
continue
}
if m.ConnectedTo(nodeCfg.NodeID) {
if m.ConnectedTo(deviceCfg.DeviceID) {
continue
}
var addrs []string
for _, addr := range nodeCfg.Addresses {
for _, addr := range deviceCfg.Addresses {
if addr == "dynamic" {
if discoverer != nil {
t := discoverer.Lookup(nodeCfg.NodeID)
t := discoverer.Lookup(deviceCfg.DeviceID)
if len(t) == 0 {
continue
}
@ -943,7 +943,7 @@ func dialTLS(m *model.Model, conns chan *tls.Conn, tlsCfg *tls.Config) {
addr = net.JoinHostPort(host, "22000")
}
if debugNet {
l.Debugln("dial", nodeCfg.NodeID, addr)
l.Debugln("dial", deviceCfg.DeviceID, addr)
}
raddr, err := net.ResolveTCPAddr("tcp", addr)
@ -973,7 +973,7 @@ func dialTLS(m *model.Model, conns chan *tls.Conn, tlsCfg *tls.Config) {
}
conns <- tc
continue nextNode
continue nextDevice
}
}

View File

@ -31,13 +31,13 @@ func reportData(m *model.Model) map[string]interface{} {
res["version"] = Version
res["longVersion"] = LongVersion
res["platform"] = runtime.GOOS + "-" + runtime.GOARCH
res["numRepos"] = len(cfg.Repositories)
res["numNodes"] = len(cfg.Nodes)
res["numFolders"] = len(cfg.Folders)
res["numDevices"] = len(cfg.Devices)
var totFiles, maxFiles int
var totBytes, maxBytes int64
for _, repo := range cfg.Repositories {
files, _, bytes := m.GlobalSize(repo.ID)
for _, folder := range cfg.Folders {
files, _, bytes := m.GlobalSize(folder.ID)
totFiles += files
totBytes += bytes
if files > maxFiles {
@ -49,9 +49,9 @@ func reportData(m *model.Model) map[string]interface{} {
}
res["totFiles"] = totFiles
res["repoMaxFiles"] = maxFiles
res["folderMaxFiles"] = maxFiles
res["totMiB"] = totBytes / 1024 / 1024
res["repoMaxMiB"] = maxBytes / 1024 / 1024
res["folderMaxMiB"] = maxBytes / 1024 / 1024
var mem runtime.MemStats
runtime.ReadMemStats(&mem)
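In the usage report the aggregate keys are renamed accordingly: numRepos/numNodes become numFolders/numDevices, and repoMaxFiles/repoMaxMiB become folderMaxFiles/folderMaxMiB. A sketch of that portion of the payload with made-up values, for illustration only:

```go
package main

import (
	"encoding/json"
	"os"
)

func main() {
	// Keys as set in reportData above; the values here are invented.
	res := map[string]interface{}{
		"numFolders":     2,    // was numRepos
		"numDevices":     3,    // was numNodes
		"folderMaxFiles": 9876, // was repoMaxFiles
		"folderMaxMiB":   2048, // was repoMaxMiB
	}
	json.NewEncoder(os.Stdout).Encode(res)
}
```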

View File

@ -83,11 +83,11 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http, $translate, $loca
$scope.errors = [];
$scope.model = {};
$scope.myID = '';
$scope.nodes = [];
$scope.devices = [];
$scope.protocolChanged = false;
$scope.reportData = {};
$scope.reportPreview = false;
$scope.repos = {};
$scope.folders = {};
$scope.seenError = '';
$scope.upgradeInfo = {};
$scope.stats = {};
@ -180,33 +180,33 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http, $translate, $loca
$scope.$on('StateChanged', function (event, arg) {
var data = arg.data;
if ($scope.model[data.repo]) {
$scope.model[data.repo].state = data.to;
if ($scope.model[data.folder]) {
$scope.model[data.folder].state = data.to;
}
});
$scope.$on('LocalIndexUpdated', function (event, arg) {
var data = arg.data;
refreshRepo(data.repo);
refreshFolder(data.folder);
// Update completion status for all nodes that we share this repo with.
$scope.repos[data.repo].Nodes.forEach(function (nodeCfg) {
refreshCompletion(nodeCfg.NodeID, data.repo);
// Update completion status for all devices that we share this folder with.
$scope.folders[data.folder].Devices.forEach(function (deviceCfg) {
refreshCompletion(deviceCfg.DeviceID, data.folder);
});
});
$scope.$on('RemoteIndexUpdated', function (event, arg) {
var data = arg.data;
refreshRepo(data.repo);
refreshCompletion(data.node, data.repo);
refreshFolder(data.folder);
refreshCompletion(data.device, data.folder);
});
$scope.$on('NodeDisconnected', function (event, arg) {
$scope.$on('DeviceDisconnected', function (event, arg) {
delete $scope.connections[arg.data.id];
refreshNodeStats();
refreshDeviceStats();
});
$scope.$on('NodeConnected', function (event, arg) {
$scope.$on('DeviceConnected', function (event, arg) {
if (!$scope.connections[arg.data.id]) {
$scope.connections[arg.data.id] = {
inbps: 0,
@ -251,13 +251,13 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http, $translate, $loca
var debouncedFuncs = {};
function refreshRepo(repo) {
var key = "refreshRepo" + repo;
function refreshFolder(folder) {
var key = "refreshFolder" + folder;
if (!debouncedFuncs[key]) {
debouncedFuncs[key] = debounce(function () {
$http.get(urlbase + '/model?repo=' + encodeURIComponent(repo)).success(function (data) {
$scope.model[repo] = data;
console.log("refreshRepo", repo, data);
$http.get(urlbase + '/model?folder=' + encodeURIComponent(folder)).success(function (data) {
$scope.model[folder] = data;
console.log("refreshFolder", folder, data);
});
}, 1000, true);
}
@ -270,19 +270,19 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http, $translate, $loca
$scope.config = config;
$scope.config.Options.ListenStr = $scope.config.Options.ListenAddress.join(', ');
$scope.nodes = $scope.config.Nodes;
$scope.nodes.forEach(function (nodeCfg) {
$scope.completion[nodeCfg.NodeID] = {
$scope.devices = $scope.config.Devices;
$scope.devices.forEach(function (deviceCfg) {
$scope.completion[deviceCfg.DeviceID] = {
_total: 100,
};
});
$scope.nodes.sort(nodeCompare);
$scope.devices.sort(deviceCompare);
$scope.repos = repoMap($scope.config.Repositories);
Object.keys($scope.repos).forEach(function (repo) {
refreshRepo(repo);
$scope.repos[repo].Nodes.forEach(function (nodeCfg) {
refreshCompletion(nodeCfg.NodeID, repo);
$scope.folders = folderMap($scope.config.Folders);
Object.keys($scope.folders).forEach(function (folder) {
refreshFolder(folder);
$scope.folders[folder].Devices.forEach(function (deviceCfg) {
refreshCompletion(deviceCfg.DeviceID, folder);
});
});
@ -299,32 +299,32 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http, $translate, $loca
});
}
function refreshCompletion(node, repo) {
if (node === $scope.myID) {
function refreshCompletion(device, folder) {
if (device === $scope.myID) {
return;
}
var key = "refreshCompletion" + node + repo;
var key = "refreshCompletion" + device + folder;
if (!debouncedFuncs[key]) {
debouncedFuncs[key] = debounce(function () {
$http.get(urlbase + '/completion?node=' + node + '&repo=' + encodeURIComponent(repo)).success(function (data) {
if (!$scope.completion[node]) {
$scope.completion[node] = {};
$http.get(urlbase + '/completion?device=' + device + '&folder=' + encodeURIComponent(folder)).success(function (data) {
if (!$scope.completion[device]) {
$scope.completion[device] = {};
}
$scope.completion[node][repo] = data.completion;
$scope.completion[device][folder] = data.completion;
var tot = 0,
cnt = 0;
for (var cmp in $scope.completion[node]) {
for (var cmp in $scope.completion[device]) {
if (cmp === "_total") {
continue;
}
tot += $scope.completion[node][cmp];
tot += $scope.completion[device][cmp];
cnt += 1;
}
$scope.completion[node]._total = tot / cnt;
$scope.completion[device]._total = tot / cnt;
console.log("refreshCompletion", node, repo, $scope.completion[node]);
console.log("refreshCompletion", device, folder, $scope.completion[device]);
});
}, 1000, true);
}
@ -373,14 +373,14 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http, $translate, $loca
});
}
var refreshNodeStats = debounce(function () {
$http.get(urlbase + "/stats/node").success(function (data) {
var refreshDeviceStats = debounce(function () {
$http.get(urlbase + "/stats/device").success(function (data) {
$scope.stats = data;
for (var node in $scope.stats) {
$scope.stats[node].LastSeen = new Date($scope.stats[node].LastSeen);
$scope.stats[node].LastSeenDays = (new Date() - $scope.stats[node].LastSeen) / 1000 / 86400;
for (var device in $scope.stats) {
$scope.stats[device].LastSeen = new Date($scope.stats[device].LastSeen);
$scope.stats[device].LastSeenDays = (new Date() - $scope.stats[device].LastSeen) / 1000 / 86400;
}
console.log("refreshNodeStats", data);
console.log("refreshDeviceStats", data);
});
}, 500);
@ -388,7 +388,7 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http, $translate, $loca
refreshSystem();
refreshConfig();
refreshConnectionStats();
refreshNodeStats();
refreshDeviceStats();
$http.get(urlbase + '/version').success(function (data) {
$scope.version = data.version;
@ -411,28 +411,28 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http, $translate, $loca
refreshErrors();
};
$scope.repoStatus = function (repo) {
if (typeof $scope.model[repo] === 'undefined') {
$scope.folderStatus = function (folder) {
if (typeof $scope.model[folder] === 'undefined') {
return 'unknown';
}
if ($scope.model[repo].invalid !== '') {
if ($scope.model[folder].invalid !== '') {
return 'stopped';
}
return '' + $scope.model[repo].state;
return '' + $scope.model[folder].state;
};
$scope.repoClass = function (repo) {
if (typeof $scope.model[repo] === 'undefined') {
$scope.folderClass = function (folder) {
if (typeof $scope.model[folder] === 'undefined') {
return 'info';
}
if ($scope.model[repo].invalid !== '') {
if ($scope.model[folder].invalid !== '') {
return 'danger';
}
var state = '' + $scope.model[repo].state;
var state = '' + $scope.model[folder].state;
if (state == 'idle') {
return 'success';
}
@ -445,21 +445,21 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http, $translate, $loca
return 'info';
};
$scope.syncPercentage = function (repo) {
if (typeof $scope.model[repo] === 'undefined') {
$scope.syncPercentage = function (folder) {
if (typeof $scope.model[folder] === 'undefined') {
return 100;
}
if ($scope.model[repo].globalBytes === 0) {
if ($scope.model[folder].globalBytes === 0) {
return 100;
}
var pct = 100 * $scope.model[repo].inSyncBytes / $scope.model[repo].globalBytes;
var pct = 100 * $scope.model[folder].inSyncBytes / $scope.model[folder].globalBytes;
return Math.floor(pct);
};
$scope.nodeIcon = function (nodeCfg) {
if ($scope.connections[nodeCfg.NodeID]) {
if ($scope.completion[nodeCfg.NodeID] && $scope.completion[nodeCfg.NodeID]._total === 100) {
$scope.deviceIcon = function (deviceCfg) {
if ($scope.connections[deviceCfg.DeviceID]) {
if ($scope.completion[deviceCfg.DeviceID] && $scope.completion[deviceCfg.DeviceID]._total === 100) {
return 'ok';
} else {
return 'refresh';
@ -469,9 +469,9 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http, $translate, $loca
return 'minus';
};
$scope.nodeClass = function (nodeCfg) {
if ($scope.connections[nodeCfg.NodeID]) {
if ($scope.completion[nodeCfg.NodeID] && $scope.completion[nodeCfg.NodeID]._total === 100) {
$scope.deviceClass = function (deviceCfg) {
if ($scope.connections[deviceCfg.DeviceID]) {
if ($scope.completion[deviceCfg.DeviceID] && $scope.completion[deviceCfg.DeviceID]._total === 100) {
return 'success';
} else {
return 'primary';
@ -481,25 +481,25 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http, $translate, $loca
return 'info';
};
$scope.nodeAddr = function (nodeCfg) {
var conn = $scope.connections[nodeCfg.NodeID];
$scope.deviceAddr = function (deviceCfg) {
var conn = $scope.connections[deviceCfg.DeviceID];
if (conn) {
return conn.Address;
}
return '?';
};
$scope.nodeCompletion = function (nodeCfg) {
var conn = $scope.connections[nodeCfg.NodeID];
$scope.deviceCompletion = function (deviceCfg) {
var conn = $scope.connections[deviceCfg.DeviceID];
if (conn) {
return conn.Completion + '%';
}
return '';
};
$scope.findNode = function (nodeID) {
var matches = $scope.nodes.filter(function (n) {
return n.NodeID == nodeID;
$scope.findDevice = function (deviceID) {
var matches = $scope.devices.filter(function (n) {
return n.DeviceID == deviceID;
});
if (matches.length != 1) {
return undefined;
@ -507,32 +507,32 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http, $translate, $loca
return matches[0];
};
$scope.nodeName = function (nodeCfg) {
if (typeof nodeCfg === 'undefined') {
$scope.deviceName = function (deviceCfg) {
if (typeof deviceCfg === 'undefined') {
return "";
}
if (nodeCfg.Name) {
return nodeCfg.Name;
if (deviceCfg.Name) {
return deviceCfg.Name;
}
return nodeCfg.NodeID.substr(0, 6);
return deviceCfg.DeviceID.substr(0, 6);
};
$scope.thisNodeName = function () {
var node = $scope.thisNode();
if (typeof node === 'undefined') {
return "(unknown node)";
$scope.thisDeviceName = function () {
var device = $scope.thisDevice();
if (typeof device === 'undefined') {
return "(unknown device)";
}
if (node.Name) {
return node.Name;
if (device.Name) {
return device.Name;
}
return node.NodeID.substr(0, 6);
return device.DeviceID.substr(0, 6);
};
$scope.editSettings = function () {
// Make a working copy
$scope.tmpOptions = angular.copy($scope.config.Options);
$scope.tmpOptions.UREnabled = ($scope.tmpOptions.URAccepted > 0);
$scope.tmpOptions.NodeName = $scope.thisNode().Name;
$scope.tmpOptions.DeviceName = $scope.thisDevice().Name;
$scope.tmpGUI = angular.copy($scope.config.GUI);
$('#settings').modal();
};
@ -569,7 +569,7 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http, $translate, $loca
}
// Apply new settings locally
$scope.thisNode().Name = $scope.tmpOptions.NodeName;
$scope.thisDevice().Name = $scope.tmpOptions.DeviceName;
$scope.config.Options = angular.copy($scope.tmpOptions);
$scope.config.GUI = angular.copy($scope.tmpGUI);
$scope.config.Options.ListenAddress = $scope.config.Options.ListenStr.split(',').map(function (x) {
@ -623,100 +623,100 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http, $translate, $loca
$scope.configInSync = true;
};
$scope.editNode = function (nodeCfg) {
$scope.currentNode = $.extend({}, nodeCfg);
$scope.editDevice = function (deviceCfg) {
$scope.currentDevice = $.extend({}, deviceCfg);
$scope.editingExisting = true;
$scope.editingSelf = (nodeCfg.NodeID == $scope.myID);
$scope.currentNode.AddressesStr = nodeCfg.Addresses.join(', ');
$scope.nodeEditor.$setPristine();
$('#editNode').modal();
$scope.editingSelf = (deviceCfg.DeviceID == $scope.myID);
$scope.currentDevice.AddressesStr = deviceCfg.Addresses.join(', ');
$scope.deviceEditor.$setPristine();
$('#editDevice').modal();
};
$scope.idNode = function () {
$scope.idDevice = function () {
$('#idqr').modal('show');
};
$scope.addNode = function () {
$scope.currentNode = {
$scope.addDevice = function () {
$scope.currentDevice = {
AddressesStr: 'dynamic',
Compression: true,
Introducer: true
};
$scope.editingExisting = false;
$scope.editingSelf = false;
$scope.nodeEditor.$setPristine();
$('#editNode').modal();
$scope.deviceEditor.$setPristine();
$('#editDevice').modal();
};
$scope.deleteNode = function () {
$('#editNode').modal('hide');
$scope.deleteDevice = function () {
$('#editDevice').modal('hide');
if (!$scope.editingExisting) {
return;
}
$scope.nodes = $scope.nodes.filter(function (n) {
return n.NodeID !== $scope.currentNode.NodeID;
$scope.devices = $scope.devices.filter(function (n) {
return n.DeviceID !== $scope.currentDevice.DeviceID;
});
$scope.config.Nodes = $scope.nodes;
$scope.config.Devices = $scope.devices;
for (var id in $scope.repos) {
$scope.repos[id].Nodes = $scope.repos[id].Nodes.filter(function (n) {
return n.NodeID !== $scope.currentNode.NodeID;
for (var id in $scope.folders) {
$scope.folders[id].Devices = $scope.folders[id].Devices.filter(function (n) {
return n.DeviceID !== $scope.currentDevice.DeviceID;
});
}
$scope.saveConfig();
};
$scope.saveNode = function () {
var nodeCfg, done, i;
$scope.saveDevice = function () {
var deviceCfg, done, i;
$('#editNode').modal('hide');
nodeCfg = $scope.currentNode;
nodeCfg.Addresses = nodeCfg.AddressesStr.split(',').map(function (x) {
$('#editDevice').modal('hide');
deviceCfg = $scope.currentDevice;
deviceCfg.Addresses = deviceCfg.AddressesStr.split(',').map(function (x) {
return x.trim();
});
done = false;
for (i = 0; i < $scope.nodes.length; i++) {
if ($scope.nodes[i].NodeID === nodeCfg.NodeID) {
$scope.nodes[i] = nodeCfg;
for (i = 0; i < $scope.devices.length; i++) {
if ($scope.devices[i].DeviceID === deviceCfg.DeviceID) {
$scope.devices[i] = deviceCfg;
done = true;
break;
}
}
if (!done) {
$scope.nodes.push(nodeCfg);
$scope.devices.push(deviceCfg);
}
$scope.nodes.sort(nodeCompare);
$scope.config.Nodes = $scope.nodes;
$scope.devices.sort(deviceCompare);
$scope.config.Devices = $scope.devices;
$scope.saveConfig();
};
$scope.otherNodes = function () {
return $scope.nodes.filter(function (n) {
return n.NodeID !== $scope.myID;
$scope.otherDevices = function () {
return $scope.devices.filter(function (n) {
return n.DeviceID !== $scope.myID;
});
};
$scope.thisNode = function () {
$scope.thisDevice = function () {
var i, n;
for (i = 0; i < $scope.nodes.length; i++) {
n = $scope.nodes[i];
if (n.NodeID === $scope.myID) {
for (i = 0; i < $scope.devices.length; i++) {
n = $scope.devices[i];
if (n.DeviceID === $scope.myID) {
return n;
}
}
};
$scope.allNodes = function () {
var nodes = $scope.otherNodes();
nodes.push($scope.thisNode());
return nodes;
$scope.allDevices = function () {
var devices = $scope.otherDevices();
devices.push($scope.thisDevice());
return devices;
};
$scope.errorList = function () {
@ -730,134 +730,134 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http, $translate, $loca
$http.post(urlbase + '/error/clear');
};
$scope.friendlyNodes = function (str) {
for (var i = 0; i < $scope.nodes.length; i++) {
var cfg = $scope.nodes[i];
str = str.replace(cfg.NodeID, $scope.nodeName(cfg));
$scope.friendlyDevices = function (str) {
for (var i = 0; i < $scope.devices.length; i++) {
var cfg = $scope.devices[i];
str = str.replace(cfg.DeviceID, $scope.deviceName(cfg));
}
return str;
};
$scope.repoList = function () {
return repoList($scope.repos);
$scope.folderList = function () {
return folderList($scope.folders);
};
$scope.editRepo = function (nodeCfg) {
$scope.currentRepo = angular.copy(nodeCfg);
$scope.currentRepo.selectedNodes = {};
$scope.currentRepo.Nodes.forEach(function (n) {
$scope.currentRepo.selectedNodes[n.NodeID] = true;
$scope.editFolder = function (deviceCfg) {
$scope.currentFolder = angular.copy(deviceCfg);
$scope.currentFolder.selectedDevices = {};
$scope.currentFolder.Devices.forEach(function (n) {
$scope.currentFolder.selectedDevices[n.DeviceID] = true;
});
if ($scope.currentRepo.Versioning && $scope.currentRepo.Versioning.Type === "simple") {
$scope.currentRepo.simpleFileVersioning = true;
$scope.currentRepo.FileVersioningSelector = "simple";
$scope.currentRepo.simpleKeep = +$scope.currentRepo.Versioning.Params.keep;
} else if ($scope.currentRepo.Versioning && $scope.currentRepo.Versioning.Type === "staggered") {
$scope.currentRepo.staggeredFileVersioning = true;
$scope.currentRepo.FileVersioningSelector = "staggered";
$scope.currentRepo.staggeredMaxAge = Math.floor(+$scope.currentRepo.Versioning.Params.maxAge / 86400);
$scope.currentRepo.staggeredCleanInterval = +$scope.currentRepo.Versioning.Params.cleanInterval;
$scope.currentRepo.staggeredVersionsPath = $scope.currentRepo.Versioning.Params.versionsPath;
if ($scope.currentFolder.Versioning && $scope.currentFolder.Versioning.Type === "simple") {
$scope.currentFolder.simpleFileVersioning = true;
$scope.currentFolder.FileVersioningSelector = "simple";
$scope.currentFolder.simpleKeep = +$scope.currentFolder.Versioning.Params.keep;
} else if ($scope.currentFolder.Versioning && $scope.currentFolder.Versioning.Type === "staggered") {
$scope.currentFolder.staggeredFileVersioning = true;
$scope.currentFolder.FileVersioningSelector = "staggered";
$scope.currentFolder.staggeredMaxAge = Math.floor(+$scope.currentFolder.Versioning.Params.maxAge / 86400);
$scope.currentFolder.staggeredCleanInterval = +$scope.currentFolder.Versioning.Params.cleanInterval;
$scope.currentFolder.staggeredVersionsPath = $scope.currentFolder.Versioning.Params.versionsPath;
} else {
$scope.currentRepo.FileVersioningSelector = "none";
$scope.currentFolder.FileVersioningSelector = "none";
}
$scope.currentRepo.simpleKeep = $scope.currentRepo.simpleKeep || 5;
$scope.currentRepo.staggeredCleanInterval = $scope.currentRepo.staggeredCleanInterval || 3600;
$scope.currentRepo.staggeredVersionsPath = $scope.currentRepo.staggeredVersionsPath || "";
$scope.currentFolder.simpleKeep = $scope.currentFolder.simpleKeep || 5;
$scope.currentFolder.staggeredCleanInterval = $scope.currentFolder.staggeredCleanInterval || 3600;
$scope.currentFolder.staggeredVersionsPath = $scope.currentFolder.staggeredVersionsPath || "";
// staggeredMaxAge can validly be zero, which we should not replace
// with the default value of 365. So only set the default if it's
// actually undefined.
if (typeof $scope.currentRepo.staggeredMaxAge === 'undefined') {
$scope.currentRepo.staggeredMaxAge = 365;
if (typeof $scope.currentFolder.staggeredMaxAge === 'undefined') {
$scope.currentFolder.staggeredMaxAge = 365;
}
$scope.editingExisting = true;
$scope.repoEditor.$setPristine();
$('#editRepo').modal();
$scope.folderEditor.$setPristine();
$('#editFolder').modal();
};
$scope.addRepo = function () {
$scope.currentRepo = {
selectedNodes: {}
$scope.addFolder = function () {
$scope.currentFolder = {
selectedDevices: {}
};
$scope.currentRepo.RescanIntervalS = 60;
$scope.currentRepo.FileVersioningSelector = "none";
$scope.currentRepo.simpleKeep = 5;
$scope.currentRepo.staggeredMaxAge = 365;
$scope.currentRepo.staggeredCleanInterval = 3600;
$scope.currentRepo.staggeredVersionsPath = "";
$scope.currentFolder.RescanIntervalS = 60;
$scope.currentFolder.FileVersioningSelector = "none";
$scope.currentFolder.simpleKeep = 5;
$scope.currentFolder.staggeredMaxAge = 365;
$scope.currentFolder.staggeredCleanInterval = 3600;
$scope.currentFolder.staggeredVersionsPath = "";
$scope.editingExisting = false;
$scope.repoEditor.$setPristine();
$('#editRepo').modal();
$scope.folderEditor.$setPristine();
$('#editFolder').modal();
};
$scope.saveRepo = function () {
var repoCfg, done, i;
$scope.saveFolder = function () {
var folderCfg, done, i;
$('#editRepo').modal('hide');
repoCfg = $scope.currentRepo;
repoCfg.Nodes = [];
repoCfg.selectedNodes[$scope.myID] = true;
for (var nodeID in repoCfg.selectedNodes) {
if (repoCfg.selectedNodes[nodeID] === true) {
repoCfg.Nodes.push({
NodeID: nodeID
$('#editFolder').modal('hide');
folderCfg = $scope.currentFolder;
folderCfg.Devices = [];
folderCfg.selectedDevices[$scope.myID] = true;
for (var deviceID in folderCfg.selectedDevices) {
if (folderCfg.selectedDevices[deviceID] === true) {
folderCfg.Devices.push({
DeviceID: deviceID
});
}
}
delete repoCfg.selectedNodes;
delete folderCfg.selectedDevices;
if (repoCfg.FileVersioningSelector === "simple") {
repoCfg.Versioning = {
if (folderCfg.FileVersioningSelector === "simple") {
folderCfg.Versioning = {
'Type': 'simple',
'Params': {
'keep': '' + repoCfg.simpleKeep,
'keep': '' + folderCfg.simpleKeep,
}
};
delete repoCfg.simpleFileVersioning;
delete repoCfg.simpleKeep;
} else if (repoCfg.FileVersioningSelector === "staggered") {
repoCfg.Versioning = {
delete folderCfg.simpleFileVersioning;
delete folderCfg.simpleKeep;
} else if (folderCfg.FileVersioningSelector === "staggered") {
folderCfg.Versioning = {
'Type': 'staggered',
'Params': {
'maxAge': '' + (repoCfg.staggeredMaxAge * 86400),
'cleanInterval': '' + repoCfg.staggeredCleanInterval,
'versionsPath': '' + repoCfg.staggeredVersionsPath,
'maxAge': '' + (folderCfg.staggeredMaxAge * 86400),
'cleanInterval': '' + folderCfg.staggeredCleanInterval,
'versionsPath': '' + folderCfg.staggeredVersionsPath,
}
};
delete repoCfg.staggeredFileVersioning;
delete repoCfg.staggeredMaxAge;
delete repoCfg.staggeredCleanInterval;
delete repoCfg.staggeredVersionsPath;
delete folderCfg.staggeredFileVersioning;
delete folderCfg.staggeredMaxAge;
delete folderCfg.staggeredCleanInterval;
delete folderCfg.staggeredVersionsPath;
} else {
delete repoCfg.Versioning;
delete folderCfg.Versioning;
}
$scope.repos[repoCfg.ID] = repoCfg;
$scope.config.Repositories = repoList($scope.repos);
$scope.folders[folderCfg.ID] = folderCfg;
$scope.config.Folders = folderList($scope.folders);
$scope.saveConfig();
};
$scope.sharesRepo = function (repoCfg) {
$scope.sharesFolder = function (folderCfg) {
var names = [];
repoCfg.Nodes.forEach(function (node) {
names.push($scope.nodeName($scope.findNode(node.NodeID)));
folderCfg.Devices.forEach(function (device) {
names.push($scope.deviceName($scope.findDevice(device.DeviceID)));
});
names.sort();
return names.join(", ");
};
$scope.deleteRepo = function () {
$('#editRepo').modal('hide');
$scope.deleteFolder = function () {
$('#editFolder').modal('hide');
if (!$scope.editingExisting) {
return;
}
delete $scope.repos[$scope.currentRepo.ID];
$scope.config.Repositories = repoList($scope.repos);
delete $scope.folders[$scope.currentFolder.ID];
$scope.config.Folders = folderList($scope.folders);
$scope.saveConfig();
};
@ -868,18 +868,18 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http, $translate, $loca
}
$('#editIgnoresButton').attr('disabled', 'disabled');
$http.get(urlbase + '/ignores?repo=' + encodeURIComponent($scope.currentRepo.ID))
$http.get(urlbase + '/ignores?folder=' + encodeURIComponent($scope.currentFolder.ID))
.success(function (data) {
data.ignore = data.ignore || [];
$('#editRepo').modal('hide');
$('#editFolder').modal('hide');
var textArea = $('#editIgnores textarea');
textArea.val(data.ignore.join('\n'));
$('#editIgnores').modal()
.on('hidden.bs.modal', function () {
$('#editRepo').modal();
$('#editFolder').modal();
})
.on('shown.bs.modal', function () {
textArea.focus();
@ -895,7 +895,7 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http, $translate, $loca
return;
}
$http.post(urlbase + '/ignores?repo=' + encodeURIComponent($scope.currentRepo.ID), {
$http.post(urlbase + '/ignores?folder=' + encodeURIComponent($scope.currentFolder.ID), {
ignore: $('#editIgnores textarea').val().split('\n')
});
};
@ -923,10 +923,10 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http, $translate, $loca
$('#ur').modal('hide');
};
$scope.showNeed = function (repo) {
$scope.showNeed = function (folder) {
$scope.neededLoaded = false;
$('#needed').modal();
$http.get(urlbase + "/need?repo=" + encodeURIComponent(repo)).success(function (data) {
$http.get(urlbase + "/need?folder=" + encodeURIComponent(folder)).success(function (data) {
$scope.needed = data;
$scope.neededLoaded = true;
});
@ -947,8 +947,8 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http, $translate, $loca
}
};
$scope.override = function (repo) {
$http.post(urlbase + "/model/override?repo=" + encodeURIComponent(repo));
$scope.override = function (folder) {
$http.post(urlbase + "/model/override?folder=" + encodeURIComponent(folder));
};
$scope.about = function () {
@ -959,34 +959,34 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http, $translate, $loca
$scope.reportPreview = true;
};
$scope.rescanRepo = function (repo) {
$http.post(urlbase + "/scan?repo=" + encodeURIComponent(repo));
$scope.rescanFolder = function (folder) {
$http.post(urlbase + "/scan?folder=" + encodeURIComponent(folder));
};
$scope.init();
setInterval($scope.refresh, 10000);
});
function nodeCompare(a, b) {
function deviceCompare(a, b) {
if (typeof a.Name !== 'undefined' && typeof b.Name !== 'undefined') {
if (a.Name < b.Name)
return -1;
return a.Name > b.Name;
}
if (a.NodeID < b.NodeID) {
if (a.DeviceID < b.DeviceID) {
return -1;
}
return a.NodeID > b.NodeID;
return a.DeviceID > b.DeviceID;
}
function repoCompare(a, b) {
function folderCompare(a, b) {
if (a.ID < b.ID) {
return -1;
}
return a.ID > b.ID;
}
function repoMap(l) {
function folderMap(l) {
var m = {};
l.forEach(function (r) {
m[r.ID] = r;
@ -994,12 +994,12 @@ function repoMap(l) {
return m;
}
function repoList(m) {
function folderList(m) {
var l = [];
for (var id in m) {
l.push(m[id]);
}
l.sort(repoCompare);
l.sort(folderCompare);
return l;
}
@ -1137,20 +1137,20 @@ syncthing.filter('basename', function () {
};
});
syncthing.directive('uniqueRepo', function () {
syncthing.directive('uniqueFolder', function () {
return {
require: 'ngModel',
link: function (scope, elm, attrs, ctrl) {
ctrl.$parsers.unshift(function (viewValue) {
if (scope.editingExisting) {
// we shouldn't validate
ctrl.$setValidity('uniqueRepo', true);
} else if (scope.repos[viewValue]) {
// the repo exists already
ctrl.$setValidity('uniqueRepo', false);
ctrl.$setValidity('uniqueFolder', true);
} else if (scope.folders[viewValue]) {
// the folder exists already
ctrl.$setValidity('uniqueFolder', false);
} else {
// the repo is unique
ctrl.$setValidity('uniqueRepo', true);
// the folder is unique
ctrl.$setValidity('uniqueFolder', true);
}
return viewValue;
});
@ -1158,20 +1158,20 @@ syncthing.directive('uniqueRepo', function () {
};
});
syncthing.directive('validNodeid', function ($http) {
syncthing.directive('validDeviceid', function ($http) {
return {
require: 'ngModel',
link: function (scope, elm, attrs, ctrl) {
ctrl.$parsers.unshift(function (viewValue) {
if (scope.editingExisting) {
// we shouldn't validate
ctrl.$setValidity('validNodeid', true);
ctrl.$setValidity('validDeviceid', true);
} else {
$http.get(urlbase + '/nodeid?id=' + viewValue).success(function (resp) {
$http.get(urlbase + '/deviceid?id=' + viewValue).success(function (resp) {
if (resp.error) {
ctrl.$setValidity('validNodeid', false);
ctrl.$setValidity('validDeviceid', false);
} else {
ctrl.$setValidity('validNodeid', true);
ctrl.$setValidity('validDeviceid', true);
}
});
}

View File

@ -13,7 +13,7 @@
<meta name="author" content="">
<link rel="shortcut icon" href="img/favicon.png">
<title>Syncthing | {{thisNodeName()}}</title>
<title>Syncthing | {{thisDeviceName()}}</title>
<link href="bootstrap/css/bootstrap.min.css" rel="stylesheet">
<link href="font/raleway.css" rel="stylesheet">
<link href="overrides.css" rel="stylesheet">
@ -27,7 +27,7 @@
<nav class="navbar navbar-top navbar-default" role="navigation">
<div class="container">
<span class="navbar-brand"><img class="logo" src="img/logo-text-64.png" height="32" width="117"/></span>
<p class="navbar-text hidden-xs">{{thisNodeName()}}</p>
<p class="navbar-text hidden-xs">{{thisDeviceName()}}</p>
<ul class="nav navbar-nav navbar-right">
<li ng-if="upgradeInfo.newer">
<button type="button" class="btn navbar-btn btn-primary btn-sm" href="" ng-click="upgrade()">
@ -39,7 +39,7 @@
<a href="#" class="dropdown-toggle" data-toggle="dropdown"><span class="glyphicon glyphicon-cog"></span></a>
<ul class="dropdown-menu">
<li><a href="" ng-click="editSettings()"><span class="glyphicon glyphicon-cog"></span>&emsp;<span translate>Settings</span></a></li>
<li><a href="" ng-click="idNode()"><span class="glyphicon glyphicon-qrcode"></span>&emsp;<span translate>Show ID</span></a></li>
<li><a href="" ng-click="idDevice()"><span class="glyphicon glyphicon-qrcode"></span>&emsp;<span translate>Show ID</span></a></li>
<li class="divider"></li>
<li><a href="" ng-click="shutdown()"><span class="glyphicon glyphicon-off"></span>&emsp;<span translate>Shutdown</span></a></li>
<li><a href="" ng-click="restart()"><span class="glyphicon glyphicon-refresh"></span>&emsp;<span translate>Restart</span></a></li>
@ -74,90 +74,90 @@
<div class="row">
<!-- Repository list (top left) -->
<!-- Folder list (top left) -->
<div class="col-md-6">
<div class="panel-group" id="repositories">
<div class="panel panel-{{repoClass(repo.ID)}}" ng-repeat="repo in repoList()">
<div class="panel-heading" data-toggle="collapse" data-parent="#repositories" href="#repo-{{$index}}" style="cursor: pointer">
<div class="panel-group" id="folders">
<div class="panel panel-{{folderClass(folder.ID)}}" ng-repeat="folder in folderList()">
<div class="panel-heading" data-toggle="collapse" data-parent="#folders" href="#folder-{{$index}}" style="cursor: pointer">
<h3 class="panel-title">
<span class="glyphicon glyphicon-hdd"></span>&emsp;{{repo.ID}}
<span class="pull-right hidden-xs" ng-switch="repoStatus(repo.ID)">
<span class="glyphicon glyphicon-hdd"></span>&emsp;{{folder.ID}}
<span class="pull-right hidden-xs" ng-switch="folderStatus(folder.ID)">
<span translate ng-switch-when="unknown">Unknown</span>
<span translate ng-switch-when="stopped">Stopped</span>
<span translate ng-switch-when="scanning">Scanning</span>
<span ng-switch-when="syncing">
<span translate>Syncing</span>
({{syncPercentage(repo.ID)}}%)
({{syncPercentage(folder.ID)}}%)
</span>
<span ng-switch-when="idle">
<span translate>Idle</span>
({{syncPercentage(repo.ID)}}%)
({{syncPercentage(folder.ID)}}%)
</span>
</span>
</h3>
</div>
<div id="repo-{{$index}}" class="panel-collapse collapse" ng-class="{in: $index === 0}">
<div id="folder-{{$index}}" class="panel-collapse collapse" ng-class="{in: $index === 0}">
<div class="panel-body">
<table class="table table-condensed table-striped">
<tbody>
<tr>
<th><span class="glyphicon glyphicon-tag"></span>&emsp;<span translate>Repository ID</span></th>
<td class="text-right">{{repo.ID}}</td>
<th><span class="glyphicon glyphicon-tag"></span>&emsp;<span translate>Folder ID</span></th>
<td class="text-right">{{folder.ID}}</td>
</tr>
<tr>
<th><span class="glyphicon glyphicon-folder-open"></span>&emsp;<span translate>Folder</span></th>
<td class="text-right">{{repo.Directory}}</td>
<td class="text-right">{{folder.Directory}}</td>
</tr>
<tr ng-if="model[repo.ID].invalid">
<tr ng-if="model[folder.ID].invalid">
<th><span class="glyphicon glyphicon-warning-sign"></span>&emsp;<span translate>Error</span></th>
<td class="text-right">{{model[repo.ID].invalid}}</td>
<td class="text-right">{{model[folder.ID].invalid}}</td>
</tr>
<tr>
<th><span class="glyphicon glyphicon-globe"></span>&emsp;<span translate>Global Repository</span></th>
<td class="text-right">{{model[repo.ID].globalFiles | alwaysNumber}} <span translate>items</span>, ~{{model[repo.ID].globalBytes | binary}}B</td>
<th><span class="glyphicon glyphicon-globe"></span>&emsp;<span translate>Global Folder</span></th>
<td class="text-right">{{model[folder.ID].globalFiles | alwaysNumber}} <span translate>items</span>, ~{{model[folder.ID].globalBytes | binary}}B</td>
</tr>
<tr>
<th><span class="glyphicon glyphicon-home"></span>&emsp;<span translate>Local Repository</span></th>
<td class="text-right">{{model[repo.ID].localFiles | alwaysNumber}} <span translate>items</span>, ~{{model[repo.ID].localBytes | binary}}B</td>
<th><span class="glyphicon glyphicon-home"></span>&emsp;<span translate>Local Folder</span></th>
<td class="text-right">{{model[folder.ID].localFiles | alwaysNumber}} <span translate>items</span>, ~{{model[folder.ID].localBytes | binary}}B</td>
</tr>
<tr>
<th><span class="glyphicon glyphicon-cloud-download"></span>&emsp;<span translate>Out Of Sync</span></th>
<td class="text-right">
<a ng-if="model[repo.ID].needFiles > 0" ng-click="showNeed(repo.ID)" href="">{{model[repo.ID].needFiles | alwaysNumber}} <span translate>items</span>, ~{{model[repo.ID].needBytes | binary}}B</a>
<span ng-if="model[repo.ID].needFiles == 0">0 <span translate>items</span>, 0 B</span>
<a ng-if="model[folder.ID].needFiles > 0" ng-click="showNeed(folder.ID)" href="">{{model[folder.ID].needFiles | alwaysNumber}} <span translate>items</span>, ~{{model[folder.ID].needBytes | binary}}B</a>
<span ng-if="model[folder.ID].needFiles == 0">0 <span translate>items</span>, 0 B</span>
</td>
</tr>
<tr>
<th><span class="glyphicon glyphicon-lock"></span>&emsp;<span translate>Master Repo</span></th>
<th><span class="glyphicon glyphicon-lock"></span>&emsp;<span translate>Master Folder</span></th>
<td class="text-right">
<span translate ng-if="repo.ReadOnly">Yes</span>
<span translate ng-if="!repo.ReadOnly">No</span>
<span translate ng-if="folder.ReadOnly">Yes</span>
<span translate ng-if="!folder.ReadOnly">No</span>
</td>
</tr>
<tr>
<th><span class="glyphicon glyphicon-unchecked"></span>&emsp;<span translate>Ignore Permissions</span></th>
<td class="text-right">
<span translate ng-if="repo.IgnorePerms">Yes</span>
<span translate ng-if="!repo.IgnorePerms">No</span>
<span translate ng-if="folder.IgnorePerms">Yes</span>
<span translate ng-if="!folder.IgnorePerms">No</span>
</td>
</tr>
<tr>
<th><span class="glyphicon glyphicon-refresh"></span>&emsp;<span translate>Rescan Interval</span></th>
<td class="text-right">{{repo.RescanIntervalS}} s</td>
<td class="text-right">{{folder.RescanIntervalS}} s</td>
</tr>
<tr>
<th><span class="glyphicon glyphicon-share-alt"></span>&emsp;<span translate>Shared With</span></th>
<td class="text-right">{{sharesRepo(repo)}}</td>
<td class="text-right">{{sharesFolder(folder)}}</td>
</tr>
</tbody>
</table>
</div>
<div class="panel-footer">
<button class="btn btn-sm btn-danger" ng-if="repo.ReadOnly && model[repo.ID].needFiles > 0" ng-click="override(repo.ID)" href=""><span class="glyphicon glyphicon-upload"></span>&emsp;<span translate>Override Changes</span></button>
<button class="btn btn-sm btn-danger" ng-if="folder.ReadOnly && model[folder.ID].needFiles > 0" ng-click="override(folder.ID)" href=""><span class="glyphicon glyphicon-upload"></span>&emsp;<span translate>Override Changes</span></button>
<span class="pull-right">
<button class="btn btn-sm btn-default" href="" ng-show="repoStatus(repo.ID) == 'idle'" ng-click="rescanRepo(repo.ID)"><span class="glyphicon glyphicon-refresh"></span>&emsp;<span translate>Rescan</span></button>
<button class="btn btn-sm btn-default" href="" ng-click="editRepo(repo)"><span class="glyphicon glyphicon-pencil"></span>&emsp;<span translate>Edit</span></button>
<button class="btn btn-sm btn-default" href="" ng-show="folderStatus(folder.ID) == 'idle'" ng-click="rescanFolder(folder.ID)"><span class="glyphicon glyphicon-refresh"></span>&emsp;<span translate>Rescan</span></button>
<button class="btn btn-sm btn-default" href="" ng-click="editFolder(folder)"><span class="glyphicon glyphicon-pencil"></span>&emsp;<span translate>Edit</span></button>
</span>
<div class="clearfix"></div>
</div>
@ -165,24 +165,24 @@
</div>
</div>
<div class="form-group">
<button class="btn btn-sm btn-default pull-right" ng-click="addRepo()"><span class="glyphicon glyphicon-plus"></span>&emsp;<span translate>Add Repository</span></button>
<button class="btn btn-sm btn-default pull-right" ng-click="addFolder()"><span class="glyphicon glyphicon-plus"></span>&emsp;<span translate>Add Folder</span></button>
<div class="clearfix"></div>
</div>
<hr class="visible-sm"/>
</div>
<!-- Node list (top right) -->
<!-- Device list (top right) -->
<!-- This node -->
<!-- This device -->
<div class="col-md-6">
<div class="panel panel-default" ng-repeat="nodeCfg in [thisNode()]">
<div class="panel-heading" data-toggle="collapse" href="#node-this" style="cursor: pointer">
<div class="panel panel-default" ng-repeat="deviceCfg in [thisDevice()]">
<div class="panel-heading" data-toggle="collapse" href="#device-this" style="cursor: pointer">
<h3 class="panel-title">
<span class="glyphicon glyphicon-home"></span>&emsp;{{nodeName(nodeCfg)}}
<span class="glyphicon glyphicon-home"></span>&emsp;{{deviceName(deviceCfg)}}
</h3>
</div>
<div id="node-this" class="panel-collapse collapse in">
<div id="device-this" class="panel-collapse collapse in">
<div class="panel-body">
<table class="table table-condensed table-striped">
<tbody>
@ -219,75 +219,75 @@
</div>
</div>
<!-- Remote nodes -->
<!-- Remote devices -->
<div class="panel-group" id="nodes">
<div class="panel panel-{{nodeClass(nodeCfg)}}" ng-repeat="nodeCfg in otherNodes()">
<div class="panel-heading" data-toggle="collapse" data-parent="#nodes" href="#node-{{$index}}" style="cursor: pointer">
<div class="panel-group" id="devices">
<div class="panel panel-{{deviceClass(deviceCfg)}}" ng-repeat="deviceCfg in otherDevices()">
<div class="panel-heading" data-toggle="collapse" data-parent="#devices" href="#device-{{$index}}" style="cursor: pointer">
<h3 class="panel-title">
<span class="glyphicon glyphicon-retweet"></span>&emsp;{{nodeName(nodeCfg)}}
<span class="glyphicon glyphicon-retweet"></span>&emsp;{{deviceName(deviceCfg)}}
<span class="pull-right hidden-xs">
<span ng-if="connections[nodeCfg.NodeID] && completion[nodeCfg.NodeID]._total == 100">
<span ng-if="connections[deviceCfg.DeviceID] && completion[deviceCfg.DeviceID]._total == 100">
<span translate>Up to Date</span> (100%)
</span>
<span ng-if="connections[nodeCfg.NodeID] && completion[nodeCfg.NodeID]._total < 100">
<span translate>Syncing</span> ({{completion[nodeCfg.NodeID]._total | number:0}}%)
<span ng-if="connections[deviceCfg.DeviceID] && completion[deviceCfg.DeviceID]._total < 100">
<span translate>Syncing</span> ({{completion[deviceCfg.DeviceID]._total | number:0}}%)
</span>
<span translate ng-if="!connections[nodeCfg.NodeID]">Disconnected</span>
<span translate ng-if="!connections[deviceCfg.DeviceID]">Disconnected</span>
</span>
</h3>
</div>
<div id="node-{{$index}}" class="panel-collapse collapse">
<div id="device-{{$index}}" class="panel-collapse collapse">
<div class="panel-body">
<table class="table table-condensed table-striped">
<tbody>
<tr ng-if="connections[nodeCfg.NodeID]">
<tr ng-if="connections[deviceCfg.DeviceID]">
<th><span class="glyphicon glyphicon-cloud-download"></span>&emsp;<span translate>Download Rate</span></th>
<td class="text-right">{{connections[nodeCfg.NodeID].inbps | metric}}bps ({{connections[nodeCfg.NodeID].InBytesTotal | binary}}B)</td>
<td class="text-right">{{connections[deviceCfg.DeviceID].inbps | metric}}bps ({{connections[deviceCfg.DeviceID].InBytesTotal | binary}}B)</td>
</tr>
<tr ng-if="connections[nodeCfg.NodeID]">
<tr ng-if="connections[deviceCfg.DeviceID]">
<th><span class="glyphicon glyphicon-cloud-upload"></span>&emsp;<span translate>Upload Rate</span></th>
<td class="text-right">{{connections[nodeCfg.NodeID].outbps | metric}}bps ({{connections[nodeCfg.NodeID].OutBytesTotal | binary}}B)</td>
<td class="text-right">{{connections[deviceCfg.DeviceID].outbps | metric}}bps ({{connections[deviceCfg.DeviceID].OutBytesTotal | binary}}B)</td>
</tr>
<tr>
<th><span class="glyphicon glyphicon-link"></span>&emsp;<span translate>Address</span></th>
<td class="text-right">{{nodeAddr(nodeCfg)}}</td>
<td class="text-right">{{deviceAddr(deviceCfg)}}</td>
</tr>
<tr ng-if="connections[nodeCfg.NodeID]">
<tr ng-if="connections[deviceCfg.DeviceID]">
<th><span class="glyphicon glyphicon-comment"></span>&emsp;<span translate>Synchronization</span></th>
<td class="text-right">{{completion[nodeCfg.NodeID]._total | alwaysNumber | number:0}}%</td>
<td class="text-right">{{completion[deviceCfg.DeviceID]._total | alwaysNumber | number:0}}%</td>
</tr>
<tr>
<th><span class="glyphicon glyphicon-compressed"></span>&emsp;<span translate>Use Compression</span></th>
<td translate ng-if="nodeCfg.Compression" class="text-right">Yes</td>
<td translate ng-if="!nodeCfg.Compression" class="text-right">No</td>
<td translate ng-if="deviceCfg.Compression" class="text-right">Yes</td>
<td translate ng-if="!deviceCfg.Compression" class="text-right">No</td>
</tr>
<tr>
<th><span class="glyphicon glyphicon-thumbs-up"></span>&emsp;<span translate>Introducer</span></th>
<td translate ng-if="nodeCfg.Introducer" class="text-right">Yes</td>
<td translate ng-if="!nodeCfg.Introducer" class="text-right">No</td>
<td translate ng-if="deviceCfg.Introducer" class="text-right">Yes</td>
<td translate ng-if="!deviceCfg.Introducer" class="text-right">No</td>
</tr>
<tr ng-if="connections[nodeCfg.NodeID]">
<tr ng-if="connections[deviceCfg.DeviceID]">
<th><span class="glyphicon glyphicon-tag"></span>&emsp;<span translate>Version</span></th>
<td class="text-right">{{connections[nodeCfg.NodeID].ClientVersion}}</td>
<td class="text-right">{{connections[deviceCfg.DeviceID].ClientVersion}}</td>
</tr>
<tr ng-if="!connections[nodeCfg.NodeID]">
<tr ng-if="!connections[deviceCfg.DeviceID]">
<th><span class="glyphicon glyphicon-eye-open"></span>&emsp;<span translate>Last seen</span></th>
<td translate ng-if="!stats[nodeCfg.NodeID].LastSeenDays || stats[nodeCfg.NodeID].LastSeenDays >= 365" class="text-right">Never</td>
<td ng-if="stats[nodeCfg.NodeID].LastSeenDays < 365" class="text-right">{{stats[nodeCfg.NodeID].LastSeen | date:"yyyy-MM-dd HH:mm"}}</td>
<td translate ng-if="!stats[deviceCfg.DeviceID].LastSeenDays || stats[deviceCfg.DeviceID].LastSeenDays >= 365" class="text-right">Never</td>
<td ng-if="stats[deviceCfg.DeviceID].LastSeenDays < 365" class="text-right">{{stats[deviceCfg.DeviceID].LastSeen | date:"yyyy-MM-dd HH:mm"}}</td>
</tr>
</tbody>
</table>
</div>
<div class="panel-footer">
<span class="pull-right"><a class="btn btn-sm btn-default" href="" ng-click="editNode(nodeCfg)"><span class="glyphicon glyphicon-pencil"></span>&emsp;<span translate>Edit</span></a></span>
<span class="pull-right"><a class="btn btn-sm btn-default" href="" ng-click="editDevice(deviceCfg)"><span class="glyphicon glyphicon-pencil"></span>&emsp;<span translate>Edit</span></a></span>
<div class="clearfix"></div>
</div>
</div>
</div>
</div>
<div class="form-group">
<button class="btn btn-sm btn-default pull-right" ng-click="addNode()"><span class="glyphicon glyphicon-plus"></span>&emsp;<span translate>Add Node</span></button>
<button class="btn btn-sm btn-default pull-right" ng-click="addDevice()"><span class="glyphicon glyphicon-plus"></span>&emsp;<span translate>Add Device</span></button>
<div class="clearfix"></div>
</div>
</div>
@ -300,7 +300,7 @@
<div class="panel panel-warning">
<div class="panel-heading"><h3 class="panel-title"><span translate>Notice</span></h3></div>
<div class="panel-body">
<p ng-repeat="err in errorList()"><small>{{err.Time | date:"H:mm:ss"}}:</small> {{friendlyNodes(err.Error)}}</p>
<p ng-repeat="err in errorList()"><small>{{err.Time | date:"H:mm:ss"}}:</small> {{friendlyDevices(err.Error)}}</p>
</div>
<div class="panel-footer">
<button type="button" class="pull-right btn btn-sm btn-default" ng-click="clearErrors()"><span class="glyphicon glyphicon-ok"></span>&emsp;<span translate>OK</span></button>
@ -354,48 +354,48 @@
<!-- ID modal -->
<modal id="idqr" large="yes" status="info" close="yes" icon="qrcode" title="{{'Node Identification' | translate}} &mdash; {{nodeName(thisNode())}}">
<modal id="idqr" large="yes" status="info" close="yes" icon="qrcode" title="{{'Device Identification' | translate}} &mdash; {{deviceName(thisDevice())}}">
<div class="well well-sm text-monospace text-center">{{myID}}</div>
<img ng-if="myID" class="center-block img-thumbnail" src="qr/?text={{myID}}"/>
</modal>
<!-- Node editor modal -->
<!-- Device editor modal -->
<div id="editNode" class="modal fade" tabindex="-1">
<div id="editDevice" class="modal fade" tabindex="-1">
<div class="modal-dialog modal-lg">
<div class="modal-content">
<div class="modal-header">
<h4 translate ng-show="!editingExisting" class="modal-title">Add Node</h4>
<h4 translate ng-show="editingExisting" class="modal-title">Edit Node</h4>
<h4 translate ng-show="!editingExisting" class="modal-title">Add Device</h4>
<h4 translate ng-show="editingExisting" class="modal-title">Edit Device</h4>
</div>
<div class="modal-body">
<form role="form" name="nodeEditor">
<div class="form-group" ng-class="{'has-error': nodeEditor.nodeID.$invalid && nodeEditor.nodeID.$dirty}">
<label translate for="nodeID">Node ID</label>
<input ng-if="!editingExisting" name="nodeID" id="nodeID" class="form-control text-monospace" type="text" ng-model="currentNode.NodeID" required valid-nodeid></input>
<div ng-if="editingExisting" class="well well-sm text-monospace">{{currentNode.NodeID}}</div>
<form role="form" name="deviceEditor">
<div class="form-group" ng-class="{'has-error': deviceEditor.deviceID.$invalid && deviceEditor.deviceID.$dirty}">
<label translate for="deviceID">Device ID</label>
<input ng-if="!editingExisting" name="deviceID" id="deviceID" class="form-control text-monospace" type="text" ng-model="currentDevice.DeviceID" required valid-deviceid></input>
<div ng-if="editingExisting" class="well well-sm text-monospace">{{currentDevice.DeviceID}}</div>
<p class="help-block">
<span translate ng-if="nodeEditor.nodeID.$valid || nodeEditor.nodeID.$pristine">The node ID to enter here can be found in the "Edit > Show ID" dialog on the other node. Spaces and dashes are optional (ignored).</span>
<span translate ng-show="!editingExisting && (nodeEditor.nodeID.$valid || nodeEditor.nodeID.$pristine)">When adding a new node, keep in mind that this node must be added on the other side too.</span>
<span translate ng-if="nodeEditor.nodeID.$error.required && nodeEditor.nodeID.$dirty">The node ID cannot be blank.</span>
<span translate ng-if="nodeEditor.nodeID.$error.validNodeid && nodeEditor.nodeID.$dirty">The entered node ID does not look valid. It should be a 52 or 56 character string consisting of letters and numbers, with spaces and dashes being optional.</span>
<span translate ng-if="deviceEditor.deviceID.$valid || deviceEditor.deviceID.$pristine">The device ID to enter here can be found in the "Edit > Show ID" dialog on the other device. Spaces and dashes are optional (ignored).</span>
<span translate ng-show="!editingExisting && (deviceEditor.deviceID.$valid || deviceEditor.deviceID.$pristine)">When adding a new device, keep in mind that this device must be added on the other side too.</span>
<span translate ng-if="deviceEditor.deviceID.$error.required && deviceEditor.deviceID.$dirty">The device ID cannot be blank.</span>
<span translate ng-if="deviceEditor.deviceID.$error.validDeviceid && deviceEditor.deviceID.$dirty">The entered device ID does not look valid. It should be a 52 or 56 character string consisting of letters and numbers, with spaces and dashes being optional.</span>
</p>
</div>
<div class="form-group">
<label translate for="name">Node Name</label>
<input id="name" class="form-control" type="text" ng-model="currentNode.Name"></input>
<p translate ng-if="currentNode.NodeID == myID" class="help-block">Shown instead of Node ID in the cluster status. Will be advertised to other nodes as an optional default name.</p>
<p translate ng-if="currentNode.NodeID != myID" class="help-block">Shown instead of Node ID in the cluster status. Will be updated to the name the node advertises if left empty.</p>
<label translate for="name">Device Name</label>
<input id="name" class="form-control" type="text" ng-model="currentDevice.Name"></input>
<p translate ng-if="currentDevice.DeviceID == myID" class="help-block">Shown instead of Device ID in the cluster status. Will be advertised to other devices as an optional default name.</p>
<p translate ng-if="currentDevice.DeviceID != myID" class="help-block">Shown instead of Device ID in the cluster status. Will be updated to the name the device advertises if left empty.</p>
</div>
<div class="form-group">
<label translate for="addresses">Addresses</label>
<input ng-disabled="currentNode.NodeID == myID" id="addresses" class="form-control" type="text" ng-model="currentNode.AddressesStr"></input>
<input ng-disabled="currentDevice.DeviceID == myID" id="addresses" class="form-control" type="text" ng-model="currentDevice.AddressesStr"></input>
<p translate class="help-block">Enter comma separated "ip:port" addresses or "dynamic" to perform automatic discovery of the address.</p>
</div>
<div ng-if="!editingSelf" class="form-group">
<div class="checkbox">
<label>
<input type="checkbox" ng-model="currentNode.Compression"> <span translate>Use Compression</span>
<input type="checkbox" ng-model="currentDevice.Compression"> <span translate>Use Compression</span>
</label>
<p translate class="help-block">Compression is recommended in most setups.</p>
</div>
@ -403,58 +403,58 @@
<div ng-if="!editingSelf" class="form-group">
<div class="checkbox">
<label>
<input type="checkbox" ng-model="currentNode.Introducer"> <span translate>Introducer</span>
<input type="checkbox" ng-model="currentDevice.Introducer"> <span translate>Introducer</span>
</label>
<p translate class="help-block">Any nodes configured on an introducer node will be added to this node as well.</p>
<p translate class="help-block">Any devices configured on an introducer device will be added to this device as well.</p>
</div>
</div>
</form>
</div>
<div class="modal-footer">
<button type="button" class="btn btn-primary btn-sm" ng-click="saveNode()" ng-disabled="nodeEditor.$invalid"><span class="glyphicon glyphicon-ok"></span>&emsp;<span translate>Save</span></button>
<button type="button" class="btn btn-primary btn-sm" ng-click="saveDevice()" ng-disabled="deviceEditor.$invalid"><span class="glyphicon glyphicon-ok"></span>&emsp;<span translate>Save</span></button>
<button type="button" class="btn btn-default btn-sm" data-dismiss="modal"><span class="glyphicon glyphicon-remove"></span>&emsp;<span translate>Close</span></button>
<button ng-if="editingExisting && !editingSelf" type="button" class="btn btn-danger pull-left btn-sm" ng-click="deleteNode()"><span class="glyphicon glyphicon-minus"></span>&emsp;<span translate>Delete</span></button>
<button ng-if="editingExisting && !editingSelf" type="button" class="btn btn-danger pull-left btn-sm" ng-click="deleteDevice()"><span class="glyphicon glyphicon-minus"></span>&emsp;<span translate>Delete</span></button>
</div>
</div>
</div>
</div>
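The help text in the device editor above describes the expected ID format: a 52 or 56 character string in which spaces and dashes are ignored. On the Go side that parsing is done by protocol.DeviceIDFromString, the renamed counterpart of NodeIDFromString; the sketch below is a hypothetical command-line check of user input, reusing a device ID from the config tests further down and the internal/protocol import path shown there.

```go
package main

import (
	"fmt"
	"log"

	"github.com/syncthing/syncthing/internal/protocol"
)

func main() {
	// Input as the GUI accepts it: the dashed (56 character) form; the
	// undashed 52 character form is equally valid per the help text above.
	raw := "P56IOI7-MZJNU2Y-IQGDREY-DM2MGTI-MGL3BXN-PQ6W5BM-TBBZ4TJ-XZWICQ2"

	// DeviceIDFromString rejects anything that does not parse as a device ID;
	// an error here corresponds to the "does not look valid" message in the modal.
	id, err := protocol.DeviceIDFromString(raw)
	if err != nil {
		log.Fatalf("invalid device ID: %v", err)
	}
	fmt.Println("accepted device ID:", id)
}
```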
<!-- Repo editor modal -->
<!-- Folder editor modal -->
<div id="editRepo" class="modal fade" tabindex="-1">
<div id="editFolder" class="modal fade" tabindex="-1">
<div class="modal-dialog modal-lg">
<div class="modal-content">
<div class="modal-header">
<h4 ng-show="!editingExisting" class="modal-title"><span translate>Add Repository</span></h4>
<h4 ng-show="editingExisting" class="modal-title"><span translate>Edit Repository</span></h4>
<h4 ng-show="!editingExisting" class="modal-title"><span translate>Add Folder</span></h4>
<h4 ng-show="editingExisting" class="modal-title"><span translate>Edit Folder</span></h4>
</div>
<div class="modal-body">
<form role="form" name="repoEditor">
<form role="form" name="folderEditor">
<div class="row">
<div class="col-md-12">
<div class="form-group" ng-class="{'has-error': repoEditor.repoID.$invalid && repoEditor.repoID.$dirty}">
<label for="repoID"><span translate>Repository ID</span></label>
<input name="repoID" ng-disabled="editingExisting" id="repoID" class="form-control" type="text" ng-model="currentRepo.ID" required unique-repo ng-pattern="/^[a-zA-Z0-9-_.]{1,64}$/"></input>
<div class="form-group" ng-class="{'has-error': folderEditor.folderID.$invalid && folderEditor.folderID.$dirty}">
<label for="folderID"><span translate>Folder ID</span></label>
<input name="folderID" ng-disabled="editingExisting" id="folderID" class="form-control" type="text" ng-model="currentFolder.ID" required unique-folder ng-pattern="/^[a-zA-Z0-9-_.]{1,64}$/"></input>
<p class="help-block">
<span translate ng-if="repoEditor.repoID.$valid || repoEditor.repoID.$pristine">Short identifier for the repository. Must be the same on all cluster nodes.</span>
<span translate ng-if="repoEditor.repoID.$error.uniqueRepo">The repository ID must be unique.</span>
<span translate ng-if="repoEditor.repoID.$error.required && repoEditor.repoID.$dirty">The repository ID cannot be blank.</span>
<span translate ng-if="repoEditor.repoID.$error.pattern && repoEditor.repoID.$dirty">The repository ID must be a short identifier (64 characters or less) consisting of letters, numbers and the dot (.), dash (-) and underscode (_) characters only.</span>
<span translate ng-if="folderEditor.folderID.$valid || folderEditor.folderID.$pristine">Short identifier for the folder. Must be the same on all cluster devices.</span>
<span translate ng-if="folderEditor.folderID.$error.uniqueFolder">The folder ID must be unique.</span>
<span translate ng-if="folderEditor.folderID.$error.required && folderEditor.folderID.$dirty">The folder ID cannot be blank.</span>
<span translate ng-if="folderEditor.folderID.$error.pattern && folderEditor.folderID.$dirty">The folder ID must be a short identifier (64 characters or less) consisting of letters, numbers and the dot (.), dash (-) and underscode (_) characters only.</span>
</p>
</div>
<div class="form-group" ng-class="{'has-error': repoEditor.repoPath.$invalid && repoEditor.repoPath.$dirty}">
<label translate for="repoPath">Repository Path</label>
<input name="repoPath" ng-disabled="editingExisting" id="repoPath" class="form-control" type="text" ng-model="currentRepo.Directory" required></input>
<div class="form-group" ng-class="{'has-error': folderEditor.folderPath.$invalid && folderEditor.folderPath.$dirty}">
<label translate for="folderPath">Folder Path</label>
<input name="folderPath" ng-disabled="editingExisting" id="folderPath" class="form-control" type="text" ng-model="currentFolder.Directory" required></input>
<p class="help-block">
<span translate ng-if="repoEditor.repoPath.$valid || repoEditor.repoPath.$pristine">Path to the repository on the local computer. Will be created if it does not exist. The tilde character (~) can be used as a shortcut for</span> <code>{{system.tilde}}</code>.
<span translate ng-if="repoEditor.repoPath.$error.required && repoEditor.repoPath.$dirty">The repository path cannot be blank.</span>
<span translate ng-if="folderEditor.folderPath.$valid || folderEditor.folderPath.$pristine">Path to the folder on the local computer. Will be created if it does not exist. The tilde character (~) can be used as a shortcut for</span> <code>{{system.tilde}}</code>.
<span translate ng-if="folderEditor.folderPath.$error.required && folderEditor.folderPath.$dirty">The folder path cannot be blank.</span>
</p>
</div>
<div class="form-group" ng-class="{'has-error': repoEditor.rescanIntervalS.$invalid && repoEditor.rescanIntervalS.$dirty}">
<div class="form-group" ng-class="{'has-error': folderEditor.rescanIntervalS.$invalid && folderEditor.rescanIntervalS.$dirty}">
<label for="rescanIntervalS"><span translate>Rescan Interval</span> (s)</label>
<input name="rescanIntervalS" id="rescanIntervalS" class="form-control" type="number" ng-model="currentRepo.RescanIntervalS" required min="5"></input>
<input name="rescanIntervalS" id="rescanIntervalS" class="form-control" type="number" ng-model="currentFolder.RescanIntervalS" required min="5"></input>
<p class="help-block">
<span translate ng-if="!repoEditor.rescanIntervalS.$valid && repoEditor.rescanIntervalS.$dirty">The rescan interval must be at least 5 seconds.</span>
<span translate ng-if="!folderEditor.rescanIntervalS.$valid && folderEditor.rescanIntervalS.$dirty">The rescan interval must be at least 5 seconds.</span>
</p>
</div>
</div>
@ -464,27 +464,27 @@
<div class="form-group">
<div class="checkbox">
<label>
<input type="checkbox" ng-model="currentRepo.ReadOnly"> <span translate>Repository Master</span>
<input type="checkbox" ng-model="currentFolder.ReadOnly"> <span translate>Folder Master</span>
</label>
</div>
<p translate class="help-block">Files are protected from changes made on other nodes, but changes made on this node will be sent to the rest of the cluster.</p>
<p translate class="help-block">Files are protected from changes made on other devices, but changes made on this device will be sent to the rest of the cluster.</p>
</div>
<div class="form-group">
<div class="checkbox">
<label>
<input type="checkbox" ng-model="currentRepo.IgnorePerms"> <span translate>Ignore Permissions</span>
<input type="checkbox" ng-model="currentFolder.IgnorePerms"> <span translate>Ignore Permissions</span>
</label>
</div>
<p translate class="help-block">File permission bits are ignored when looking for changes. Use on FAT filesystems.</p>
</div>
<div class="form-group">
<label translate for="nodes">Share With Nodes</label>
<div class="checkbox" ng-repeat="node in otherNodes()">
<label translate for="devices">Share With Devices</label>
<div class="checkbox" ng-repeat="device in otherDevices()">
<label>
<input type="checkbox" ng-model="currentRepo.selectedNodes[node.NodeID]"> {{nodeName(node)}}
<input type="checkbox" ng-model="currentFolder.selectedDevices[device.DeviceID]"> {{deviceName(device)}}
</label>
</div>
<p translate class="help-block">Select the nodes to share this repository with.</p>
<p translate class="help-block">Select the devices to share this folder with.</p>
</div>
</div>
<div class="col-md-6">
@ -492,54 +492,54 @@
<label translate>File Versioning</label>
<div class="radio">
<label>
<input type="radio" ng-model="currentRepo.FileVersioningSelector" value="none"> <span translate>No File Versioning</span>
<input type="radio" ng-model="currentFolder.FileVersioningSelector" value="none"> <span translate>No File Versioning</span>
</label>
</div>
<div class="radio">
<label>
<input type="radio" ng-model="currentRepo.FileVersioningSelector" value="simple"> <span translate>Simple File Versioning</span>
<input type="radio" ng-model="currentFolder.FileVersioningSelector" value="simple"> <span translate>Simple File Versioning</span>
</label>
</div>
<div class="radio">
<label>
<input type="radio" ng-model="currentRepo.FileVersioningSelector" value="staggered"> <span translate>Staggered File Versioning</span>
<input type="radio" ng-model="currentFolder.FileVersioningSelector" value="staggered"> <span translate>Staggered File Versioning</span>
</label>
</div>
</div>
<div class="form-group" ng-if="currentRepo.FileVersioningSelector=='simple'" ng-class="{'has-error': repoEditor.simpleKeep.$invalid && repoEditor.simpleKeep.$dirty}">
<div class="form-group" ng-if="currentFolder.FileVersioningSelector=='simple'" ng-class="{'has-error': folderEditor.simpleKeep.$invalid && folderEditor.simpleKeep.$dirty}">
<p translate class="help-block">Files are moved to date stamped versions in a .stversions folder when replaced or deleted by syncthing.</p>
<label translate for="simpleKeep">Keep Versions</label>
<input name="simpleKeep" id="simpleKeep" class="form-control" type="number" ng-model="currentRepo.simpleKeep" required min="1"></input>
<input name="simpleKeep" id="simpleKeep" class="form-control" type="number" ng-model="currentFolder.simpleKeep" required min="1"></input>
<p class="help-block">
<span translate ng-if="repoEditor.simpleKeep.$valid || repoEditor.simpleKeep.$pristine">The number of old versions to keep, per file.</span>
<span translate ng-if="repoEditor.simpleKeep.$error.required && repoEditor.simpleKeep.$dirty">The number of versions must be a number and cannot be blank.</span>
<span translate ng-if="repoEditor.simpleKeep.$error.min && repoEditor.simpleKeep.$dirty">You must keep at least one version.</span>
<span translate ng-if="folderEditor.simpleKeep.$valid || folderEditor.simpleKeep.$pristine">The number of old versions to keep, per file.</span>
<span translate ng-if="folderEditor.simpleKeep.$error.required && folderEditor.simpleKeep.$dirty">The number of versions must be a number and cannot be blank.</span>
<span translate ng-if="folderEditor.simpleKeep.$error.min && folderEditor.simpleKeep.$dirty">You must keep at least one version.</span>
</p>
</div>
<div class="form-group" ng-if="currentRepo.FileVersioningSelector=='staggered'" ng-class="{'has-error': repoEditor.staggeredMaxAge.$invalid && repoEditor.staggeredMaxAge.$dirty}">
<div class="form-group" ng-if="currentFolder.FileVersioningSelector=='staggered'" ng-class="{'has-error': folderEditor.staggeredMaxAge.$invalid && folderEditor.staggeredMaxAge.$dirty}">
<p class="help-block"><span translate>Files are moved to date stamped versions in a .stversions folder when replaced or deleted by syncthing.</span> <span translate>Versions are automatically deleted if they are older than the maximum age or exceed the number of files allowed in an interval.</span></p>
<p translate class="help-block">The following intervals are used: for the first hour a version is kept every 30 seconds, for the first day a version is kept every hour, for the first 30 days a version is kept every day, until the maximum age a version is kept every week.</p>
<label translate for="staggeredMaxAge">Maximum Age</label>
<input name="staggeredMaxAge" id="staggeredMaxAge" class="form-control" type="number" ng-model="currentRepo.staggeredMaxAge" required></input>
<input name="staggeredMaxAge" id="staggeredMaxAge" class="form-control" type="number" ng-model="currentFolder.staggeredMaxAge" required></input>
<p class="help-block">
<span translate ng-if="repoEditor.staggeredMaxAge.$valid || repoEditor.staggeredMaxAge.$pristine">The maximum time to keep a version (in days, set to 0 to keep versions forever).</span>
<span translate ng-if="repoEditor.staggeredMaxAge.$error.required && repoEditor.staggeredMaxAge.$dirty">The maximum age must be a number and cannot be blank.</span>
<span translate ng-if="folderEditor.staggeredMaxAge.$valid || folderEditor.staggeredMaxAge.$pristine">The maximum time to keep a version (in days, set to 0 to keep versions forever).</span>
<span translate ng-if="folderEditor.staggeredMaxAge.$error.required && folderEditor.staggeredMaxAge.$dirty">The maximum age must be a number and cannot be blank.</span>
</p>
</div>
<div class="form-group" ng-if="currentRepo.FileVersioningSelector == 'staggered'">
<div class="form-group" ng-if="currentFolder.FileVersioningSelector == 'staggered'">
<label translate for="staggeredVersionsPath">Versions Path</label>
<input name="staggeredVersionsPath" id="staggeredVersionsPath" class="form-control" type="text" ng-model="currentRepo.staggeredVersionsPath"></input>
<p translate class="help-block">Path where versions should be stored (leave empty for the default .stversions folder in the repository).</p>
<input name="staggeredVersionsPath" id="staggeredVersionsPath" class="form-control" type="text" ng-model="currentFolder.staggeredVersionsPath"></input>
<p translate class="help-block">Path where versions should be stored (leave empty for the default .stversions folder in the folder).</p>
</div>
</div>
</div>
</form>
<div translate ng-show="!editingExisting">When adding a new repository, keep in mind that the Repository ID is used to tie repositories together between nodes. They are case sensitive and must match exactly between all nodes.</div>
<div translate ng-show="!editingExisting">When adding a new folder, keep in mind that the Folder ID is used to tie folders together between devices. They are case sensitive and must match exactly between all devices.</div>
</div>
<div class="modal-footer">
<button type="button" class="btn btn-primary btn-sm" ng-click="saveRepo()" ng-disabled="repoEditor.$invalid"><span class="glyphicon glyphicon-ok"></span>&emsp;<span translate>Save</span></button>
<button type="button" class="btn btn-primary btn-sm" ng-click="saveFolder()" ng-disabled="folderEditor.$invalid"><span class="glyphicon glyphicon-ok"></span>&emsp;<span translate>Save</span></button>
<button type="button" class="btn btn-default btn-sm" data-dismiss="modal"><span class="glyphicon glyphicon-remove"></span>&emsp;<span translate>Close</span></button>
<button ng-if="editingExisting" type="button" class="btn btn-danger pull-left btn-sm" ng-click="deleteRepo()"><span class="glyphicon glyphicon-minus"></span>&emsp;<span translate>Delete</span></button>
<button ng-if="editingExisting" type="button" class="btn btn-danger pull-left btn-sm" ng-click="deleteFolder()"><span class="glyphicon glyphicon-minus"></span>&emsp;<span translate>Delete</span></button>
<button id="editIgnoresButton" ng-if="editingExisting" type="button" class="btn btn-default pull-left btn-sm" ng-click="editIgnores()"><span class="glyphicon glyphicon-eye-close"></span>&emsp;<span translate>Ignore Patterns</span></button>
</div>
</div>
@ -569,7 +569,7 @@
</dl>
</div>
<div class="modal-footer">
<div class="pull-left"><span translate>Editing</span> <code>{{currentRepo.Directory}}/.stignore</code></div>
<div class="pull-left"><span translate>Editing</span> <code>{{currentFolder.Directory}}/.stignore</code></div>
<button type="button" class="btn btn-primary btn-sm" data-dismiss="modal" ng-click="saveIgnores()"><span class="glyphicon glyphicon-ok"></span>&emsp;<span translate>Save</span></button>
<button type="button" class="btn btn-default btn-sm" data-dismiss="modal"><span class="glyphicon glyphicon-remove"></span>&emsp;<span translate>Close</span></button>
</div>
@ -591,8 +591,8 @@
<div class="col-md-6">
<div class="form-group">
<label translate for="NodeName">Node Name</label>
<input id="NodeName" class="form-control" type="text" ng-model="tmpOptions.NodeName">
<label translate for="DeviceName">Device Name</label>
<input id="DeviceName" class="form-control" type="text" ng-model="tmpOptions.DeviceName">
</div>
<div class="form-group">
<label translate for="ListenStr">Sync Protocol Listen Addresses</label>
@ -698,7 +698,7 @@
<h4 translate class="modal-title">Allow Anonymous Usage Reporting?</h4>
</div>
<div class="modal-body">
<p translate>The encrypted usage report is sent daily. It is used to track common platforms, repo sizes and app versions. If the reported data set is changed you will be prompted with this dialog again.</p>
<p translate>The encrypted usage report is sent daily. It is used to track common platforms, folder sizes and app versions. If the reported data set is changed you will be prompted with this dialog again.</p>
<p translate translate-value-url="https://data.syncthing.net">The aggregated statistics are publicly available at {%url%}.</p>
<button translate type="button" class="btn btn-default btn-sm" ng-show="!reportPreview" ng-click="showReportPreview()">Preview Usage Report</button>
<pre ng-if="reportPreview"><small>{{reportData | json}}</small></pre>
@ -720,7 +720,7 @@
<h4 translate class="modal-title">Anonymous Usage Reporting</h4>
</div>
<div class="modal-body">
<p translate>The encrypted usage report is sent daily. It is used to track common platforms, repo sizes and app versions. If the reported data set is changed you will be prompted with this dialog again.</p>
<p translate>The encrypted usage report is sent daily. It is used to track common platforms, folder sizes and app versions. If the reported data set is changed you will be prompted with this dialog again.</p>
<p translate translate-value-url="https://data.syncthing.net">The aggregated statistics are publicly available at {%url%}.</p>
<pre><small>{{reportData | json}}</small></pre>
</div>

View File

@ -25,24 +25,24 @@ var l = logger.DefaultLogger
type Configuration struct {
Location string `xml:"-" json:"-"`
Version int `xml:"version,attr" default:"3"`
Repositories []RepositoryConfiguration `xml:"repository"`
Nodes []NodeConfiguration `xml:"node"`
Folders []FolderConfiguration `xml:"folder"`
Devices []DeviceConfiguration `xml:"device"`
GUI GUIConfiguration `xml:"gui"`
Options OptionsConfiguration `xml:"options"`
XMLName xml.Name `xml:"configuration" json:"-"`
}
type RepositoryConfiguration struct {
type FolderConfiguration struct {
ID string `xml:"id,attr"`
Directory string `xml:"directory,attr"`
Nodes []RepositoryNodeConfiguration `xml:"node"`
Devices []FolderDeviceConfiguration `xml:"device"`
ReadOnly bool `xml:"ro,attr"`
RescanIntervalS int `xml:"rescanIntervalS,attr" default:"60"`
IgnorePerms bool `xml:"ignorePerms,attr"`
Invalid string `xml:"-"` // Set at runtime when there is an error, not saved
Versioning VersioningConfiguration `xml:"versioning"`
nodeIDs []protocol.NodeID
deviceIDs []protocol.DeviceID
}
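Since the XML struct tags carry the new names, the rename also changes the on-disk configuration vocabulary from &lt;repository&gt;/&lt;node&gt; elements to &lt;folder&gt;/&lt;device&gt;. Below is a minimal sketch of what the renamed structures serialize to; the internal/config import path is an assumption (the protocol path is the one imported by config_test.go below), and the field values are borrowed from the test fixtures.

```go
package main

import (
	"encoding/xml"
	"fmt"
	"log"

	"github.com/syncthing/syncthing/internal/config"   // assumed import path
	"github.com/syncthing/syncthing/internal/protocol" // as in config_test.go
)

func main() {
	device, err := protocol.DeviceIDFromString("AIR6LPZ7K4PTTUXQSMUUCPQ5YWOEDFIIQJUG7772YQXXR5YD6AWQ")
	if err != nil {
		log.Fatal(err)
	}

	cfg := config.Configuration{
		Version: 4,
		Folders: []config.FolderConfiguration{{
			ID:        "default",
			Directory: "~/Sync",
			Devices:   []config.FolderDeviceConfiguration{{DeviceID: device}},
		}},
		Devices: []config.DeviceConfiguration{{DeviceID: device, Name: "device one", Compression: true}},
	}

	// <configuration> now nests <folder> elements (each holding <device>
	// references) and top-level <device> entries instead of <repository>/<node>.
	out, err := xml.MarshalIndent(cfg, "", "    ")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(out))
}
```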
type VersioningConfiguration struct {
@ -86,17 +86,17 @@ func (c *VersioningConfiguration) UnmarshalXML(d *xml.Decoder, start xml.StartEl
return nil
}
func (r *RepositoryConfiguration) NodeIDs() []protocol.NodeID {
if r.nodeIDs == nil {
for _, n := range r.Nodes {
r.nodeIDs = append(r.nodeIDs, n.NodeID)
func (r *FolderConfiguration) DeviceIDs() []protocol.DeviceID {
if r.deviceIDs == nil {
for _, n := range r.Devices {
r.deviceIDs = append(r.deviceIDs, n.DeviceID)
}
}
return r.nodeIDs
return r.deviceIDs
}
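One behavioural detail worth flagging: DeviceIDs() memoizes its result in the unexported deviceIDs field on first use, so devices added to the folder afterwards are not reflected in later calls on the same value. A small illustration, under the same assumed import paths as above:

```go
package main

import (
	"fmt"

	"github.com/syncthing/syncthing/internal/config"   // assumed import path
	"github.com/syncthing/syncthing/internal/protocol" // as in config_test.go
)

func main() {
	d1, _ := protocol.DeviceIDFromString("AIR6LPZ7K4PTTUXQSMUUCPQ5YWOEDFIIQJUG7772YQXXR5YD6AWQ")
	d4, _ := protocol.DeviceIDFromString("P56IOI7-MZJNU2Y-IQGDREY-DM2MGTI-MGL3BXN-PQ6W5BM-TBBZ4TJ-XZWICQ2")

	folder := config.FolderConfiguration{
		ID:      "default",
		Devices: []config.FolderDeviceConfiguration{{DeviceID: d1}},
	}

	fmt.Println(len(folder.DeviceIDs())) // 1: the result is cached internally

	folder.Devices = append(folder.Devices, config.FolderDeviceConfiguration{DeviceID: d4})
	fmt.Println(len(folder.DeviceIDs())) // still 1: the cached slice is returned again
}
```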
type NodeConfiguration struct {
NodeID protocol.NodeID `xml:"id,attr"`
type DeviceConfiguration struct {
DeviceID protocol.DeviceID `xml:"id,attr"`
Name string `xml:"name,attr,omitempty"`
Addresses []string `xml:"address,omitempty"`
Compression bool `xml:"compression,attr"`
@ -104,8 +104,8 @@ type NodeConfiguration struct {
Introducer bool `xml:"introducer,attr"`
}
type RepositoryNodeConfiguration struct {
NodeID protocol.NodeID `xml:"id,attr"`
type FolderDeviceConfiguration struct {
DeviceID protocol.DeviceID `xml:"id,attr"`
Deprecated_Name string `xml:"name,attr,omitempty" json:"-"`
Deprecated_Addresses []string `xml:"address,omitempty" json:"-"`
@ -145,35 +145,35 @@ type GUIConfiguration struct {
APIKey string `xml:"apikey,omitempty"`
}
func (cfg *Configuration) NodeMap() map[protocol.NodeID]NodeConfiguration {
m := make(map[protocol.NodeID]NodeConfiguration, len(cfg.Nodes))
for _, n := range cfg.Nodes {
m[n.NodeID] = n
func (cfg *Configuration) DeviceMap() map[protocol.DeviceID]DeviceConfiguration {
m := make(map[protocol.DeviceID]DeviceConfiguration, len(cfg.Devices))
for _, n := range cfg.Devices {
m[n.DeviceID] = n
}
return m
}
func (cfg *Configuration) GetNodeConfiguration(nodeID protocol.NodeID) *NodeConfiguration {
for i, node := range cfg.Nodes {
if node.NodeID == nodeID {
return &cfg.Nodes[i]
func (cfg *Configuration) GetDeviceConfiguration(deviceID protocol.DeviceID) *DeviceConfiguration {
for i, device := range cfg.Devices {
if device.DeviceID == deviceID {
return &cfg.Devices[i]
}
}
return nil
}
func (cfg *Configuration) GetRepoConfiguration(repoID string) *RepositoryConfiguration {
for i, repo := range cfg.Repositories {
if repo.ID == repoID {
return &cfg.Repositories[i]
func (cfg *Configuration) GetFolderConfiguration(folderID string) *FolderConfiguration {
for i, folder := range cfg.Folders {
if folder.ID == folderID {
return &cfg.Folders[i]
}
}
return nil
}
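Note the asymmetry in these accessors: GetDeviceConfiguration and GetFolderConfiguration return pointers into the configuration's own slices (&cfg.Devices[i], &cfg.Folders[i]), so edits through them mutate the loaded configuration, whereas DeviceMap and FolderMap hand back value copies. A short sketch, again under the assumed import paths:

```go
package main

import (
	"fmt"

	"github.com/syncthing/syncthing/internal/config"   // assumed import path
	"github.com/syncthing/syncthing/internal/protocol" // as in config_test.go
)

func main() {
	d1, _ := protocol.DeviceIDFromString("AIR6LPZ7K4PTTUXQSMUUCPQ5YWOEDFIIQJUG7772YQXXR5YD6AWQ")

	cfg := config.Configuration{
		Devices: []config.DeviceConfiguration{{DeviceID: d1, Name: "laptop"}},
	}

	// Pointer into cfg.Devices: renaming here changes the configuration itself.
	if dev := cfg.GetDeviceConfiguration(d1); dev != nil {
		dev.Name = "workstation"
	}
	fmt.Println(cfg.Devices[0].Name) // "workstation"

	// DeviceMap returns copies; editing an entry leaves cfg untouched.
	m := cfg.DeviceMap()
	entry := m[d1]
	entry.Name = "detached copy"
	fmt.Println(cfg.Devices[0].Name) // still "workstation"
}
```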
func (cfg *Configuration) RepoMap() map[string]RepositoryConfiguration {
m := make(map[string]RepositoryConfiguration, len(cfg.Repositories))
for _, r := range cfg.Repositories {
func (cfg *Configuration) FolderMap() map[string]FolderConfiguration {
m := make(map[string]FolderConfiguration, len(cfg.Folders))
for _, r := range cfg.Folders {
m[r.ID] = r
}
return m
@ -290,44 +290,44 @@ func uniqueStrings(ss []string) []string {
return us
}
func (cfg *Configuration) prepare(myID protocol.NodeID) {
func (cfg *Configuration) prepare(myID protocol.DeviceID) {
fillNilSlices(&cfg.Options)
cfg.Options.ListenAddress = uniqueStrings(cfg.Options.ListenAddress)
// Initialize an empty slice for repositories if the config has none
if cfg.Repositories == nil {
cfg.Repositories = []RepositoryConfiguration{}
// Initialize an empty slice for folders if the config has none
if cfg.Folders == nil {
cfg.Folders = []FolderConfiguration{}
}
// Check for missing, bad or duplicate repository IDs
var seenRepos = map[string]*RepositoryConfiguration{}
// Check for missing, bad or duplicate folder IDs
var seenFolders = map[string]*FolderConfiguration{}
var uniqueCounter int
for i := range cfg.Repositories {
repo := &cfg.Repositories[i]
for i := range cfg.Folders {
folder := &cfg.Folders[i]
if len(repo.Directory) == 0 {
repo.Invalid = "no directory configured"
if len(folder.Directory) == 0 {
folder.Invalid = "no directory configured"
continue
}
if repo.ID == "" {
repo.ID = "default"
if folder.ID == "" {
folder.ID = "default"
}
if seen, ok := seenRepos[repo.ID]; ok {
l.Warnf("Multiple repositories with ID %q; disabling", repo.ID)
if seen, ok := seenFolders[folder.ID]; ok {
l.Warnf("Multiple folders with ID %q; disabling", folder.ID)
seen.Invalid = "duplicate repository ID"
if seen.ID == repo.ID {
seen.Invalid = "duplicate folder ID"
if seen.ID == folder.ID {
uniqueCounter++
seen.ID = fmt.Sprintf("%s~%d", repo.ID, uniqueCounter)
seen.ID = fmt.Sprintf("%s~%d", folder.ID, uniqueCounter)
}
repo.Invalid = "duplicate repository ID"
folder.Invalid = "duplicate folder ID"
uniqueCounter++
repo.ID = fmt.Sprintf("%s~%d", repo.ID, uniqueCounter)
folder.ID = fmt.Sprintf("%s~%d", folder.ID, uniqueCounter)
} else {
seenRepos[repo.ID] = repo
seenFolders[folder.ID] = folder
}
}
@ -362,42 +362,42 @@ func (cfg *Configuration) prepare(myID protocol.NodeID) {
}
}
// Build a list of available nodes
existingNodes := make(map[protocol.NodeID]bool)
existingNodes[myID] = true
for _, node := range cfg.Nodes {
existingNodes[node.NodeID] = true
// Build a list of available devices
existingDevices := make(map[protocol.DeviceID]bool)
existingDevices[myID] = true
for _, device := range cfg.Devices {
existingDevices[device.DeviceID] = true
}
// Ensure this node is present in all relevant places
me := cfg.GetNodeConfiguration(myID)
// Ensure this device is present in all relevant places
me := cfg.GetDeviceConfiguration(myID)
if me == nil {
myName, _ := os.Hostname()
cfg.Nodes = append(cfg.Nodes, NodeConfiguration{
NodeID: myID,
cfg.Devices = append(cfg.Devices, DeviceConfiguration{
DeviceID: myID,
Name: myName,
})
}
sort.Sort(NodeConfigurationList(cfg.Nodes))
// Ensure that any loose nodes are not present in the wrong places
// Ensure that there are no duplicate nodes
for i := range cfg.Repositories {
cfg.Repositories[i].Nodes = ensureNodePresent(cfg.Repositories[i].Nodes, myID)
cfg.Repositories[i].Nodes = ensureExistingNodes(cfg.Repositories[i].Nodes, existingNodes)
cfg.Repositories[i].Nodes = ensureNoDuplicates(cfg.Repositories[i].Nodes)
sort.Sort(RepositoryNodeConfigurationList(cfg.Repositories[i].Nodes))
sort.Sort(DeviceConfigurationList(cfg.Devices))
// Ensure that any loose devices are not present in the wrong places
// Ensure that there are no duplicate devices
for i := range cfg.Folders {
cfg.Folders[i].Devices = ensureDevicePresent(cfg.Folders[i].Devices, myID)
cfg.Folders[i].Devices = ensureExistingDevices(cfg.Folders[i].Devices, existingDevices)
cfg.Folders[i].Devices = ensureNoDuplicates(cfg.Folders[i].Devices)
sort.Sort(FolderDeviceConfigurationList(cfg.Folders[i].Devices))
}
// An empty address list is equivalent to a single "dynamic" entry
for i := range cfg.Nodes {
n := &cfg.Nodes[i]
for i := range cfg.Devices {
n := &cfg.Devices[i]
if len(n.Addresses) == 0 || len(n.Addresses) == 1 && n.Addresses[0] == "" {
n.Addresses = []string{"dynamic"}
}
}
}
func New(location string, myID protocol.NodeID) Configuration {
func New(location string, myID protocol.DeviceID) Configuration {
var cfg Configuration
cfg.Location = location
@ -411,7 +411,7 @@ func New(location string, myID protocol.NodeID) Configuration {
return cfg
}
func Load(location string, myID protocol.NodeID) (Configuration, error) {
func Load(location string, myID protocol.DeviceID) (Configuration, error) {
var cfg Configuration
cfg.Location = location
@ -435,24 +435,24 @@ func Load(location string, myID protocol.NodeID) (Configuration, error) {
// ChangeRequiresRestart returns true if updating the configuration requires a
// complete restart.
func ChangeRequiresRestart(from, to Configuration) bool {
// Adding, removing or changing repos requires restart
if len(from.Repositories) != len(to.Repositories) {
// Adding, removing or changing folders requires restart
if len(from.Folders) != len(to.Folders) {
return true
}
fromRepos := from.RepoMap()
toRepos := to.RepoMap()
for id := range fromRepos {
if !reflect.DeepEqual(fromRepos[id], toRepos[id]) {
fromFolders := from.FolderMap()
toFolders := to.FolderMap()
for id := range fromFolders {
if !reflect.DeepEqual(fromFolders[id], toFolders[id]) {
return true
}
}
// Removing a node requires a restart. Adding one does not. Changing
// Removing a device requires a restart. Adding one does not. Changing
// address or name does not.
fromNodes := from.NodeMap()
toNodes := to.NodeMap()
for nodeID := range fromNodes {
if _, ok := toNodes[nodeID]; !ok {
fromDevices := from.DeviceMap()
toDevices := to.DeviceMap()
for deviceID := range fromDevices {
if _, ok := toDevices[deviceID]; !ok {
return true
}
}
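The comments spell out the policy: folder changes and device removals force a restart, while device additions (and address or name changes) do not. A hedged sketch of exercising just the device rules shown in this hunk; it assumes no other restart-triggering difference between the configurations and the same import paths as above.

```go
package main

import (
	"fmt"

	"github.com/syncthing/syncthing/internal/config"   // assumed import path
	"github.com/syncthing/syncthing/internal/protocol" // as in config_test.go
)

func main() {
	d1, _ := protocol.DeviceIDFromString("AIR6LPZ7K4PTTUXQSMUUCPQ5YWOEDFIIQJUG7772YQXXR5YD6AWQ")
	d4, _ := protocol.DeviceIDFromString("P56IOI7-MZJNU2Y-IQGDREY-DM2MGTI-MGL3BXN-PQ6W5BM-TBBZ4TJ-XZWICQ2")

	from := config.Configuration{
		Devices: []config.DeviceConfiguration{{DeviceID: d1}},
	}
	withExtraDevice := config.Configuration{
		Devices: []config.DeviceConfiguration{{DeviceID: d1}, {DeviceID: d4}},
	}
	withoutDevice := config.Configuration{}

	fmt.Println(config.ChangeRequiresRestart(from, withExtraDevice)) // false: only a device was added
	fmt.Println(config.ChangeRequiresRestart(from, withoutDevice))   // true: d1 disappeared
}
```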
@ -466,22 +466,22 @@ func ChangeRequiresRestart(from, to Configuration) bool {
}
func convertV3V4(cfg *Configuration) {
// In previous versions, rescan interval was common for each repository.
// In previous versions, rescan interval was common for each folder.
// From now on, it can be set independently. We have to make sure that after the upgrade
// the individual rescan interval will be defined for every existing repository.
for i := range cfg.Repositories {
cfg.Repositories[i].RescanIntervalS = cfg.Options.Deprecated_RescanIntervalS
// the individual rescan interval will be defined for every existing folder.
for i := range cfg.Folders {
cfg.Folders[i].RescanIntervalS = cfg.Options.Deprecated_RescanIntervalS
}
cfg.Options.Deprecated_RescanIntervalS = 0
// In previous versions, repositories held full node configurations.
// Since that's the only place where node configs were in V1, we still have
// In previous versions, folders held full device configurations.
// Since that's the only place where device configs were in V1, we still have
// to define the deprecated fields to be able to upgrade from V1 to V4.
for i, repo := range cfg.Repositories {
for i, folder := range cfg.Folders {
for j := range repo.Nodes {
rncfg := cfg.Repositories[i].Nodes[j]
for j := range folder.Devices {
rncfg := cfg.Folders[i].Devices[j]
rncfg.Deprecated_Name = ""
rncfg.Deprecated_Addresses = nil
}
@ -492,10 +492,10 @@ func convertV3V4(cfg *Configuration) {
func convertV2V3(cfg *Configuration) {
// In previous versions, compression was always on. When upgrading, enable
// compression on all existing nodes. New nodes will get compression on by
// compression on all existing devices. New devices will get compression on by
// default by the GUI.
for i := range cfg.Nodes {
cfg.Nodes[i].Compression = true
for i := range cfg.Devices {
cfg.Devices[i].Compression = true
}
// The global discovery format and port number changed in v0.9. Having the
@ -508,31 +508,31 @@ func convertV2V3(cfg *Configuration) {
}
func convertV1V2(cfg *Configuration) {
// Collect the list of nodes.
// Replace node configs inside repositories with only a reference to the node ID.
// Set all repositories to read only if the global read only flag is set.
var nodes = map[string]RepositoryNodeConfiguration{}
for i, repo := range cfg.Repositories {
cfg.Repositories[i].ReadOnly = cfg.Options.Deprecated_ReadOnly
for j, node := range repo.Nodes {
id := node.NodeID.String()
if _, ok := nodes[id]; !ok {
nodes[id] = node
// Collect the list of devices.
// Replace device configs inside folders with only a reference to the device ID.
// Set all folders to read only if the global read only flag is set.
var devices = map[string]FolderDeviceConfiguration{}
for i, folder := range cfg.Folders {
cfg.Folders[i].ReadOnly = cfg.Options.Deprecated_ReadOnly
for j, device := range folder.Devices {
id := device.DeviceID.String()
if _, ok := devices[id]; !ok {
devices[id] = device
}
cfg.Repositories[i].Nodes[j] = RepositoryNodeConfiguration{NodeID: node.NodeID}
cfg.Folders[i].Devices[j] = FolderDeviceConfiguration{DeviceID: device.DeviceID}
}
}
cfg.Options.Deprecated_ReadOnly = false
// Set and sort the list of nodes.
for _, node := range nodes {
cfg.Nodes = append(cfg.Nodes, NodeConfiguration{
NodeID: node.NodeID,
Name: node.Deprecated_Name,
Addresses: node.Deprecated_Addresses,
// Set and sort the list of devices.
for _, device := range devices {
cfg.Devices = append(cfg.Devices, DeviceConfiguration{
DeviceID: device.DeviceID,
Name: device.Deprecated_Name,
Addresses: device.Deprecated_Addresses,
})
}
sort.Sort(NodeConfigurationList(cfg.Nodes))
sort.Sort(DeviceConfigurationList(cfg.Devices))
// GUI
cfg.GUI.Address = cfg.Options.Deprecated_GUIAddress
@ -543,73 +543,73 @@ func convertV1V2(cfg *Configuration) {
cfg.Version = 2
}
type NodeConfigurationList []NodeConfiguration
type DeviceConfigurationList []DeviceConfiguration
func (l NodeConfigurationList) Less(a, b int) bool {
return l[a].NodeID.Compare(l[b].NodeID) == -1
func (l DeviceConfigurationList) Less(a, b int) bool {
return l[a].DeviceID.Compare(l[b].DeviceID) == -1
}
func (l NodeConfigurationList) Swap(a, b int) {
func (l DeviceConfigurationList) Swap(a, b int) {
l[a], l[b] = l[b], l[a]
}
func (l NodeConfigurationList) Len() int {
func (l DeviceConfigurationList) Len() int {
return len(l)
}
type RepositoryNodeConfigurationList []RepositoryNodeConfiguration
type FolderDeviceConfigurationList []FolderDeviceConfiguration
func (l RepositoryNodeConfigurationList) Less(a, b int) bool {
return l[a].NodeID.Compare(l[b].NodeID) == -1
func (l FolderDeviceConfigurationList) Less(a, b int) bool {
return l[a].DeviceID.Compare(l[b].DeviceID) == -1
}
func (l RepositoryNodeConfigurationList) Swap(a, b int) {
func (l FolderDeviceConfigurationList) Swap(a, b int) {
l[a], l[b] = l[b], l[a]
}
func (l RepositoryNodeConfigurationList) Len() int {
func (l FolderDeviceConfigurationList) Len() int {
return len(l)
}
func ensureNodePresent(nodes []RepositoryNodeConfiguration, myID protocol.NodeID) []RepositoryNodeConfiguration {
for _, node := range nodes {
if node.NodeID.Equals(myID) {
return nodes
func ensureDevicePresent(devices []FolderDeviceConfiguration, myID protocol.DeviceID) []FolderDeviceConfiguration {
for _, device := range devices {
if device.DeviceID.Equals(myID) {
return devices
}
}
nodes = append(nodes, RepositoryNodeConfiguration{
NodeID: myID,
devices = append(devices, FolderDeviceConfiguration{
DeviceID: myID,
})
return nodes
return devices
}
func ensureExistingNodes(nodes []RepositoryNodeConfiguration, existingNodes map[protocol.NodeID]bool) []RepositoryNodeConfiguration {
count := len(nodes)
func ensureExistingDevices(devices []FolderDeviceConfiguration, existingDevices map[protocol.DeviceID]bool) []FolderDeviceConfiguration {
count := len(devices)
i := 0
loop:
for i < count {
if _, ok := existingNodes[nodes[i].NodeID]; !ok {
nodes[i] = nodes[count-1]
if _, ok := existingDevices[devices[i].DeviceID]; !ok {
devices[i] = devices[count-1]
count--
continue loop
}
i++
}
return nodes[0:count]
return devices[0:count]
}
func ensureNoDuplicates(nodes []RepositoryNodeConfiguration) []RepositoryNodeConfiguration {
count := len(nodes)
func ensureNoDuplicates(devices []FolderDeviceConfiguration) []FolderDeviceConfiguration {
count := len(devices)
i := 0
seenNodes := make(map[protocol.NodeID]bool)
seenDevices := make(map[protocol.DeviceID]bool)
loop:
for i < count {
id := nodes[i].NodeID
if _, ok := seenNodes[id]; ok {
nodes[i] = nodes[count-1]
id := devices[i].DeviceID
if _, ok := seenDevices[id]; ok {
devices[i] = devices[count-1]
count--
continue loop
}
seenNodes[id] = true
seenDevices[id] = true
i++
}
return nodes[0:count]
return devices[0:count]
}

View File

@ -12,13 +12,13 @@ import (
"github.com/syncthing/syncthing/internal/protocol"
)
var node1, node2, node3, node4 protocol.NodeID
var device1, device2, device3, device4 protocol.DeviceID
func init() {
node1, _ = protocol.NodeIDFromString("AIR6LPZ7K4PTTUXQSMUUCPQ5YWOEDFIIQJUG7772YQXXR5YD6AWQ")
node2, _ = protocol.NodeIDFromString("GYRZZQB-IRNPV4Z-T7TC52W-EQYJ3TT-FDQW6MW-DFLMU42-SSSU6EM-FBK2VAY")
node3, _ = protocol.NodeIDFromString("LGFPDIT-7SKNNJL-VJZA4FC-7QNCRKA-CE753K7-2BW5QDK-2FOZ7FR-FEP57QJ")
node4, _ = protocol.NodeIDFromString("P56IOI7-MZJNU2Y-IQGDREY-DM2MGTI-MGL3BXN-PQ6W5BM-TBBZ4TJ-XZWICQ2")
device1, _ = protocol.DeviceIDFromString("AIR6LPZ7K4PTTUXQSMUUCPQ5YWOEDFIIQJUG7772YQXXR5YD6AWQ")
device2, _ = protocol.DeviceIDFromString("GYRZZQB-IRNPV4Z-T7TC52W-EQYJ3TT-FDQW6MW-DFLMU42-SSSU6EM-FBK2VAY")
device3, _ = protocol.DeviceIDFromString("LGFPDIT-7SKNNJL-VJZA4FC-7QNCRKA-CE753K7-2BW5QDK-2FOZ7FR-FEP57QJ")
device4, _ = protocol.DeviceIDFromString("P56IOI7-MZJNU2Y-IQGDREY-DM2MGTI-MGL3BXN-PQ6W5BM-TBBZ4TJ-XZWICQ2")
}
func TestDefaultValues(t *testing.T) {
@ -39,69 +39,69 @@ func TestDefaultValues(t *testing.T) {
RestartOnWakeup: true,
}
cfg := New("test", node1)
cfg := New("test", device1)
if !reflect.DeepEqual(cfg.Options, expected) {
t.Errorf("Default config differs;\n E: %#v\n A: %#v", expected, cfg.Options)
}
}
func TestNodeConfig(t *testing.T) {
func TestDeviceConfig(t *testing.T) {
for i, ver := range []string{"v1", "v2", "v3", "v4"} {
cfg, err := Load("testdata/"+ver+".xml", node1)
cfg, err := Load("testdata/"+ver+".xml", device1)
if err != nil {
t.Error(err)
}
expectedRepos := []RepositoryConfiguration{
expectedFolders := []FolderConfiguration{
{
ID: "test",
Directory: "~/Sync",
Nodes: []RepositoryNodeConfiguration{{NodeID: node1}, {NodeID: node4}},
Devices: []FolderDeviceConfiguration{{DeviceID: device1}, {DeviceID: device4}},
ReadOnly: true,
RescanIntervalS: 600,
},
}
expectedNodes := []NodeConfiguration{
expectedDevices := []DeviceConfiguration{
{
NodeID: node1,
Name: "node one",
DeviceID: device1,
Name: "device one",
Addresses: []string{"a"},
Compression: true,
},
{
NodeID: node4,
Name: "node two",
DeviceID: device4,
Name: "device two",
Addresses: []string{"b"},
Compression: true,
},
}
expectedNodeIDs := []protocol.NodeID{node1, node4}
expectedDeviceIDs := []protocol.DeviceID{device1, device4}
if cfg.Version != 4 {
t.Errorf("%d: Incorrect version %d != 3", i, cfg.Version)
}
if !reflect.DeepEqual(cfg.Repositories, expectedRepos) {
t.Errorf("%d: Incorrect Repositories\n A: %#v\n E: %#v", i, cfg.Repositories, expectedRepos)
if !reflect.DeepEqual(cfg.Folders, expectedFolders) {
t.Errorf("%d: Incorrect Folders\n A: %#v\n E: %#v", i, cfg.Folders, expectedFolders)
}
if !reflect.DeepEqual(cfg.Nodes, expectedNodes) {
t.Errorf("%d: Incorrect Nodes\n A: %#v\n E: %#v", i, cfg.Nodes, expectedNodes)
if !reflect.DeepEqual(cfg.Devices, expectedDevices) {
t.Errorf("%d: Incorrect Devices\n A: %#v\n E: %#v", i, cfg.Devices, expectedDevices)
}
if !reflect.DeepEqual(cfg.Repositories[0].NodeIDs(), expectedNodeIDs) {
t.Errorf("%d: Incorrect NodeIDs\n A: %#v\n E: %#v", i, cfg.Repositories[0].NodeIDs(), expectedNodeIDs)
if !reflect.DeepEqual(cfg.Folders[0].DeviceIDs(), expectedDeviceIDs) {
t.Errorf("%d: Incorrect DeviceIDs\n A: %#v\n E: %#v", i, cfg.Folders[0].DeviceIDs(), expectedDeviceIDs)
}
if len(cfg.NodeMap()) != len(expectedNodes) {
t.Errorf("Unexpected number of NodeMap() entries")
if len(cfg.DeviceMap()) != len(expectedDevices) {
t.Errorf("Unexpected number of DeviceMap() entries")
}
if len(cfg.RepoMap()) != len(expectedRepos) {
t.Errorf("Unexpected number of RepoMap() entries")
if len(cfg.FolderMap()) != len(expectedFolders) {
t.Errorf("Unexpected number of FolderMap() entries")
}
}
}
func TestNoListenAddress(t *testing.T) {
cfg, err := Load("testdata/nolistenaddress.xml", node1)
cfg, err := Load("testdata/nolistenaddress.xml", device1)
if err != nil {
t.Error(err)
}
@ -130,7 +130,7 @@ func TestOverriddenValues(t *testing.T) {
RestartOnWakeup: false,
}
cfg, err := Load("testdata/overridenvalues.xml", node1)
cfg, err := Load("testdata/overridenvalues.xml", device1)
if err != nil {
t.Error(err)
}
@ -140,80 +140,80 @@ func TestOverriddenValues(t *testing.T) {
}
}
func TestNodeAddressesDynamic(t *testing.T) {
func TestDeviceAddressesDynamic(t *testing.T) {
name, _ := os.Hostname()
expected := []NodeConfiguration{
expected := []DeviceConfiguration{
{
NodeID: node1,
DeviceID: device1,
Addresses: []string{"dynamic"},
Compression: true,
},
{
NodeID: node2,
DeviceID: device2,
Addresses: []string{"dynamic"},
Compression: true,
},
{
NodeID: node3,
DeviceID: device3,
Addresses: []string{"dynamic"},
Compression: true,
},
{
NodeID: node4,
DeviceID: device4,
Name: name, // Set when auto created
Addresses: []string{"dynamic"},
},
}
cfg, err := Load("testdata/nodeaddressesdynamic.xml", node4)
cfg, err := Load("testdata/deviceaddressesdynamic.xml", device4)
if err != nil {
t.Error(err)
}
if !reflect.DeepEqual(cfg.Nodes, expected) {
t.Errorf("Nodes differ;\n E: %#v\n A: %#v", expected, cfg.Nodes)
if !reflect.DeepEqual(cfg.Devices, expected) {
t.Errorf("Devices differ;\n E: %#v\n A: %#v", expected, cfg.Devices)
}
}
func TestNodeAddressesStatic(t *testing.T) {
func TestDeviceAddressesStatic(t *testing.T) {
name, _ := os.Hostname()
expected := []NodeConfiguration{
expected := []DeviceConfiguration{
{
NodeID: node1,
DeviceID: device1,
Addresses: []string{"192.0.2.1", "192.0.2.2"},
},
{
NodeID: node2,
DeviceID: device2,
Addresses: []string{"192.0.2.3:6070", "[2001:db8::42]:4242"},
},
{
NodeID: node3,
DeviceID: device3,
Addresses: []string{"[2001:db8::44]:4444", "192.0.2.4:6090"},
},
{
NodeID: node4,
DeviceID: device4,
Name: name, // Set when auto created
Addresses: []string{"dynamic"},
},
}
cfg, err := Load("testdata/nodeaddressesstatic.xml", node4)
cfg, err := Load("testdata/deviceaddressesstatic.xml", device4)
if err != nil {
t.Error(err)
}
if !reflect.DeepEqual(cfg.Nodes, expected) {
t.Errorf("Nodes differ;\n E: %#v\n A: %#v", expected, cfg.Nodes)
if !reflect.DeepEqual(cfg.Devices, expected) {
t.Errorf("Devices differ;\n E: %#v\n A: %#v", expected, cfg.Devices)
}
}
func TestVersioningConfig(t *testing.T) {
cfg, err := Load("testdata/versioningconfig.xml", node4)
cfg, err := Load("testdata/versioningconfig.xml", device4)
if err != nil {
t.Error(err)
}
vc := cfg.Repositories[0].Versioning
vc := cfg.Folders[0].Versioning
if vc.Type != "simple" {
t.Errorf(`vc.Type %q != "simple"`, vc.Type)
}
@ -239,7 +239,7 @@ func TestNewSaveLoad(t *testing.T) {
return err == nil
}
cfg := New(path, node1)
cfg := New(path, device1)
// To make the equality pass later
cfg.XMLName.Local = "configuration"
@ -256,7 +256,7 @@ func TestNewSaveLoad(t *testing.T) {
t.Error(path, "does not exist")
}
cfg2, err := Load(path, node1)
cfg2, err := Load(path, device1)
if err != nil {
t.Error(err)
}
@ -268,7 +268,7 @@ func TestNewSaveLoad(t *testing.T) {
cfg.GUI.User = "test"
cfg.Save()
cfg2, err = Load(path, node1)
cfg2, err = Load(path, device1)
if err != nil {
t.Error(err)
}
@ -283,13 +283,13 @@ func TestNewSaveLoad(t *testing.T) {
func TestPrepare(t *testing.T) {
var cfg Configuration
if cfg.Repositories != nil || cfg.Nodes != nil || cfg.Options.ListenAddress != nil {
if cfg.Folders != nil || cfg.Devices != nil || cfg.Options.ListenAddress != nil {
t.Error("Expected nil")
}
cfg.prepare(node1)
cfg.prepare(device1)
if cfg.Repositories == nil || cfg.Nodes == nil || cfg.Options.ListenAddress == nil {
if cfg.Folders == nil || cfg.Devices == nil || cfg.Options.ListenAddress == nil {
t.Error("Unexpected nil")
}
}

View File

@ -20,7 +20,7 @@ import (
)
type Discoverer struct {
myID protocol.NodeID
myID protocol.DeviceID
listenAddrs []string
localBcastIntv time.Duration
globalBcastIntv time.Duration
@ -28,7 +28,7 @@ type Discoverer struct {
cacheLifetime time.Duration
broadcastBeacon beacon.Interface
multicastBeacon beacon.Interface
registry map[protocol.NodeID][]cacheEntry
registry map[protocol.DeviceID][]cacheEntry
registryLock sync.RWMutex
extServer string
extPort uint16
@ -49,7 +49,7 @@ var (
ErrIncorrectMagic = errors.New("incorrect magic number")
)
func NewDiscoverer(id protocol.NodeID, addresses []string) *Discoverer {
func NewDiscoverer(id protocol.DeviceID, addresses []string) *Discoverer {
return &Discoverer{
myID: id,
listenAddrs: addresses,
@ -57,7 +57,7 @@ func NewDiscoverer(id protocol.NodeID, addresses []string) *Discoverer {
globalBcastIntv: 1800 * time.Second,
errorRetryIntv: 60 * time.Second,
cacheLifetime: 5 * time.Minute,
registry: make(map[protocol.NodeID][]cacheEntry),
registry: make(map[protocol.DeviceID][]cacheEntry),
}
}
@ -120,9 +120,9 @@ func (d *Discoverer) ExtAnnounceOK() bool {
return d.extAnnounceOK
}
func (d *Discoverer) Lookup(node protocol.NodeID) []string {
func (d *Discoverer) Lookup(device protocol.DeviceID) []string {
d.registryLock.Lock()
cached := d.filterCached(d.registry[node])
cached := d.filterCached(d.registry[device])
d.registryLock.Unlock()
if len(cached) > 0 {
@ -132,7 +132,7 @@ func (d *Discoverer) Lookup(node protocol.NodeID) []string {
}
return addrs
} else if len(d.extServer) != 0 {
addrs := d.externalLookup(node)
addrs := d.externalLookup(device)
cached = make([]cacheEntry, len(addrs))
for i := range addrs {
cached[i] = cacheEntry{
@ -142,32 +142,32 @@ func (d *Discoverer) Lookup(node protocol.NodeID) []string {
}
d.registryLock.Lock()
d.registry[node] = cached
d.registry[device] = cached
d.registryLock.Unlock()
}
return nil
}
func (d *Discoverer) Hint(node string, addrs []string) {
func (d *Discoverer) Hint(device string, addrs []string) {
resAddrs := resolveAddrs(addrs)
var id protocol.NodeID
id.UnmarshalText([]byte(node))
d.registerNode(nil, Node{
var id protocol.DeviceID
id.UnmarshalText([]byte(device))
d.registerDevice(nil, Device{
Addresses: resAddrs,
ID: id[:],
})
}
func (d *Discoverer) All() map[protocol.NodeID][]cacheEntry {
func (d *Discoverer) All() map[protocol.DeviceID][]cacheEntry {
d.registryLock.RLock()
nodes := make(map[protocol.NodeID][]cacheEntry, len(d.registry))
for node, addrs := range d.registry {
devices := make(map[protocol.DeviceID][]cacheEntry, len(d.registry))
for device, addrs := range d.registry {
addrsCopy := make([]cacheEntry, len(addrs))
copy(addrsCopy, addrs)
nodes[node] = addrsCopy
devices[device] = addrsCopy
}
d.registryLock.RUnlock()
return nodes
return devices
}
func (d *Discoverer) announcementPkt() []byte {
@ -190,7 +190,7 @@ func (d *Discoverer) announcementPkt() []byte {
}
var pkt = Announce{
Magic: AnnouncementMagic,
This: Node{d.myID[:], addrs},
This: Device{d.myID[:], addrs},
}
return pkt.MarshalXDR()
}
@ -200,7 +200,7 @@ func (d *Discoverer) sendLocalAnnouncements() {
var pkt = Announce{
Magic: AnnouncementMagic,
This: Node{d.myID[:], addrs},
This: Device{d.myID[:], addrs},
}
msg := pkt.MarshalXDR()
@ -240,7 +240,7 @@ func (d *Discoverer) sendExternalAnnouncements() {
if d.extPort != 0 {
var pkt = Announce{
Magic: AnnouncementMagic,
This: Node{d.myID[:], []Address{{Port: d.extPort}}},
This: Device{d.myID[:], []Address{{Port: d.extPort}}},
}
buf = pkt.MarshalXDR()
} else {
@ -264,7 +264,7 @@ func (d *Discoverer) sendExternalAnnouncements() {
}
ok = false
} else {
// Verify that the announce server responds positively for our node ID
// Verify that the announce server responds positively for our device ID
time.Sleep(1 * time.Second)
res := d.externalLookup(d.myID)
@ -321,12 +321,12 @@ func (d *Discoverer) recvAnnouncements(b beacon.Interface) {
continue
}
var newNode bool
var newDevice bool
if bytes.Compare(pkt.This.ID, d.myID[:]) != 0 {
newNode = d.registerNode(addr, pkt.This)
newDevice = d.registerDevice(addr, pkt.This)
}
if newNode {
if newDevice {
select {
case d.forcedBcastTick <- time.Now():
}
@ -334,9 +334,9 @@ func (d *Discoverer) recvAnnouncements(b beacon.Interface) {
}
}
func (d *Discoverer) registerNode(addr net.Addr, node Node) bool {
var id protocol.NodeID
copy(id[:], node.ID)
func (d *Discoverer) registerDevice(addr net.Addr, device Device) bool {
var id protocol.DeviceID
copy(id[:], device.ID)
d.registryLock.RLock()
current := d.filterCached(d.registry[id])
@ -344,23 +344,23 @@ func (d *Discoverer) registerNode(addr net.Addr, node Node) bool {
orig := current
for _, a := range node.Addresses {
var nodeAddr string
for _, a := range device.Addresses {
var deviceAddr string
if len(a.IP) > 0 {
nodeAddr = net.JoinHostPort(net.IP(a.IP).String(), strconv.Itoa(int(a.Port)))
deviceAddr = net.JoinHostPort(net.IP(a.IP).String(), strconv.Itoa(int(a.Port)))
} else if addr != nil {
ua := addr.(*net.UDPAddr)
ua.Port = int(a.Port)
nodeAddr = ua.String()
deviceAddr = ua.String()
}
for i := range current {
if current[i].addr == nodeAddr {
if current[i].addr == deviceAddr {
current[i].seen = time.Now()
goto done
}
}
current = append(current, cacheEntry{
addr: nodeAddr,
addr: deviceAddr,
seen: time.Now(),
})
done:
@ -379,8 +379,8 @@ func (d *Discoverer) registerNode(addr net.Addr, node Node) bool {
for i := range current {
addrs[i] = current[i].addr
}
events.Default.Log(events.NodeDiscovered, map[string]interface{}{
"node": id.String(),
events.Default.Log(events.DeviceDiscovered, map[string]interface{}{
"device": id.String(),
"addrs": addrs,
})
}
@ -388,7 +388,7 @@ func (d *Discoverer) registerNode(addr net.Addr, node Node) bool {
return len(current) > len(orig)
}
func (d *Discoverer) externalLookup(node protocol.NodeID) []string {
func (d *Discoverer) externalLookup(device protocol.DeviceID) []string {
extIP, err := net.ResolveUDPAddr("udp", d.extServer)
if err != nil {
if debug {
@ -414,7 +414,7 @@ func (d *Discoverer) externalLookup(node protocol.NodeID) []string {
return nil
}
buf := Query{QueryMagic, node[:]}.MarshalXDR()
buf := Query{QueryMagic, device[:]}.MarshalXDR()
_, err = conn.Write(buf)
if err != nil {
if debug {
@ -427,7 +427,7 @@ func (d *Discoverer) externalLookup(node protocol.NodeID) []string {
n, err := conn.Read(buf)
if err != nil {
if err, ok := err.(net.Error); ok && err.Timeout() {
// Expected if the server doesn't know about requested node ID
// Expected if the server doesn't know about requested device ID
return nil
}
if debug {
@ -451,8 +451,8 @@ func (d *Discoverer) externalLookup(node protocol.NodeID) []string {
var addrs []string
for _, a := range pkt.This.Addresses {
nodeAddr := net.JoinHostPort(net.IP(a.IP).String(), strconv.Itoa(int(a.Port)))
addrs = append(addrs, nodeAddr)
deviceAddr := net.JoinHostPort(net.IP(a.IP).String(), strconv.Itoa(int(a.Port)))
addrs = append(addrs, deviceAddr)
}
return addrs
}
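To show the renamed discovery API end to end, a hypothetical usage sketch (not part of this commit; it assumes the package is imported as internal/discover and that the discoverer has been started elsewhere so its cache can fill):

package main

import (
	"log"

	"github.com/syncthing/syncthing/internal/discover"
	"github.com/syncthing/syncthing/internal/protocol"
)

func main() {
	myID, _ := protocol.DeviceIDFromString("AIR6LPZ-7K4PTTV-UXQSMUU-CPQ5YWH-OEDFIIQ-JUG777G-2YQXXR5-YD6AWQR")
	other, _ := protocol.DeviceIDFromString("I6KAH76-66SLLLB-5PFXSOA-UFJCDZC-YAOMLEK-CP2GB32-BV5RQST-3PSROAU")

	// The discoverer announces our own addresses and caches those it hears about.
	d := discover.NewDiscoverer(myID, []string{":22000"})

	// Lookup answers from the local cache first and falls back to the external
	// announce server when one is configured.
	for _, addr := range d.Lookup(other) {
		log.Println("candidate address for", other, "is", addr)
	}
}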

View File

@ -2,5 +2,5 @@
// All rights reserved. Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
// Package discover implements the node discovery protocol.
// Package discover implements the device discovery protocol.
package discover

View File

@ -11,16 +11,16 @@ const (
type Query struct {
Magic uint32
NodeID []byte // max:32
DeviceID []byte // max:32
}
type Announce struct {
Magic uint32
This Node
Extra []Node // max:16
This Device
Extra []Device // max:16
}
type Node struct {
type Device struct {
ID []byte // max:32
Addresses []Address // max:16
}

View File

@ -20,17 +20,17 @@ Query Structure:
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Magic |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Length of Node ID |
| Length of Device ID |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
/ /
\ Node ID (variable length) \
\ Device ID (variable length) \
/ /
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
struct Query {
unsigned int Magic;
opaque NodeID<32>;
opaque DeviceID<32>;
}
*/
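As a concrete illustration of the renamed field (a sketch, not part of this commit), a lookup packet for a device would be built and serialized like this, reusing QueryMagic and the XDR code in this file:

// buildQuery is a hypothetical helper; id is the raw device ID, i.e. a
// protocol.DeviceID sliced as id[:], at most 32 bytes.
func buildQuery(id []byte) []byte {
	q := Query{
		Magic:    QueryMagic,
		DeviceID: id,
	}
	return q.MarshalXDR()
}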
@ -53,10 +53,10 @@ func (o Query) AppendXDR(bs []byte) []byte {
func (o Query) encodeXDR(xw *xdr.Writer) (int, error) {
xw.WriteUint32(o.Magic)
if len(o.NodeID) > 32 {
if len(o.DeviceID) > 32 {
return xw.Tot(), xdr.ErrElementSizeExceeded
}
xw.WriteBytes(o.NodeID)
xw.WriteBytes(o.DeviceID)
return xw.Tot(), xw.Error()
}
@ -73,7 +73,7 @@ func (o *Query) UnmarshalXDR(bs []byte) error {
func (o *Query) decodeXDR(xr *xdr.Reader) error {
o.Magic = xr.ReadUint32()
o.NodeID = xr.ReadBytesMax(32)
o.DeviceID = xr.ReadBytesMax(32)
return xr.Error()
}
@ -86,20 +86,20 @@ Announce Structure:
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Magic |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Node |
| Device |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Number of Extra |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
/ /
\ Zero or more Node Structures \
\ Zero or more Device Structures \
/ /
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
struct Announce {
unsigned int Magic;
Node This;
Node Extra<16>;
Device This;
Device Extra<16>;
}
*/
@ -157,7 +157,7 @@ func (o *Announce) decodeXDR(xr *xdr.Reader) error {
if _ExtraSize > 16 {
return xdr.ErrElementSizeExceeded
}
o.Extra = make([]Node, _ExtraSize)
o.Extra = make([]Device, _ExtraSize)
for i := range o.Extra {
(&o.Extra[i]).decodeXDR(xr)
}
@ -166,7 +166,7 @@ func (o *Announce) decodeXDR(xr *xdr.Reader) error {
/*
Node Structure:
Device Structure:
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
@ -185,30 +185,30 @@ Node Structure:
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
struct Node {
struct Device {
opaque ID<32>;
Address Addresses<16>;
}
*/
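And on the announcement side, a sketch (again not part of the commit) of filling in the renamed Device structure, mirroring what sendExternalAnnouncements does when only a port is known:

// buildAnnounce is a hypothetical helper; id is the raw device ID and port the
// port we are reachable on. The IP is left empty, so receivers fall back to the
// packet's source address, as registerDevice does.
func buildAnnounce(id []byte, port uint16) []byte {
	pkt := Announce{
		Magic: AnnouncementMagic,
		This:  Device{ID: id, Addresses: []Address{{Port: port}}},
	}
	return pkt.MarshalXDR()
}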
func (o Node) EncodeXDR(w io.Writer) (int, error) {
func (o Device) EncodeXDR(w io.Writer) (int, error) {
var xw = xdr.NewWriter(w)
return o.encodeXDR(xw)
}
func (o Node) MarshalXDR() []byte {
func (o Device) MarshalXDR() []byte {
return o.AppendXDR(make([]byte, 0, 128))
}
func (o Node) AppendXDR(bs []byte) []byte {
func (o Device) AppendXDR(bs []byte) []byte {
var aw = xdr.AppendWriter(bs)
var xw = xdr.NewWriter(&aw)
o.encodeXDR(xw)
return []byte(aw)
}
func (o Node) encodeXDR(xw *xdr.Writer) (int, error) {
func (o Device) encodeXDR(xw *xdr.Writer) (int, error) {
if len(o.ID) > 32 {
return xw.Tot(), xdr.ErrElementSizeExceeded
}
@ -226,18 +226,18 @@ func (o Node) encodeXDR(xw *xdr.Writer) (int, error) {
return xw.Tot(), xw.Error()
}
func (o *Node) DecodeXDR(r io.Reader) error {
func (o *Device) DecodeXDR(r io.Reader) error {
xr := xdr.NewReader(r)
return o.decodeXDR(xr)
}
func (o *Node) UnmarshalXDR(bs []byte) error {
func (o *Device) UnmarshalXDR(bs []byte) error {
var br = bytes.NewReader(bs)
var xr = xdr.NewReader(br)
return o.decodeXDR(xr)
}
func (o *Node) decodeXDR(xr *xdr.Reader) error {
func (o *Device) decodeXDR(xr *xdr.Reader) error {
o.ID = xr.ReadBytesMax(32)
_AddressesSize := int(xr.ReadUint32())
if _AddressesSize > 16 {

View File

@ -17,15 +17,15 @@ const (
Ping = 1 << iota
Starting
StartupComplete
NodeDiscovered
NodeConnected
NodeDisconnected
NodeRejected
DeviceDiscovered
DeviceConnected
DeviceDisconnected
DeviceRejected
LocalIndexUpdated
RemoteIndexUpdated
ItemStarted
StateChanged
RepoRejected
FolderRejected
ConfigSaved
AllEvents = ^EventType(0)
@ -39,14 +39,14 @@ func (t EventType) String() string {
return "Starting"
case StartupComplete:
return "StartupComplete"
case NodeDiscovered:
return "NodeDiscovered"
case NodeConnected:
return "NodeConnected"
case NodeDisconnected:
return "NodeDisconnected"
case NodeRejected:
return "NodeRejected"
case DeviceDiscovered:
return "DeviceDiscovered"
case DeviceConnected:
return "DeviceConnected"
case DeviceDisconnected:
return "DeviceDisconnected"
case DeviceRejected:
return "DeviceRejected"
case LocalIndexUpdated:
return "LocalIndexUpdated"
case RemoteIndexUpdated:
@ -55,8 +55,8 @@ func (t EventType) String() string {
return "ItemStarted"
case StateChanged:
return "StateChanged"
case RepoRejected:
return "RepoRejected"
case FolderRejected:
return "FolderRejected"
case ConfigSaved:
return "ConfigSaved"
default:

View File

@ -41,7 +41,7 @@ func TestTimeout(t *testing.T) {
func TestEventBeforeSubscribe(t *testing.T) {
l := events.NewLogger()
l.Log(events.NodeConnected, "foo")
l.Log(events.DeviceConnected, "foo")
s := l.Subscribe(0)
_, err := s.Poll(timeout)
@ -54,14 +54,14 @@ func TestEventAfterSubscribe(t *testing.T) {
l := events.NewLogger()
s := l.Subscribe(events.AllEvents)
l.Log(events.NodeConnected, "foo")
l.Log(events.DeviceConnected, "foo")
ev, err := s.Poll(timeout)
if err != nil {
t.Fatal("Unexpected error:", err)
}
if ev.Type != events.NodeConnected {
if ev.Type != events.DeviceConnected {
t.Error("Incorrect event type", ev.Type)
}
switch v := ev.Data.(type) {
@ -77,8 +77,8 @@ func TestEventAfterSubscribe(t *testing.T) {
func TestEventAfterSubscribeIgnoreMask(t *testing.T) {
l := events.NewLogger()
s := l.Subscribe(events.NodeDisconnected)
l.Log(events.NodeConnected, "foo")
s := l.Subscribe(events.DeviceDisconnected)
l.Log(events.DeviceConnected, "foo")
_, err := s.Poll(timeout)
if err != events.ErrTimeout {
@ -93,7 +93,7 @@ func TestBufferOverflow(t *testing.T) {
t0 := time.Now()
for i := 0; i < events.BufferSize*2; i++ {
l.Log(events.NodeConnected, "foo")
l.Log(events.DeviceConnected, "foo")
}
if time.Since(t0) > timeout {
t.Fatalf("Logging took too long")
@ -104,7 +104,7 @@ func TestUnsubscribe(t *testing.T) {
l := events.NewLogger()
s := l.Subscribe(events.AllEvents)
l.Log(events.NodeConnected, "foo")
l.Log(events.DeviceConnected, "foo")
_, err := s.Poll(timeout)
if err != nil {
@ -112,7 +112,7 @@ func TestUnsubscribe(t *testing.T) {
}
l.Unsubscribe(s)
l.Log(events.NodeConnected, "foo")
l.Log(events.DeviceConnected, "foo")
_, err = s.Poll(timeout)
if err != events.ErrClosed {
@ -124,8 +124,8 @@ func TestIDs(t *testing.T) {
l := events.NewLogger()
s := l.Subscribe(events.AllEvents)
l.Log(events.NodeConnected, "foo")
l.Log(events.NodeConnected, "bar")
l.Log(events.DeviceConnected, "foo")
l.Log(events.DeviceConnected, "bar")
ev, err := s.Poll(timeout)
if err != nil {
@ -156,7 +156,7 @@ func TestBufferedSub(t *testing.T) {
go func() {
for i := 0; i < 10*events.BufferSize; i++ {
l.Log(events.NodeConnected, fmt.Sprintf("event-%d", i))
l.Log(events.DeviceConnected, fmt.Sprintf("event-%d", i))
if i%30 == 0 {
// Give the buffer routine time to pick up the events
time.Sleep(20 * time.Millisecond)

View File

@ -35,13 +35,13 @@ func clock(v uint64) uint64 {
}
const (
keyTypeNode = iota
keyTypeDevice = iota
keyTypeGlobal
)
type fileVersion struct {
version uint64
node []byte
device []byte
}
type versionList struct {
@ -73,47 +73,47 @@ type dbWriter interface {
/*
keyTypeNode (1 byte)
repository (64 bytes)
node (32 bytes)
keyTypeDevice (1 byte)
folder (64 bytes)
device (32 bytes)
name (variable size)
|
scanner.File
keyTypeGlobal (1 byte)
repository (64 bytes)
folder (64 bytes)
name (variable size)
|
[]fileVersion (sorted)
*/
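To make the renamed key layout concrete, a worked example (illustrative only) of what deviceKey below produces for a folder named "default", a device ID d (a protocol.DeviceID) and the file "docs/report.txt":

k := deviceKey([]byte("default"), d[:], []byte("docs/report.txt"))
// k[0]     == keyTypeDevice
// k[1:65]  == "default", zero padded to 64 bytes
// k[65:97] == d[:], the 32-byte device ID
// k[97:]   == "docs/report.txt"
// deviceKeyFolder, deviceKeyDevice and deviceKeyName slice these parts back out.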
func nodeKey(repo, node, file []byte) []byte {
func deviceKey(folder, device, file []byte) []byte {
k := make([]byte, 1+64+32+len(file))
k[0] = keyTypeNode
copy(k[1:], []byte(repo))
copy(k[1+64:], node[:])
k[0] = keyTypeDevice
copy(k[1:], []byte(folder))
copy(k[1+64:], device[:])
copy(k[1+64+32:], []byte(file))
return k
}
func globalKey(repo, file []byte) []byte {
func globalKey(folder, file []byte) []byte {
k := make([]byte, 1+64+len(file))
k[0] = keyTypeGlobal
copy(k[1:], []byte(repo))
copy(k[1:], []byte(folder))
copy(k[1+64:], []byte(file))
return k
}
func nodeKeyName(key []byte) []byte {
func deviceKeyName(key []byte) []byte {
return key[1+64+32:]
}
func nodeKeyRepo(key []byte) []byte {
repo := key[1 : 1+64]
izero := bytes.IndexByte(repo, 0)
return repo[:izero]
func deviceKeyFolder(key []byte) []byte {
folder := key[1 : 1+64]
izero := bytes.IndexByte(folder, 0)
return folder[:izero]
}
func nodeKeyNode(key []byte) []byte {
func deviceKeyDevice(key []byte) []byte {
return key[1+64 : 1+64+32]
}
@ -121,23 +121,23 @@ func globalKeyName(key []byte) []byte {
return key[1+64:]
}
func globalKeyRepo(key []byte) []byte {
repo := key[1 : 1+64]
izero := bytes.IndexByte(repo, 0)
return repo[:izero]
func globalKeyFolder(key []byte) []byte {
folder := key[1 : 1+64]
izero := bytes.IndexByte(folder, 0)
return folder[:izero]
}
type deletionHandler func(db dbReader, batch dbWriter, repo, node, name []byte, dbi iterator.Iterator) uint64
type deletionHandler func(db dbReader, batch dbWriter, folder, device, name []byte, dbi iterator.Iterator) uint64
type fileIterator func(f protocol.FileIntf) bool
func ldbGenericReplace(db *leveldb.DB, repo, node []byte, fs []protocol.FileInfo, deleteFn deletionHandler) uint64 {
func ldbGenericReplace(db *leveldb.DB, folder, device []byte, fs []protocol.FileInfo, deleteFn deletionHandler) uint64 {
runtime.GC()
sort.Sort(fileList(fs)) // sort list on name, same as on disk
start := nodeKey(repo, node, nil) // before all repo/node files
limit := nodeKey(repo, node, []byte{0xff, 0xff, 0xff, 0xff}) // after all repo/node files
start := deviceKey(folder, device, nil) // before all folder/device files
limit := deviceKey(folder, device, []byte{0xff, 0xff, 0xff, 0xff}) // after all folder/device files
batch := new(leveldb.Batch)
snap, err := db.GetSnapshot()
@ -171,42 +171,42 @@ func ldbGenericReplace(db *leveldb.DB, repo, node []byte, fs []protocol.FileInfo
}
if moreDb {
oldName = nodeKeyName(dbi.Key())
oldName = deviceKeyName(dbi.Key())
}
cmp := bytes.Compare(newName, oldName)
if debug {
l.Debugf("generic replace; repo=%q node=%v moreFs=%v moreDb=%v cmp=%d newName=%q oldName=%q", repo, protocol.NodeIDFromBytes(node), moreFs, moreDb, cmp, newName, oldName)
l.Debugf("generic replace; folder=%q device=%v moreFs=%v moreDb=%v cmp=%d newName=%q oldName=%q", folder, protocol.DeviceIDFromBytes(device), moreFs, moreDb, cmp, newName, oldName)
}
switch {
case moreFs && (!moreDb || cmp == -1):
// Disk is missing this file. Insert it.
if lv := ldbInsert(batch, repo, node, newName, fs[fsi]); lv > maxLocalVer {
if lv := ldbInsert(batch, folder, device, newName, fs[fsi]); lv > maxLocalVer {
maxLocalVer = lv
}
if fs[fsi].IsInvalid() {
ldbRemoveFromGlobal(snap, batch, repo, node, newName)
ldbRemoveFromGlobal(snap, batch, folder, device, newName)
} else {
ldbUpdateGlobal(snap, batch, repo, node, newName, fs[fsi].Version)
ldbUpdateGlobal(snap, batch, folder, device, newName, fs[fsi].Version)
}
fsi++
case moreFs && moreDb && cmp == 0:
// File exists on both sides - compare versions. We might get an
// update with the same version and different flags if a node has
// update with the same version and different flags if a device has
// marked a file as invalid, so handle that too.
var ef protocol.FileInfoTruncated
ef.UnmarshalXDR(dbi.Value())
if fs[fsi].Version > ef.Version || fs[fsi].Version != ef.Version {
if lv := ldbInsert(batch, repo, node, newName, fs[fsi]); lv > maxLocalVer {
if lv := ldbInsert(batch, folder, device, newName, fs[fsi]); lv > maxLocalVer {
maxLocalVer = lv
}
if fs[fsi].IsInvalid() {
ldbRemoveFromGlobal(snap, batch, repo, node, newName)
ldbRemoveFromGlobal(snap, batch, folder, device, newName)
} else {
ldbUpdateGlobal(snap, batch, repo, node, newName, fs[fsi].Version)
ldbUpdateGlobal(snap, batch, folder, device, newName, fs[fsi].Version)
}
}
// Iterate both sides.
@ -215,7 +215,7 @@ func ldbGenericReplace(db *leveldb.DB, repo, node []byte, fs []protocol.FileInfo
case moreDb && (!moreFs || cmp == 1):
if deleteFn != nil {
if lv := deleteFn(snap, batch, repo, node, oldName, dbi); lv > maxLocalVer {
if lv := deleteFn(snap, batch, folder, device, oldName, dbi); lv > maxLocalVer {
maxLocalVer = lv
}
}
@ -231,21 +231,21 @@ func ldbGenericReplace(db *leveldb.DB, repo, node []byte, fs []protocol.FileInfo
return maxLocalVer
}
func ldbReplace(db *leveldb.DB, repo, node []byte, fs []protocol.FileInfo) uint64 {
func ldbReplace(db *leveldb.DB, folder, device []byte, fs []protocol.FileInfo) uint64 {
// TODO: Return the remaining maxLocalVer?
return ldbGenericReplace(db, repo, node, fs, func(db dbReader, batch dbWriter, repo, node, name []byte, dbi iterator.Iterator) uint64 {
return ldbGenericReplace(db, folder, device, fs, func(db dbReader, batch dbWriter, folder, device, name []byte, dbi iterator.Iterator) uint64 {
// Disk has files that we are missing. Remove it.
if debug {
l.Debugf("delete; repo=%q node=%v name=%q", repo, protocol.NodeIDFromBytes(node), name)
l.Debugf("delete; folder=%q device=%v name=%q", folder, protocol.DeviceIDFromBytes(device), name)
}
ldbRemoveFromGlobal(db, batch, repo, node, name)
ldbRemoveFromGlobal(db, batch, folder, device, name)
batch.Delete(dbi.Key())
return 0
})
}
func ldbReplaceWithDelete(db *leveldb.DB, repo, node []byte, fs []protocol.FileInfo) uint64 {
return ldbGenericReplace(db, repo, node, fs, func(db dbReader, batch dbWriter, repo, node, name []byte, dbi iterator.Iterator) uint64 {
func ldbReplaceWithDelete(db *leveldb.DB, folder, device []byte, fs []protocol.FileInfo) uint64 {
return ldbGenericReplace(db, folder, device, fs, func(db dbReader, batch dbWriter, folder, device, name []byte, dbi iterator.Iterator) uint64 {
var tf protocol.FileInfoTruncated
err := tf.UnmarshalXDR(dbi.Value())
if err != nil {
@ -253,7 +253,7 @@ func ldbReplaceWithDelete(db *leveldb.DB, repo, node []byte, fs []protocol.FileI
}
if !tf.IsDeleted() {
if debug {
l.Debugf("mark deleted; repo=%q node=%v name=%q", repo, protocol.NodeIDFromBytes(node), name)
l.Debugf("mark deleted; folder=%q device=%v name=%q", folder, protocol.DeviceIDFromBytes(device), name)
}
ts := clock(tf.LocalVersion)
f := protocol.FileInfo{
@ -264,14 +264,14 @@ func ldbReplaceWithDelete(db *leveldb.DB, repo, node []byte, fs []protocol.FileI
Modified: tf.Modified,
}
batch.Put(dbi.Key(), f.MarshalXDR())
ldbUpdateGlobal(db, batch, repo, node, nodeKeyName(dbi.Key()), f.Version)
ldbUpdateGlobal(db, batch, folder, device, deviceKeyName(dbi.Key()), f.Version)
return ts
}
return 0
})
}
func ldbUpdate(db *leveldb.DB, repo, node []byte, fs []protocol.FileInfo) uint64 {
func ldbUpdate(db *leveldb.DB, folder, device []byte, fs []protocol.FileInfo) uint64 {
runtime.GC()
batch := new(leveldb.Batch)
@ -284,16 +284,16 @@ func ldbUpdate(db *leveldb.DB, repo, node []byte, fs []protocol.FileInfo) uint64
var maxLocalVer uint64
for _, f := range fs {
name := []byte(f.Name)
fk := nodeKey(repo, node, name)
fk := deviceKey(folder, device, name)
bs, err := snap.Get(fk, nil)
if err == leveldb.ErrNotFound {
if lv := ldbInsert(batch, repo, node, name, f); lv > maxLocalVer {
if lv := ldbInsert(batch, folder, device, name, f); lv > maxLocalVer {
maxLocalVer = lv
}
if f.IsInvalid() {
ldbRemoveFromGlobal(snap, batch, repo, node, name)
ldbRemoveFromGlobal(snap, batch, folder, device, name)
} else {
ldbUpdateGlobal(snap, batch, repo, node, name, f.Version)
ldbUpdateGlobal(snap, batch, folder, device, name, f.Version)
}
continue
}
@ -306,13 +306,13 @@ func ldbUpdate(db *leveldb.DB, repo, node []byte, fs []protocol.FileInfo) uint64
// Flags might change without the version being bumped when we set the
// invalid flag on an existing file.
if ef.Version != f.Version || ef.Flags != f.Flags {
if lv := ldbInsert(batch, repo, node, name, f); lv > maxLocalVer {
if lv := ldbInsert(batch, folder, device, name, f); lv > maxLocalVer {
maxLocalVer = lv
}
if f.IsInvalid() {
ldbRemoveFromGlobal(snap, batch, repo, node, name)
ldbRemoveFromGlobal(snap, batch, folder, device, name)
} else {
ldbUpdateGlobal(snap, batch, repo, node, name, f.Version)
ldbUpdateGlobal(snap, batch, folder, device, name, f.Version)
}
}
}
@ -325,29 +325,29 @@ func ldbUpdate(db *leveldb.DB, repo, node []byte, fs []protocol.FileInfo) uint64
return maxLocalVer
}
func ldbInsert(batch dbWriter, repo, node, name []byte, file protocol.FileInfo) uint64 {
func ldbInsert(batch dbWriter, folder, device, name []byte, file protocol.FileInfo) uint64 {
if debug {
l.Debugf("insert; repo=%q node=%v %v", repo, protocol.NodeIDFromBytes(node), file)
l.Debugf("insert; folder=%q device=%v %v", folder, protocol.DeviceIDFromBytes(device), file)
}
if file.LocalVersion == 0 {
file.LocalVersion = clock(0)
}
nk := nodeKey(repo, node, name)
nk := deviceKey(folder, device, name)
batch.Put(nk, file.MarshalXDR())
return file.LocalVersion
}
// ldbUpdateGlobal adds this node+version to the version list for the given
// file. If the node is already present in the list, the version is updated.
// ldbUpdateGlobal adds this device+version to the version list for the given
// file. If the device is already present in the list, the version is updated.
// If the file does not have an entry in the global list, it is created.
func ldbUpdateGlobal(db dbReader, batch dbWriter, repo, node, file []byte, version uint64) bool {
func ldbUpdateGlobal(db dbReader, batch dbWriter, folder, device, file []byte, version uint64) bool {
if debug {
l.Debugf("update global; repo=%q node=%v file=%q version=%d", repo, protocol.NodeIDFromBytes(node), file, version)
l.Debugf("update global; folder=%q device=%v file=%q version=%d", folder, protocol.DeviceIDFromBytes(device), file, version)
}
gk := globalKey(repo, file)
gk := globalKey(folder, file)
svl, err := db.Get(gk, nil)
if err != nil && err != leveldb.ErrNotFound {
panic(err)
@ -355,7 +355,7 @@ func ldbUpdateGlobal(db dbReader, batch dbWriter, repo, node, file []byte, versi
var fl versionList
nv := fileVersion{
node: node,
device: device,
version: version,
}
if svl != nil {
@ -365,7 +365,7 @@ func ldbUpdateGlobal(db dbReader, batch dbWriter, repo, node, file []byte, versi
}
for i := range fl.versions {
if bytes.Compare(fl.versions[i].node, node) == 0 {
if bytes.Compare(fl.versions[i].device, device) == 0 {
if fl.versions[i].version == version {
// No need to do anything
return false
@ -394,15 +394,15 @@ done:
return true
}
// ldbRemoveFromGlobal removes the node from the global version list for the
// ldbRemoveFromGlobal removes the device from the global version list for the
// given file. If the version list is empty after this, the file entry is
// removed entirely.
func ldbRemoveFromGlobal(db dbReader, batch dbWriter, repo, node, file []byte) {
func ldbRemoveFromGlobal(db dbReader, batch dbWriter, folder, device, file []byte) {
if debug {
l.Debugf("remove from global; repo=%q node=%v file=%q", repo, protocol.NodeIDFromBytes(node), file)
l.Debugf("remove from global; folder=%q device=%v file=%q", folder, protocol.DeviceIDFromBytes(device), file)
}
gk := globalKey(repo, file)
gk := globalKey(folder, file)
svl, err := db.Get(gk, nil)
if err != nil {
// We might be called to "remove" a global version that doesn't exist
@ -417,7 +417,7 @@ func ldbRemoveFromGlobal(db dbReader, batch dbWriter, repo, node, file []byte) {
}
for i := range fl.versions {
if bytes.Compare(fl.versions[i].node, node) == 0 {
if bytes.Compare(fl.versions[i].device, device) == 0 {
fl.versions = append(fl.versions[:i], fl.versions[i+1:]...)
break
}
@ -430,9 +430,9 @@ func ldbRemoveFromGlobal(db dbReader, batch dbWriter, repo, node, file []byte) {
}
}
func ldbWithHave(db *leveldb.DB, repo, node []byte, truncate bool, fn fileIterator) {
start := nodeKey(repo, node, nil) // before all repo/node files
limit := nodeKey(repo, node, []byte{0xff, 0xff, 0xff, 0xff}) // after all repo/node files
func ldbWithHave(db *leveldb.DB, folder, device []byte, truncate bool, fn fileIterator) {
start := deviceKey(folder, device, nil) // before all folder/device files
limit := deviceKey(folder, device, []byte{0xff, 0xff, 0xff, 0xff}) // after all folder/device files
snap, err := db.GetSnapshot()
if err != nil {
panic(err)
@ -452,11 +452,11 @@ func ldbWithHave(db *leveldb.DB, repo, node []byte, truncate bool, fn fileIterat
}
}
func ldbWithAllRepoTruncated(db *leveldb.DB, repo []byte, fn func(node []byte, f protocol.FileInfoTruncated) bool) {
func ldbWithAllFolderTruncated(db *leveldb.DB, folder []byte, fn func(device []byte, f protocol.FileInfoTruncated) bool) {
runtime.GC()
start := nodeKey(repo, nil, nil) // before all repo/node files
limit := nodeKey(repo, protocol.LocalNodeID[:], []byte{0xff, 0xff, 0xff, 0xff}) // after all repo/node files
start := deviceKey(folder, nil, nil) // before all folder/device files
limit := deviceKey(folder, protocol.LocalDeviceID[:], []byte{0xff, 0xff, 0xff, 0xff}) // after all folder/device files
snap, err := db.GetSnapshot()
if err != nil {
panic(err)
@ -466,20 +466,20 @@ func ldbWithAllRepoTruncated(db *leveldb.DB, repo []byte, fn func(node []byte, f
defer dbi.Release()
for dbi.Next() {
node := nodeKeyNode(dbi.Key())
device := deviceKeyDevice(dbi.Key())
var f protocol.FileInfoTruncated
err := f.UnmarshalXDR(dbi.Value())
if err != nil {
panic(err)
}
if cont := fn(node, f); !cont {
if cont := fn(device, f); !cont {
return
}
}
}
func ldbGet(db *leveldb.DB, repo, node, file []byte) protocol.FileInfo {
nk := nodeKey(repo, node, file)
func ldbGet(db *leveldb.DB, folder, device, file []byte) protocol.FileInfo {
nk := deviceKey(folder, device, file)
bs, err := db.Get(nk, nil)
if err == leveldb.ErrNotFound {
return protocol.FileInfo{}
@ -496,8 +496,8 @@ func ldbGet(db *leveldb.DB, repo, node, file []byte) protocol.FileInfo {
return f
}
func ldbGetGlobal(db *leveldb.DB, repo, file []byte) protocol.FileInfo {
k := globalKey(repo, file)
func ldbGetGlobal(db *leveldb.DB, folder, file []byte) protocol.FileInfo {
k := globalKey(folder, file)
snap, err := db.GetSnapshot()
if err != nil {
panic(err)
@ -522,7 +522,7 @@ func ldbGetGlobal(db *leveldb.DB, repo, file []byte) protocol.FileInfo {
panic("no versions?")
}
k = nodeKey(repo, vl.versions[0].node, file)
k = deviceKey(folder, vl.versions[0].device, file)
bs, err = snap.Get(k, nil)
if err != nil {
panic(err)
@ -536,11 +536,11 @@ func ldbGetGlobal(db *leveldb.DB, repo, file []byte) protocol.FileInfo {
return f
}
func ldbWithGlobal(db *leveldb.DB, repo []byte, truncate bool, fn fileIterator) {
func ldbWithGlobal(db *leveldb.DB, folder []byte, truncate bool, fn fileIterator) {
runtime.GC()
start := globalKey(repo, nil)
limit := globalKey(repo, []byte{0xff, 0xff, 0xff, 0xff})
start := globalKey(folder, nil)
limit := globalKey(folder, []byte{0xff, 0xff, 0xff, 0xff})
snap, err := db.GetSnapshot()
if err != nil {
panic(err)
@ -559,7 +559,7 @@ func ldbWithGlobal(db *leveldb.DB, repo []byte, truncate bool, fn fileIterator)
l.Debugln(dbi.Key())
panic("no versions?")
}
fk := nodeKey(repo, vl.versions[0].node, globalKeyName(dbi.Key()))
fk := deviceKey(folder, vl.versions[0].device, globalKeyName(dbi.Key()))
bs, err := snap.Get(fk, nil)
if err != nil {
panic(err)
@ -576,8 +576,8 @@ func ldbWithGlobal(db *leveldb.DB, repo []byte, truncate bool, fn fileIterator)
}
}
func ldbAvailability(db *leveldb.DB, repo, file []byte) []protocol.NodeID {
k := globalKey(repo, file)
func ldbAvailability(db *leveldb.DB, folder, file []byte) []protocol.DeviceID {
k := globalKey(folder, file)
bs, err := db.Get(k, nil)
if err == leveldb.ErrNotFound {
return nil
@ -592,23 +592,23 @@ func ldbAvailability(db *leveldb.DB, repo, file []byte) []protocol.NodeID {
panic(err)
}
var nodes []protocol.NodeID
var devices []protocol.DeviceID
for _, v := range vl.versions {
if v.version != vl.versions[0].version {
break
}
n := protocol.NodeIDFromBytes(v.node)
nodes = append(nodes, n)
n := protocol.DeviceIDFromBytes(v.device)
devices = append(devices, n)
}
return nodes
return devices
}
func ldbWithNeed(db *leveldb.DB, repo, node []byte, truncate bool, fn fileIterator) {
func ldbWithNeed(db *leveldb.DB, folder, device []byte, truncate bool, fn fileIterator) {
runtime.GC()
start := globalKey(repo, nil)
limit := globalKey(repo, []byte{0xff, 0xff, 0xff, 0xff})
start := globalKey(folder, nil)
limit := globalKey(folder, []byte{0xff, 0xff, 0xff, 0xff})
snap, err := db.GetSnapshot()
if err != nil {
panic(err)
@ -633,7 +633,7 @@ outer:
need := false // If we have a lower version of the file
var haveVersion uint64
for _, v := range vl.versions {
if bytes.Compare(v.node, node) == 0 {
if bytes.Compare(v.device, device) == 0 {
have = true
haveVersion = v.version
need = v.version < vl.versions[0].version
@ -650,7 +650,7 @@ outer:
// We haven't found a valid copy of the file with the needed version.
continue outer
}
fk := nodeKey(repo, vl.versions[i].node, name)
fk := deviceKey(folder, vl.versions[i].device, name)
bs, err := snap.Get(fk, nil)
if err != nil {
panic(err)
@ -672,7 +672,7 @@ outer:
}
if debug {
l.Debugf("need repo=%q node=%v name=%q need=%v have=%v haveV=%d globalV=%d", repo, protocol.NodeIDFromBytes(node), name, need, have, haveVersion, vl.versions[0].version)
l.Debugf("need folder=%q device=%v name=%q need=%v have=%v haveV=%d globalV=%d", folder, protocol.DeviceIDFromBytes(device), name, need, have, haveVersion, vl.versions[0].version)
}
if cont := fn(gf); !cont {
@ -686,7 +686,7 @@ outer:
}
}
func ldbListRepos(db *leveldb.DB) []string {
func ldbListFolders(db *leveldb.DB) []string {
runtime.GC()
start := []byte{keyTypeGlobal}
@ -699,24 +699,24 @@ func ldbListRepos(db *leveldb.DB) []string {
dbi := snap.NewIterator(&util.Range{Start: start, Limit: limit}, nil)
defer dbi.Release()
repoExists := make(map[string]bool)
folderExists := make(map[string]bool)
for dbi.Next() {
repo := string(globalKeyRepo(dbi.Key()))
if !repoExists[repo] {
repoExists[repo] = true
folder := string(globalKeyFolder(dbi.Key()))
if !folderExists[folder] {
folderExists[folder] = true
}
}
repos := make([]string, 0, len(repoExists))
for k := range repoExists {
repos = append(repos, k)
folders := make([]string, 0, len(folderExists))
for k := range folderExists {
folders = append(folders, k)
}
sort.Strings(repos)
return repos
sort.Strings(folders)
return folders
}
func ldbDropRepo(db *leveldb.DB, repo []byte) {
func ldbDropFolder(db *leveldb.DB, folder []byte) {
runtime.GC()
snap, err := db.GetSnapshot()
@ -725,25 +725,25 @@ func ldbDropRepo(db *leveldb.DB, repo []byte) {
}
defer snap.Release()
// Remove all items related to the given repo from the node->file bucket
start := []byte{keyTypeNode}
limit := []byte{keyTypeNode + 1}
// Remove all items related to the given folder from the device->file bucket
start := []byte{keyTypeDevice}
limit := []byte{keyTypeDevice + 1}
dbi := snap.NewIterator(&util.Range{Start: start, Limit: limit}, nil)
for dbi.Next() {
itemRepo := nodeKeyRepo(dbi.Key())
if bytes.Compare(repo, itemRepo) == 0 {
itemFolder := deviceKeyFolder(dbi.Key())
if bytes.Compare(folder, itemFolder) == 0 {
db.Delete(dbi.Key(), nil)
}
}
dbi.Release()
// Remove all items related to the given repo from the global bucket
// Remove all items related to the given folder from the global bucket
start = []byte{keyTypeGlobal}
limit = []byte{keyTypeGlobal + 1}
dbi = snap.NewIterator(&util.Range{Start: start, Limit: limit}, nil)
for dbi.Next() {
itemRepo := globalKeyRepo(dbi.Key())
if bytes.Compare(repo, itemRepo) == 0 {
itemFolder := globalKeyFolder(dbi.Key())
if bytes.Compare(folder, itemFolder) == 0 {
db.Delete(dbi.Key(), nil)
}
}

View File

@ -22,17 +22,17 @@ fileVersion Structure:
+ version (64 bits) +
| |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Length of node |
| Length of device |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
/ /
\ node (variable length) \
\ device (variable length) \
/ /
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
struct fileVersion {
unsigned hyper version;
opaque node<>;
opaque device<>;
}
*/
@ -55,7 +55,7 @@ func (o fileVersion) AppendXDR(bs []byte) []byte {
func (o fileVersion) encodeXDR(xw *xdr.Writer) (int, error) {
xw.WriteUint64(o.version)
xw.WriteBytes(o.node)
xw.WriteBytes(o.device)
return xw.Tot(), xw.Error()
}
@ -72,7 +72,7 @@ func (o *fileVersion) UnmarshalXDR(bs []byte) error {
func (o *fileVersion) decodeXDR(xr *xdr.Reader) error {
o.version = xr.ReadUint64()
o.node = xr.ReadBytes()
o.device = xr.ReadBytes()
return xr.Error()
}

View File

@ -27,147 +27,147 @@ type fileRecord struct {
type bitset uint64
type Set struct {
localVersion map[protocol.NodeID]uint64
localVersion map[protocol.DeviceID]uint64
mutex sync.Mutex
repo string
folder string
db *leveldb.DB
}
func NewSet(repo string, db *leveldb.DB) *Set {
func NewSet(folder string, db *leveldb.DB) *Set {
var s = Set{
localVersion: make(map[protocol.NodeID]uint64),
repo: repo,
localVersion: make(map[protocol.DeviceID]uint64),
folder: folder,
db: db,
}
var nodeID protocol.NodeID
ldbWithAllRepoTruncated(db, []byte(repo), func(node []byte, f protocol.FileInfoTruncated) bool {
copy(nodeID[:], node)
if f.LocalVersion > s.localVersion[nodeID] {
s.localVersion[nodeID] = f.LocalVersion
var deviceID protocol.DeviceID
ldbWithAllFolderTruncated(db, []byte(folder), func(device []byte, f protocol.FileInfoTruncated) bool {
copy(deviceID[:], device)
if f.LocalVersion > s.localVersion[deviceID] {
s.localVersion[deviceID] = f.LocalVersion
}
lamport.Default.Tick(f.Version)
return true
})
if debug {
l.Debugf("loaded localVersion for %q: %#v", repo, s.localVersion)
l.Debugf("loaded localVersion for %q: %#v", folder, s.localVersion)
}
clock(s.localVersion[protocol.LocalNodeID])
clock(s.localVersion[protocol.LocalDeviceID])
return &s
}
func (s *Set) Replace(node protocol.NodeID, fs []protocol.FileInfo) {
func (s *Set) Replace(device protocol.DeviceID, fs []protocol.FileInfo) {
if debug {
l.Debugf("%s Replace(%v, [%d])", s.repo, node, len(fs))
l.Debugf("%s Replace(%v, [%d])", s.folder, device, len(fs))
}
normalizeFilenames(fs)
s.mutex.Lock()
defer s.mutex.Unlock()
s.localVersion[node] = ldbReplace(s.db, []byte(s.repo), node[:], fs)
s.localVersion[device] = ldbReplace(s.db, []byte(s.folder), device[:], fs)
if len(fs) == 0 {
// Reset the local version if all files were removed.
s.localVersion[node] = 0
s.localVersion[device] = 0
}
}
func (s *Set) ReplaceWithDelete(node protocol.NodeID, fs []protocol.FileInfo) {
func (s *Set) ReplaceWithDelete(device protocol.DeviceID, fs []protocol.FileInfo) {
if debug {
l.Debugf("%s ReplaceWithDelete(%v, [%d])", s.repo, node, len(fs))
l.Debugf("%s ReplaceWithDelete(%v, [%d])", s.folder, device, len(fs))
}
normalizeFilenames(fs)
s.mutex.Lock()
defer s.mutex.Unlock()
if lv := ldbReplaceWithDelete(s.db, []byte(s.repo), node[:], fs); lv > s.localVersion[node] {
s.localVersion[node] = lv
if lv := ldbReplaceWithDelete(s.db, []byte(s.folder), device[:], fs); lv > s.localVersion[device] {
s.localVersion[device] = lv
}
}
func (s *Set) Update(node protocol.NodeID, fs []protocol.FileInfo) {
func (s *Set) Update(device protocol.DeviceID, fs []protocol.FileInfo) {
if debug {
l.Debugf("%s Update(%v, [%d])", s.repo, node, len(fs))
l.Debugf("%s Update(%v, [%d])", s.folder, device, len(fs))
}
normalizeFilenames(fs)
s.mutex.Lock()
defer s.mutex.Unlock()
if lv := ldbUpdate(s.db, []byte(s.repo), node[:], fs); lv > s.localVersion[node] {
s.localVersion[node] = lv
if lv := ldbUpdate(s.db, []byte(s.folder), device[:], fs); lv > s.localVersion[device] {
s.localVersion[device] = lv
}
}
func (s *Set) WithNeed(node protocol.NodeID, fn fileIterator) {
func (s *Set) WithNeed(device protocol.DeviceID, fn fileIterator) {
if debug {
l.Debugf("%s WithNeed(%v)", s.repo, node)
l.Debugf("%s WithNeed(%v)", s.folder, device)
}
ldbWithNeed(s.db, []byte(s.repo), node[:], false, nativeFileIterator(fn))
ldbWithNeed(s.db, []byte(s.folder), device[:], false, nativeFileIterator(fn))
}
func (s *Set) WithNeedTruncated(node protocol.NodeID, fn fileIterator) {
func (s *Set) WithNeedTruncated(device protocol.DeviceID, fn fileIterator) {
if debug {
l.Debugf("%s WithNeedTruncated(%v)", s.repo, node)
l.Debugf("%s WithNeedTruncated(%v)", s.folder, device)
}
ldbWithNeed(s.db, []byte(s.repo), node[:], true, nativeFileIterator(fn))
ldbWithNeed(s.db, []byte(s.folder), device[:], true, nativeFileIterator(fn))
}
func (s *Set) WithHave(node protocol.NodeID, fn fileIterator) {
func (s *Set) WithHave(device protocol.DeviceID, fn fileIterator) {
if debug {
l.Debugf("%s WithHave(%v)", s.repo, node)
l.Debugf("%s WithHave(%v)", s.folder, device)
}
ldbWithHave(s.db, []byte(s.repo), node[:], false, nativeFileIterator(fn))
ldbWithHave(s.db, []byte(s.folder), device[:], false, nativeFileIterator(fn))
}
func (s *Set) WithHaveTruncated(node protocol.NodeID, fn fileIterator) {
func (s *Set) WithHaveTruncated(device protocol.DeviceID, fn fileIterator) {
if debug {
l.Debugf("%s WithHaveTruncated(%v)", s.repo, node)
l.Debugf("%s WithHaveTruncated(%v)", s.folder, device)
}
ldbWithHave(s.db, []byte(s.repo), node[:], true, nativeFileIterator(fn))
ldbWithHave(s.db, []byte(s.folder), device[:], true, nativeFileIterator(fn))
}
func (s *Set) WithGlobal(fn fileIterator) {
if debug {
l.Debugf("%s WithGlobal()", s.repo)
l.Debugf("%s WithGlobal()", s.folder)
}
ldbWithGlobal(s.db, []byte(s.repo), false, nativeFileIterator(fn))
ldbWithGlobal(s.db, []byte(s.folder), false, nativeFileIterator(fn))
}
func (s *Set) WithGlobalTruncated(fn fileIterator) {
if debug {
l.Debugf("%s WithGlobalTruncated()", s.repo)
l.Debugf("%s WithGlobalTruncated()", s.folder)
}
ldbWithGlobal(s.db, []byte(s.repo), true, nativeFileIterator(fn))
ldbWithGlobal(s.db, []byte(s.folder), true, nativeFileIterator(fn))
}
func (s *Set) Get(node protocol.NodeID, file string) protocol.FileInfo {
f := ldbGet(s.db, []byte(s.repo), node[:], []byte(normalizedFilename(file)))
func (s *Set) Get(device protocol.DeviceID, file string) protocol.FileInfo {
f := ldbGet(s.db, []byte(s.folder), device[:], []byte(normalizedFilename(file)))
f.Name = nativeFilename(f.Name)
return f
}
func (s *Set) GetGlobal(file string) protocol.FileInfo {
f := ldbGetGlobal(s.db, []byte(s.repo), []byte(normalizedFilename(file)))
f := ldbGetGlobal(s.db, []byte(s.folder), []byte(normalizedFilename(file)))
f.Name = nativeFilename(f.Name)
return f
}
func (s *Set) Availability(file string) []protocol.NodeID {
return ldbAvailability(s.db, []byte(s.repo), []byte(normalizedFilename(file)))
func (s *Set) Availability(file string) []protocol.DeviceID {
return ldbAvailability(s.db, []byte(s.folder), []byte(normalizedFilename(file)))
}
func (s *Set) LocalVersion(node protocol.NodeID) uint64 {
func (s *Set) LocalVersion(device protocol.DeviceID) uint64 {
s.mutex.Lock()
defer s.mutex.Unlock()
return s.localVersion[node]
return s.localVersion[device]
}
// ListRepos returns the repository IDs seen in the database.
func ListRepos(db *leveldb.DB) []string {
return ldbListRepos(db)
// ListFolders returns the folder IDs seen in the database.
func ListFolders(db *leveldb.DB) []string {
return ldbListFolders(db)
}
// DropRepo clears out all information related to the given repo from the
// DropFolder clears out all information related to the given folder from the
// database.
func DropRepo(db *leveldb.DB, repo string) {
ldbDropRepo(db, []byte(repo))
func DropFolder(db *leveldb.DB, folder string) {
ldbDropFolder(db, []byte(folder))
}
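Taken together, the renamed Set API reads roughly as follows from a caller's point of view. This is an illustrative sketch, not part of the commit, and assumes the internal/files and internal/protocol import paths together with the goleveldb packages already used by the tests:

package main

import (
	"fmt"

	"github.com/syncthing/syncthing/internal/files"
	"github.com/syncthing/syncthing/internal/protocol"
	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/storage"
)

func main() {
	db, _ := leveldb.Open(storage.NewMemStorage(), nil)

	s := files.NewSet("default", db) // one Set per folder

	// Record what the local device has.
	s.ReplaceWithDelete(protocol.LocalDeviceID, []protocol.FileInfo{
		{Name: "a", Version: 1000},
	})

	// Walk what the local device still needs compared to the global view.
	s.WithNeed(protocol.LocalDeviceID, func(fi protocol.FileIntf) bool {
		fmt.Println("need:", fi.(protocol.FileInfo).Name)
		return true
	})

	fmt.Println("folders known to this database:", files.ListFolders(db))
}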
func normalizeFilenames(fs []protocol.FileInfo) {

View File

@ -18,11 +18,11 @@ import (
"github.com/syndtr/goleveldb/leveldb/storage"
)
var remoteNode0, remoteNode1 protocol.NodeID
var remoteDevice0, remoteDevice1 protocol.DeviceID
func init() {
remoteNode0, _ = protocol.NodeIDFromString("AIR6LPZ-7K4PTTV-UXQSMUU-CPQ5YWH-OEDFIIQ-JUG777G-2YQXXR5-YD6AWQR")
remoteNode1, _ = protocol.NodeIDFromString("I6KAH76-66SLLLB-5PFXSOA-UFJCDZC-YAOMLEK-CP2GB32-BV5RQST-3PSROAU")
remoteDevice0, _ = protocol.DeviceIDFromString("AIR6LPZ-7K4PTTV-UXQSMUU-CPQ5YWH-OEDFIIQ-JUG777G-2YQXXR5-YD6AWQR")
remoteDevice1, _ = protocol.DeviceIDFromString("I6KAH76-66SLLLB-5PFXSOA-UFJCDZC-YAOMLEK-CP2GB32-BV5RQST-3PSROAU")
}
func genBlocks(n int) []protocol.BlockInfo {
@ -48,7 +48,7 @@ func globalList(s *files.Set) []protocol.FileInfo {
return fs
}
func haveList(s *files.Set, n protocol.NodeID) []protocol.FileInfo {
func haveList(s *files.Set, n protocol.DeviceID) []protocol.FileInfo {
var fs []protocol.FileInfo
s.WithHave(n, func(fi protocol.FileIntf) bool {
f := fi.(protocol.FileInfo)
@ -58,7 +58,7 @@ func haveList(s *files.Set, n protocol.NodeID) []protocol.FileInfo {
return fs
}
func needList(s *files.Set, n protocol.NodeID) []protocol.FileInfo {
func needList(s *files.Set, n protocol.DeviceID) []protocol.FileInfo {
var fs []protocol.FileInfo
s.WithNeed(n, func(fi protocol.FileIntf) bool {
f := fi.(protocol.FileInfo)
@ -158,10 +158,10 @@ func TestGlobalSet(t *testing.T) {
local0[3],
}
m.ReplaceWithDelete(protocol.LocalNodeID, local0)
m.ReplaceWithDelete(protocol.LocalNodeID, local1)
m.Replace(remoteNode0, remote0)
m.Update(remoteNode0, remote1)
m.ReplaceWithDelete(protocol.LocalDeviceID, local0)
m.ReplaceWithDelete(protocol.LocalDeviceID, local1)
m.Replace(remoteDevice0, remote0)
m.Update(remoteDevice0, remote1)
g := fileList(globalList(m))
sort.Sort(g)
@ -170,40 +170,40 @@ func TestGlobalSet(t *testing.T) {
t.Errorf("Global incorrect;\n A: %v !=\n E: %v", g, expectedGlobal)
}
h := fileList(haveList(m, protocol.LocalNodeID))
h := fileList(haveList(m, protocol.LocalDeviceID))
sort.Sort(h)
if fmt.Sprint(h) != fmt.Sprint(localTot) {
t.Errorf("Have incorrect;\n A: %v !=\n E: %v", h, localTot)
}
h = fileList(haveList(m, remoteNode0))
h = fileList(haveList(m, remoteDevice0))
sort.Sort(h)
if fmt.Sprint(h) != fmt.Sprint(remoteTot) {
t.Errorf("Have incorrect;\n A: %v !=\n E: %v", h, remoteTot)
}
n := fileList(needList(m, protocol.LocalNodeID))
n := fileList(needList(m, protocol.LocalDeviceID))
sort.Sort(n)
if fmt.Sprint(n) != fmt.Sprint(expectedLocalNeed) {
t.Errorf("Need incorrect;\n A: %v !=\n E: %v", n, expectedLocalNeed)
}
n = fileList(needList(m, remoteNode0))
n = fileList(needList(m, remoteDevice0))
sort.Sort(n)
if fmt.Sprint(n) != fmt.Sprint(expectedRemoteNeed) {
t.Errorf("Need incorrect;\n A: %v !=\n E: %v", n, expectedRemoteNeed)
}
f := m.Get(protocol.LocalNodeID, "b")
f := m.Get(protocol.LocalDeviceID, "b")
if fmt.Sprint(f) != fmt.Sprint(localTot[1]) {
t.Errorf("Get incorrect;\n A: %v !=\n E: %v", f, localTot[1])
}
f = m.Get(remoteNode0, "b")
f = m.Get(remoteDevice0, "b")
if fmt.Sprint(f) != fmt.Sprint(remote1[0]) {
t.Errorf("Get incorrect;\n A: %v !=\n E: %v", f, remote1[0])
}
@ -213,7 +213,7 @@ func TestGlobalSet(t *testing.T) {
t.Errorf("GetGlobal incorrect;\n A: %v !=\n E: %v", f, remote1[0])
}
f = m.Get(protocol.LocalNodeID, "zz")
f = m.Get(protocol.LocalDeviceID, "zz")
if f.Name != "" {
t.Errorf("Get incorrect;\n A: %v !=\n E: %v", f, protocol.FileInfo{})
}
@ -223,18 +223,18 @@ func TestGlobalSet(t *testing.T) {
t.Errorf("GetGlobal incorrect;\n A: %v !=\n E: %v", f, protocol.FileInfo{})
}
av := []protocol.NodeID{protocol.LocalNodeID, remoteNode0}
av := []protocol.DeviceID{protocol.LocalDeviceID, remoteDevice0}
a := m.Availability("a")
if !(len(a) == 2 && (a[0] == av[0] && a[1] == av[1] || a[0] == av[1] && a[1] == av[0])) {
t.Errorf("Availability incorrect;\n A: %v !=\n E: %v", a, av)
}
a = m.Availability("b")
if len(a) != 1 || a[0] != remoteNode0 {
t.Errorf("Availability incorrect;\n A: %v !=\n E: %v", a, remoteNode0)
if len(a) != 1 || a[0] != remoteDevice0 {
t.Errorf("Availability incorrect;\n A: %v !=\n E: %v", a, remoteDevice0)
}
a = m.Availability("d")
if len(a) != 1 || a[0] != protocol.LocalNodeID {
t.Errorf("Availability incorrect;\n A: %v !=\n E: %v", a, protocol.LocalNodeID)
if len(a) != 1 || a[0] != protocol.LocalDeviceID {
t.Errorf("Availability incorrect;\n A: %v !=\n E: %v", a, protocol.LocalDeviceID)
}
}
@ -268,11 +268,11 @@ func TestNeedWithInvalid(t *testing.T) {
protocol.FileInfo{Name: "d", Version: 1003, Blocks: genBlocks(7)},
}
s.ReplaceWithDelete(protocol.LocalNodeID, localHave)
s.Replace(remoteNode0, remote0Have)
s.Replace(remoteNode1, remote1Have)
s.ReplaceWithDelete(protocol.LocalDeviceID, localHave)
s.Replace(remoteDevice0, remote0Have)
s.Replace(remoteDevice1, remote1Have)
need := fileList(needList(s, protocol.LocalNodeID))
need := fileList(needList(s, protocol.LocalDeviceID))
sort.Sort(need)
if fmt.Sprint(need) != fmt.Sprint(expectedNeed) {
@ -297,9 +297,9 @@ func TestUpdateToInvalid(t *testing.T) {
protocol.FileInfo{Name: "d", Version: 1003, Blocks: genBlocks(7)},
}
s.ReplaceWithDelete(protocol.LocalNodeID, localHave)
s.ReplaceWithDelete(protocol.LocalDeviceID, localHave)
have := fileList(haveList(s, protocol.LocalNodeID))
have := fileList(haveList(s, protocol.LocalDeviceID))
sort.Sort(have)
if fmt.Sprint(have) != fmt.Sprint(localHave) {
@ -307,9 +307,9 @@ func TestUpdateToInvalid(t *testing.T) {
}
localHave[1] = protocol.FileInfo{Name: "b", Version: 1001, Flags: protocol.FlagInvalid}
s.Update(protocol.LocalNodeID, localHave[1:2])
s.Update(protocol.LocalDeviceID, localHave[1:2])
have = fileList(haveList(s, protocol.LocalNodeID))
have = fileList(haveList(s, protocol.LocalDeviceID))
sort.Sort(have)
if fmt.Sprint(have) != fmt.Sprint(localHave) {
@ -340,18 +340,18 @@ func TestInvalidAvailability(t *testing.T) {
protocol.FileInfo{Name: "none", Version: 1004, Blocks: genBlocks(5), Flags: protocol.FlagInvalid},
}
s.Replace(remoteNode0, remote0Have)
s.Replace(remoteNode1, remote1Have)
s.Replace(remoteDevice0, remote0Have)
s.Replace(remoteDevice1, remote1Have)
if av := s.Availability("both"); len(av) != 2 {
t.Error("Incorrect availability for 'both':", av)
}
if av := s.Availability("r0only"); len(av) != 1 || av[0] != remoteNode0 {
if av := s.Availability("r0only"); len(av) != 1 || av[0] != remoteDevice0 {
t.Error("Incorrect availability for 'r0only':", av)
}
if av := s.Availability("r1only"); len(av) != 1 || av[0] != remoteNode1 {
if av := s.Availability("r1only"); len(av) != 1 || av[0] != remoteDevice1 {
t.Error("Incorrect availability for 'r1only':", av)
}
@ -376,22 +376,22 @@ func TestLocalDeleted(t *testing.T) {
protocol.FileInfo{Name: "z", Version: 1000, Flags: protocol.FlagDirectory},
}
m.ReplaceWithDelete(protocol.LocalNodeID, local1)
m.ReplaceWithDelete(protocol.LocalDeviceID, local1)
m.ReplaceWithDelete(protocol.LocalNodeID, []protocol.FileInfo{
m.ReplaceWithDelete(protocol.LocalDeviceID, []protocol.FileInfo{
local1[0],
// [1] removed
local1[2],
local1[3],
local1[4],
})
m.ReplaceWithDelete(protocol.LocalNodeID, []protocol.FileInfo{
m.ReplaceWithDelete(protocol.LocalDeviceID, []protocol.FileInfo{
local1[0],
local1[2],
// [3] removed
local1[4],
})
m.ReplaceWithDelete(protocol.LocalNodeID, []protocol.FileInfo{
m.ReplaceWithDelete(protocol.LocalDeviceID, []protocol.FileInfo{
local1[0],
local1[2],
// [4] removed
@ -413,7 +413,7 @@ func TestLocalDeleted(t *testing.T) {
t.Errorf("Global incorrect;\n A: %v !=\n E: %v", g, expectedGlobal1)
}
m.ReplaceWithDelete(protocol.LocalNodeID, []protocol.FileInfo{
m.ReplaceWithDelete(protocol.LocalDeviceID, []protocol.FileInfo{
local1[0],
// [2] removed
})
@ -449,7 +449,7 @@ func Benchmark10kReplace(b *testing.B) {
b.ResetTimer()
for i := 0; i < b.N; i++ {
m := files.NewSet("test", db)
m.ReplaceWithDelete(protocol.LocalNodeID, local)
m.ReplaceWithDelete(protocol.LocalDeviceID, local)
}
}
@ -465,14 +465,14 @@ func Benchmark10kUpdateChg(b *testing.B) {
}
m := files.NewSet("test", db)
m.Replace(remoteNode0, remote)
m.Replace(remoteDevice0, remote)
var local []protocol.FileInfo
for i := 0; i < 10000; i++ {
local = append(local, protocol.FileInfo{Name: fmt.Sprintf("file%d", i), Version: 1000})
}
m.ReplaceWithDelete(protocol.LocalNodeID, local)
m.ReplaceWithDelete(protocol.LocalDeviceID, local)
b.ResetTimer()
for i := 0; i < b.N; i++ {
@ -481,7 +481,7 @@ func Benchmark10kUpdateChg(b *testing.B) {
local[j].Version++
}
b.StartTimer()
m.Update(protocol.LocalNodeID, local)
m.Update(protocol.LocalDeviceID, local)
}
}
@ -496,18 +496,18 @@ func Benchmark10kUpdateSme(b *testing.B) {
b.Fatal(err)
}
m := files.NewSet("test", db)
m.Replace(remoteNode0, remote)
m.Replace(remoteDevice0, remote)
var local []protocol.FileInfo
for i := 0; i < 10000; i++ {
local = append(local, protocol.FileInfo{Name: fmt.Sprintf("file%d", i), Version: 1000})
}
m.ReplaceWithDelete(protocol.LocalNodeID, local)
m.ReplaceWithDelete(protocol.LocalDeviceID, local)
b.ResetTimer()
for i := 0; i < b.N; i++ {
m.Update(protocol.LocalNodeID, local)
m.Update(protocol.LocalDeviceID, local)
}
}
@ -523,7 +523,7 @@ func Benchmark10kNeed2k(b *testing.B) {
}
m := files.NewSet("test", db)
m.Replace(remoteNode0, remote)
m.Replace(remoteDevice0, remote)
var local []protocol.FileInfo
for i := 0; i < 8000; i++ {
@ -533,11 +533,11 @@ func Benchmark10kNeed2k(b *testing.B) {
local = append(local, protocol.FileInfo{Name: fmt.Sprintf("file%d", i), Version: 980})
}
m.ReplaceWithDelete(protocol.LocalNodeID, local)
m.ReplaceWithDelete(protocol.LocalDeviceID, local)
b.ResetTimer()
for i := 0; i < b.N; i++ {
fs := needList(m, protocol.LocalNodeID)
fs := needList(m, protocol.LocalDeviceID)
if l := len(fs); l != 2000 {
b.Errorf("wrong length %d != 2k", l)
}
@ -556,7 +556,7 @@ func Benchmark10kHaveFullList(b *testing.B) {
}
m := files.NewSet("test", db)
m.Replace(remoteNode0, remote)
m.Replace(remoteDevice0, remote)
var local []protocol.FileInfo
for i := 0; i < 2000; i++ {
@ -566,11 +566,11 @@ func Benchmark10kHaveFullList(b *testing.B) {
local = append(local, protocol.FileInfo{Name: fmt.Sprintf("file%d", i), Version: 980})
}
m.ReplaceWithDelete(protocol.LocalNodeID, local)
m.ReplaceWithDelete(protocol.LocalDeviceID, local)
b.ResetTimer()
for i := 0; i < b.N; i++ {
fs := haveList(m, protocol.LocalNodeID)
fs := haveList(m, protocol.LocalDeviceID)
if l := len(fs); l != 10000 {
b.Errorf("wrong length %d != 10k", l)
}
@ -589,7 +589,7 @@ func Benchmark10kGlobal(b *testing.B) {
}
m := files.NewSet("test", db)
m.Replace(remoteNode0, remote)
m.Replace(remoteDevice0, remote)
var local []protocol.FileInfo
for i := 0; i < 2000; i++ {
@ -599,7 +599,7 @@ func Benchmark10kGlobal(b *testing.B) {
local = append(local, protocol.FileInfo{Name: fmt.Sprintf("file%d", i), Version: 980})
}
m.ReplaceWithDelete(protocol.LocalNodeID, local)
m.ReplaceWithDelete(protocol.LocalDeviceID, local)
b.ResetTimer()
for i := 0; i < b.N; i++ {
@ -632,7 +632,7 @@ func TestGlobalReset(t *testing.T) {
protocol.FileInfo{Name: "e", Version: 1000},
}
m.ReplaceWithDelete(protocol.LocalNodeID, local)
m.ReplaceWithDelete(protocol.LocalDeviceID, local)
g := globalList(m)
sort.Sort(fileList(g))
@ -640,8 +640,8 @@ func TestGlobalReset(t *testing.T) {
t.Errorf("Global incorrect;\n%v !=\n%v", g, local)
}
m.Replace(remoteNode0, remote)
m.Replace(remoteNode0, nil)
m.Replace(remoteDevice0, remote)
m.Replace(remoteDevice0, nil)
g = globalList(m)
sort.Sort(fileList(g))
@ -679,10 +679,10 @@ func TestNeed(t *testing.T) {
protocol.FileInfo{Name: "e", Version: 1000},
}
m.ReplaceWithDelete(protocol.LocalNodeID, local)
m.Replace(remoteNode0, remote)
m.ReplaceWithDelete(protocol.LocalDeviceID, local)
m.Replace(remoteDevice0, remote)
need := needList(m, protocol.LocalNodeID)
need := needList(m, protocol.LocalDeviceID)
sort.Sort(fileList(need))
sort.Sort(fileList(shouldNeed))
@ -715,23 +715,23 @@ func TestLocalVersion(t *testing.T) {
protocol.FileInfo{Name: "e", Version: 1000},
}
m.ReplaceWithDelete(protocol.LocalNodeID, local1)
c0 := m.LocalVersion(protocol.LocalNodeID)
m.ReplaceWithDelete(protocol.LocalDeviceID, local1)
c0 := m.LocalVersion(protocol.LocalDeviceID)
m.ReplaceWithDelete(protocol.LocalNodeID, local2)
c1 := m.LocalVersion(protocol.LocalNodeID)
m.ReplaceWithDelete(protocol.LocalDeviceID, local2)
c1 := m.LocalVersion(protocol.LocalDeviceID)
if !(c1 > c0) {
t.Fatal("Local version number should have incremented")
}
m.ReplaceWithDelete(protocol.LocalNodeID, local2)
c2 := m.LocalVersion(protocol.LocalNodeID)
m.ReplaceWithDelete(protocol.LocalDeviceID, local2)
c2 := m.LocalVersion(protocol.LocalDeviceID)
if c2 != c1 {
t.Fatal("Local version number should be unchanged")
}
}
func TestListDropRepo(t *testing.T) {
func TestListDropFolder(t *testing.T) {
db, err := leveldb.Open(storage.NewMemStorage(), nil)
if err != nil {
t.Fatal(err)
@ -743,7 +743,7 @@ func TestListDropRepo(t *testing.T) {
protocol.FileInfo{Name: "b", Version: 1000},
protocol.FileInfo{Name: "c", Version: 1000},
}
s0.Replace(protocol.LocalNodeID, local1)
s0.Replace(protocol.LocalDeviceID, local1)
s1 := files.NewSet("test1", db)
local2 := []protocol.FileInfo{
@ -751,13 +751,13 @@ func TestListDropRepo(t *testing.T) {
protocol.FileInfo{Name: "e", Version: 1002},
protocol.FileInfo{Name: "f", Version: 1002},
}
s1.Replace(remoteNode0, local2)
s1.Replace(remoteDevice0, local2)
// Check that we have both repos and their data is in the global list
// Check that we have both folders and their data is in the global list
expectedRepoList := []string{"test0", "test1"}
if actualRepoList := files.ListRepos(db); !reflect.DeepEqual(actualRepoList, expectedRepoList) {
t.Fatalf("RepoList mismatch\nE: %v\nA: %v", expectedRepoList, actualRepoList)
expectedFolderList := []string{"test0", "test1"}
if actualFolderList := files.ListFolders(db); !reflect.DeepEqual(actualFolderList, expectedFolderList) {
t.Fatalf("FolderList mismatch\nE: %v\nA: %v", expectedFolderList, actualFolderList)
}
if l := len(globalList(s0)); l != 3 {
t.Errorf("Incorrect global length %d != 3 for s0", l)
@ -768,11 +768,11 @@ func TestListDropRepo(t *testing.T) {
// Drop one of them and check that it's gone.
files.DropRepo(db, "test1")
files.DropFolder(db, "test1")
expectedRepoList = []string{"test0"}
if actualRepoList := files.ListRepos(db); !reflect.DeepEqual(actualRepoList, expectedRepoList) {
t.Fatalf("RepoList mismatch\nE: %v\nA: %v", expectedRepoList, actualRepoList)
expectedFolderList = []string{"test0"}
if actualFolderList := files.ListFolders(db); !reflect.DeepEqual(actualFolderList, expectedFolderList) {
t.Fatalf("FolderList mismatch\nE: %v\nA: %v", expectedFolderList, actualFolderList)
}
if l := len(globalList(s0)); l != 3 {
t.Errorf("Incorrect global length %d != 3 for s0", l)
@ -795,14 +795,14 @@ func TestGlobalNeedWithInvalid(t *testing.T) {
protocol.FileInfo{Name: "b", Version: 1002, Flags: protocol.FlagInvalid},
protocol.FileInfo{Name: "c", Version: 1002, Blocks: genBlocks(4)},
}
s.Replace(remoteNode0, rem0)
s.Replace(remoteDevice0, rem0)
rem1 := fileList{
protocol.FileInfo{Name: "a", Version: 1002, Blocks: genBlocks(4)},
protocol.FileInfo{Name: "b", Version: 1002, Blocks: genBlocks(4)},
protocol.FileInfo{Name: "c", Version: 1002, Flags: protocol.FlagInvalid},
}
s.Replace(remoteNode1, rem1)
s.Replace(remoteDevice1, rem1)
total := fileList{
// There's a valid copy of each file, so it should be merged
@ -811,7 +811,7 @@ func TestGlobalNeedWithInvalid(t *testing.T) {
protocol.FileInfo{Name: "c", Version: 1002, Blocks: genBlocks(4)},
}
need := fileList(needList(s, protocol.LocalNodeID))
need := fileList(needList(s, protocol.LocalDeviceID))
if fmt.Sprint(need) != fmt.Sprint(total) {
t.Errorf("Need incorrect;\n A: %v !=\n E: %v", need, total)
}
@ -840,7 +840,7 @@ func TestLongPath(t *testing.T) {
protocol.FileInfo{Name: string(name), Version: 1000},
}
s.ReplaceWithDelete(protocol.LocalNodeID, local)
s.ReplaceWithDelete(protocol.LocalDeviceID, local)
gf := globalList(s)
if l := len(gf); l != 1 {
@ -877,8 +877,8 @@ func TestStressGlobalVersion(t *testing.T) {
m := files.NewSet("test", db)
done := make(chan struct{})
go stressWriter(m, remoteNode0, set1, nil, done)
go stressWriter(m, protocol.LocalNodeID, set2, nil, done)
go stressWriter(m, remoteDevice0, set1, nil, done)
go stressWriter(m, protocol.LocalDeviceID, set2, nil, done)
t0 := time.Now()
for time.Since(t0) < dur {
@ -891,7 +891,7 @@ func TestStressGlobalVersion(t *testing.T) {
close(done)
}
func stressWriter(s *files.Set, id protocol.NodeID, set1, set2 []protocol.FileInfo, done chan struct{}) {
func stressWriter(s *files.Set, id protocol.DeviceID, set1, set2 []protocol.FileInfo, done chan struct{}) {
one := true
i := 0
for {

View File

@ -0,0 +1,51 @@
// Copyright (C) 2014 Jakob Borg and Contributors (see the CONTRIBUTORS file).
// All rights reserved. Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
package model
import (
"sync"
"github.com/syncthing/syncthing/internal/protocol"
)
// deviceActivity tracks the number of outstanding requests per device and can
// answer which device is least busy. It is safe for use from multiple
// goroutines.
type deviceActivity struct {
act map[protocol.DeviceID]int
mut sync.Mutex
}
func newDeviceActivity() *deviceActivity {
return &deviceActivity{
act: make(map[protocol.DeviceID]int),
}
}
func (m deviceActivity) leastBusy(availability []protocol.DeviceID) protocol.DeviceID {
m.mut.Lock()
var low int = 2<<30 - 1
var selected protocol.DeviceID
for _, device := range availability {
if usage := m.act[device]; usage < low {
low = usage
selected = device
}
}
m.mut.Unlock()
return selected
}
func (m deviceActivity) using(device protocol.DeviceID) {
m.mut.Lock()
defer m.mut.Unlock()
m.act[device]++
}
func (m deviceActivity) done(device protocol.DeviceID) {
m.mut.Lock()
defer m.mut.Unlock()
m.act[device]--
}
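
A brief usage sketch of the type above, mirroring how a puller-style caller is expected to use it. The function below is hypothetical (it assumes it lives in package model next to this code), and the actual block request is elided:

// Hypothetical caller: pick the least busy device, mark it busy while the
// request is outstanding, then release it. leastBusy returns the zero
// DeviceID when the availability list is empty.
func pullFromLeastBusy(act *deviceActivity, available []protocol.DeviceID) {
	selected := act.leastBusy(available)
	if selected == (protocol.DeviceID{}) {
		return // no device to pull from
	}
	act.using(selected)
	defer act.done(selected)
	// ... issue the actual block request to selected here ...
}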

View File

@ -0,0 +1,56 @@
// Copyright (C) 2014 Jakob Borg and Contributors (see the CONTRIBUTORS file).
// All rights reserved. Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
package model
import (
"testing"
"github.com/syncthing/syncthing/internal/protocol"
)
func TestDeviceActivity(t *testing.T) {
n0 := protocol.DeviceID{1, 2, 3, 4}
n1 := protocol.DeviceID{5, 6, 7, 8}
n2 := protocol.DeviceID{9, 10, 11, 12}
devices := []protocol.DeviceID{n0, n1, n2}
na := newDeviceActivity()
if lb := na.leastBusy(devices); lb != n0 {
t.Errorf("Least busy device should be n0 (%v) not %v", n0, lb)
}
if lb := na.leastBusy(devices); lb != n0 {
t.Errorf("Least busy device should still be n0 (%v) not %v", n0, lb)
}
na.using(na.leastBusy(devices))
if lb := na.leastBusy(devices); lb != n1 {
t.Errorf("Least busy device should be n1 (%v) not %v", n1, lb)
}
na.using(na.leastBusy(devices))
if lb := na.leastBusy(devices); lb != n2 {
t.Errorf("Least busy device should be n2 (%v) not %v", n2, lb)
}
na.using(na.leastBusy(devices))
if lb := na.leastBusy(devices); lb != n0 {
t.Errorf("Least busy device should be n0 (%v) not %v", n0, lb)
}
na.done(n1)
if lb := na.leastBusy(devices); lb != n1 {
t.Errorf("Least busy device should be n1 (%v) not %v", n1, lb)
}
na.done(n2)
if lb := na.leastBusy(devices); lb != n1 {
t.Errorf("Least busy device should still be n1 (%v) not %v", n1, lb)
}
na.done(n0)
if lb := na.leastBusy(devices); lb != n0 {
t.Errorf("Least busy device should be n0 (%v) not %v", n0, lb)
}
}

View File

@ -2,5 +2,5 @@
// All rights reserved. Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
// Package model implements repository abstraction and file pulling mechanisms
// Package model implements folder abstraction and file pulling mechanisms
package model

File diff suppressed because it is too large.

View File

@ -17,11 +17,11 @@ import (
"github.com/syndtr/goleveldb/leveldb/storage"
)
var node1, node2 protocol.NodeID
var device1, device2 protocol.DeviceID
func init() {
node1, _ = protocol.NodeIDFromString("AIR6LPZ-7K4PTTV-UXQSMUU-CPQ5YWH-OEDFIIQ-JUG777G-2YQXXR5-YD6AWQR")
node2, _ = protocol.NodeIDFromString("GYRZZQB-IRNPV4Z-T7TC52W-EQYJ3TT-FDQW6MW-DFLMU42-SSSU6EM-FBK2VAY")
device1, _ = protocol.DeviceIDFromString("AIR6LPZ-7K4PTTV-UXQSMUU-CPQ5YWH-OEDFIIQ-JUG777G-2YQXXR5-YD6AWQR")
device2, _ = protocol.DeviceIDFromString("GYRZZQB-IRNPV4Z-T7TC52W-EQYJ3TT-FDQW6MW-DFLMU42-SSSU6EM-FBK2VAY")
}
var testDataExpected = map[string]protocol.FileInfo{
@ -57,11 +57,11 @@ func init() {
func TestRequest(t *testing.T) {
db, _ := leveldb.Open(storage.NewMemStorage(), nil)
m := NewModel("/tmp", &config.Configuration{}, "node", "syncthing", "dev", db)
m.AddRepo(config.RepositoryConfiguration{ID: "default", Directory: "testdata"})
m.ScanRepo("default")
m := NewModel("/tmp", &config.Configuration{}, "device", "syncthing", "dev", db)
m.AddFolder(config.FolderConfiguration{ID: "default", Directory: "testdata"})
m.ScanFolder("default")
bs, err := m.Request(node1, "default", "foo", 0, 6)
bs, err := m.Request(device1, "default", "foo", 0, 6)
if err != nil {
t.Fatal(err)
}
@ -69,7 +69,7 @@ func TestRequest(t *testing.T) {
t.Errorf("Incorrect data from request: %q", string(bs))
}
bs, err = m.Request(node1, "default", "../walk.go", 0, 6)
bs, err = m.Request(device1, "default", "../walk.go", 0, 6)
if err == nil {
t.Error("Unexpected nil error on insecure file read")
}
@ -94,76 +94,76 @@ func genFiles(n int) []protocol.FileInfo {
func BenchmarkIndex10000(b *testing.B) {
db, _ := leveldb.Open(storage.NewMemStorage(), nil)
m := NewModel("/tmp", nil, "node", "syncthing", "dev", db)
m.AddRepo(config.RepositoryConfiguration{ID: "default", Directory: "testdata"})
m.ScanRepo("default")
m := NewModel("/tmp", nil, "device", "syncthing", "dev", db)
m.AddFolder(config.FolderConfiguration{ID: "default", Directory: "testdata"})
m.ScanFolder("default")
files := genFiles(10000)
b.ResetTimer()
for i := 0; i < b.N; i++ {
m.Index(node1, "default", files)
m.Index(device1, "default", files)
}
}
func BenchmarkIndex00100(b *testing.B) {
db, _ := leveldb.Open(storage.NewMemStorage(), nil)
m := NewModel("/tmp", nil, "node", "syncthing", "dev", db)
m.AddRepo(config.RepositoryConfiguration{ID: "default", Directory: "testdata"})
m.ScanRepo("default")
m := NewModel("/tmp", nil, "device", "syncthing", "dev", db)
m.AddFolder(config.FolderConfiguration{ID: "default", Directory: "testdata"})
m.ScanFolder("default")
files := genFiles(100)
b.ResetTimer()
for i := 0; i < b.N; i++ {
m.Index(node1, "default", files)
m.Index(device1, "default", files)
}
}
func BenchmarkIndexUpdate10000f10000(b *testing.B) {
db, _ := leveldb.Open(storage.NewMemStorage(), nil)
m := NewModel("/tmp", nil, "node", "syncthing", "dev", db)
m.AddRepo(config.RepositoryConfiguration{ID: "default", Directory: "testdata"})
m.ScanRepo("default")
m := NewModel("/tmp", nil, "device", "syncthing", "dev", db)
m.AddFolder(config.FolderConfiguration{ID: "default", Directory: "testdata"})
m.ScanFolder("default")
files := genFiles(10000)
m.Index(node1, "default", files)
m.Index(device1, "default", files)
b.ResetTimer()
for i := 0; i < b.N; i++ {
m.IndexUpdate(node1, "default", files)
m.IndexUpdate(device1, "default", files)
}
}
func BenchmarkIndexUpdate10000f00100(b *testing.B) {
db, _ := leveldb.Open(storage.NewMemStorage(), nil)
m := NewModel("/tmp", nil, "node", "syncthing", "dev", db)
m.AddRepo(config.RepositoryConfiguration{ID: "default", Directory: "testdata"})
m.ScanRepo("default")
m := NewModel("/tmp", nil, "device", "syncthing", "dev", db)
m.AddFolder(config.FolderConfiguration{ID: "default", Directory: "testdata"})
m.ScanFolder("default")
files := genFiles(10000)
m.Index(node1, "default", files)
m.Index(device1, "default", files)
ufiles := genFiles(100)
b.ResetTimer()
for i := 0; i < b.N; i++ {
m.IndexUpdate(node1, "default", ufiles)
m.IndexUpdate(device1, "default", ufiles)
}
}
func BenchmarkIndexUpdate10000f00001(b *testing.B) {
db, _ := leveldb.Open(storage.NewMemStorage(), nil)
m := NewModel("/tmp", nil, "node", "syncthing", "dev", db)
m.AddRepo(config.RepositoryConfiguration{ID: "default", Directory: "testdata"})
m.ScanRepo("default")
m := NewModel("/tmp", nil, "device", "syncthing", "dev", db)
m.AddFolder(config.FolderConfiguration{ID: "default", Directory: "testdata"})
m.ScanFolder("default")
files := genFiles(10000)
m.Index(node1, "default", files)
m.Index(device1, "default", files)
ufiles := genFiles(1)
b.ResetTimer()
for i := 0; i < b.N; i++ {
m.IndexUpdate(node1, "default", ufiles)
m.IndexUpdate(device1, "default", ufiles)
}
}
type FakeConnection struct {
id protocol.NodeID
id protocol.DeviceID
requestData []byte
}
@ -171,7 +171,7 @@ func (FakeConnection) Close() error {
return nil
}
func (f FakeConnection) ID() protocol.NodeID {
func (f FakeConnection) ID() protocol.DeviceID {
return f.id
}
@ -191,7 +191,7 @@ func (FakeConnection) IndexUpdate(string, []protocol.FileInfo) error {
return nil
}
func (f FakeConnection) Request(repo, name string, offset int64, size int) ([]byte, error) {
func (f FakeConnection) Request(folder, name string, offset int64, size int) ([]byte, error) {
return f.requestData, nil
}
@ -207,9 +207,9 @@ func (FakeConnection) Statistics() protocol.Statistics {
func BenchmarkRequest(b *testing.B) {
db, _ := leveldb.Open(storage.NewMemStorage(), nil)
m := NewModel("/tmp", nil, "node", "syncthing", "dev", db)
m.AddRepo(config.RepositoryConfiguration{ID: "default", Directory: "testdata"})
m.ScanRepo("default")
m := NewModel("/tmp", nil, "device", "syncthing", "dev", db)
m.AddFolder(config.FolderConfiguration{ID: "default", Directory: "testdata"})
m.ScanFolder("default")
const n = 1000
files := make([]protocol.FileInfo, n)
@ -223,15 +223,15 @@ func BenchmarkRequest(b *testing.B) {
}
fc := FakeConnection{
id: node1,
id: device1,
requestData: []byte("some data to return"),
}
m.AddConnection(fc, fc)
m.Index(node1, "default", files)
m.Index(device1, "default", files)
b.ResetTimer()
for i := 0; i < b.N; i++ {
data, err := m.requestGlobal(node1, "default", files[i%n].Name, 0, 32, nil)
data, err := m.requestGlobal(device1, "default", files[i%n].Name, 0, 32, nil)
if err != nil {
b.Error(err)
}
@ -241,28 +241,28 @@ func BenchmarkRequest(b *testing.B) {
}
}
func TestNodeRename(t *testing.T) {
func TestDeviceRename(t *testing.T) {
ccm := protocol.ClusterConfigMessage{
ClientName: "syncthing",
ClientVersion: "v0.9.4",
}
cfg := config.New("/tmp/test", node1)
cfg.Nodes = []config.NodeConfiguration{
cfg := config.New("/tmp/test", device1)
cfg.Devices = []config.DeviceConfiguration{
{
NodeID: node1,
DeviceID: device1,
},
}
db, _ := leveldb.Open(storage.NewMemStorage(), nil)
m := NewModel("/tmp", &cfg, "node", "syncthing", "dev", db)
if cfg.Nodes[0].Name != "" {
t.Errorf("Node already has a name")
m := NewModel("/tmp", &cfg, "device", "syncthing", "dev", db)
if cfg.Devices[0].Name != "" {
t.Errorf("Device already has a name")
}
m.ClusterConfig(node1, ccm)
if cfg.Nodes[0].Name != "" {
t.Errorf("Node already has a name")
m.ClusterConfig(device1, ccm)
if cfg.Devices[0].Name != "" {
t.Errorf("Device already has a name")
}
ccm.Options = []protocol.Option{
@ -271,96 +271,96 @@ func TestNodeRename(t *testing.T) {
Value: "tester",
},
}
m.ClusterConfig(node1, ccm)
if cfg.Nodes[0].Name != "tester" {
t.Errorf("Node did not get a name")
m.ClusterConfig(device1, ccm)
if cfg.Devices[0].Name != "tester" {
t.Errorf("Device did not get a name")
}
ccm.Options[0].Value = "tester2"
m.ClusterConfig(node1, ccm)
if cfg.Nodes[0].Name != "tester" {
t.Errorf("Node name got overwritten")
m.ClusterConfig(device1, ccm)
if cfg.Devices[0].Name != "tester" {
t.Errorf("Device name got overwritten")
}
}
func TestClusterConfig(t *testing.T) {
cfg := config.New("/tmp/test", node1)
cfg.Nodes = []config.NodeConfiguration{
cfg := config.New("/tmp/test", device1)
cfg.Devices = []config.DeviceConfiguration{
{
NodeID: node1,
DeviceID: device1,
Introducer: true,
},
{
NodeID: node2,
DeviceID: device2,
},
}
cfg.Repositories = []config.RepositoryConfiguration{
cfg.Folders = []config.FolderConfiguration{
{
ID: "repo1",
Nodes: []config.RepositoryNodeConfiguration{
{NodeID: node1},
{NodeID: node2},
ID: "folder1",
Devices: []config.FolderDeviceConfiguration{
{DeviceID: device1},
{DeviceID: device2},
},
},
{
ID: "repo2",
Nodes: []config.RepositoryNodeConfiguration{
{NodeID: node1},
{NodeID: node2},
ID: "folder2",
Devices: []config.FolderDeviceConfiguration{
{DeviceID: device1},
{DeviceID: device2},
},
},
}
db, _ := leveldb.Open(storage.NewMemStorage(), nil)
m := NewModel("/tmp", &cfg, "node", "syncthing", "dev", db)
m.AddRepo(cfg.Repositories[0])
m.AddRepo(cfg.Repositories[1])
m := NewModel("/tmp", &cfg, "device", "syncthing", "dev", db)
m.AddFolder(cfg.Folders[0])
m.AddFolder(cfg.Folders[1])
cm := m.clusterConfig(node2)
cm := m.clusterConfig(device2)
if l := len(cm.Repositories); l != 2 {
t.Fatalf("Incorrect number of repos %d != 2", l)
if l := len(cm.Folders); l != 2 {
t.Fatalf("Incorrect number of folders %d != 2", l)
}
r := cm.Repositories[0]
if r.ID != "repo1" {
t.Errorf("Incorrect repo %q != repo1", r.ID)
r := cm.Folders[0]
if r.ID != "folder1" {
t.Errorf("Incorrect folder %q != folder1", r.ID)
}
if l := len(r.Nodes); l != 2 {
t.Errorf("Incorrect number of nodes %d != 2", l)
if l := len(r.Devices); l != 2 {
t.Errorf("Incorrect number of devices %d != 2", l)
}
if id := r.Nodes[0].ID; bytes.Compare(id, node1[:]) != 0 {
t.Errorf("Incorrect node ID %x != %x", id, node1)
if id := r.Devices[0].ID; bytes.Compare(id, device1[:]) != 0 {
t.Errorf("Incorrect device ID %x != %x", id, device1)
}
if r.Nodes[0].Flags&protocol.FlagIntroducer == 0 {
t.Error("Node1 should be flagged as Introducer")
if r.Devices[0].Flags&protocol.FlagIntroducer == 0 {
t.Error("Device1 should be flagged as Introducer")
}
if id := r.Nodes[1].ID; bytes.Compare(id, node2[:]) != 0 {
t.Errorf("Incorrect node ID %x != %x", id, node2)
if id := r.Devices[1].ID; bytes.Compare(id, device2[:]) != 0 {
t.Errorf("Incorrect device ID %x != %x", id, device2)
}
if r.Nodes[1].Flags&protocol.FlagIntroducer != 0 {
t.Error("Node2 should not be flagged as Introducer")
if r.Devices[1].Flags&protocol.FlagIntroducer != 0 {
t.Error("Device2 should not be flagged as Introducer")
}
r = cm.Repositories[1]
if r.ID != "repo2" {
t.Errorf("Incorrect repo %q != repo2", r.ID)
r = cm.Folders[1]
if r.ID != "folder2" {
t.Errorf("Incorrect folder %q != folder2", r.ID)
}
if l := len(r.Nodes); l != 2 {
t.Errorf("Incorrect number of nodes %d != 2", l)
if l := len(r.Devices); l != 2 {
t.Errorf("Incorrect number of devices %d != 2", l)
}
if id := r.Nodes[0].ID; bytes.Compare(id, node1[:]) != 0 {
t.Errorf("Incorrect node ID %x != %x", id, node1)
if id := r.Devices[0].ID; bytes.Compare(id, device1[:]) != 0 {
t.Errorf("Incorrect device ID %x != %x", id, device1)
}
if r.Nodes[0].Flags&protocol.FlagIntroducer == 0 {
t.Error("Node1 should be flagged as Introducer")
if r.Devices[0].Flags&protocol.FlagIntroducer == 0 {
t.Error("Device1 should be flagged as Introducer")
}
if id := r.Nodes[1].ID; bytes.Compare(id, node2[:]) != 0 {
t.Errorf("Incorrect node ID %x != %x", id, node2)
if id := r.Devices[1].ID; bytes.Compare(id, device2[:]) != 0 {
t.Errorf("Incorrect device ID %x != %x", id, device2)
}
if r.Nodes[1].Flags&protocol.FlagIntroducer != 0 {
t.Error("Node2 should not be flagged as Introducer")
if r.Devices[1].Flags&protocol.FlagIntroducer != 0 {
t.Error("Device2 should not be flagged as Introducer")
}
}
@ -379,8 +379,8 @@ func TestIgnores(t *testing.T) {
}
db, _ := leveldb.Open(storage.NewMemStorage(), nil)
m := NewModel("/tmp", nil, "node", "syncthing", "dev", db)
m.AddRepo(config.RepositoryConfiguration{ID: "default", Directory: "testdata"})
m := NewModel("/tmp", nil, "device", "syncthing", "dev", db)
m.AddFolder(config.FolderConfiguration{ID: "default", Directory: "testdata"})
expected := []string{
".*",
@ -440,7 +440,7 @@ func TestIgnores(t *testing.T) {
t.Error("No error")
}
m.AddRepo(config.RepositoryConfiguration{ID: "fresh", Directory: "XXX"})
m.AddFolder(config.FolderConfiguration{ID: "fresh", Directory: "XXX"})
ignores, err = m.GetIgnores("fresh")
if err != nil {
t.Error(err)

View File

@ -1,51 +0,0 @@
// Copyright (C) 2014 Jakob Borg and Contributors (see the CONTRIBUTORS file).
// All rights reserved. Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
package model
import (
"sync"
"github.com/syncthing/syncthing/internal/protocol"
)
// nodeActivity tracks the number of outstanding requests per node and can
// answer which node is least busy. It is safe for use from multiple
// goroutines.
type nodeActivity struct {
act map[protocol.NodeID]int
mut sync.Mutex
}
func newNodeActivity() *nodeActivity {
return &nodeActivity{
act: make(map[protocol.NodeID]int),
}
}
func (m nodeActivity) leastBusy(availability []protocol.NodeID) protocol.NodeID {
m.mut.Lock()
var low int = 2<<30 - 1
var selected protocol.NodeID
for _, node := range availability {
if usage := m.act[node]; usage < low {
low = usage
selected = node
}
}
m.mut.Unlock()
return selected
}
func (m nodeActivity) using(node protocol.NodeID) {
m.mut.Lock()
defer m.mut.Unlock()
m.act[node]++
}
func (m nodeActivity) done(node protocol.NodeID) {
m.mut.Lock()
defer m.mut.Unlock()
m.act[node]--
}

View File

@ -1,56 +0,0 @@
// Copyright (C) 2014 Jakob Borg and Contributors (see the CONTRIBUTORS file).
// All rights reserved. Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
package model
import (
"testing"
"github.com/syncthing/syncthing/internal/protocol"
)
func TestNodeActivity(t *testing.T) {
n0 := protocol.NodeID{1, 2, 3, 4}
n1 := protocol.NodeID{5, 6, 7, 8}
n2 := protocol.NodeID{9, 10, 11, 12}
nodes := []protocol.NodeID{n0, n1, n2}
na := newNodeActivity()
if lb := na.leastBusy(nodes); lb != n0 {
t.Errorf("Least busy node should be n0 (%v) not %v", n0, lb)
}
if lb := na.leastBusy(nodes); lb != n0 {
t.Errorf("Least busy node should still be n0 (%v) not %v", n0, lb)
}
na.using(na.leastBusy(nodes))
if lb := na.leastBusy(nodes); lb != n1 {
t.Errorf("Least busy node should be n1 (%v) not %v", n1, lb)
}
na.using(na.leastBusy(nodes))
if lb := na.leastBusy(nodes); lb != n2 {
t.Errorf("Least busy node should be n2 (%v) not %v", n2, lb)
}
na.using(na.leastBusy(nodes))
if lb := na.leastBusy(nodes); lb != n0 {
t.Errorf("Least busy node should be n0 (%v) not %v", n0, lb)
}
na.done(n1)
if lb := na.leastBusy(nodes); lb != n1 {
t.Errorf("Least busy node should be n1 (%v) not %v", n1, lb)
}
na.done(n2)
if lb := na.leastBusy(nodes); lb != n1 {
t.Errorf("Least busy node should still be n1 (%v) not %v", n1, lb)
}
na.done(n0)
if lb := na.leastBusy(nodes); lb != n0 {
t.Errorf("Least busy node should be n0 (%v) not %v", n0, lb)
}
}

View File

@ -23,9 +23,9 @@ import (
// TODO: Stop on errors
const (
copiersPerRepo = 1
pullersPerRepo = 16
finishersPerRepo = 2
copiersPerFolder = 1
pullersPerFolder = 16
finishersPerFolder = 2
pauseIntv = 60 * time.Second
nextPullIntv = 10 * time.Second
checkPullIntv = 1 * time.Second
@ -46,12 +46,12 @@ type copyBlocksState struct {
}
var (
activity = newNodeActivity()
errNoNode = errors.New("no available source node")
activity = newDeviceActivity()
errNoDevice = errors.New("no available source device")
)
type Puller struct {
repo string
folder string
dir string
scanIntv time.Duration
model *Model
@ -75,8 +75,8 @@ func (p *Puller) Serve() {
defer func() {
pullTimer.Stop()
scanTimer.Stop()
// TODO: Should there be an actual RepoStopped state?
p.model.setState(p.repo, RepoIdle)
// TODO: Should there be an actual FolderStopped state?
p.model.setState(p.folder, FolderIdle)
}()
var prevVer uint64
@ -94,10 +94,10 @@ loop:
// Index(), so that we immediately start a pull when new index
// information is available. Before that though, I'd like to build a
// repeatable benchmark of how long it takes to sync a change from
// node A to node B, so we have something to work against.
// device A to device B, so we have something to work against.
case <-pullTimer.C:
// RemoteLocalVersion() is a fast call, doesn't touch the database.
curVer := p.model.RemoteLocalVersion(p.repo)
curVer := p.model.RemoteLocalVersion(p.folder)
if curVer == prevVer {
pullTimer.Reset(checkPullIntv)
continue
@ -106,11 +106,11 @@ loop:
if debug {
l.Debugln(p, "pulling", prevVer, curVer)
}
p.model.setState(p.repo, RepoSyncing)
p.model.setState(p.folder, FolderSyncing)
tries := 0
for {
tries++
changed := p.pullerIteration(copiersPerRepo, pullersPerRepo, finishersPerRepo)
changed := p.pullerIteration(copiersPerFolder, pullersPerFolder, finishersPerFolder)
if debug {
l.Debugln(p, "changed", changed)
}
@ -120,8 +120,8 @@ loop:
// sync. Remember the local version number and
// schedule a resync a little bit into the future.
if lv := p.model.RemoteLocalVersion(p.repo); lv < curVer {
// There's a corner case where the node we needed
if lv := p.model.RemoteLocalVersion(p.folder); lv < curVer {
// There's a corner case where the device we needed
// files from disconnected during the puller
// iteration. The files will have been removed from
// the index, so we've concluded that we don't need
@ -142,12 +142,12 @@ loop:
// we're not making it. Probably there are write
// errors preventing us. Flag this with a warning and
// wait a bit longer before retrying.
l.Warnf("Repo %q isn't making progress - check logs for possible root cause. Pausing puller for %v.", p.repo, pauseIntv)
l.Warnf("Folder %q isn't making progress - check logs for possible root cause. Pausing puller for %v.", p.folder, pauseIntv)
pullTimer.Reset(pauseIntv)
break
}
}
p.model.setState(p.repo, RepoIdle)
p.model.setState(p.folder, FolderIdle)
// The reason for running the scanner from within the puller is that
// this is the easiest way to make sure we are not doing both at the
@ -156,12 +156,12 @@ loop:
if debug {
l.Debugln(p, "rescan")
}
p.model.setState(p.repo, RepoScanning)
if err := p.model.ScanRepo(p.repo); err != nil {
invalidateRepo(p.model.cfg, p.repo, err)
p.model.setState(p.folder, FolderScanning)
if err := p.model.ScanFolder(p.folder); err != nil {
invalidateFolder(p.model.cfg, p.folder, err)
break loop
}
p.model.setState(p.repo, RepoIdle)
p.model.setState(p.folder, FolderIdle)
scanTimer.Reset(p.scanIntv)
}
}
@ -172,13 +172,13 @@ func (p *Puller) Stop() {
}
func (p *Puller) String() string {
return fmt.Sprintf("puller/%s@%p", p.repo, p)
return fmt.Sprintf("puller/%s@%p", p.folder, p)
}
// pullerIteration runs a single puller iteration for the given repo and
// pullerIteration runs a single puller iteration for the given folder and
// returns the number of items that should have been synced (even those that
// might have failed). One puller iteration handles all files currently
// flagged as needed in the repo. The specified number of copier, puller and
// flagged as needed in the folder. The specified number of copier, puller and
// finisher routines are used. It's seldom efficient to use more than one
// copier routine, while multiple pullers are essential and multiple finishers
// may be useful (they are primarily CPU bound due to hashing).
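
As a rough illustration of the fan-out just described, the pipeline shape looks roughly like the sketch below. The worker functions are hypothetical stand-ins, not the actual copier/puller/finisher routines; only the channel element types are taken from this file:

// Illustrative wiring: needed files become copy work, copy work that cannot
// be satisfied from local blocks becomes pull work, and finished shared
// puller states are drained by the finishers.
copyChan := make(chan copyBlocksState)
pullChan := make(chan pullBlockState)
finishChan := make(chan *sharedPullerState)

for i := 0; i < ncopiers; i++ {
	go copyWorker(copyChan, pullChan, finishChan) // hypothetical worker
}
for i := 0; i < npullers; i++ {
	go pullWorker(pullChan, finishChan) // hypothetical worker
}
for i := 0; i < nfinishers; i++ {
	go finishWorker(finishChan) // hypothetical worker
}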
@ -218,7 +218,7 @@ func (p *Puller) pullerIteration(ncopiers, npullers, nfinishers int) int {
}
p.model.rmut.RLock()
files := p.model.repoFiles[p.repo]
files := p.model.folderFiles[p.folder]
p.model.rmut.RUnlock()
// !!!
@ -228,7 +228,7 @@ func (p *Puller) pullerIteration(ncopiers, npullers, nfinishers int) int {
// !!!
changed := 0
files.WithNeed(protocol.LocalNodeID, func(intf protocol.FileIntf) bool {
files.WithNeed(protocol.LocalDeviceID, func(intf protocol.FileIntf) bool {
// Needed items are delivered sorted lexicographically. This isn't
// really optimal from a performance point of view - it would be
@ -240,7 +240,7 @@ func (p *Puller) pullerIteration(ncopiers, npullers, nfinishers int) int {
file := intf.(protocol.FileInfo)
events.Default.Log(events.ItemStarted, map[string]string{
"repo": p.repo,
"folder": p.folder,
"item": file.Name,
})
@ -290,7 +290,7 @@ func (p *Puller) handleDir(file protocol.FileInfo) {
mode := os.FileMode(file.Flags & 0777)
if debug {
curFile := p.model.CurrentRepoFile(p.repo, file.Name)
curFile := p.model.CurrentFolderFile(p.folder, file.Name)
l.Debugf("need dir\n\t%v\n\t%v", file, curFile)
}
@ -307,19 +307,19 @@ func (p *Puller) handleDir(file protocol.FileInfo) {
}
if err = osutil.InWritableDir(mkdir, realName); err == nil {
p.model.updateLocal(p.repo, file)
p.model.updateLocal(p.folder, file)
} else {
l.Infof("Puller (repo %q, file %q): %v", p.repo, file.Name, err)
l.Infof("Puller (folder %q, file %q): %v", p.folder, file.Name, err)
}
return
}
// Weird error when stat()'ing the dir. Probably won't work to do
// anything else with it if we can't even stat() it.
l.Infof("Puller (repo %q, file %q): %v", p.repo, file.Name, err)
l.Infof("Puller (folder %q, file %q): %v", p.folder, file.Name, err)
return
} else if !info.IsDir() {
l.Infof("Puller (repo %q, file %q): should be dir, but is not", p.repo, file.Name)
l.Infof("Puller (folder %q, file %q): should be dir, but is not", p.folder, file.Name)
return
}
@ -328,9 +328,9 @@ func (p *Puller) handleDir(file protocol.FileInfo) {
// It's OK to change mode bits on stuff within non-writable directories.
if err := os.Chmod(realName, mode); err == nil {
p.model.updateLocal(p.repo, file)
p.model.updateLocal(p.folder, file)
} else {
l.Infof("Puller (repo %q, file %q): %v", p.repo, file.Name, err)
l.Infof("Puller (folder %q, file %q): %v", p.folder, file.Name, err)
}
}
@ -339,7 +339,7 @@ func (p *Puller) deleteDir(file protocol.FileInfo) {
realName := filepath.Join(p.dir, file.Name)
err := osutil.InWritableDir(os.Remove, realName)
if err == nil || os.IsNotExist(err) {
p.model.updateLocal(p.repo, file)
p.model.updateLocal(p.folder, file)
}
}
@ -355,16 +355,16 @@ func (p *Puller) deleteFile(file protocol.FileInfo) {
}
if err != nil {
l.Infof("Puller (repo %q, file %q): delete: %v", p.repo, file.Name, err)
l.Infof("Puller (folder %q, file %q): delete: %v", p.folder, file.Name, err)
} else {
p.model.updateLocal(p.repo, file)
p.model.updateLocal(p.folder, file)
}
}
// handleFile queues the copies and pulls as necessary for a single new or
// changed file.
func (p *Puller) handleFile(file protocol.FileInfo, copyChan chan<- copyBlocksState, pullChan chan<- pullBlockState) {
curFile := p.model.CurrentRepoFile(p.repo, file.Name)
curFile := p.model.CurrentFolderFile(p.folder, file.Name)
copyBlocks, pullBlocks := scanner.BlockDiff(curFile.Blocks, file.Blocks)
if len(copyBlocks) == len(curFile.Blocks) && len(pullBlocks) == 0 {
@ -384,7 +384,7 @@ func (p *Puller) handleFile(file protocol.FileInfo, copyChan chan<- copyBlocksSt
s := sharedPullerState{
file: file,
repo: p.repo,
folder: p.folder,
tempName: tempName,
realName: realName,
pullNeeded: len(pullBlocks),
@ -422,18 +422,18 @@ func (p *Puller) shortcutFile(file protocol.FileInfo) {
realName := filepath.Join(p.dir, file.Name)
err := os.Chmod(realName, os.FileMode(file.Flags&0777))
if err != nil {
l.Infof("Puller (repo %q, file %q): shortcut: %v", p.repo, file.Name, err)
l.Infof("Puller (folder %q, file %q): shortcut: %v", p.folder, file.Name, err)
return
}
t := time.Unix(file.Modified, 0)
err = os.Chtimes(realName, t, t)
if err != nil {
l.Infof("Puller (repo %q, file %q): shortcut: %v", p.repo, file.Name, err)
l.Infof("Puller (folder %q, file %q): shortcut: %v", p.folder, file.Name, err)
return
}
p.model.updateLocal(p.repo, file)
p.model.updateLocal(p.folder, file)
}
// copierRoutine reads pullerStates until the in channel closes and performs
@ -487,13 +487,13 @@ nextBlock:
continue nextBlock
}
// Select the least busy node to pull the block frop.model. If we found no
// feasible node at all, fail the block (and in the long run, the
// Select the least busy device to pull the block from. If we found no
// feasible device at all, fail the block (and in the long run, the
// file).
potentialNodes := p.model.availability(p.repo, state.file.Name)
selected := activity.leastBusy(potentialNodes)
if selected == (protocol.NodeID{}) {
state.earlyClose("pull", errNoNode)
potentialDevices := p.model.availability(p.folder, state.file.Name)
selected := activity.leastBusy(potentialDevices)
if selected == (protocol.DeviceID{}) {
state.earlyClose("pull", errNoDevice)
continue nextBlock
}
@ -505,10 +505,10 @@ nextBlock:
continue nextBlock
}
// Fetch the block, while marking the selected node as in use so that
// leastBusy can select another node when someone else asks.
// Fetch the block, while marking the selected device as in use so that
// leastBusy can select another device when someone else asks.
activity.using(selected)
buf, err := p.model.requestGlobal(selected, p.repo, state.file.Name, state.block.Offset, int(state.block.Size), state.block.Hash)
buf, err := p.model.requestGlobal(selected, p.folder, state.file.Name, state.block.Offset, int(state.block.Size), state.block.Hash)
activity.done(selected)
if err != nil {
state.earlyClose("pull", err)
@ -589,7 +589,7 @@ func (p *Puller) finisherRoutine(in <-chan *sharedPullerState) {
}
// Record the updated file in the index
p.model.updateLocal(p.repo, state.file)
p.model.updateLocal(p.folder, state.file)
}
}
}
@ -609,11 +609,11 @@ func (p *Puller) clean() {
})
}
func invalidateRepo(cfg *config.Configuration, repoID string, err error) {
for i := range cfg.Repositories {
repo := &cfg.Repositories[i]
if repo.ID == repoID {
repo.Invalid = err.Error()
func invalidateFolder(cfg *config.Configuration, folderID string, err error) {
for i := range cfg.Folders {
folder := &cfg.Folders[i]
if folder.ID == folderID {
folder.Invalid = err.Error()
return
}
}

View File

@ -17,7 +17,7 @@ import (
type sharedPullerState struct {
// Immutable, does not require locking
file protocol.FileInfo
repo string
folder string
tempName string
realName string
@ -113,7 +113,7 @@ func (s *sharedPullerState) earlyCloseLocked(context string, err error) {
return
}
l.Infof("Puller (repo %q, file %q): %s: %v", s.repo, s.file.Name, context, err)
l.Infof("Puller (folder %q, file %q): %s: %v", s.folder, s.file.Name, context, err)
s.err = err
if s.fd != nil {
s.fd.Close()
@ -133,7 +133,7 @@ func (s *sharedPullerState) copyDone() {
s.mut.Lock()
s.copyNeeded--
if debug {
l.Debugln("sharedPullerState", s.repo, s.file.Name, "copyNeeded ->", s.pullNeeded)
l.Debugln("sharedPullerState", s.folder, s.file.Name, "copyNeeded ->", s.pullNeeded)
}
s.mut.Unlock()
}
@ -142,7 +142,7 @@ func (s *sharedPullerState) pullDone() {
s.mut.Lock()
s.pullNeeded--
if debug {
l.Debugln("sharedPullerState", s.repo, s.file.Name, "pullNeeded ->", s.pullNeeded)
l.Debugln("sharedPullerState", s.folder, s.file.Name, "pullNeeded ->", s.pullNeeded)
}
s.mut.Unlock()
}

0
internal/model/testdata/.stignore vendored Normal file → Executable file
View File

View File

@ -11,7 +11,7 @@ import (
type TestModel struct {
data []byte
repo string
folder string
name string
offset int64
size int
@ -24,25 +24,25 @@ func newTestModel() *TestModel {
}
}
func (t *TestModel) Index(nodeID NodeID, repo string, files []FileInfo) {
func (t *TestModel) Index(deviceID DeviceID, folder string, files []FileInfo) {
}
func (t *TestModel) IndexUpdate(nodeID NodeID, repo string, files []FileInfo) {
func (t *TestModel) IndexUpdate(deviceID DeviceID, folder string, files []FileInfo) {
}
func (t *TestModel) Request(nodeID NodeID, repo, name string, offset int64, size int) ([]byte, error) {
t.repo = repo
func (t *TestModel) Request(deviceID DeviceID, folder, name string, offset int64, size int) ([]byte, error) {
t.folder = folder
t.name = name
t.offset = offset
t.size = size
return t.data, nil
}
func (t *TestModel) Close(nodeID NodeID, err error) {
func (t *TestModel) Close(deviceID DeviceID, err error) {
close(t.closedCh)
}
func (t *TestModel) ClusterConfig(nodeID NodeID, config ClusterConfigMessage) {
func (t *TestModel) ClusterConfig(deviceID DeviceID, config ClusterConfigMessage) {
}
func (t *TestModel) isClosed() bool {

View File

@ -16,36 +16,36 @@ import (
"github.com/syncthing/syncthing/internal/luhn"
)
type NodeID [32]byte
type DeviceID [32]byte
var LocalNodeID = NodeID{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}
var LocalDeviceID = DeviceID{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}
// NewNodeID generates a new node ID from the raw bytes of a certificate
func NewNodeID(rawCert []byte) NodeID {
var n NodeID
// NewDeviceID generates a new device ID from the raw bytes of a certificate
func NewDeviceID(rawCert []byte) DeviceID {
var n DeviceID
hf := sha256.New()
hf.Write(rawCert)
hf.Sum(n[:0])
return n
}
func NodeIDFromString(s string) (NodeID, error) {
var n NodeID
func DeviceIDFromString(s string) (DeviceID, error) {
var n DeviceID
err := n.UnmarshalText([]byte(s))
return n, err
}
func NodeIDFromBytes(bs []byte) NodeID {
var n NodeID
func DeviceIDFromBytes(bs []byte) DeviceID {
var n DeviceID
if len(bs) != len(n) {
panic("incorrect length of byte slice representing node ID")
panic("incorrect length of byte slice representing device ID")
}
copy(n[:], bs)
return n
}
// String returns the canonical string representation of the node ID
func (n NodeID) String() string {
// String returns the canonical string representation of the device ID
func (n DeviceID) String() string {
id := base32.StdEncoding.EncodeToString(n[:])
id = strings.Trim(id, "=")
id, err := luhnify(id)
@ -57,23 +57,23 @@ func (n NodeID) String() string {
return id
}
func (n NodeID) GoString() string {
func (n DeviceID) GoString() string {
return n.String()
}
func (n NodeID) Compare(other NodeID) int {
func (n DeviceID) Compare(other DeviceID) int {
return bytes.Compare(n[:], other[:])
}
func (n NodeID) Equals(other NodeID) bool {
func (n DeviceID) Equals(other DeviceID) bool {
return bytes.Compare(n[:], other[:]) == 0
}
func (n *NodeID) MarshalText() ([]byte, error) {
func (n *DeviceID) MarshalText() ([]byte, error) {
return []byte(n.String()), nil
}
func (n *NodeID) UnmarshalText(bs []byte) error {
func (n *DeviceID) UnmarshalText(bs []byte) error {
id := string(bs)
id = strings.Trim(id, "=")
id = strings.ToUpper(id)
@ -98,7 +98,7 @@ func (n *NodeID) UnmarshalText(bs []byte) error {
copy(n[:], dec)
return nil
default:
return errors.New("node ID invalid: incorrect length")
return errors.New("device ID invalid: incorrect length")
}
}
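
A standalone sketch of the round trip these methods are meant to support; the certificate bytes are placeholder input:

package main

import (
	"fmt"
	"log"

	"github.com/syncthing/syncthing/internal/protocol"
)

func main() {
	// Derive a device ID from raw certificate bytes, render it in the
	// canonical base32-with-check-characters form, and parse it back.
	id := protocol.NewDeviceID([]byte("placeholder certificate bytes"))
	s := id.String()
	fmt.Println("device ID:", s)

	parsed, err := protocol.DeviceIDFromString(s)
	if err != nil {
		log.Fatal(err)
	}
	if !parsed.Equals(id) {
		log.Fatal("device ID did not survive the round trip")
	}
}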

View File

@ -20,14 +20,14 @@ var formatCases = []string{
"p561017mzjnu2yiqgdreydm2mgtimgl3bxnpq6w5bmt88z4tjxzwicq2",
}
func TestFormatNodeID(t *testing.T) {
func TestFormatDeviceID(t *testing.T) {
for i, tc := range formatCases {
var id NodeID
var id DeviceID
err := id.UnmarshalText([]byte(tc))
if err != nil {
t.Errorf("#%d UnmarshalText(%q); %v", i, tc, err)
} else if f := id.String(); f != formatted {
t.Errorf("#%d FormatNodeID(%q)\n\t%q !=\n\t%q", i, tc, f, formatted)
t.Errorf("#%d FormatDeviceID(%q)\n\t%q !=\n\t%q", i, tc, f, formatted)
}
}
}
@ -46,20 +46,20 @@ var validateCases = []struct {
{"p56ioi7mzjnu2iqgdreydm2mgtmgl3bxnpq6w5btbbz4tjxzwicqCCCC", false},
}
func TestValidateNodeID(t *testing.T) {
func TestValidateDeviceID(t *testing.T) {
for _, tc := range validateCases {
var id NodeID
var id DeviceID
err := id.UnmarshalText([]byte(tc.s))
if (err == nil && !tc.ok) || (err != nil && tc.ok) {
t.Errorf("ValidateNodeID(%q); %v != %v", tc.s, err, tc.ok)
t.Errorf("ValidateDeviceID(%q); %v != %v", tc.s, err, tc.ok)
}
}
}
func TestMarshallingNodeID(t *testing.T) {
n0 := NodeID{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 10, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32}
n1 := NodeID{}
n2 := NodeID{}
func TestMarshallingDeviceID(t *testing.T) {
n0 := DeviceID{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 10, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32}
n1 := DeviceID{}
n2 := DeviceID{}
bs, _ := n0.MarshalText()
n1.UnmarshalText(bs)

View File

@ -7,7 +7,7 @@ package protocol
import "fmt"
type IndexMessage struct {
Repository string // max:64
Folder string // max:64
Files []FileInfo
}
@ -90,7 +90,7 @@ func (b BlockInfo) String() string {
}
type RequestMessage struct {
Repository string // max:64
Folder string // max:64
Name string // max:8192
Offset uint64
Size uint32
@ -103,7 +103,7 @@ type ResponseMessage struct {
type ClusterConfigMessage struct {
ClientName string // max:64
ClientVersion string // max:64
Repositories []Repository // max:64
Folders []Folder // max:64
Options []Option // max:64
}
@ -116,12 +116,12 @@ func (o *ClusterConfigMessage) GetOption(key string) string {
return ""
}
type Repository struct {
type Folder struct {
ID string // max:64
Nodes []Node // max:64
Devices []Device // max:64
}
type Node struct {
type Device struct {
ID []byte // max:32
Flags uint32
MaxLocalVersion uint64

View File

@ -18,10 +18,10 @@ IndexMessage Structure:
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Length of Repository |
| Length of Folder |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
/ /
\ Repository (variable length) \
\ Folder (variable length) \
/ /
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Number of Files |
@ -33,7 +33,7 @@ IndexMessage Structure:
struct IndexMessage {
string Repository<64>;
string Folder<64>;
FileInfo Files<>;
}
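
For example, building such a message with the renamed field might look like the sketch below (package protocol context assumed; the file entry is a placeholder):

// An index announcement for one folder. Only the Folder field is the point
// here; the FileInfo values are illustrative.
msg := IndexMessage{
	Folder: "default",
	Files: []FileInfo{
		{Name: "docs/notes.txt", Flags: 0644, Modified: 1400000000, Version: 1},
	},
}
bs := msg.AppendXDR(nil) // XDR-encode per the layout above
_ = bs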
@ -56,10 +56,10 @@ func (o IndexMessage) AppendXDR(bs []byte) []byte {
}
func (o IndexMessage) encodeXDR(xw *xdr.Writer) (int, error) {
if len(o.Repository) > 64 {
if len(o.Folder) > 64 {
return xw.Tot(), xdr.ErrElementSizeExceeded
}
xw.WriteString(o.Repository)
xw.WriteString(o.Folder)
xw.WriteUint32(uint32(len(o.Files)))
for i := range o.Files {
_, err := o.Files[i].encodeXDR(xw)
@ -82,7 +82,7 @@ func (o *IndexMessage) UnmarshalXDR(bs []byte) error {
}
func (o *IndexMessage) decodeXDR(xr *xdr.Reader) error {
o.Repository = xr.ReadStringMax(64)
o.Folder = xr.ReadStringMax(64)
_FilesSize := int(xr.ReadUint32())
o.Files = make([]FileInfo, _FilesSize)
for i := range o.Files {
@ -362,10 +362,10 @@ RequestMessage Structure:
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Length of Repository |
| Length of Folder |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
/ /
\ Repository (variable length) \
\ Folder (variable length) \
/ /
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Length of Name |
@ -383,7 +383,7 @@ RequestMessage Structure:
struct RequestMessage {
string Repository<64>;
string Folder<64>;
string Name<8192>;
unsigned hyper Offset;
unsigned int Size;
@ -408,10 +408,10 @@ func (o RequestMessage) AppendXDR(bs []byte) []byte {
}
func (o RequestMessage) encodeXDR(xw *xdr.Writer) (int, error) {
if len(o.Repository) > 64 {
if len(o.Folder) > 64 {
return xw.Tot(), xdr.ErrElementSizeExceeded
}
xw.WriteString(o.Repository)
xw.WriteString(o.Folder)
if len(o.Name) > 8192 {
return xw.Tot(), xdr.ErrElementSizeExceeded
}
@ -433,7 +433,7 @@ func (o *RequestMessage) UnmarshalXDR(bs []byte) error {
}
func (o *RequestMessage) decodeXDR(xr *xdr.Reader) error {
o.Repository = xr.ReadStringMax(64)
o.Folder = xr.ReadStringMax(64)
o.Name = xr.ReadStringMax(8192)
o.Offset = xr.ReadUint64()
o.Size = xr.ReadUint32()
@ -517,10 +517,10 @@ ClusterConfigMessage Structure:
\ Client Version (variable length) \
/ /
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Number of Repositories |
| Number of Folders |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
/ /
\ Zero or more Repository Structures \
\ Zero or more Folder Structures \
/ /
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Number of Options |
@ -534,7 +534,7 @@ ClusterConfigMessage Structure:
struct ClusterConfigMessage {
string ClientName<64>;
string ClientVersion<64>;
Repository Repositories<64>;
Folder Folders<64>;
Option Options<64>;
}
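
A sketch of populating the renamed structures for the handshake (package protocol context assumed; remoteID is taken to be a DeviceID obtained elsewhere, and the version string is a placeholder):

// A cluster config announcing one shared folder with one known device.
cm := ClusterConfigMessage{
	ClientName:    "syncthing",
	ClientVersion: "v0.10.0", // placeholder
	Folders: []Folder{
		{
			ID: "default",
			Devices: []Device{
				{ID: remoteID[:], Flags: FlagIntroducer},
			},
		},
	},
}
_ = cm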
@ -565,12 +565,12 @@ func (o ClusterConfigMessage) encodeXDR(xw *xdr.Writer) (int, error) {
return xw.Tot(), xdr.ErrElementSizeExceeded
}
xw.WriteString(o.ClientVersion)
if len(o.Repositories) > 64 {
if len(o.Folders) > 64 {
return xw.Tot(), xdr.ErrElementSizeExceeded
}
xw.WriteUint32(uint32(len(o.Repositories)))
for i := range o.Repositories {
_, err := o.Repositories[i].encodeXDR(xw)
xw.WriteUint32(uint32(len(o.Folders)))
for i := range o.Folders {
_, err := o.Folders[i].encodeXDR(xw)
if err != nil {
return xw.Tot(), err
}
@ -602,13 +602,13 @@ func (o *ClusterConfigMessage) UnmarshalXDR(bs []byte) error {
func (o *ClusterConfigMessage) decodeXDR(xr *xdr.Reader) error {
o.ClientName = xr.ReadStringMax(64)
o.ClientVersion = xr.ReadStringMax(64)
_RepositoriesSize := int(xr.ReadUint32())
if _RepositoriesSize > 64 {
_FoldersSize := int(xr.ReadUint32())
if _FoldersSize > 64 {
return xdr.ErrElementSizeExceeded
}
o.Repositories = make([]Repository, _RepositoriesSize)
for i := range o.Repositories {
(&o.Repositories[i]).decodeXDR(xr)
o.Folders = make([]Folder, _FoldersSize)
for i := range o.Folders {
(&o.Folders[i]).decodeXDR(xr)
}
_OptionsSize := int(xr.ReadUint32())
if _OptionsSize > 64 {
@ -623,7 +623,7 @@ func (o *ClusterConfigMessage) decodeXDR(xr *xdr.Reader) error {
/*
Repository Structure:
Folder Structure:
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
@ -634,48 +634,48 @@ Repository Structure:
\ ID (variable length) \
/ /
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Number of Nodes |
| Number of Devices |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
/ /
\ Zero or more Node Structures \
\ Zero or more Device Structures \
/ /
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
struct Repository {
struct Folder {
string ID<64>;
Node Nodes<64>;
Device Devices<64>;
}
*/
func (o Repository) EncodeXDR(w io.Writer) (int, error) {
func (o Folder) EncodeXDR(w io.Writer) (int, error) {
var xw = xdr.NewWriter(w)
return o.encodeXDR(xw)
}
func (o Repository) MarshalXDR() []byte {
func (o Folder) MarshalXDR() []byte {
return o.AppendXDR(make([]byte, 0, 128))
}
func (o Repository) AppendXDR(bs []byte) []byte {
func (o Folder) AppendXDR(bs []byte) []byte {
var aw = xdr.AppendWriter(bs)
var xw = xdr.NewWriter(&aw)
o.encodeXDR(xw)
return []byte(aw)
}
func (o Repository) encodeXDR(xw *xdr.Writer) (int, error) {
func (o Folder) encodeXDR(xw *xdr.Writer) (int, error) {
if len(o.ID) > 64 {
return xw.Tot(), xdr.ErrElementSizeExceeded
}
xw.WriteString(o.ID)
if len(o.Nodes) > 64 {
if len(o.Devices) > 64 {
return xw.Tot(), xdr.ErrElementSizeExceeded
}
xw.WriteUint32(uint32(len(o.Nodes)))
for i := range o.Nodes {
_, err := o.Nodes[i].encodeXDR(xw)
xw.WriteUint32(uint32(len(o.Devices)))
for i := range o.Devices {
_, err := o.Devices[i].encodeXDR(xw)
if err != nil {
return xw.Tot(), err
}
@ -683,33 +683,33 @@ func (o Repository) encodeXDR(xw *xdr.Writer) (int, error) {
return xw.Tot(), xw.Error()
}
func (o *Repository) DecodeXDR(r io.Reader) error {
func (o *Folder) DecodeXDR(r io.Reader) error {
xr := xdr.NewReader(r)
return o.decodeXDR(xr)
}
func (o *Repository) UnmarshalXDR(bs []byte) error {
func (o *Folder) UnmarshalXDR(bs []byte) error {
var br = bytes.NewReader(bs)
var xr = xdr.NewReader(br)
return o.decodeXDR(xr)
}
func (o *Repository) decodeXDR(xr *xdr.Reader) error {
func (o *Folder) decodeXDR(xr *xdr.Reader) error {
o.ID = xr.ReadStringMax(64)
_NodesSize := int(xr.ReadUint32())
if _NodesSize > 64 {
_DevicesSize := int(xr.ReadUint32())
if _DevicesSize > 64 {
return xdr.ErrElementSizeExceeded
}
o.Nodes = make([]Node, _NodesSize)
for i := range o.Nodes {
(&o.Nodes[i]).decodeXDR(xr)
o.Devices = make([]Device, _DevicesSize)
for i := range o.Devices {
(&o.Devices[i]).decodeXDR(xr)
}
return xr.Error()
}
/*
Node Structure:
Device Structure:
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
@ -728,7 +728,7 @@ Node Structure:
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
struct Node {
struct Device {
opaque ID<32>;
unsigned int Flags;
unsigned hyper MaxLocalVersion;
@ -736,23 +736,23 @@ struct Node {
*/
func (o Node) EncodeXDR(w io.Writer) (int, error) {
func (o Device) EncodeXDR(w io.Writer) (int, error) {
var xw = xdr.NewWriter(w)
return o.encodeXDR(xw)
}
func (o Node) MarshalXDR() []byte {
func (o Device) MarshalXDR() []byte {
return o.AppendXDR(make([]byte, 0, 128))
}
func (o Node) AppendXDR(bs []byte) []byte {
func (o Device) AppendXDR(bs []byte) []byte {
var aw = xdr.AppendWriter(bs)
var xw = xdr.NewWriter(&aw)
o.encodeXDR(xw)
return []byte(aw)
}
func (o Node) encodeXDR(xw *xdr.Writer) (int, error) {
func (o Device) encodeXDR(xw *xdr.Writer) (int, error) {
if len(o.ID) > 32 {
return xw.Tot(), xdr.ErrElementSizeExceeded
}
@ -762,18 +762,18 @@ func (o Node) encodeXDR(xw *xdr.Writer) (int, error) {
return xw.Tot(), xw.Error()
}
func (o *Node) DecodeXDR(r io.Reader) error {
func (o *Device) DecodeXDR(r io.Reader) error {
xr := xdr.NewReader(r)
return o.decodeXDR(xr)
}
func (o *Node) UnmarshalXDR(bs []byte) error {
func (o *Device) UnmarshalXDR(bs []byte) error {
var br = bytes.NewReader(bs)
var xr = xdr.NewReader(br)
return o.decodeXDR(xr)
}
func (o *Node) decodeXDR(xr *xdr.Reader) error {
func (o *Device) decodeXDR(xr *xdr.Reader) error {
o.ID = xr.ReadBytesMax(32)
o.Flags = xr.ReadUint32()
o.MaxLocalVersion = xr.ReadUint64()

View File

@ -14,29 +14,29 @@ type nativeModel struct {
next Model
}
func (m nativeModel) Index(nodeID NodeID, repo string, files []FileInfo) {
func (m nativeModel) Index(deviceID DeviceID, folder string, files []FileInfo) {
for i := range files {
files[i].Name = norm.NFD.String(files[i].Name)
}
m.next.Index(nodeID, repo, files)
m.next.Index(deviceID, folder, files)
}
func (m nativeModel) IndexUpdate(nodeID NodeID, repo string, files []FileInfo) {
func (m nativeModel) IndexUpdate(deviceID DeviceID, folder string, files []FileInfo) {
for i := range files {
files[i].Name = norm.NFD.String(files[i].Name)
}
m.next.IndexUpdate(nodeID, repo, files)
m.next.IndexUpdate(deviceID, folder, files)
}
func (m nativeModel) Request(nodeID NodeID, repo string, name string, offset int64, size int) ([]byte, error) {
func (m nativeModel) Request(deviceID DeviceID, folder string, name string, offset int64, size int) ([]byte, error) {
name = norm.NFD.String(name)
return m.next.Request(nodeID, repo, name, offset, size)
return m.next.Request(deviceID, folder, name, offset, size)
}
func (m nativeModel) ClusterConfig(nodeID NodeID, config ClusterConfigMessage) {
m.next.ClusterConfig(nodeID, config)
func (m nativeModel) ClusterConfig(deviceID DeviceID, config ClusterConfigMessage) {
m.next.ClusterConfig(deviceID, config)
}
func (m nativeModel) Close(nodeID NodeID, err error) {
m.next.Close(nodeID, err)
func (m nativeModel) Close(deviceID DeviceID, err error) {
m.next.Close(deviceID, err)
}

View File

@ -12,22 +12,22 @@ type nativeModel struct {
next Model
}
func (m nativeModel) Index(nodeID NodeID, repo string, files []FileInfo) {
m.next.Index(nodeID, repo, files)
func (m nativeModel) Index(deviceID DeviceID, folder string, files []FileInfo) {
m.next.Index(deviceID, folder, files)
}
func (m nativeModel) IndexUpdate(nodeID NodeID, repo string, files []FileInfo) {
m.next.IndexUpdate(nodeID, repo, files)
func (m nativeModel) IndexUpdate(deviceID DeviceID, folder string, files []FileInfo) {
m.next.IndexUpdate(deviceID, folder, files)
}
func (m nativeModel) Request(nodeID NodeID, repo string, name string, offset int64, size int) ([]byte, error) {
return m.next.Request(nodeID, repo, name, offset, size)
func (m nativeModel) Request(deviceID DeviceID, folder string, name string, offset int64, size int) ([]byte, error) {
return m.next.Request(deviceID, folder, name, offset, size)
}
func (m nativeModel) ClusterConfig(nodeID NodeID, config ClusterConfigMessage) {
m.next.ClusterConfig(nodeID, config)
func (m nativeModel) ClusterConfig(deviceID DeviceID, config ClusterConfigMessage) {
m.next.ClusterConfig(deviceID, config)
}
func (m nativeModel) Close(nodeID NodeID, err error) {
m.next.Close(nodeID, err)
func (m nativeModel) Close(deviceID DeviceID, err error) {
m.next.Close(deviceID, err)
}

View File

@ -26,7 +26,7 @@ type nativeModel struct {
next Model
}
func (m nativeModel) Index(nodeID NodeID, repo string, files []FileInfo) {
func (m nativeModel) Index(deviceID DeviceID, folder string, files []FileInfo) {
for i, f := range files {
if strings.ContainsAny(f.Name, disallowedCharacters) {
if f.IsDeleted() {
@ -39,10 +39,10 @@ func (m nativeModel) Index(nodeID NodeID, repo string, files []FileInfo) {
}
files[i].Name = filepath.FromSlash(f.Name)
}
m.next.Index(nodeID, repo, files)
m.next.Index(deviceID, folder, files)
}
func (m nativeModel) IndexUpdate(nodeID NodeID, repo string, files []FileInfo) {
func (m nativeModel) IndexUpdate(deviceID DeviceID, folder string, files []FileInfo) {
for i, f := range files {
if strings.ContainsAny(f.Name, disallowedCharacters) {
if f.IsDeleted() {
@ -55,18 +55,18 @@ func (m nativeModel) IndexUpdate(nodeID NodeID, repo string, files []FileInfo) {
}
files[i].Name = filepath.FromSlash(files[i].Name)
}
m.next.IndexUpdate(nodeID, repo, files)
m.next.IndexUpdate(deviceID, folder, files)
}
func (m nativeModel) Request(nodeID NodeID, repo string, name string, offset int64, size int) ([]byte, error) {
func (m nativeModel) Request(deviceID DeviceID, folder string, name string, offset int64, size int) ([]byte, error) {
name = filepath.FromSlash(name)
return m.next.Request(nodeID, repo, name, offset, size)
return m.next.Request(deviceID, folder, name, offset, size)
}
func (m nativeModel) ClusterConfig(nodeID NodeID, config ClusterConfigMessage) {
m.next.ClusterConfig(nodeID, config)
func (m nativeModel) ClusterConfig(deviceID DeviceID, config ClusterConfigMessage) {
m.next.ClusterConfig(deviceID, config)
}
func (m nativeModel) Close(nodeID NodeID, err error) {
m.next.Close(nodeID, err)
func (m nativeModel) Close(deviceID DeviceID, err error) {
m.next.Close(deviceID, err)
}

View File

@ -57,30 +57,30 @@ var (
)
type Model interface {
// An index was received from the peer node
Index(nodeID NodeID, repo string, files []FileInfo)
// An index update was received from the peer node
IndexUpdate(nodeID NodeID, repo string, files []FileInfo)
// A request was made by the peer node
Request(nodeID NodeID, repo string, name string, offset int64, size int) ([]byte, error)
// An index was received from the peer device
Index(deviceID DeviceID, folder string, files []FileInfo)
// An index update was received from the peer device
IndexUpdate(deviceID DeviceID, folder string, files []FileInfo)
// A request was made by the peer device
Request(deviceID DeviceID, folder string, name string, offset int64, size int) ([]byte, error)
// A cluster configuration message was received
ClusterConfig(nodeID NodeID, config ClusterConfigMessage)
// The peer node closed the connection
Close(nodeID NodeID, err error)
ClusterConfig(deviceID DeviceID, config ClusterConfigMessage)
// The peer device closed the connection
Close(deviceID DeviceID, err error)
}
type Connection interface {
ID() NodeID
ID() DeviceID
Name() string
Index(repo string, files []FileInfo) error
IndexUpdate(repo string, files []FileInfo) error
Request(repo string, name string, offset int64, size int) ([]byte, error)
Index(folder string, files []FileInfo) error
IndexUpdate(folder string, files []FileInfo) error
Request(folder string, name string, offset int64, size int) ([]byte, error)
ClusterConfig(config ClusterConfigMessage)
Statistics() Statistics
}
type rawConnection struct {
id NodeID
id DeviceID
name string
receiver Model
state int
@ -123,7 +123,7 @@ const (
pingIdleTime = 60 * time.Second
)
func NewConnection(nodeID NodeID, reader io.Reader, writer io.Writer, receiver Model, name string, compress bool) Connection {
func NewConnection(deviceID DeviceID, reader io.Reader, writer io.Writer, receiver Model, name string, compress bool) Connection {
cr := &countingReader{Reader: reader}
cw := &countingWriter{Writer: writer}
@ -132,7 +132,7 @@ func NewConnection(nodeID NodeID, reader io.Reader, writer io.Writer, receiver M
compThres = 128 // compress messages that are 128 bytes long or larger
}
c := rawConnection{
id: nodeID,
id: deviceID,
name: name,
receiver: nativeModel{receiver},
state: stateInitial,
@ -152,7 +152,7 @@ func NewConnection(nodeID NodeID, reader io.Reader, writer io.Writer, receiver M
return wireFormatConnection{&c}
}
func (c *rawConnection) ID() NodeID {
func (c *rawConnection) ID() DeviceID {
return c.id
}
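
A hedged sketch of wiring these pieces together: c is assumed to be an established, authenticated net.Conn, remoteID the peer's DeviceID, and m any implementation of the Model interface above; the folder name is a placeholder:

// Wrap the raw connection in the protocol layer (compression enabled) and
// announce an empty index for the "default" folder.
pc := NewConnection(remoteID, c, c, m, "peer-name", true)
if err := pc.Index("default", nil); err == ErrClosed {
	// the peer already disconnected; nothing more to send
}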
@ -160,34 +160,34 @@ func (c *rawConnection) Name() string {
return c.name
}
// Index writes the list of file information to the connected peer node
func (c *rawConnection) Index(repo string, idx []FileInfo) error {
// Index writes the list of file information to the connected peer device
func (c *rawConnection) Index(folder string, idx []FileInfo) error {
select {
case <-c.closed:
return ErrClosed
default:
}
c.idxMut.Lock()
c.send(-1, messageTypeIndex, IndexMessage{repo, idx})
c.send(-1, messageTypeIndex, IndexMessage{folder, idx})
c.idxMut.Unlock()
return nil
}
// IndexUpdate writes the list of file information to the connected peer node as an update
func (c *rawConnection) IndexUpdate(repo string, idx []FileInfo) error {
// IndexUpdate writes the list of file information to the connected peer device as an update
func (c *rawConnection) IndexUpdate(folder string, idx []FileInfo) error {
select {
case <-c.closed:
return ErrClosed
default:
}
c.idxMut.Lock()
c.send(-1, messageTypeIndexUpdate, IndexMessage{repo, idx})
c.send(-1, messageTypeIndexUpdate, IndexMessage{folder, idx})
c.idxMut.Unlock()
return nil
}
// Request returns the bytes for the specified block after fetching them from the connected peer.
func (c *rawConnection) Request(repo string, name string, offset int64, size int) ([]byte, error) {
func (c *rawConnection) Request(folder string, name string, offset int64, size int) ([]byte, error) {
var id int
select {
case id = <-c.nextID:
@ -203,7 +203,7 @@ func (c *rawConnection) Request(repo string, name string, offset int64, size int
c.awaiting[id] = rc
c.awaitingMut.Unlock()
ok := c.send(id, messageTypeRequest, RequestMessage{repo, name, uint64(offset), uint32(size)})
ok := c.send(id, messageTypeRequest, RequestMessage{folder, name, uint64(offset), uint32(size)})
if !ok {
return nil, ErrClosed
}
@ -399,20 +399,20 @@ func (c *rawConnection) readMessage() (hdr header, msg encodable, err error) {
func (c *rawConnection) handleIndex(im IndexMessage) {
if debug {
l.Debugf("Index(%v, %v, %d files)", c.id, im.Repository, len(im.Files))
l.Debugf("Index(%v, %v, %d files)", c.id, im.Folder, len(im.Files))
}
c.receiver.Index(c.id, im.Repository, im.Files)
c.receiver.Index(c.id, im.Folder, im.Files)
}
func (c *rawConnection) handleIndexUpdate(im IndexMessage) {
if debug {
l.Debugf("queueing IndexUpdate(%v, %v, %d files)", c.id, im.Repository, len(im.Files))
l.Debugf("queueing IndexUpdate(%v, %v, %d files)", c.id, im.Folder, len(im.Files))
}
c.receiver.IndexUpdate(c.id, im.Repository, im.Files)
c.receiver.IndexUpdate(c.id, im.Folder, im.Files)
}
func (c *rawConnection) handleRequest(msgID int, req RequestMessage) {
data, _ := c.receiver.Request(c.id, req.Repository, req.Name, int64(req.Offset), int(req.Size))
data, _ := c.receiver.Request(c.id, req.Folder, req.Name, int64(req.Offset), int(req.Size))
c.send(msgID, messageTypeResponse, ResponseMessage{data})
}
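For orientation, a minimal sketch of how the renamed interface reads from the calling side; the fetchBlock helper below is illustrative only and assumes the protocol.Connection interface implemented by rawConnection above, it is not code from this commit.

    // fetchBlock asks the connected peer for one block of a file in the given
    // folder. Request blocks until the peer responds or the connection is
    // closed, in which case ErrClosed is returned.
    func fetchBlock(conn protocol.Connection, folder, name string, offset int64, size int) ([]byte, error) {
        data, err := conn.Request(folder, name, offset, size)
        if err != nil {
            return nil, err
        }
        return data, nil
    }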

View File

@ -21,8 +21,8 @@ import (
)
var (
c0ID = NewNodeID([]byte{1})
c1ID = NewNodeID([]byte{2})
c0ID = NewDeviceID([]byte{1})
c1ID = NewDeviceID([]byte{2})
)
func TestHeaderFunctions(t *testing.T) {
@ -140,8 +140,8 @@ func TestPingErr(t *testing.T) {
// if string(d) != "response data" {
// t.Fatalf("Incorrect response data %q", string(d))
// }
// if m0.repo != "default" {
// t.Fatalf("Incorrect repo %q", m0.repo)
// if m0.folder != "default" {
// t.Fatalf("Incorrect folder %q", m0.folder)
// }
// if m0.name != "tn" {
// t.Fatalf("Incorrect name %q", m0.name)
@ -240,13 +240,13 @@ func TestClose(t *testing.T) {
func TestElementSizeExceededNested(t *testing.T) {
m := ClusterConfigMessage{
Repositories: []Repository{
Folders: []Folder{
{ID: "longstringlongstringlongstringinglongstringlongstringlonlongstringlongstringlon"},
},
}
_, err := m.EncodeXDR(ioutil.Discard)
if err == nil {
t.Errorf("ID length %d > max 64, but no error", len(m.Repositories[0].ID))
t.Errorf("ID length %d > max 64, but no error", len(m.Folders[0].ID))
}
}

View File

@ -14,7 +14,7 @@ type wireFormatConnection struct {
next Connection
}
func (c wireFormatConnection) ID() NodeID {
func (c wireFormatConnection) ID() DeviceID {
return c.next.ID()
}
@ -22,7 +22,7 @@ func (c wireFormatConnection) Name() string {
return c.next.Name()
}
func (c wireFormatConnection) Index(repo string, fs []FileInfo) error {
func (c wireFormatConnection) Index(folder string, fs []FileInfo) error {
var myFs = make([]FileInfo, len(fs))
copy(myFs, fs)
@ -30,10 +30,10 @@ func (c wireFormatConnection) Index(repo string, fs []FileInfo) error {
myFs[i].Name = norm.NFC.String(filepath.ToSlash(myFs[i].Name))
}
return c.next.Index(repo, myFs)
return c.next.Index(folder, myFs)
}
func (c wireFormatConnection) IndexUpdate(repo string, fs []FileInfo) error {
func (c wireFormatConnection) IndexUpdate(folder string, fs []FileInfo) error {
var myFs = make([]FileInfo, len(fs))
copy(myFs, fs)
@ -41,12 +41,12 @@ func (c wireFormatConnection) IndexUpdate(repo string, fs []FileInfo) error {
myFs[i].Name = norm.NFC.String(filepath.ToSlash(myFs[i].Name))
}
return c.next.IndexUpdate(repo, myFs)
return c.next.IndexUpdate(folder, myFs)
}
func (c wireFormatConnection) Request(repo, name string, offset int64, size int) ([]byte, error) {
func (c wireFormatConnection) Request(folder, name string, offset int64, size int) ([]byte, error) {
name = norm.NFC.String(filepath.ToSlash(name))
return c.next.Request(repo, name, offset, size)
return c.next.Request(folder, name, offset, size)
}
func (c wireFormatConnection) ClusterConfig(config ClusterConfigMessage) {

View File

@ -48,7 +48,7 @@ type CurrentFiler interface {
CurrentFile(name string) protocol.FileInfo
}
// Walk returns the list of files found in the local repository by scanning the
// Walk returns the list of files found in the local folder by scanning the
// file system. Files are blockwise hashed.
func (w *Walker) Walk() (chan protocol.FileInfo, error) {
if debug {

internal/stats/device.go (new executable file, 102 lines)
View File

@ -0,0 +1,102 @@
// Copyright (C) 2014 Jakob Borg and Contributors (see the CONTRIBUTORS file).
// All rights reserved. Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
package stats
import (
"time"
"github.com/syncthing/syncthing/internal/protocol"
"github.com/syndtr/goleveldb/leveldb"
)
const (
deviceStatisticTypeLastSeen = iota
)
var deviceStatisticsTypes = []byte{
deviceStatisticTypeLastSeen,
}
type DeviceStatistics struct {
LastSeen time.Time
}
type DeviceStatisticsReference struct {
db *leveldb.DB
device protocol.DeviceID
}
func NewDeviceStatisticsReference(db *leveldb.DB, device protocol.DeviceID) *DeviceStatisticsReference {
return &DeviceStatisticsReference{
db: db,
device: device,
}
}
func (s *DeviceStatisticsReference) key(stat byte) []byte {
k := make([]byte, 1+1+32)
k[0] = keyTypeDeviceStatistic
k[1] = stat
copy(k[1+1:], s.device[:])
return k
}
func (s *DeviceStatisticsReference) GetLastSeen() time.Time {
value, err := s.db.Get(s.key(deviceStatisticTypeLastSeen), nil)
if err != nil {
if err != leveldb.ErrNotFound {
l.Warnln("DeviceStatisticsReference: Failed loading last seen value for", s.device, ":", err)
}
return time.Unix(0, 0)
}
rtime := time.Time{}
err = rtime.UnmarshalBinary(value)
if err != nil {
l.Warnln("DeviceStatisticsReference: Failed parsing last seen value for", s.device, ":", err)
return time.Unix(0, 0)
}
if debug {
l.Debugln("stats.DeviceStatisticsReference.GetLastSeen:", s.device, rtime)
}
return rtime
}
func (s *DeviceStatisticsReference) WasSeen() {
if debug {
l.Debugln("stats.DeviceStatisticsReference.WasSeen:", s.device)
}
value, err := time.Now().MarshalBinary()
if err != nil {
l.Warnln("DeviceStatisticsReference: Failed serializing last seen value for", s.device, ":", err)
return
}
err = s.db.Put(s.key(deviceStatisticTypeLastSeen), value, nil)
if err != nil {
l.Warnln("Failed serializing last seen value for", s.device, ":", err)
}
}
// Never called, maybe because it is worthwhile to keep the data
// or maybe because we have no easy way of knowing that a device has been removed.
func (s *DeviceStatisticsReference) Delete() error {
for _, stype := range deviceStatisticsTypes {
err := s.db.Delete(s.key(stype), nil)
if debug && err == nil {
l.Debugln("stats.DeviceStatisticsReference.Delete:", s.device, stype)
}
if err != nil && err != leveldb.ErrNotFound {
return err
}
}
return nil
}
func (s *DeviceStatisticsReference) GetStatistics() DeviceStatistics {
return DeviceStatistics{
LastSeen: s.GetLastSeen(),
}
}
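As a rough usage sketch (not part of this commit), the new statistics type could be exercised as below; the database path is a placeholder and the internal/stats import path is inferred from the file location above.

    package main

    import (
        "fmt"
        "log"

        "github.com/syncthing/syncthing/internal/protocol"
        "github.com/syncthing/syncthing/internal/stats"
        "github.com/syndtr/goleveldb/leveldb"
    )

    func main() {
        db, err := leveldb.OpenFile("/tmp/stats-example.db", nil) // placeholder path
        if err != nil {
            log.Fatal(err)
        }
        id, err := protocol.DeviceIDFromString("I6KAH76-66SLLLB-5PFXSOA-UFJCDZC-YAOMLEK-CP2GB32-BV5RQST-3PSROAU")
        if err != nil {
            log.Fatal(err)
        }
        ref := stats.NewDeviceStatisticsReference(db, id)
        ref.WasSeen()                             // records "now" as the last seen time
        fmt.Println(ref.GetStatistics().LastSeen) // reads it back
    }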

View File

@ -6,5 +6,5 @@ package stats
// Same key space as files/leveldb.go keyType* constants
const (
keyTypeNodeStatistic = iota + 30
keyTypeDeviceStatistic = iota + 30
)

View File

@ -1,102 +0,0 @@
// Copyright (C) 2014 Jakob Borg and Contributors (see the CONTRIBUTORS file).
// All rights reserved. Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
package stats
import (
"time"
"github.com/syncthing/syncthing/internal/protocol"
"github.com/syndtr/goleveldb/leveldb"
)
const (
nodeStatisticTypeLastSeen = iota
)
var nodeStatisticsTypes = []byte{
nodeStatisticTypeLastSeen,
}
type NodeStatistics struct {
LastSeen time.Time
}
type NodeStatisticsReference struct {
db *leveldb.DB
node protocol.NodeID
}
func NewNodeStatisticsReference(db *leveldb.DB, node protocol.NodeID) *NodeStatisticsReference {
return &NodeStatisticsReference{
db: db,
node: node,
}
}
func (s *NodeStatisticsReference) key(stat byte) []byte {
k := make([]byte, 1+1+32)
k[0] = keyTypeNodeStatistic
k[1] = stat
copy(k[1+1:], s.node[:])
return k
}
func (s *NodeStatisticsReference) GetLastSeen() time.Time {
value, err := s.db.Get(s.key(nodeStatisticTypeLastSeen), nil)
if err != nil {
if err != leveldb.ErrNotFound {
l.Warnln("NodeStatisticsReference: Failed loading last seen value for", s.node, ":", err)
}
return time.Unix(0, 0)
}
rtime := time.Time{}
err = rtime.UnmarshalBinary(value)
if err != nil {
l.Warnln("NodeStatisticsReference: Failed parsing last seen value for", s.node, ":", err)
return time.Unix(0, 0)
}
if debug {
l.Debugln("stats.NodeStatisticsReference.GetLastSeen:", s.node, rtime)
}
return rtime
}
func (s *NodeStatisticsReference) WasSeen() {
if debug {
l.Debugln("stats.NodeStatisticsReference.WasSeen:", s.node)
}
value, err := time.Now().MarshalBinary()
if err != nil {
l.Warnln("NodeStatisticsReference: Failed serializing last seen value for", s.node, ":", err)
return
}
err = s.db.Put(s.key(nodeStatisticTypeLastSeen), value, nil)
if err != nil {
l.Warnln("Failed serializing last seen value for", s.node, ":", err)
}
}
// Never called, maybe because it's worth while to keep the data
// or maybe because we have no easy way of knowing that a node has been removed.
func (s *NodeStatisticsReference) Delete() error {
for _, stype := range nodeStatisticsTypes {
err := s.db.Delete(s.key(stype), nil)
if debug && err == nil {
l.Debugln("stats.NodeStatisticsReference.Delete:", s.node, stype)
}
if err != nil && err != leveldb.ErrNotFound {
return err
}
}
return nil
}
func (s *NodeStatisticsReference) GetStatistics() NodeStatistics {
return NodeStatistics{
LastSeen: s.GetLastSeen(),
}
}

View File

@ -21,11 +21,11 @@ func init() {
// The type holds our configuration
type Simple struct {
keep int
repoPath string
folderPath string
}
// The constructor function takes a map of parameters and creates the type.
func NewSimple(repoID, repoPath string, params map[string]string) Versioner {
func NewSimple(folderID, folderPath string, params map[string]string) Versioner {
keep, err := strconv.Atoi(params["keep"])
if err != nil {
keep = 5 // A reasonable default
@ -33,7 +33,7 @@ func NewSimple(repoID, repoPath string, params map[string]string) Versioner {
s := Simple{
keep: keep,
repoPath: repoPath,
folderPath: folderPath,
}
if debug {
@ -57,7 +57,7 @@ func (v Simple) Archive(filePath string) error {
}
}
versionsDir := filepath.Join(v.repoPath, ".stversions")
versionsDir := filepath.Join(v.folderPath, ".stversions")
_, err = os.Stat(versionsDir)
if err != nil {
if os.IsNotExist(err) {
@ -76,12 +76,12 @@ func (v Simple) Archive(filePath string) error {
}
file := filepath.Base(filePath)
inRepoPath, err := filepath.Rel(v.repoPath, filepath.Dir(filePath))
inFolderPath, err := filepath.Rel(v.folderPath, filepath.Dir(filePath))
if err != nil {
return err
}
dir := filepath.Join(versionsDir, inRepoPath)
dir := filepath.Join(versionsDir, inFolderPath)
err = os.MkdirAll(dir, 0755)
if err != nil && !os.IsExist(err) {
return err
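To make the path handling above concrete, a small sketch with placeholder paths (imports: path/filepath):

    // With folderPath = "/data/Sync" and filePath = "/data/Sync/docs/a.txt",
    // the same filepath calls as above yield:
    versionsDir := filepath.Join("/data/Sync", ".stversions")                            // /data/Sync/.stversions
    inFolderPath, _ := filepath.Rel("/data/Sync", filepath.Dir("/data/Sync/docs/a.txt")) // docs
    dir := filepath.Join(versionsDir, inFolderPath)                                      // /data/Sync/.stversions/docs

so archived copies of docs/a.txt land under .stversions/docs inside the folder.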

View File

@ -30,7 +30,7 @@ type Interval struct {
type Staggered struct {
versionsPath string
cleanInterval int64
repoPath string
folderPath string
interval [4]Interval
mutex *sync.Mutex
}
@ -83,7 +83,7 @@ func (v Staggered) renameOld() {
}
// The constructor function takes a map of parameters and creates the type.
func NewStaggered(repoID, repoPath string, params map[string]string) Versioner {
func NewStaggered(folderID, folderPath string, params map[string]string) Versioner {
maxAge, err := strconv.ParseInt(params["maxAge"], 10, 0)
if err != nil {
maxAge = 31536000 // Default: ~1 year
@ -93,13 +93,13 @@ func NewStaggered(repoID, repoPath string, params map[string]string) Versioner {
cleanInterval = 3600 // Default: clean once per hour
}
// Use custom path if set, otherwise .stversions in repoPath
// Use custom path if set, otherwise .stversions in folderPath
var versionsDir string
if params["versionsPath"] == "" {
if debug {
l.Debugln("using default dir .stversions")
}
versionsDir = filepath.Join(repoPath, ".stversions")
versionsDir = filepath.Join(folderPath, ".stversions")
} else {
if debug {
l.Debugln("using dir", params["versionsPath"])
@ -111,7 +111,7 @@ func NewStaggered(repoID, repoPath string, params map[string]string) Versioner {
s := Staggered{
versionsPath: versionsDir,
cleanInterval: cleanInterval,
repoPath: repoPath,
folderPath: folderPath,
interval: [4]Interval{
Interval{30, 3600}, // first hour -> 30 sec between versions
Interval{3600, 86400}, // next day -> 1 h between versions
@ -320,12 +320,12 @@ func (v Staggered) Archive(filePath string) error {
}
file := filepath.Base(filePath)
inRepoPath, err := filepath.Rel(v.repoPath, filepath.Dir(filePath))
inFolderPath, err := filepath.Rel(v.folderPath, filepath.Dir(filePath))
if err != nil {
return err
}
dir := filepath.Join(v.versionsPath, inRepoPath)
dir := filepath.Join(v.versionsPath, inFolderPath)
err = os.MkdirAll(dir, 0755)
if err != nil && !os.IsExist(err) {
return err

View File

@ -10,4 +10,4 @@ type Versioner interface {
Archive(filePath string) error
}
var Factories = map[string]func(repoID string, repoDir string, params map[string]string) Versioner{}
var Factories = map[string]func(folderID string, folderDir string, params map[string]string) Versioner{}
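A hedged sketch of how a caller might use this registry; the versioner package qualifier, the folder path and the parameters are illustrative assumptions (imports: log).

    factory, ok := versioner.Factories["simple"] // or "staggered"
    if !ok {
        log.Fatal("unknown versioner type")
    }
    v := factory("default", "/data/Sync", map[string]string{"keep": "5"})
    if err := v.Archive("/data/Sync/docs/a.txt"); err != nil {
        log.Println("archiving failed:", err)
    }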

View File

@ -1,4 +1,4 @@
Node Discovery Protocol v2
Device Discovery Protocol v2
==========================
Mode of Operation
@ -9,19 +9,19 @@ segment (broadcast domain) and "global discovery" performed over the
Internet in general with the support of a well known server.
Local discovery does not use Query packets. Instead Announcement packets
are sent periodically and each participating node keeps a table of the
are sent periodically and each participating device keeps a table of the
announcements it has seen. On multihomed hosts the announcement packets
should be sent on each interface on which syncthing will accept connections.
It is recommended that local discovery Announcement packets are sent on
a 30 to 60 second interval, possibly with forced transmissions when a
previously unknown node is discovered.
previously unknown device is discovered.
Global discovery is made possible by periodically updating a global server
using Announcement packets identical to those transmitted for local
discovery. The node performing discovery will transmit a Query packet to
discovery. The device performing discovery will transmit a Query packet to
the global server and expect an Announcement packet in response. In case
the global server has no knowledge of the queried node ID, there will be
the global server has no knowledge of the queried device ID, there will be
no response. A timeout is to be used to determine lookup failure.
There is no message to unregister from the global server; instead
@ -39,17 +39,17 @@ The Announcement packet has the following structure:
| Magic (0x9D79BC39) |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
/ /
\ Node Structure \
\ Device Structure \
/ /
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Number of Extra Nodes |
| Number of Extra Devices |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
/ /
\ Zero or more Node Structures \
\ Zero or more Device Structures \
/ /
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
Node Structure:
Device Structure:
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
@ -85,11 +85,11 @@ This is the XDR encoding of:
struct Announcement {
unsigned int Magic;
Node This;
Node Extra<>;
Device This;
Device Extra<>;
}
struct Node {
struct Device {
string ID<>;
Address Addresses<>;
}
@ -99,16 +99,16 @@ This is the XDR encoding of:
unsigned short Port;
}
The first Node structure contains information about the sending node.
The following zero or more Extra nodes contain information about other
nodes known to the sending node.
The first Device structure contains information about the sending device.
The following zero or more Extra devices contain information about other
devices known to the sending device.
In the Address structure, the IP field can be of three different kinds:
- A zero length indicates that the IP address should be taken from the
source address of the announcement packet, be it IPv4 or IPv6. The
source address must be a valid unicast address. This is only valid
in the first node structure, not in the list of extras.
in the first device structure, not in the list of extras.
- A four byte length indicates that the address is an IPv4 unicast
address.
@ -123,10 +123,10 @@ The Query packet has the following structure:
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Magic Number (0x2CA856F5) |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Length of Node ID |
| Length of Device ID |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
/ /
\ Node ID (variable length) \
\ Device ID (variable length) \
/ /
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
@ -134,5 +134,5 @@ This is the XDR encoding of:
struct Query {
unsigned int MagicNumber;
string NodeID<>;
string DeviceID<>;
}
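As an informal illustration (not the reference implementation), the Query packet could be serialized as follows; XDR strings are length-prefixed and zero-padded to a multiple of four bytes.

    package main

    import (
        "bytes"
        "encoding/binary"
    )

    // encodeQuery serializes a discovery Query packet as described above.
    func encodeQuery(deviceID string) []byte {
        var buf bytes.Buffer
        binary.Write(&buf, binary.BigEndian, uint32(0x2CA856F5))    // Magic Number
        binary.Write(&buf, binary.BigEndian, uint32(len(deviceID))) // Length of Device ID
        buf.WriteString(deviceID)                                   // Device ID
        if pad := (4 - len(deviceID)%4) % 4; pad > 0 {
            buf.Write(make([]byte, pad)) // XDR padding to 4-byte alignment
        }
        return buf.Bytes()
    }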

View File

@ -4,14 +4,14 @@ Block Exchange Protocol v1
Introduction and Definitions
----------------------------
BEP is used between two or more _nodes_ thus forming a _cluster_. Each
node has one or more _repositories_ of files described by the _local
BEP is used between two or more _devices_ thus forming a _cluster_. Each
device has one or more _folders_ of files described by the _local
model_, containing metadata and block hashes. The local model is sent to
the other nodes in the cluster. The union of all files in the local
the other devices in the cluster. The union of all files in the local
models, with files selected for highest change version, forms the
_global model_. Each node strives to get its repositories in sync with
_global model_. Each device strives to get its folders in sync with
the global model by requesting missing or outdated blocks from the other
nodes in the cluster.
devices in the cluster.
File data is described and transferred in units of _blocks_, each being
128 KiB (131072 bytes) in size.
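For example, the number of blocks that describe a file follows directly from the fixed block size; a quick sketch:

    const BlockSize = 131072 // 128 KiB

    // blockCount returns how many blocks describe a file of the given size;
    // the final block may be shorter than BlockSize.
    func blockCount(fileSize int64) int64 {
        return (fileSize + BlockSize - 1) / BlockSize
    }

    // blockCount(1048576) == 8: a 1 MiB file is described by eight blocks.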
@ -50,7 +50,7 @@ connection. Possibilities include certificates signed by a common
trusted CA, preshared certificates, preshared certificate fingerprints
or certificate pinning combined with some out of band first
verification. The reference implementation uses preshared certificate
fingerprints (SHA-256) referred to as "Node IDs".
fingerprints (SHA-256) referred to as "Device IDs".
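A brief sketch of where such a fingerprint comes from on an established TLS connection (illustrative only; turning the hash into the human-readable Device ID form is out of scope here; imports: crypto/sha256, crypto/tls):

    // deviceFingerprint returns the raw 32-byte fingerprint that a Device ID
    // is derived from, given a connection with a completed handshake.
    func deviceFingerprint(conn *tls.Conn) [32]byte {
        leaf := conn.ConnectionState().PeerCertificates[0] // the peer's certificate
        return sha256.Sum256(leaf.Raw)                     // SHA-256 over the DER bytes
    }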
There is no required order or synchronization among BEP messages except
as noted per message type - any message type may be sent at any time and
@ -158,10 +158,10 @@ Cluster Config messages MUST NOT be sent after the initial exchange.
\ ClientVersion (variable length) \
/ /
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Number of Repositories |
| Number of Folders |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
/ /
\ Zero or more Repository Structures \
\ Zero or more Folder Structures \
/ /
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Number of Options |
@ -172,7 +172,7 @@ Cluster Config messages MUST NOT be sent after the initial exchange.
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
Repository Structure:
Folder Structure:
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
@ -183,15 +183,15 @@ Cluster Config messages MUST NOT be sent after the initial exchange.
\ ID (variable length) \
/ /
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Number of Nodes |
| Number of Devices |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
/ /
\ Zero or more Node Structures \
\ Zero or more Device Structures \
/ /
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
Node Structure:
Device Structure:
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
@ -238,13 +238,13 @@ ClientVersion is "v0.7.2". The ClientVersion field SHOULD follow the
patterns laid out in the [Semantic Versioning](http://semver.org/)
standard.
The Repositories field lists all repositories that will be synchronized
over the current connection. Each repository has a list of participating
Nodes. Each node has an associated Flags field to indicate the sharing
mode of that node for the repository in question. See the discussion on
The Folders field lists all folders that will be synchronized
over the current connection. Each folder has a list of participating
Devices. Each device has an associated Flags field to indicate the sharing
mode of that device for the folder in question. See the discussion on
Sharing Modes.
The Node Flags field contains the following single bit flags:
The Device Flags field contains the following single bit flags:
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
@ -252,40 +252,40 @@ The Node Flags field contains the following single bit flags:
| Reserved |Pri| Reserved |I|R|T|
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- Bit 31 ("T", Trusted) is set for nodes that participate in trusted
- Bit 31 ("T", Trusted) is set for devices that participate in trusted
mode.
- Bit 30 ("R", Read Only) is set for nodes that participate in read
- Bit 30 ("R", Read Only) is set for devices that participate in read
only mode.
- Bit 29 ("I", Introducer) is set for nodes that are trusted as cluster
- Bit 29 ("I", Introducer) is set for devices that are trusted as cluster
introducers.
- Bits 16 through 28 are reserved and MUST be set to zero.
- Bits 14-15 ("Pri) indicate the node's upload priority for this
repository. Possible values are:
- Bits 14-15 ("Pri) indicate the device's upload priority for this
folder. Possible values are:
- 00: The default. Normal priority.
- 01: High priority. Other nodes SHOULD favour requesting files from
this node over nodes with normal or low priority.
- 01: High priority. Other devices SHOULD favour requesting files from
this device over devices with normal or low priority.
- 10: Low priority. Other nodes SHOULD avoid requesting files from
this node when they are available from other nodes.
- 10: Low priority. Other devices SHOULD avoid requesting files from
this device when they are available from other devices.
- 11: Sharing disabled. Other nodes SHOULD NOT request files from
this node.
- 11: Sharing disabled. Other devices SHOULD NOT request files from
this device.
- Bits 0 through 13 are reserved and MUST be set to zero.
Exactly one of the T and R bits MUST be set.
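The constants below are an illustrative reading of the bit layout above, with bit 0 as the most significant bit of the 32-bit flags word; the names are not taken from the reference implementation.

    const (
        FlagTrusted    uint32 = 1 << 0     // bit 31, "T"
        FlagReadOnly   uint32 = 1 << 1     // bit 30, "R"
        FlagIntroducer uint32 = 1 << 2     // bit 29, "I"
        MaskPriority   uint32 = 0x00030000 // bits 14-15, "Pri"
    )

    // shareFlagsValid checks that exactly one of the T and R bits is set.
    func shareFlagsValid(flags uint32) bool {
        return (flags&FlagTrusted != 0) != (flags&FlagReadOnly != 0)
    }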
The per node Max Local Version field contains the highest local file
The per device Max Local Version field contains the highest local file
version number of the files already known to be in the index sent by
this node. If nothing is known about the index of a given node, this
this device. If nothing is known about the index of a given device, this
field MUST be set to zero. When receiving a Cluster Config message with
a non-zero Max Local Version for the local node ID, a node MAY elect to
a non-zero Max Local Version for the local device ID, a device MAY elect to
send an Index Update message containing only files with higher local
version numbers in place of the initial Index message.
@ -295,10 +295,10 @@ items, although it is transmitted in the form of a list of (Key, Value)
pairs, both of string type. Key ID:s are implementation specific. An
implementation MUST ignore unknown keys. An implementation MAY impose
limits on the length of keys and values. The options list may be used to
inform nodes of relevant local configuration options such as rate
limiting or make recommendations about request parallelism, node
priorities, etc. An empty options list is valid for nodes not having any
such information to share. Nodes MAY NOT make any assumptions about
inform devices of relevant local configuration options such as rate
limiting or make recommendations about request parallelism, device
priorities, etc. An empty options list is valid for devices not having any
such information to share. Devices MAY NOT make any assumptions about
peers acting in a specific manner as a result of sent options.
#### XDR
@ -306,16 +306,16 @@ peers acting in a specific manner as a result of sent options.
struct ClusterConfigMessage {
string ClientName<>;
string ClientVersion<>;
Repository Repositories<>;
Folder Folders<>;
Option Options<>;
}
struct Repository {
struct Folder {
string ID<>;
Node Nodes<>;
Device Devices<>;
}
struct Node {
struct Device {
string ID<>;
unsigned int Flags;
unsigned hyper MaxLocalVersion;
@ -329,18 +329,18 @@ peers acting in a specific manner as a result of sent options.
### Index (Type = 1) and Index Update (Type = 6)
The Index and Index Update messages define the contents of the sender's
repository. An Index message represents the full contents of the
repository and thus supersedes any previous index. An Index Update
folder. An Index message represents the full contents of the
folder and thus supersedes any previous index. An Index Update
amends an existing index with new information, not affecting any entries
not included in the message. An Index Update MAY NOT be sent unless
preceded by an Index, unless a non-zero Max Local Version has been
announced for the given repository by the peer node.
announced for the given folder by the peer device.
An Index or Index Update message MUST be sent for each repository
An Index or Index Update message MUST be sent for each folder
included in the Cluster Config message, and MUST be sent before any
other message referring to that repository. A node with no data to
other message referring to that folder. A device with no data to
advertise MUST send an empty Index message (a file list of zero length).
If the repository contents change from non-empty to empty, an empty
If the folder contents change from non-empty to empty, an empty
Index message MUST be sent. There is no response to the Index message.
#### Graphical Representation
@ -350,10 +350,10 @@ Index message MUST be sent. There is no response to the Index message.
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Length of Repository |
| Length of Folder |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
/ /
\ Repository (variable length) \
\ Folder (variable length) \
/ /
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Number of Files |
@ -413,24 +413,24 @@ Index message MUST be sent. There is no response to the Index message.
#### Fields
The Repository field identifies the repository that the index message
pertains to. For single repository implementations the node MAY send an
empty repository ID or use the string "default".
The Folder field identifies the folder that the index message
pertains to. For single folder implementations the device MAY send an
empty folder ID or use the string "default".
The Name is the file name path relative to the repository root. Like all
The Name is the file name path relative to the folder root. Like all
strings in BEP, the Name is always in UTF-8 NFC regardless of operating
system or file system specific conventions. The Name field uses the
slash character ("/") as path separator, regardless of the
implementation's operating system conventions. The combination of
Repository and Name uniquely identifies each file in a cluster.
Folder and Name uniquely identifies each file in a cluster.
The Version field is the value of a cluster wide Lamport clock
indicating when the change was detected. The clock ticks on every
detected and received change. The combination of Repository, Name and
detected and received change. The combination of Folder, Name and
Version uniquely identifies the contents of a file at a given point in
time.
The Local Version field is the value of a node local monotonic clock at
The Local Version field is the value of a device local monotonic clock at
the time of last local database update to a file. The clock ticks on
every local database update.
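A minimal sketch of the cluster-wide Lamport clock behaviour described for the Version field; the type and method names are illustrative rather than those of the reference implementation.

    // clock ticks on every locally detected change and on every change
    // received from a peer, always moving past the highest value seen so far.
    type clock struct{ v uint64 }

    func (c *clock) tick(seen uint64) uint64 {
        if seen > c.v {
            c.v = seen
        }
        c.v++
        return c.v
    }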
@ -471,7 +471,7 @@ The Modified time is expressed as the number of seconds since the Unix
Epoch (1970-01-01 00:00:00 UTC).
In the rare occasion that a file is simultaneously and independently
modified by two nodes in the same cluster and thus end up on the same
modified by two devices in the same cluster and thus ends up on the same
Version number after modification, the Modified field is used as a tie
breaker (higher being better), followed by the hash values of the file
blocks (lower being better).
@ -483,7 +483,7 @@ block which may represent a smaller amount of data.
#### XDR
struct IndexMessage {
string Repository<>;
string Folder<>;
FileInfo Files<>;
}
@ -504,7 +504,7 @@ block which may represent a smaller amount of data.
### Request (Type = 2)
The Request message expresses the desire to receive a data block
corresponding to a part of a certain file in the peer's repository.
corresponding to a part of a certain file in the peer's folder.
#### Graphical Representation
@ -513,10 +513,10 @@ corresponding to a part of a certain file in the peer's repository.
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Length of Repository |
| Length of Folder |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
/ /
\ Repository (variable length) \
\ Folder (variable length) \
/ /
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Length of Name |
@ -534,7 +534,7 @@ corresponding to a part of a certain file in the peer's repository.
#### Fields
The Repository and Name fields are as documented for the Index message.
The Folder and Name fields are as documented for the Index message.
The Offset and Size fields specify the region of the file to be
transferred. This SHOULD equate to exactly one block as seen in an Index
message.
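Concretely, the request parameters for zero-based block number i of a file work out as in the sketch below, using the 128 KiB block size defined earlier.

    // blockParams returns the Offset and Size to request for block i of a
    // file of fileSize bytes (illustrative helper, not from the implementation).
    func blockParams(i int, fileSize int64) (offset int64, size int) {
        offset = int64(i) * BlockSize
        size = BlockSize
        if rem := fileSize - offset; rem < int64(size) {
            size = int(rem) // the final block may be shorter
        }
        return offset, size
    }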
@ -542,7 +542,7 @@ message.
#### XDR
struct RequestMessage {
string Repository<>;
string Folder<>;
string Name<>;
unsigned hyper Offset;
unsigned int Size;
@ -624,20 +624,20 @@ directions.
+------------+ Updates /---------\
| | -----------> / \
| Node | | Cluster |
| Device | | Cluster |
| | <----------- \ /
+------------+ Updates \---------/
### Read Only
In read only mode, a node does not synchronize the local repository to
the cluster, but publishes changes to its local repository contents as
usual. The local repository can be seen as a "master copy" that is never
affected by the actions of other cluster nodes.
In read only mode, a device does not synchronize the local folder to
the cluster, but publishes changes to its local folder contents as
usual. The local folder can be seen as a "master copy" that is never
affected by the actions of other cluster devices.
+------------+ Updates /---------\
| | -----------> / \
| Node | | Cluster |
| Device | | Cluster |
| | \ /
+------------+ \---------/
@ -651,7 +651,7 @@ restrictive than the following:
### Index and Index Update Messages
- Repository: 64 bytes
- Folder: 64 bytes
- Number of Files: 10.000.000
- Name: 1024 bytes
- Number of Blocks: 1.000.000
@ -659,7 +659,7 @@ restrictive than the following:
### Request Messages
- Repository: 64 bytes
- Folder: 64 bytes
- Name: 1024 bytes
### Response Messages
@ -695,8 +695,8 @@ The Index records are received and both peers recompute their knowledge
of the data in the cluster. In this example, peer A has four missing or
outdated blocks. At 2 through 5 peer A sends requests for these blocks.
The requests are received by peer B, who retrieves the data from the
repository and transmits Response records (6 through 9). Node A updates
their repository contents and transmits an Index Update message (10).
folder and transmits Response records (6 through 9). Device A updates
its folder contents and transmits an Index Update message (10).
Both peers enter idle state after 10. At some later time 11, peer A
determines that it has not seen data from B for some time and sends a
Ping request. A response is sent at 12.

View File

@ -1,15 +1,15 @@
<configuration version="2">
<repository id="default" directory="s1" ro="true" ignorePerms="false">
<node id="I6KAH7666SLLL5PFXSOAUFJCDZYAOMLEKCP2GB3BV5RQST3PSROA"></node>
<node id="JMFJCXBGZDE4BOCJE3VF65GYZNAIVJRET3J6HMRAUQIGJOFKNHMQ"></node>
<folder id="default" directory="s1" ro="true" ignorePerms="false">
<device id="I6KAH7666SLLL5PFXSOAUFJCDZYAOMLEKCP2GB3BV5RQST3PSROA"></device>
<device id="JMFJCXBGZDE4BOCJE3VF65GYZNAIVJRET3J6HMRAUQIGJOFKNHMQ"></device>
<versioning></versioning>
</repository>
<node id="I6KAH7666SLLL5PFXSOAUFJCDZYAOMLEKCP2GB3BV5RQST3PSROA" name="f1">
</folder>
<device id="I6KAH7666SLLL5PFXSOAUFJCDZYAOMLEKCP2GB3BV5RQST3PSROA" name="f1">
<address>127.0.0.1:22001</address>
</node>
<node id="JMFJCXBGZDE4BOCJE3VF65GYZNAIVJRET3J6HMRAUQIGJOFKNHMQ" name="f2">
</device>
<device id="JMFJCXBGZDE4BOCJE3VF65GYZNAIVJRET3J6HMRAUQIGJOFKNHMQ" name="f2">
<address>127.0.0.1:22002</address>
</node>
</device>
<gui enabled="true" tls="false">
<address>127.0.0.1:8081</address>
<apikey>abc123</apikey>

View File

@ -1,17 +1,17 @@
<configuration version="2">
<repository id="default" directory="s2" ro="false" ignorePerms="false">
<node id="I6KAH76-66SLLLB-5PFXSOA-UFJCDZC-YAOMLEK-CP2GB32-BV5RQST-3PSROAU"></node>
<node id="JMFJCXB-GZDE4BN-OCJE3VF-65GYZNU-AIVJRET-3J6HMRQ-AUQIGJO-FKNHMQU"></node>
<folder id="default" directory="s2" ro="false" ignorePerms="false">
<device id="I6KAH76-66SLLLB-5PFXSOA-UFJCDZC-YAOMLEK-CP2GB32-BV5RQST-3PSROAU"></device>
<device id="JMFJCXB-GZDE4BN-OCJE3VF-65GYZNU-AIVJRET-3J6HMRQ-AUQIGJO-FKNHMQU"></device>
<versioning type="simple">
<param key="keep" val="5"></param>
</versioning>
</repository>
<node id="I6KAH76-66SLLLB-5PFXSOA-UFJCDZC-YAOMLEK-CP2GB32-BV5RQST-3PSROAU" name="f1">
</folder>
<device id="I6KAH76-66SLLLB-5PFXSOA-UFJCDZC-YAOMLEK-CP2GB32-BV5RQST-3PSROAU" name="f1">
<address>127.0.0.1:22001</address>
</node>
<node id="JMFJCXB-GZDE4BN-OCJE3VF-65GYZNU-AIVJRET-3J6HMRQ-AUQIGJO-FKNHMQU" name="f2">
</device>
<device id="JMFJCXB-GZDE4BN-OCJE3VF-65GYZNU-AIVJRET-3J6HMRQ-AUQIGJO-FKNHMQU" name="f2">
<address>127.0.0.1:22002</address>
</node>
</device>
<gui enabled="true" tls="false">
<address>127.0.0.1:8082</address>
<apikey>abc123</apikey>

View File

@ -1,27 +1,27 @@
<configuration version="4">
<repository id="default" directory="s1" ro="false" rescanIntervalS="10" ignorePerms="false">
<node id="I6KAH76-66SLLLB-5PFXSOA-UFJCDZC-YAOMLEK-CP2GB32-BV5RQST-3PSROAU"></node>
<node id="JMFJCXB-GZDE4BN-OCJE3VF-65GYZNU-AIVJRET-3J6HMRQ-AUQIGJO-FKNHMQU"></node>
<node id="373HSRP-QLPNLIE-JYKZVQF-P4PKZ63-R2ZE6K3-YD442U2-JHBGBQG-WWXAHAU"></node>
<folder id="default" directory="s1" ro="false" rescanIntervalS="10" ignorePerms="false">
<device id="I6KAH76-66SLLLB-5PFXSOA-UFJCDZC-YAOMLEK-CP2GB32-BV5RQST-3PSROAU"></device>
<device id="JMFJCXB-GZDE4BN-OCJE3VF-65GYZNU-AIVJRET-3J6HMRQ-AUQIGJO-FKNHMQU"></device>
<device id="373HSRP-QLPNLIE-JYKZVQF-P4PKZ63-R2ZE6K3-YD442U2-JHBGBQG-WWXAHAU"></device>
<versioning></versioning>
</repository>
<repository id="s12" directory="s12-1" ro="false" rescanIntervalS="10" ignorePerms="false">
<node id="I6KAH76-66SLLLB-5PFXSOA-UFJCDZC-YAOMLEK-CP2GB32-BV5RQST-3PSROAU"></node>
<node id="JMFJCXB-GZDE4BN-OCJE3VF-65GYZNU-AIVJRET-3J6HMRQ-AUQIGJO-FKNHMQU"></node>
</folder>
<folder id="s12" directory="s12-1" ro="false" rescanIntervalS="10" ignorePerms="false">
<device id="I6KAH76-66SLLLB-5PFXSOA-UFJCDZC-YAOMLEK-CP2GB32-BV5RQST-3PSROAU"></device>
<device id="JMFJCXB-GZDE4BN-OCJE3VF-65GYZNU-AIVJRET-3J6HMRQ-AUQIGJO-FKNHMQU"></device>
<versioning></versioning>
</repository>
<node id="EJHMPAQ-OGCVORE-ISB4IS3-SYYVJXF-TKJGLTU-66DIQPF-GJ5D2GX-GQ3OWQK" name="s4" compression="true">
</folder>
<device id="EJHMPAQ-OGCVORE-ISB4IS3-SYYVJXF-TKJGLTU-66DIQPF-GJ5D2GX-GQ3OWQK" name="s4" compression="true">
<address>127.0.0.1:22004</address>
</node>
<node id="I6KAH76-66SLLLB-5PFXSOA-UFJCDZC-YAOMLEK-CP2GB32-BV5RQST-3PSROAU" name="s1" compression="true">
</device>
<device id="I6KAH76-66SLLLB-5PFXSOA-UFJCDZC-YAOMLEK-CP2GB32-BV5RQST-3PSROAU" name="s1" compression="true">
<address>127.0.0.1:22001</address>
</node>
<node id="JMFJCXB-GZDE4BN-OCJE3VF-65GYZNU-AIVJRET-3J6HMRQ-AUQIGJO-FKNHMQU" name="s2" compression="true">
</device>
<device id="JMFJCXB-GZDE4BN-OCJE3VF-65GYZNU-AIVJRET-3J6HMRQ-AUQIGJO-FKNHMQU" name="s2" compression="true">
<address>127.0.0.1:22002</address>
</node>
<node id="373HSRP-QLPNLIE-JYKZVQF-P4PKZ63-R2ZE6K3-YD442U2-JHBGBQG-WWXAHAU" name="s3" compression="true">
</device>
<device id="373HSRP-QLPNLIE-JYKZVQF-P4PKZ63-R2ZE6K3-YD442U2-JHBGBQG-WWXAHAU" name="s3" compression="true">
<address>127.0.0.1:22003</address>
</node>
</device>
<gui enabled="true" tls="false">
<address>127.0.0.1:8081</address>
<user>testuser</user>

View File

@ -1,29 +1,29 @@
<configuration version="4">
<repository id="default" directory="s2" ro="false" rescanIntervalS="15" ignorePerms="false">
<node id="I6KAH76-66SLLLB-5PFXSOA-UFJCDZC-YAOMLEK-CP2GB32-BV5RQST-3PSROAU"></node>
<node id="JMFJCXB-GZDE4BN-OCJE3VF-65GYZNU-AIVJRET-3J6HMRQ-AUQIGJO-FKNHMQU"></node>
<node id="373HSRP-QLPNLIE-JYKZVQF-P4PKZ63-R2ZE6K3-YD442U2-JHBGBQG-WWXAHAU"></node>
<folder id="default" directory="s2" ro="false" rescanIntervalS="15" ignorePerms="false">
<device id="I6KAH76-66SLLLB-5PFXSOA-UFJCDZC-YAOMLEK-CP2GB32-BV5RQST-3PSROAU"></device>
<device id="JMFJCXB-GZDE4BN-OCJE3VF-65GYZNU-AIVJRET-3J6HMRQ-AUQIGJO-FKNHMQU"></device>
<device id="373HSRP-QLPNLIE-JYKZVQF-P4PKZ63-R2ZE6K3-YD442U2-JHBGBQG-WWXAHAU"></device>
<versioning></versioning>
</repository>
<repository id="s12" directory="s12-2" ro="false" rescanIntervalS="15" ignorePerms="false">
<node id="I6KAH76-66SLLLB-5PFXSOA-UFJCDZC-YAOMLEK-CP2GB32-BV5RQST-3PSROAU"></node>
<node id="JMFJCXB-GZDE4BN-OCJE3VF-65GYZNU-AIVJRET-3J6HMRQ-AUQIGJO-FKNHMQU"></node>
</folder>
<folder id="s12" directory="s12-2" ro="false" rescanIntervalS="15" ignorePerms="false">
<device id="I6KAH76-66SLLLB-5PFXSOA-UFJCDZC-YAOMLEK-CP2GB32-BV5RQST-3PSROAU"></device>
<device id="JMFJCXB-GZDE4BN-OCJE3VF-65GYZNU-AIVJRET-3J6HMRQ-AUQIGJO-FKNHMQU"></device>
<versioning></versioning>
</repository>
<repository id="s23" directory="s23-2" ro="false" rescanIntervalS="15" ignorePerms="false">
<node id="JMFJCXB-GZDE4BN-OCJE3VF-65GYZNU-AIVJRET-3J6HMRQ-AUQIGJO-FKNHMQU"></node>
<node id="373HSRP-QLPNLIE-JYKZVQF-P4PKZ63-R2ZE6K3-YD442U2-JHBGBQG-WWXAHAU"></node>
</folder>
<folder id="s23" directory="s23-2" ro="false" rescanIntervalS="15" ignorePerms="false">
<device id="JMFJCXB-GZDE4BN-OCJE3VF-65GYZNU-AIVJRET-3J6HMRQ-AUQIGJO-FKNHMQU"></device>
<device id="373HSRP-QLPNLIE-JYKZVQF-P4PKZ63-R2ZE6K3-YD442U2-JHBGBQG-WWXAHAU"></device>
<versioning></versioning>
</repository>
<node id="I6KAH76-66SLLLB-5PFXSOA-UFJCDZC-YAOMLEK-CP2GB32-BV5RQST-3PSROAU" name="s1" compression="true">
</folder>
<device id="I6KAH76-66SLLLB-5PFXSOA-UFJCDZC-YAOMLEK-CP2GB32-BV5RQST-3PSROAU" name="s1" compression="true">
<address>127.0.0.1:22001</address>
</node>
<node id="JMFJCXB-GZDE4BN-OCJE3VF-65GYZNU-AIVJRET-3J6HMRQ-AUQIGJO-FKNHMQU" name="s2" compression="true">
</device>
<device id="JMFJCXB-GZDE4BN-OCJE3VF-65GYZNU-AIVJRET-3J6HMRQ-AUQIGJO-FKNHMQU" name="s2" compression="true">
<address>127.0.0.1:22002</address>
</node>
<node id="373HSRP-QLPNLIE-JYKZVQF-P4PKZ63-R2ZE6K3-YD442U2-JHBGBQG-WWXAHAU" name="s3" compression="true">
</device>
<device id="373HSRP-QLPNLIE-JYKZVQF-P4PKZ63-R2ZE6K3-YD442U2-JHBGBQG-WWXAHAU" name="s3" compression="true">
<address>127.0.0.1:22003</address>
</node>
</device>
<gui enabled="true" tls="false">
<address>127.0.0.1:8082</address>
<apikey>abc123</apikey>

View File

@ -1,26 +1,26 @@
<configuration version="4">
<repository id="s23" directory="s23-3" ro="false" rescanIntervalS="20" ignorePerms="false">
<node id="JMFJCXB-GZDE4BN-OCJE3VF-65GYZNU-AIVJRET-3J6HMRQ-AUQIGJO-FKNHMQU"></node>
<node id="373HSRP-QLPNLIE-JYKZVQF-P4PKZ63-R2ZE6K3-YD442U2-JHBGBQG-WWXAHAU"></node>
<folder id="s23" directory="s23-3" ro="false" rescanIntervalS="20" ignorePerms="false">
<device id="JMFJCXB-GZDE4BN-OCJE3VF-65GYZNU-AIVJRET-3J6HMRQ-AUQIGJO-FKNHMQU"></device>
<device id="373HSRP-QLPNLIE-JYKZVQF-P4PKZ63-R2ZE6K3-YD442U2-JHBGBQG-WWXAHAU"></device>
<versioning></versioning>
</repository>
<repository id="default" directory="s3" ro="false" rescanIntervalS="20" ignorePerms="false">
<node id="I6KAH76-66SLLLB-5PFXSOA-UFJCDZC-YAOMLEK-CP2GB32-BV5RQST-3PSROAU"></node>
<node id="JMFJCXB-GZDE4BN-OCJE3VF-65GYZNU-AIVJRET-3J6HMRQ-AUQIGJO-FKNHMQU"></node>
<node id="373HSRP-QLPNLIE-JYKZVQF-P4PKZ63-R2ZE6K3-YD442U2-JHBGBQG-WWXAHAU"></node>
</folder>
<folder id="default" directory="s3" ro="false" rescanIntervalS="20" ignorePerms="false">
<device id="I6KAH76-66SLLLB-5PFXSOA-UFJCDZC-YAOMLEK-CP2GB32-BV5RQST-3PSROAU"></device>
<device id="JMFJCXB-GZDE4BN-OCJE3VF-65GYZNU-AIVJRET-3J6HMRQ-AUQIGJO-FKNHMQU"></device>
<device id="373HSRP-QLPNLIE-JYKZVQF-P4PKZ63-R2ZE6K3-YD442U2-JHBGBQG-WWXAHAU"></device>
<versioning type="simple">
<param key="keep" val="5"></param>
</versioning>
</repository>
<node id="I6KAH76-66SLLLB-5PFXSOA-UFJCDZC-YAOMLEK-CP2GB32-BV5RQST-3PSROAU" name="s1" compression="true">
</folder>
<device id="I6KAH76-66SLLLB-5PFXSOA-UFJCDZC-YAOMLEK-CP2GB32-BV5RQST-3PSROAU" name="s1" compression="true">
<address>127.0.0.1:22001</address>
</node>
<node id="JMFJCXB-GZDE4BN-OCJE3VF-65GYZNU-AIVJRET-3J6HMRQ-AUQIGJO-FKNHMQU" name="s2" compression="true">
</device>
<device id="JMFJCXB-GZDE4BN-OCJE3VF-65GYZNU-AIVJRET-3J6HMRQ-AUQIGJO-FKNHMQU" name="s2" compression="true">
<address>127.0.0.1:22002</address>
</node>
<node id="373HSRP-QLPNLIE-JYKZVQF-P4PKZ63-R2ZE6K3-YD442U2-JHBGBQG-WWXAHAU" name="s3" compression="true">
</device>
<device id="373HSRP-QLPNLIE-JYKZVQF-P4PKZ63-R2ZE6K3-YD442U2-JHBGBQG-WWXAHAU" name="s3" compression="true">
<address>127.0.0.1:22003</address>
</node>
</device>
<gui enabled="true" tls="false">
<address>127.0.0.1:8083</address>
<apikey>abc123</apikey>

View File

@ -1,28 +1,28 @@
<configuration version="4">
<repository id="unique" directory="s4" ro="false" rescanIntervalS="60" ignorePerms="false">
<node id="EJHMPAQ-OGCVORE-ISB4IS3-SYYVJXF-TKJGLTU-66DIQPF-GJ5D2GX-GQ3OWQK"></node>
<node id="I6KAH76-66SLLLB-5PFXSOA-UFJCDZC-YAOMLEK-CP2GB32-BV5RQST-3PSROAU"></node>
<node id="JMFJCXB-GZDE4BN-OCJE3VF-65GYZNU-AIVJRET-3J6HMRQ-AUQIGJO-FKNHMQU"></node>
<node id="373HSRP-QLPNLIE-JYKZVQF-P4PKZ63-R2ZE6K3-YD442U2-JHBGBQG-WWXAHAU"></node>
<folder id="unique" directory="s4" ro="false" rescanIntervalS="60" ignorePerms="false">
<device id="EJHMPAQ-OGCVORE-ISB4IS3-SYYVJXF-TKJGLTU-66DIQPF-GJ5D2GX-GQ3OWQK"></device>
<device id="I6KAH76-66SLLLB-5PFXSOA-UFJCDZC-YAOMLEK-CP2GB32-BV5RQST-3PSROAU"></device>
<device id="JMFJCXB-GZDE4BN-OCJE3VF-65GYZNU-AIVJRET-3J6HMRQ-AUQIGJO-FKNHMQU"></device>
<device id="373HSRP-QLPNLIE-JYKZVQF-P4PKZ63-R2ZE6K3-YD442U2-JHBGBQG-WWXAHAU"></device>
<versioning></versioning>
</repository>
<repository id="default" directory="s4d" ro="false" rescanIntervalS="60" ignorePerms="false">
<node id="EJHMPAQ-OGCVORE-ISB4IS3-SYYVJXF-TKJGLTU-66DIQPF-GJ5D2GX-GQ3OWQK"></node>
<node id="I6KAH76-66SLLLB-5PFXSOA-UFJCDZC-YAOMLEK-CP2GB32-BV5RQST-3PSROAU"></node>
</folder>
<folder id="default" directory="s4d" ro="false" rescanIntervalS="60" ignorePerms="false">
<device id="EJHMPAQ-OGCVORE-ISB4IS3-SYYVJXF-TKJGLTU-66DIQPF-GJ5D2GX-GQ3OWQK"></device>
<device id="I6KAH76-66SLLLB-5PFXSOA-UFJCDZC-YAOMLEK-CP2GB32-BV5RQST-3PSROAU"></device>
<versioning></versioning>
</repository>
<node id="I6KAH76-66SLLLB-5PFXSOA-UFJCDZC-YAOMLEK-CP2GB32-BV5RQST-3PSROAU" name="s1" compression="true">
</folder>
<device id="I6KAH76-66SLLLB-5PFXSOA-UFJCDZC-YAOMLEK-CP2GB32-BV5RQST-3PSROAU" name="s1" compression="true">
<address>127.0.0.1:22001</address>
</node>
<node id="JMFJCXB-GZDE4BN-OCJE3VF-65GYZNU-AIVJRET-3J6HMRQ-AUQIGJO-FKNHMQU" name="s2" compression="true">
</device>
<device id="JMFJCXB-GZDE4BN-OCJE3VF-65GYZNU-AIVJRET-3J6HMRQ-AUQIGJO-FKNHMQU" name="s2" compression="true">
<address>127.0.0.1:22002</address>
</node>
<node id="373HSRP-QLPNLIE-JYKZVQF-P4PKZ63-R2ZE6K3-YD442U2-JHBGBQG-WWXAHAU" name="s3" compression="true">
</device>
<device id="373HSRP-QLPNLIE-JYKZVQF-P4PKZ63-R2ZE6K3-YD442U2-JHBGBQG-WWXAHAU" name="s3" compression="true">
<address>127.0.0.1:22003</address>
</node>
<node id="EJHMPAQ-OGCVORE-ISB4IS3-SYYVJXF-TKJGLTU-66DIQPF-GJ5D2GX-GQ3OWQK" name="s4" compression="true">
</device>
<device id="EJHMPAQ-OGCVORE-ISB4IS3-SYYVJXF-TKJGLTU-66DIQPF-GJ5D2GX-GQ3OWQK" name="s4" compression="true">
<address>dynamic</address>
</node>
</device>
<gui enabled="true" tls="false">
<address>127.0.0.1:8084</address>
<apikey>abc123</apikey>

View File

@ -29,16 +29,16 @@ var (
)
var jsonEndpoints = []string{
"/rest/completion?node=I6KAH76-66SLLLB-5PFXSOA-UFJCDZC-YAOMLEK-CP2GB32-BV5RQST-3PSROAU&repo=default",
"/rest/completion?device=I6KAH76-66SLLLB-5PFXSOA-UFJCDZC-YAOMLEK-CP2GB32-BV5RQST-3PSROAU&folder=default",
"/rest/config",
"/rest/config/sync",
"/rest/connections",
"/rest/errors",
"/rest/events",
"/rest/lang",
"/rest/model?repo=default",
"/rest/model?folder=default",
"/rest/need",
"/rest/nodeid?id=I6KAH7666SLLLB5PFXSOAUFJCDZCYAOMLEKCP2GB32BV5RQST3PSROAU",
"/rest/deviceid?id=I6KAH7666SLLLB5PFXSOAUFJCDZCYAOMLEKCP2GB32BV5RQST3PSROAU",
"/rest/report",
"/rest/system",
}
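For illustration, the renamed completion endpoint can be queried from Go as sketched below; the listen address and API key match the test configurations above, and the key goes in the X-API-Key header (imports: net/http).

    // queryCompletion is an illustrative helper, not part of the test suite.
    func queryCompletion() (*http.Response, error) {
        id := "I6KAH76-66SLLLB-5PFXSOA-UFJCDZC-YAOMLEK-CP2GB32-BV5RQST-3PSROAU"
        req, err := http.NewRequest("GET",
            "http://127.0.0.1:8081/rest/completion?device="+id+"&folder=default", nil)
        if err != nil {
            return nil, err
        }
        req.Header.Set("X-API-Key", "abc123") // GUI API key from the configs above
        return http.DefaultClient.Do(req)
    }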

View File

@ -55,7 +55,7 @@ testConvergence() {
tot=$(($s1comp + $s2comp))
echo $tot / 200
if [[ $tot == 200 ]] ; then
# when fixing up directories, a node will announce completion
# when fixing up directories, a device will announce completion
# slightly before it's actually complete. this is arguably a bug,
# but we let it slide for the moment as long as it gets there
# eventually.
@ -71,7 +71,7 @@ testConvergence() {
popd >/dev/null
if ! cmp dirs-1 dirs-2 ; then
echo Repos differ
echo Folders differ
stop
exit 1
fi

View File

@ -74,7 +74,7 @@ loop:
for _, ev := range evs {
if ev.Type == "StateChanged" {
data := ev.Data.(map[string]interface{})
if data["repo"].(string) != "default" {
if data["folder"].(string) != "default" {
continue
}
log.Println(ev)