Implement IgnorePerms

commit 8356b58b1d
parent 303ce02271

(The diff for one file is suppressed because one or more lines are too long.)
@@ -252,8 +252,8 @@ func main() {
         if repo.Invalid != "" {
             continue
         }
-        dir := expandTilde(repo.Directory)
-        m.AddRepo(repo.ID, dir, repo.Nodes)
+        repo.Directory = expandTilde(repo.Directory)
+        m.AddRepo(repo)
     }

     // GUI
@@ -27,12 +27,13 @@ type Configuration struct {
 }

 type RepositoryConfiguration struct {
     ID          string              `xml:"id,attr"`
     Directory   string              `xml:"directory,attr"`
     Nodes       []NodeConfiguration `xml:"node"`
     ReadOnly    bool                `xml:"ro,attr"`
+    IgnorePerms bool                `xml:"ignorePerms,attr"`
     Invalid     string              `xml:"-"` // Set at runtime when there is an error, not saved
     nodeIDs     []string
 }

 func (r *RepositoryConfiguration) NodeIDs() []string {
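For reference, a minimal self-contained sketch of how a repository element carrying the new ignorePerms attribute unmarshals into this struct. The sample XML, the trimmed struct copy and the main function below are assumptions for illustration, not part of the repository:

package main

import (
	"encoding/xml"
	"fmt"
)

// Trimmed copy of RepositoryConfiguration; only XML-visible fields are kept.
type RepositoryConfiguration struct {
	ID          string `xml:"id,attr"`
	Directory   string `xml:"directory,attr"`
	ReadOnly    bool   `xml:"ro,attr"`
	IgnorePerms bool   `xml:"ignorePerms,attr"`
}

func main() {
	// A repository element as it could appear in the configuration file.
	data := []byte(`<repository id="default" directory="~/Sync" ro="false" ignorePerms="true"/>`)

	var repo RepositoryConfiguration
	if err := xml.Unmarshal(data, &repo); err != nil {
		panic(err)
	}
	fmt.Println(repo.ID, repo.IgnorePerms) // default true
}

A config written before this change simply has no ignorePerms attribute, so the field stays at its zero value (false) and the previous behaviour is kept.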
@@ -166,6 +166,13 @@
             <span ng-if="!repo.ReadOnly">No</span>
           </td>
         </tr>
+        <tr>
+          <th><span class="glyphicon glyphicon-unchecked"></span> Ignore Permissions</th>
+          <td class="text-right">
+            <span ng-if="repo.IgnorePerms">Yes</span>
+            <span ng-if="!repo.IgnorePerms">No</span>
+          </td>
+        </tr>
         <tr>
           <th><span class="glyphicon glyphicon-share-alt"></span> Shared With</th>
           <td class="text-right">
@@ -477,6 +484,14 @@
         </div>
         <p class="help-block">Files are protected from changes made on other nodes, but changes made on <em>this</em> node will be sent to the rest of the cluster.</p>
       </div>
+      <div class="form-group">
+        <div class="checkbox">
+          <label>
+            <input type="checkbox" ng-model="currentRepo.IgnorePerms"> Ignore Permissions
+          </label>
+        </div>
+        <p class="help-block">File permission bits are ignored when looking for changes. Use on FAT filesystems.</p>
+      </div>
       <div class="form-group">
         <label for="nodes">Nodes</label>
         <div class="checkbox" ng-repeat="node in otherNodes()">
@@ -43,12 +43,12 @@ type Model struct {
     clientName    string
     clientVersion string

-    repoDirs   map[string]string      // repo -> dir
+    repoCfgs   map[string]config.RepositoryConfiguration // repo -> cfg
     repoFiles  map[string]*files.Set  // repo -> files
     repoNodes  map[string][]string    // repo -> nodeIDs
     nodeRepos  map[string][]string    // nodeID -> repos
     suppressor map[string]*suppressor // repo -> suppressor
     rmut       sync.RWMutex           // protects the above

     repoState map[string]repoState // repo -> state
     smut      sync.RWMutex
@@ -80,7 +80,7 @@ func NewModel(indexDir string, cfg *config.Configuration, clientName, clientVers
         cfg:           cfg,
         clientName:    clientName,
         clientVersion: clientVersion,
-        repoDirs:      make(map[string]string),
+        repoCfgs:      make(map[string]config.RepositoryConfiguration),
         repoFiles:     make(map[string]*files.Set),
         repoNodes:     make(map[string][]string),
         nodeRepos:     make(map[string][]string),
@@ -104,10 +104,10 @@ func (m *Model) StartRepoRW(repo string, threads int) {
     m.rmut.RLock()
     defer m.rmut.RUnlock()

-    if dir, ok := m.repoDirs[repo]; !ok {
+    if cfg, ok := m.repoCfgs[repo]; !ok {
         panic("cannot start without repo")
     } else {
-        newPuller(repo, dir, m, threads, m.cfg)
+        newPuller(cfg, m, threads, m.cfg)
     }
 }

@@ -386,7 +386,7 @@ func (m *Model) Request(nodeID, repo, name string, offset int64, size int) ([]by
         l.Debugf("REQ(in): %s: %q / %q o=%d s=%d", nodeID, repo, name, offset, size)
     }
     m.rmut.RLock()
-    fn := filepath.Join(m.repoDirs[repo], name)
+    fn := filepath.Join(m.repoCfgs[repo].Directory, name)
     m.rmut.RUnlock()
     fd, err := os.Open(fn) // XXX: Inefficient, should cache fd?
     if err != nil {
@@ -582,23 +582,23 @@ func (m *Model) broadcastIndexLoop() {
     }
 }

-func (m *Model) AddRepo(id, dir string, nodes []config.NodeConfiguration) {
+func (m *Model) AddRepo(cfg config.RepositoryConfiguration) {
     if m.started {
         panic("cannot add repo to started model")
     }
-    if len(id) == 0 {
+    if len(cfg.ID) == 0 {
         panic("cannot add empty repo id")
     }

     m.rmut.Lock()
-    m.repoDirs[id] = dir
-    m.repoFiles[id] = files.NewSet()
-    m.suppressor[id] = &suppressor{threshold: int64(m.cfg.Options.MaxChangeKbps)}
+    m.repoCfgs[cfg.ID] = cfg
+    m.repoFiles[cfg.ID] = files.NewSet()
+    m.suppressor[cfg.ID] = &suppressor{threshold: int64(m.cfg.Options.MaxChangeKbps)}

-    m.repoNodes[id] = make([]string, len(nodes))
-    for i, node := range nodes {
-        m.repoNodes[id][i] = node.NodeID
-        m.nodeRepos[node.NodeID] = append(m.nodeRepos[node.NodeID], id)
+    m.repoNodes[cfg.ID] = make([]string, len(cfg.Nodes))
+    for i, node := range cfg.Nodes {
+        m.repoNodes[cfg.ID][i] = node.NodeID
+        m.nodeRepos[node.NodeID] = append(m.nodeRepos[node.NodeID], cfg.ID)
     }

     m.addedRepo = true
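The shape of the new call can be sketched with local stand-ins; the trimmed types, the values and the map bookkeeping below are illustrative assumptions, not the real config and model packages. Passing the whole RepositoryConfiguration keeps ID, Directory, Nodes and IgnorePerms together instead of threading them as separate arguments:

package main

import "fmt"

// Local stand-ins, trimmed to what this sketch needs.
type NodeConfiguration struct{ NodeID string }

type RepositoryConfiguration struct {
	ID          string
	Directory   string
	IgnorePerms bool
	Nodes       []NodeConfiguration
}

// Model keeps whole repository configurations, mirroring the repoCfgs map above.
type Model struct {
	repoCfgs  map[string]RepositoryConfiguration
	repoNodes map[string][]string
}

func (m *Model) AddRepo(cfg RepositoryConfiguration) {
	m.repoCfgs[cfg.ID] = cfg
	nodes := make([]string, len(cfg.Nodes))
	for i, n := range cfg.Nodes {
		nodes[i] = n.NodeID
	}
	m.repoNodes[cfg.ID] = nodes
}

func main() {
	m := &Model{
		repoCfgs:  make(map[string]RepositoryConfiguration),
		repoNodes: make(map[string][]string),
	}
	m.AddRepo(RepositoryConfiguration{
		ID:          "default",
		Directory:   "/tmp/sync",
		IgnorePerms: true,
		Nodes:       []NodeConfiguration{{NodeID: "NODE-A"}},
	})
	fmt.Println(m.repoCfgs["default"].IgnorePerms, m.repoNodes["default"]) // true [NODE-A]
}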
@@ -607,8 +607,8 @@ func (m *Model) AddRepo(id, dir string, nodes []config.NodeConfiguration) {

 func (m *Model) ScanRepos() {
     m.rmut.RLock()
-    var repos = make([]string, 0, len(m.repoDirs))
-    for repo := range m.repoDirs {
+    var repos = make([]string, 0, len(m.repoCfgs))
+    for repo := range m.repoCfgs {
         repos = append(repos, repo)
     }
     m.rmut.RUnlock()
@@ -627,9 +627,9 @@ func (m *Model) ScanRepos() {

 func (m *Model) CleanRepos() {
     m.rmut.RLock()
-    var dirs = make([]string, 0, len(m.repoDirs))
-    for _, dir := range m.repoDirs {
-        dirs = append(dirs, dir)
+    var dirs = make([]string, 0, len(m.repoCfgs))
+    for _, cfg := range m.repoCfgs {
+        dirs = append(dirs, cfg.Directory)
     }
     m.rmut.RUnlock()

@@ -651,12 +651,13 @@ func (m *Model) CleanRepos() {
 func (m *Model) ScanRepo(repo string) error {
     m.rmut.RLock()
     w := &scanner.Walker{
-        Dir:          m.repoDirs[repo],
+        Dir:          m.repoCfgs[repo].Directory,
         IgnoreFile:   ".stignore",
         BlockSize:    scanner.StandardBlockSize,
         TempNamer:    defTempNamer,
         Suppressor:   m.suppressor[repo],
         CurrentFiler: cFiler{m, repo},
+        IgnorePerms:  m.repoCfgs[repo].IgnorePerms,
     }
     m.rmut.RUnlock()
     m.setState(repo, RepoScanning)
@@ -671,7 +672,7 @@ func (m *Model) ScanRepo(repo string) error {

 func (m *Model) SaveIndexes(dir string) {
     m.rmut.RLock()
-    for repo := range m.repoDirs {
+    for repo := range m.repoCfgs {
         fs := m.protocolIndex(repo)
         m.saveIndex(repo, dir, fs)
     }
@@ -680,7 +681,7 @@ func (m *Model) SaveIndexes(dir string) {

 func (m *Model) LoadIndexes(dir string) {
     m.rmut.RLock()
-    for repo := range m.repoDirs {
+    for repo := range m.repoCfgs {
         fs := m.loadIndex(repo, dir)
         m.SeedLocal(repo, fs)
     }
@@ -688,7 +689,7 @@ func (m *Model) LoadIndexes(dir string) {
 }

 func (m *Model) saveIndex(repo string, dir string, fs []protocol.FileInfo) {
-    id := fmt.Sprintf("%x", sha1.Sum([]byte(m.repoDirs[repo])))
+    id := fmt.Sprintf("%x", sha1.Sum([]byte(m.repoCfgs[repo].Directory)))
     name := id + ".idx.gz"
     name = filepath.Join(dir, name)

@@ -710,7 +711,7 @@ func (m *Model) saveIndex(repo string, dir string, fs []protocol.FileInfo) {
 }

 func (m *Model) loadIndex(repo string, dir string) []protocol.FileInfo {
-    id := fmt.Sprintf("%x", sha1.Sum([]byte(m.repoDirs[repo])))
+    id := fmt.Sprintf("%x", sha1.Sum([]byte(m.repoCfgs[repo].Directory)))
     name := id + ".idx.gz"
     name = filepath.Join(dir, name)

model/puller.go
@@ -63,8 +63,7 @@ var errNoNode = errors.New("no available source node")

 type puller struct {
     cfg               *config.Configuration
-    repo              string
-    dir               string
+    repoCfg           config.RepositoryConfiguration
     bq                *blockQueue
     model             *Model
     oustandingPerNode activityMap
@@ -74,11 +73,10 @@ type puller struct {
     requestResults chan requestResult
 }

-func newPuller(repo, dir string, model *Model, slots int, cfg *config.Configuration) *puller {
+func newPuller(repoCfg config.RepositoryConfiguration, model *Model, slots int, cfg *config.Configuration) *puller {
     p := &puller{
+        repoCfg:           repoCfg,
         cfg:               cfg,
-        repo:              repo,
-        dir:               dir,
         bq:                newBlockQueue(),
         model:             model,
         oustandingPerNode: make(activityMap),
@@ -94,13 +92,13 @@ func newPuller(repo, dir string, model *Model, slots int, cfg *config.Configurat
             p.requestSlots <- true
         }
         if debug {
-            l.Debugf("starting puller; repo %q dir %q slots %d", repo, dir, slots)
+            l.Debugf("starting puller; repo %q dir %q slots %d", repoCfg.ID, repoCfg.Directory, slots)
         }
         go p.run()
     } else {
         // Read only
         if debug {
-            l.Debugf("starting puller; repo %q dir %q (read only)", repo, dir)
+            l.Debugf("starting puller; repo %q dir %q (read only)", repoCfg.ID, repoCfg.Directory)
         }
         go p.runRO()
     }
@@ -114,7 +112,7 @@ func (p *puller) run() {
         <-p.requestSlots
         b := p.bq.get()
         if debug {
-            l.Debugf("filler: queueing %q / %q offset %d copy %d", p.repo, b.file.Name, b.block.Offset, len(b.copy))
+            l.Debugf("filler: queueing %q / %q offset %d copy %d", p.repoCfg.ID, b.file.Name, b.block.Offset, len(b.copy))
         }
         p.blocks <- b
     }
@@ -130,13 +128,13 @@ func (p *puller) run() {
     for {
         select {
         case res := <-p.requestResults:
-            p.model.setState(p.repo, RepoSyncing)
+            p.model.setState(p.repoCfg.ID, RepoSyncing)
             changed = true
             p.requestSlots <- true
             p.handleRequestResult(res)

         case b := <-p.blocks:
-            p.model.setState(p.repo, RepoSyncing)
+            p.model.setState(p.repoCfg.ID, RepoSyncing)
             changed = true
             if p.handleBlock(b) {
                 // Block was fully handled, free up the slot
@@ -149,7 +147,7 @@ func (p *puller) run() {
                 break pull
             }
             if debug {
-                l.Debugf("%q: idle but have %d open files", p.repo, len(p.openFiles))
+                l.Debugf("%q: idle but have %d open files", p.repoCfg.ID, len(p.openFiles))
                 i := 5
                 for _, f := range p.openFiles {
                     l.Debugf(" %v", f)
@@ -163,22 +161,22 @@ func (p *puller) run() {
         }

         if changed {
-            p.model.setState(p.repo, RepoCleaning)
+            p.model.setState(p.repoCfg.ID, RepoCleaning)
             p.fixupDirectories()
             changed = false
         }

-        p.model.setState(p.repo, RepoIdle)
+        p.model.setState(p.repoCfg.ID, RepoIdle)

         // Do a rescan if it's time for it
         select {
         case <-walkTicker:
             if debug {
-                l.Debugf("%q: time for rescan", p.repo)
+                l.Debugf("%q: time for rescan", p.repoCfg.ID)
             }
-            err := p.model.ScanRepo(p.repo)
+            err := p.model.ScanRepo(p.repoCfg.ID)
             if err != nil {
-                invalidateRepo(p.cfg, p.repo, err)
+                invalidateRepo(p.cfg, p.repoCfg.ID, err)
                 return
             }

@@ -195,11 +193,11 @@ func (p *puller) runRO() {

     for _ = range walkTicker {
         if debug {
-            l.Debugf("%q: time for rescan", p.repo)
+            l.Debugf("%q: time for rescan", p.repoCfg.ID)
         }
-        err := p.model.ScanRepo(p.repo)
+        err := p.model.ScanRepo(p.repoCfg.ID)
         if err != nil {
-            invalidateRepo(p.cfg, p.repo, err)
+            invalidateRepo(p.cfg, p.repoCfg.ID, err)
             return
         }
     }
@@ -214,7 +212,7 @@ func (p *puller) fixupDirectories() {
             return nil
         }

-        rn, err := filepath.Rel(p.dir, path)
+        rn, err := filepath.Rel(p.repoCfg.Directory, path)
         if err != nil {
             return nil
         }
@@ -223,7 +221,7 @@ func (p *puller) fixupDirectories() {
             return nil
         }

-        cur := p.model.CurrentRepoFile(p.repo, rn)
+        cur := p.model.CurrentRepoFile(p.repoCfg.ID, rn)
         if cur.Name != rn {
             // No matching dir in current list; weird
             if debug {
@@ -276,7 +274,7 @@ func (p *puller) fixupDirectories() {
     for {
         deleteDirs = nil
         changed = 0
-        filepath.Walk(p.dir, walkFn)
+        filepath.Walk(p.repoCfg.Directory, walkFn)

         var deleted = 0
         // Delete any queued directories
@@ -320,7 +318,7 @@ func (p *puller) handleRequestResult(res requestResult) {
     p.openFiles[f.Name] = of

     if debug {
-        l.Debugf("pull: wrote %q / %q offset %d outstanding %d done %v", p.repo, f.Name, res.offset, of.outstanding, of.done)
+        l.Debugf("pull: wrote %q / %q offset %d outstanding %d done %v", p.repoCfg.ID, f.Name, res.offset, of.outstanding, of.done)
     }

     if of.done && of.outstanding == 0 {
@@ -338,7 +336,7 @@ func (p *puller) handleBlock(b bqBlock) bool {
     // Deleted directories we mark as handled and delete later.
     if protocol.IsDirectory(f.Flags) {
         if !protocol.IsDeleted(f.Flags) {
-            path := filepath.Join(p.dir, f.Name)
+            path := filepath.Join(p.repoCfg.Directory, f.Name)
             _, err := os.Stat(path)
             if err != nil && os.IsNotExist(err) {
                 if debug {
@@ -352,7 +350,7 @@ func (p *puller) handleBlock(b bqBlock) bool {
         } else if debug {
             l.Debugf("ignore delete dir: %v", f)
         }
-        p.model.updateLocal(p.repo, f)
+        p.model.updateLocal(p.repoCfg.ID, f)
         return true
     }

@@ -361,12 +359,12 @@ func (p *puller) handleBlock(b bqBlock) bool {

     if !ok {
         if debug {
-            l.Debugf("pull: %q: opening file %q", p.repo, f.Name)
+            l.Debugf("pull: %q: opening file %q", p.repoCfg.ID, f.Name)
         }

-        of.availability = uint64(p.model.repoFiles[p.repo].Availability(f.Name))
-        of.filepath = filepath.Join(p.dir, f.Name)
-        of.temp = filepath.Join(p.dir, defTempNamer.TempName(f.Name))
+        of.availability = uint64(p.model.repoFiles[p.repoCfg.ID].Availability(f.Name))
+        of.filepath = filepath.Join(p.repoCfg.Directory, f.Name)
+        of.temp = filepath.Join(p.repoCfg.Directory, defTempNamer.TempName(f.Name))

         dirName := filepath.Dir(of.filepath)
         _, err := os.Stat(dirName)
@@ -374,13 +372,13 @@ func (p *puller) handleBlock(b bqBlock) bool {
             err = os.MkdirAll(dirName, 0777)
         }
         if err != nil {
-            l.Debugf("pull: error: %q / %q: %v", p.repo, f.Name, err)
+            l.Debugf("pull: error: %q / %q: %v", p.repoCfg.ID, f.Name, err)
         }

         of.file, of.err = os.Create(of.temp)
         if of.err != nil {
             if debug {
-                l.Debugf("pull: error: %q / %q: %v", p.repo, f.Name, of.err)
+                l.Debugf("pull: error: %q / %q: %v", p.repoCfg.ID, f.Name, of.err)
             }
             if !b.last {
                 p.openFiles[f.Name] = of
@@ -393,7 +391,7 @@ func (p *puller) handleBlock(b bqBlock) bool {
     if of.err != nil {
         // We have already failed this file.
         if debug {
-            l.Debugf("pull: error: %q / %q has already failed: %v", p.repo, f.Name, of.err)
+            l.Debugf("pull: error: %q / %q has already failed: %v", p.repoCfg.ID, f.Name, of.err)
         }
         if b.last {
             delete(p.openFiles, f.Name)
@@ -424,14 +422,14 @@ func (p *puller) handleCopyBlock(b bqBlock) {
     of := p.openFiles[f.Name]

     if debug {
-        l.Debugf("pull: copying %d blocks for %q / %q", len(b.copy), p.repo, f.Name)
+        l.Debugf("pull: copying %d blocks for %q / %q", len(b.copy), p.repoCfg.ID, f.Name)
     }

     var exfd *os.File
     exfd, of.err = os.Open(of.filepath)
     if of.err != nil {
         if debug {
-            l.Debugf("pull: error: %q / %q: %v", p.repo, f.Name, of.err)
+            l.Debugf("pull: error: %q / %q: %v", p.repoCfg.ID, f.Name, of.err)
         }
         of.file.Close()
         of.file = nil
@@ -450,7 +448,7 @@ func (p *puller) handleCopyBlock(b bqBlock) {
         buffers.Put(bs)
         if of.err != nil {
             if debug {
-                l.Debugf("pull: error: %q / %q: %v", p.repo, f.Name, of.err)
+                l.Debugf("pull: error: %q / %q: %v", p.repoCfg.ID, f.Name, of.err)
             }
             exfd.Close()
             of.file.Close()
@@ -493,10 +491,10 @@ func (p *puller) handleRequestBlock(b bqBlock) bool {

     go func(node string, b bqBlock) {
         if debug {
-            l.Debugf("pull: requesting %q / %q offset %d size %d from %q outstanding %d", p.repo, f.Name, b.block.Offset, b.block.Size, node, of.outstanding)
+            l.Debugf("pull: requesting %q / %q offset %d size %d from %q outstanding %d", p.repoCfg.ID, f.Name, b.block.Offset, b.block.Size, node, of.outstanding)
         }

-        bs, err := p.model.requestGlobal(node, p.repo, f.Name, b.block.Offset, int(b.block.Size), nil)
+        bs, err := p.model.requestGlobal(node, p.repoCfg.ID, f.Name, b.block.Offset, int(b.block.Size), nil)
         p.requestResults <- requestResult{
             node: node,
             file: f,
@@ -527,24 +525,24 @@ func (p *puller) handleEmptyBlock(b bqBlock) {
         os.Remove(of.temp)
         os.Chmod(of.filepath, 0666)
         if err := os.Remove(of.filepath); err == nil || os.IsNotExist(err) {
-            p.model.updateLocal(p.repo, f)
+            p.model.updateLocal(p.repoCfg.ID, f)
         }
     } else {
         if debug {
-            l.Debugf("pull: no blocks to fetch and nothing to copy for %q / %q", p.repo, f.Name)
+            l.Debugf("pull: no blocks to fetch and nothing to copy for %q / %q", p.repoCfg.ID, f.Name)
         }
         t := time.Unix(f.Modified, 0)
         if os.Chtimes(of.temp, t, t) != nil {
             delete(p.openFiles, f.Name)
             return
         }
-        if protocol.HasPermissionBits(f.Flags) && os.Chmod(of.temp, os.FileMode(f.Flags&0777)) != nil {
+        if !p.repoCfg.IgnorePerms && protocol.HasPermissionBits(f.Flags) && os.Chmod(of.temp, os.FileMode(f.Flags&0777)) != nil {
             delete(p.openFiles, f.Name)
             return
         }
         defTempNamer.Show(of.temp)
         if Rename(of.temp, of.filepath) == nil {
-            p.model.updateLocal(p.repo, f)
+            p.model.updateLocal(p.repoCfg.ID, f)
         }
     }
     delete(p.openFiles, f.Name)
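A condensed, self-contained sketch of the finalisation guard above: permission bits carried in the file flags are only restored when the repository does not ignore them. The flag constant, the hasPermissionBits helper and the finalize function are stand-ins for the sketch, not the protocol package's real definitions:

package main

import (
	"fmt"
	"os"
	"time"
)

const flagNoPermBits = 1 << 31 // stand-in for a "no permission bits" protocol flag

// hasPermissionBits mirrors the protocol helper: a file carries usable
// permission bits unless the no-perm-bits flag is set.
func hasPermissionBits(flags uint32) bool {
	return flags&flagNoPermBits == 0
}

// finalize applies the modification time and, unless ignorePerms is set,
// the permission bits carried in flags to the temporary file.
func finalize(temp string, modified int64, flags uint32, ignorePerms bool) error {
	t := time.Unix(modified, 0)
	if err := os.Chtimes(temp, t, t); err != nil {
		return err
	}
	if !ignorePerms && hasPermissionBits(flags) {
		if err := os.Chmod(temp, os.FileMode(flags&0777)); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	f, err := os.CreateTemp("", "ignoreperms-sketch")
	if err != nil {
		panic(err)
	}
	defer os.Remove(f.Name())
	f.Close()

	// With ignorePerms=true the chmod is skipped entirely.
	fmt.Println(finalize(f.Name(), time.Now().Unix(), 0640, true))
}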
@@ -552,8 +550,8 @@ func (p *puller) handleEmptyBlock(b bqBlock) {

 func (p *puller) queueNeededBlocks() {
     queued := 0
-    for _, f := range p.model.NeedFilesRepo(p.repo) {
-        lf := p.model.CurrentRepoFile(p.repo, f.Name)
+    for _, f := range p.model.NeedFilesRepo(p.repoCfg.ID) {
+        lf := p.model.CurrentRepoFile(p.repoCfg.ID, f.Name)
         have, need := scanner.BlockDiff(lf.Blocks, f.Blocks)
         if debug {
             l.Debugf("need:\n local: %v\n global: %v\n haveBlocks: %v\n needBlocks: %v", lf, f, have, need)
@@ -566,13 +564,13 @@ func (p *puller) queueNeededBlocks() {
         })
     }
     if debug && queued > 0 {
-        l.Debugf("%q: queued %d blocks", p.repo, queued)
+        l.Debugf("%q: queued %d blocks", p.repoCfg.ID, queued)
     }
 }

 func (p *puller) closeFile(f scanner.File) {
     if debug {
-        l.Debugf("pull: closing %q / %q", p.repo, f.Name)
+        l.Debugf("pull: closing %q / %q", p.repoCfg.ID, f.Name)
     }

     of := p.openFiles[f.Name]
@@ -584,7 +582,7 @@ func (p *puller) closeFile(f scanner.File) {
     fd, err := os.Open(of.temp)
     if err != nil {
         if debug {
-            l.Debugf("pull: error: %q / %q: %v", p.repo, f.Name, err)
+            l.Debugf("pull: error: %q / %q: %v", p.repoCfg.ID, f.Name, err)
         }
         return
     }
@@ -593,31 +591,37 @@ func (p *puller) closeFile(f scanner.File) {

     if l0, l1 := len(hb), len(f.Blocks); l0 != l1 {
         if debug {
-            l.Debugf("pull: %q / %q: nblocks %d != %d", p.repo, f.Name, l0, l1)
+            l.Debugf("pull: %q / %q: nblocks %d != %d", p.repoCfg.ID, f.Name, l0, l1)
         }
         return
     }

     for i := range hb {
         if bytes.Compare(hb[i].Hash, f.Blocks[i].Hash) != 0 {
-            l.Debugf("pull: %q / %q: block %d hash mismatch", p.repo, f.Name, i)
+            l.Debugf("pull: %q / %q: block %d hash mismatch", p.repoCfg.ID, f.Name, i)
             return
         }
     }

     t := time.Unix(f.Modified, 0)
-    os.Chtimes(of.temp, t, t)
-    if protocol.HasPermissionBits(f.Flags) {
-        os.Chmod(of.temp, os.FileMode(f.Flags&0777))
+    err = os.Chtimes(of.temp, t, t)
+    if debug && err != nil {
+        l.Debugf("pull: error: %q / %q: %v", p.repoCfg.ID, f.Name, err)
+    }
+    if !p.repoCfg.IgnorePerms && protocol.HasPermissionBits(f.Flags) {
+        err = os.Chmod(of.temp, os.FileMode(f.Flags&0777))
+        if debug && err != nil {
+            l.Debugf("pull: error: %q / %q: %v", p.repoCfg.ID, f.Name, err)
+        }
     }
     defTempNamer.Show(of.temp)
     if debug {
-        l.Debugf("pull: rename %q / %q: %q", p.repo, f.Name, of.filepath)
+        l.Debugf("pull: rename %q / %q: %q", p.repoCfg.ID, f.Name, of.filepath)
     }
     if err := Rename(of.temp, of.filepath); err == nil {
-        p.model.updateLocal(p.repo, f)
+        p.model.updateLocal(p.repoCfg.ID, f)
     } else {
-        l.Debugf("pull: error: %q / %q: %v", p.repo, f.Name, err)
+        l.Debugf("pull: error: %q / %q: %v", p.repoCfg.ID, f.Name, err)
     }
 }

@@ -29,10 +29,10 @@ type Walker struct {
     // Suppressed files will be returned with empty metadata and the Suppressed flag set.
     // Requires CurrentFiler to be set.
     Suppressor Suppressor
-    // If IgnorePermissions is true, changes to permission bits will not be
+    // If IgnorePerms is true, changes to permission bits will not be
     // detected. Scanned files will get zero permission bits and the
     // NoPermissionBits flag set.
-    IgnorePermissions bool
+    IgnorePerms bool
 }

 type TempNamer interface {
@@ -166,7 +166,7 @@ func (w *Walker) walkAndHashFiles(res *[]File, ign map[string][]string) filepath
     if info.Mode().IsDir() {
         if w.CurrentFiler != nil {
             cf := w.CurrentFiler.CurrentFile(rn)
-            permUnchanged := w.IgnorePermissions || !protocol.HasPermissionBits(cf.Flags) || PermsEqual(cf.Flags, uint32(info.Mode()))
+            permUnchanged := w.IgnorePerms || !protocol.HasPermissionBits(cf.Flags) || PermsEqual(cf.Flags, uint32(info.Mode()))
             if cf.Modified == info.ModTime().Unix() && protocol.IsDirectory(cf.Flags) && permUnchanged {
                 if debug {
                     l.Debugln("unchanged:", cf)
@@ -174,7 +174,7 @@ func (w *Walker) walkAndHashFiles(res *[]File, ign map[string][]string) filepath
                 *res = append(*res, cf)
             } else {
                 var flags uint32 = protocol.FlagDirectory
-                if w.IgnorePermissions {
+                if w.IgnorePerms {
                     flags |= protocol.FlagNoPermBits
                 } else {
                     flags |= uint32(info.Mode() & os.ModePerm)
@@ -197,7 +197,7 @@ func (w *Walker) walkAndHashFiles(res *[]File, ign map[string][]string) filepath
     if info.Mode().IsRegular() {
         if w.CurrentFiler != nil {
             cf := w.CurrentFiler.CurrentFile(rn)
-            permUnchanged := w.IgnorePermissions || !protocol.HasPermissionBits(cf.Flags) || PermsEqual(cf.Flags, uint32(info.Mode()))
+            permUnchanged := w.IgnorePerms || !protocol.HasPermissionBits(cf.Flags) || PermsEqual(cf.Flags, uint32(info.Mode()))
             if !protocol.IsDeleted(cf.Flags) && cf.Modified == info.ModTime().Unix() && permUnchanged {
                 if debug {
                     l.Debugln("unchanged:", cf)
@@ -249,7 +249,7 @@ func (w *Walker) walkAndHashFiles(res *[]File, ign map[string][]string) filepath
             }

             var flags = uint32(info.Mode() & os.ModePerm)
-            if w.IgnorePermissions {
+            if w.IgnorePerms {
                 flags = protocol.FlagNoPermBits
             }
             f := File{
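A self-contained sketch of the scanner-side behaviour above: with IgnorePerms set the walker records a no-perm-bits flag instead of the mode bits, and permission differences never mark a file as changed. The constants and helpers below are stand-ins, not the real protocol and scanner definitions:

package main

import (
	"fmt"
	"os"
)

// Stand-ins for the protocol flag helpers used by the walker.
const flagNoPermBits = 1 << 31

func hasPermissionBits(flags uint32) bool { return flags&flagNoPermBits == 0 }
func permsEqual(a, b uint32) bool         { return a&0777 == b&0777 }

// recordFlags computes the permission-related flags a walker would store
// for a regular file, depending on ignorePerms.
func recordFlags(mode os.FileMode, ignorePerms bool) uint32 {
	if ignorePerms {
		return flagNoPermBits
	}
	return uint32(mode & os.ModePerm)
}

// permUnchanged mirrors the expression in walkAndHashFiles: with ignorePerms
// set, or when the stored entry carries no permission bits, permissions are
// never a reason to treat the file as modified.
func permUnchanged(ignorePerms bool, storedFlags uint32, mode os.FileMode) bool {
	return ignorePerms || !hasPermissionBits(storedFlags) || permsEqual(storedFlags, uint32(mode))
}

func main() {
	stored := recordFlags(0640, true) // scanned with ignorePerms
	fmt.Println(permUnchanged(true, stored, 0755))  // true: perms ignored
	fmt.Println(permUnchanged(false, stored, 0755)) // true: no perm bits stored
	fmt.Println(permUnchanged(false, 0640, 0755))   // false: real change detected
}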