Implement IgnorePerms
This commit is contained in:
parent 303ce02271
commit 8356b58b1d
File diff suppressed because one or more lines are too long
@@ -252,8 +252,8 @@ func main() {
 		if repo.Invalid != "" {
 			continue
 		}
-		dir := expandTilde(repo.Directory)
-		m.AddRepo(repo.ID, dir, repo.Nodes)
+		repo.Directory = expandTilde(repo.Directory)
+		m.AddRepo(repo)
 	}

 	// GUI
@@ -31,6 +31,7 @@ type RepositoryConfiguration struct {
 	Directory   string              `xml:"directory,attr"`
 	Nodes       []NodeConfiguration `xml:"node"`
 	ReadOnly    bool                `xml:"ro,attr"`
+	IgnorePerms bool                `xml:"ignorePerms,attr"`
 	Invalid     string              `xml:"-"` // Set at runtime when there is an error, not saved
 	nodeIDs     []string
 }
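The new field rides on a plain XML attribute, so configurations written before this commit simply keep the zero value (false). A minimal, self-contained sketch of how such an attribute unmarshals with encoding/xml; the reduced struct and the sample element below are illustrative stand-ins, not the project's full configuration:

package main

import (
	"encoding/xml"
	"fmt"
)

// Reduced stand-in for the real RepositoryConfiguration; only the tags
// relevant to this commit are mirrored here.
type repoCfg struct {
	ID          string `xml:"id,attr"`
	Directory   string `xml:"directory,attr"`
	ReadOnly    bool   `xml:"ro,attr"`
	IgnorePerms bool   `xml:"ignorePerms,attr"`
}

func main() {
	// With the attribute present the flag is parsed; when it is absent,
	// the zero value (false) is kept, so older configs behave as before.
	raw := `<repository id="default" directory="/tmp/sync" ro="false" ignorePerms="true"/>`
	var r repoCfg
	if err := xml.Unmarshal([]byte(raw), &r); err != nil {
		fmt.Println("unmarshal failed:", err)
		return
	}
	fmt.Println(r.ID, r.IgnorePerms) // default true
}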
@@ -166,6 +166,13 @@
             <span ng-if="!repo.ReadOnly">No</span>
           </td>
         </tr>
+        <tr>
+          <th><span class="glyphicon glyphicon-unchecked"></span> Ignore Permissions</th>
+          <td class="text-right">
+            <span ng-if="repo.IgnorePerms">Yes</span>
+            <span ng-if="!repo.IgnorePerms">No</span>
+          </td>
+        </tr>
         <tr>
           <th><span class="glyphicon glyphicon-share-alt"></span> Shared With</th>
           <td class="text-right">
@@ -477,6 +484,14 @@
           </div>
           <p class="help-block">Files are protected from changes made on other nodes, but changes made on <em>this</em> node will be sent to the rest of the cluster.</p>
         </div>
+        <div class="form-group">
+          <div class="checkbox">
+            <label>
+              <input type="checkbox" ng-model="currentRepo.IgnorePerms"> Ignore Permissions
+            </label>
+          </div>
+          <p class="help-block">File permission bits are ignored when looking for changes. Use on FAT filesystems.</p>
+        </div>
         <div class="form-group">
           <label for="nodes">Nodes</label>
           <div class="checkbox" ng-repeat="node in otherNodes()">
@@ -43,7 +43,7 @@ type Model struct {
 	clientName    string
 	clientVersion string

-	repoDirs  map[string]string                         // repo -> dir
+	repoCfgs  map[string]config.RepositoryConfiguration // repo -> cfg
 	repoFiles map[string]*files.Set                     // repo -> files
 	repoNodes map[string][]string                       // repo -> nodeIDs
 	nodeRepos map[string][]string                       // nodeID -> repos
@@ -80,7 +80,7 @@ func NewModel(indexDir string, cfg *config.Configuration, clientName, clientVers
 		cfg:           cfg,
 		clientName:    clientName,
 		clientVersion: clientVersion,
-		repoDirs:      make(map[string]string),
+		repoCfgs:      make(map[string]config.RepositoryConfiguration),
 		repoFiles:     make(map[string]*files.Set),
 		repoNodes:     make(map[string][]string),
 		nodeRepos:     make(map[string][]string),
@@ -104,10 +104,10 @@ func (m *Model) StartRepoRW(repo string, threads int) {
 	m.rmut.RLock()
 	defer m.rmut.RUnlock()

-	if dir, ok := m.repoDirs[repo]; !ok {
+	if cfg, ok := m.repoCfgs[repo]; !ok {
 		panic("cannot start without repo")
 	} else {
-		newPuller(repo, dir, m, threads, m.cfg)
+		newPuller(cfg, m, threads, m.cfg)
 	}
 }

@@ -386,7 +386,7 @@ func (m *Model) Request(nodeID, repo, name string, offset int64, size int) ([]by
 		l.Debugf("REQ(in): %s: %q / %q o=%d s=%d", nodeID, repo, name, offset, size)
 	}
 	m.rmut.RLock()
-	fn := filepath.Join(m.repoDirs[repo], name)
+	fn := filepath.Join(m.repoCfgs[repo].Directory, name)
 	m.rmut.RUnlock()
 	fd, err := os.Open(fn) // XXX: Inefficient, should cache fd?
 	if err != nil {
@@ -582,23 +582,23 @@ func (m *Model) broadcastIndexLoop() {
 	}
 }

-func (m *Model) AddRepo(id, dir string, nodes []config.NodeConfiguration) {
+func (m *Model) AddRepo(cfg config.RepositoryConfiguration) {
 	if m.started {
 		panic("cannot add repo to started model")
 	}
-	if len(id) == 0 {
+	if len(cfg.ID) == 0 {
 		panic("cannot add empty repo id")
 	}

 	m.rmut.Lock()
-	m.repoDirs[id] = dir
-	m.repoFiles[id] = files.NewSet()
-	m.suppressor[id] = &suppressor{threshold: int64(m.cfg.Options.MaxChangeKbps)}
+	m.repoCfgs[cfg.ID] = cfg
+	m.repoFiles[cfg.ID] = files.NewSet()
+	m.suppressor[cfg.ID] = &suppressor{threshold: int64(m.cfg.Options.MaxChangeKbps)}

-	m.repoNodes[id] = make([]string, len(nodes))
-	for i, node := range nodes {
-		m.repoNodes[id][i] = node.NodeID
-		m.nodeRepos[node.NodeID] = append(m.nodeRepos[node.NodeID], id)
+	m.repoNodes[cfg.ID] = make([]string, len(cfg.Nodes))
+	for i, node := range cfg.Nodes {
+		m.repoNodes[cfg.ID][i] = node.NodeID
+		m.nodeRepos[node.NodeID] = append(m.nodeRepos[node.NodeID], cfg.ID)
 	}

 	m.addedRepo = true
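The shape of this refactor in isolation: the model now stores each repository's whole configuration keyed by its ID and derives the directory and node list from it, rather than accepting them as separate arguments. A minimal sketch using hypothetical stand-in types, not the project's code:

package main

import "fmt"

// Hypothetical, trimmed-down stand-ins for the real config and model types;
// only the keying scheme of the refactor is shown.
type nodeConfiguration struct{ NodeID string }

type repositoryConfiguration struct {
	ID          string
	Directory   string
	IgnorePerms bool
	Nodes       []nodeConfiguration
}

type model struct {
	repoCfgs  map[string]repositoryConfiguration
	repoNodes map[string][]string
}

// addRepo registers one configuration and derives the per-repo node list
// from it, instead of taking id, dir and nodes as separate parameters.
func (m *model) addRepo(cfg repositoryConfiguration) {
	m.repoCfgs[cfg.ID] = cfg
	nodes := make([]string, len(cfg.Nodes))
	for i, n := range cfg.Nodes {
		nodes[i] = n.NodeID
	}
	m.repoNodes[cfg.ID] = nodes
}

func main() {
	m := &model{
		repoCfgs:  make(map[string]repositoryConfiguration),
		repoNodes: make(map[string][]string),
	}
	m.addRepo(repositoryConfiguration{ID: "default", Directory: "/tmp/sync", Nodes: []nodeConfiguration{{NodeID: "A"}}})
	fmt.Println(m.repoCfgs["default"].Directory, m.repoNodes["default"])
}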
@@ -607,8 +607,8 @@ func (m *Model) AddRepo(id, dir string, nodes []config.NodeConfiguration) {

 func (m *Model) ScanRepos() {
 	m.rmut.RLock()
-	var repos = make([]string, 0, len(m.repoDirs))
-	for repo := range m.repoDirs {
+	var repos = make([]string, 0, len(m.repoCfgs))
+	for repo := range m.repoCfgs {
 		repos = append(repos, repo)
 	}
 	m.rmut.RUnlock()
@@ -627,9 +627,9 @@ func (m *Model) ScanRepos() {

 func (m *Model) CleanRepos() {
 	m.rmut.RLock()
-	var dirs = make([]string, 0, len(m.repoDirs))
-	for _, dir := range m.repoDirs {
-		dirs = append(dirs, dir)
+	var dirs = make([]string, 0, len(m.repoCfgs))
+	for _, cfg := range m.repoCfgs {
+		dirs = append(dirs, cfg.Directory)
 	}
 	m.rmut.RUnlock()

@@ -651,12 +651,13 @@ func (m *Model) CleanRepos() {
 func (m *Model) ScanRepo(repo string) error {
 	m.rmut.RLock()
 	w := &scanner.Walker{
-		Dir:          m.repoDirs[repo],
+		Dir:          m.repoCfgs[repo].Directory,
 		IgnoreFile:   ".stignore",
 		BlockSize:    scanner.StandardBlockSize,
 		TempNamer:    defTempNamer,
 		Suppressor:   m.suppressor[repo],
 		CurrentFiler: cFiler{m, repo},
+		IgnorePerms:  m.repoCfgs[repo].IgnorePerms,
 	}
 	m.rmut.RUnlock()
 	m.setState(repo, RepoScanning)
@@ -671,7 +672,7 @@ func (m *Model) ScanRepo(repo string) error {

 func (m *Model) SaveIndexes(dir string) {
 	m.rmut.RLock()
-	for repo := range m.repoDirs {
+	for repo := range m.repoCfgs {
 		fs := m.protocolIndex(repo)
 		m.saveIndex(repo, dir, fs)
 	}
@@ -680,7 +681,7 @@ func (m *Model) SaveIndexes(dir string) {

 func (m *Model) LoadIndexes(dir string) {
 	m.rmut.RLock()
-	for repo := range m.repoDirs {
+	for repo := range m.repoCfgs {
 		fs := m.loadIndex(repo, dir)
 		m.SeedLocal(repo, fs)
 	}
@@ -688,7 +689,7 @@ func (m *Model) LoadIndexes(dir string) {
 }

 func (m *Model) saveIndex(repo string, dir string, fs []protocol.FileInfo) {
-	id := fmt.Sprintf("%x", sha1.Sum([]byte(m.repoDirs[repo])))
+	id := fmt.Sprintf("%x", sha1.Sum([]byte(m.repoCfgs[repo].Directory)))
 	name := id + ".idx.gz"
 	name = filepath.Join(dir, name)

@@ -710,7 +711,7 @@ func (m *Model) saveIndex(repo string, dir string, fs []protocol.FileInfo) {
 }

 func (m *Model) loadIndex(repo string, dir string) []protocol.FileInfo {
-	id := fmt.Sprintf("%x", sha1.Sum([]byte(m.repoDirs[repo])))
+	id := fmt.Sprintf("%x", sha1.Sum([]byte(m.repoCfgs[repo].Directory)))
 	name := id + ".idx.gz"
 	name = filepath.Join(dir, name)

model/puller.go (112 lines changed)
@@ -63,8 +63,7 @@ var errNoNode = errors.New("no available source node")

 type puller struct {
 	cfg               *config.Configuration
-	repo              string
-	dir               string
+	repoCfg           config.RepositoryConfiguration
 	bq                *blockQueue
 	model             *Model
 	oustandingPerNode activityMap
@@ -74,11 +73,10 @@ type puller struct {
 	requestResults chan requestResult
 }

-func newPuller(repo, dir string, model *Model, slots int, cfg *config.Configuration) *puller {
+func newPuller(repoCfg config.RepositoryConfiguration, model *Model, slots int, cfg *config.Configuration) *puller {
 	p := &puller{
+		repoCfg:           repoCfg,
 		cfg:               cfg,
-		repo:              repo,
-		dir:               dir,
 		bq:                newBlockQueue(),
 		model:             model,
 		oustandingPerNode: make(activityMap),
@@ -94,13 +92,13 @@ func newPuller(repo, dir string, model *Model, slots int, cfg *config.Configurat
 			p.requestSlots <- true
 		}
 		if debug {
-			l.Debugf("starting puller; repo %q dir %q slots %d", repo, dir, slots)
+			l.Debugf("starting puller; repo %q dir %q slots %d", repoCfg.ID, repoCfg.Directory, slots)
 		}
 		go p.run()
 	} else {
 		// Read only
 		if debug {
-			l.Debugf("starting puller; repo %q dir %q (read only)", repo, dir)
+			l.Debugf("starting puller; repo %q dir %q (read only)", repoCfg.ID, repoCfg.Directory)
 		}
 		go p.runRO()
 	}
@@ -114,7 +112,7 @@ func (p *puller) run() {
 			<-p.requestSlots
 			b := p.bq.get()
 			if debug {
-				l.Debugf("filler: queueing %q / %q offset %d copy %d", p.repo, b.file.Name, b.block.Offset, len(b.copy))
+				l.Debugf("filler: queueing %q / %q offset %d copy %d", p.repoCfg.ID, b.file.Name, b.block.Offset, len(b.copy))
 			}
 			p.blocks <- b
 		}
@@ -130,13 +128,13 @@ func (p *puller) run() {
 	for {
 		select {
 		case res := <-p.requestResults:
-			p.model.setState(p.repo, RepoSyncing)
+			p.model.setState(p.repoCfg.ID, RepoSyncing)
 			changed = true
 			p.requestSlots <- true
 			p.handleRequestResult(res)

 		case b := <-p.blocks:
-			p.model.setState(p.repo, RepoSyncing)
+			p.model.setState(p.repoCfg.ID, RepoSyncing)
 			changed = true
 			if p.handleBlock(b) {
 				// Block was fully handled, free up the slot
@@ -149,7 +147,7 @@ func (p *puller) run() {
 				break pull
 			}
 			if debug {
-				l.Debugf("%q: idle but have %d open files", p.repo, len(p.openFiles))
+				l.Debugf("%q: idle but have %d open files", p.repoCfg.ID, len(p.openFiles))
 				i := 5
 				for _, f := range p.openFiles {
 					l.Debugf(" %v", f)
@@ -163,22 +161,22 @@ func (p *puller) run() {
 		}

 		if changed {
-			p.model.setState(p.repo, RepoCleaning)
+			p.model.setState(p.repoCfg.ID, RepoCleaning)
 			p.fixupDirectories()
 			changed = false
 		}

-		p.model.setState(p.repo, RepoIdle)
+		p.model.setState(p.repoCfg.ID, RepoIdle)

 		// Do a rescan if it's time for it
 		select {
 		case <-walkTicker:
 			if debug {
-				l.Debugf("%q: time for rescan", p.repo)
+				l.Debugf("%q: time for rescan", p.repoCfg.ID)
 			}
-			err := p.model.ScanRepo(p.repo)
+			err := p.model.ScanRepo(p.repoCfg.ID)
 			if err != nil {
-				invalidateRepo(p.cfg, p.repo, err)
+				invalidateRepo(p.cfg, p.repoCfg.ID, err)
 				return
 			}

@@ -195,11 +193,11 @@ func (p *puller) runRO() {

 	for _ = range walkTicker {
 		if debug {
-			l.Debugf("%q: time for rescan", p.repo)
+			l.Debugf("%q: time for rescan", p.repoCfg.ID)
 		}
-		err := p.model.ScanRepo(p.repo)
+		err := p.model.ScanRepo(p.repoCfg.ID)
 		if err != nil {
-			invalidateRepo(p.cfg, p.repo, err)
+			invalidateRepo(p.cfg, p.repoCfg.ID, err)
 			return
 		}
 	}
@@ -214,7 +212,7 @@ func (p *puller) fixupDirectories() {
 			return nil
 		}

-		rn, err := filepath.Rel(p.dir, path)
+		rn, err := filepath.Rel(p.repoCfg.Directory, path)
 		if err != nil {
 			return nil
 		}
@@ -223,7 +221,7 @@ func (p *puller) fixupDirectories() {
 			return nil
 		}

-		cur := p.model.CurrentRepoFile(p.repo, rn)
+		cur := p.model.CurrentRepoFile(p.repoCfg.ID, rn)
 		if cur.Name != rn {
 			// No matching dir in current list; weird
 			if debug {
@@ -276,7 +274,7 @@ func (p *puller) fixupDirectories() {
 	for {
 		deleteDirs = nil
 		changed = 0
-		filepath.Walk(p.dir, walkFn)
+		filepath.Walk(p.repoCfg.Directory, walkFn)

 		var deleted = 0
 		// Delete any queued directories
@@ -320,7 +318,7 @@ func (p *puller) handleRequestResult(res requestResult) {
 	p.openFiles[f.Name] = of

 	if debug {
-		l.Debugf("pull: wrote %q / %q offset %d outstanding %d done %v", p.repo, f.Name, res.offset, of.outstanding, of.done)
+		l.Debugf("pull: wrote %q / %q offset %d outstanding %d done %v", p.repoCfg.ID, f.Name, res.offset, of.outstanding, of.done)
 	}

 	if of.done && of.outstanding == 0 {
@@ -338,7 +336,7 @@ func (p *puller) handleBlock(b bqBlock) bool {
 	// Deleted directories we mark as handled and delete later.
 	if protocol.IsDirectory(f.Flags) {
 		if !protocol.IsDeleted(f.Flags) {
-			path := filepath.Join(p.dir, f.Name)
+			path := filepath.Join(p.repoCfg.Directory, f.Name)
 			_, err := os.Stat(path)
 			if err != nil && os.IsNotExist(err) {
 				if debug {
@@ -352,7 +350,7 @@ func (p *puller) handleBlock(b bqBlock) bool {
 		} else if debug {
 			l.Debugf("ignore delete dir: %v", f)
 		}
-		p.model.updateLocal(p.repo, f)
+		p.model.updateLocal(p.repoCfg.ID, f)
 		return true
 	}

@@ -361,12 +359,12 @@ func (p *puller) handleBlock(b bqBlock) bool {

 	if !ok {
 		if debug {
-			l.Debugf("pull: %q: opening file %q", p.repo, f.Name)
+			l.Debugf("pull: %q: opening file %q", p.repoCfg.ID, f.Name)
 		}

-		of.availability = uint64(p.model.repoFiles[p.repo].Availability(f.Name))
-		of.filepath = filepath.Join(p.dir, f.Name)
-		of.temp = filepath.Join(p.dir, defTempNamer.TempName(f.Name))
+		of.availability = uint64(p.model.repoFiles[p.repoCfg.ID].Availability(f.Name))
+		of.filepath = filepath.Join(p.repoCfg.Directory, f.Name)
+		of.temp = filepath.Join(p.repoCfg.Directory, defTempNamer.TempName(f.Name))

 		dirName := filepath.Dir(of.filepath)
 		_, err := os.Stat(dirName)
@@ -374,13 +372,13 @@ func (p *puller) handleBlock(b bqBlock) bool {
 			err = os.MkdirAll(dirName, 0777)
 		}
 		if err != nil {
-			l.Debugf("pull: error: %q / %q: %v", p.repo, f.Name, err)
+			l.Debugf("pull: error: %q / %q: %v", p.repoCfg.ID, f.Name, err)
 		}

 		of.file, of.err = os.Create(of.temp)
 		if of.err != nil {
 			if debug {
-				l.Debugf("pull: error: %q / %q: %v", p.repo, f.Name, of.err)
+				l.Debugf("pull: error: %q / %q: %v", p.repoCfg.ID, f.Name, of.err)
 			}
 			if !b.last {
 				p.openFiles[f.Name] = of
@@ -393,7 +391,7 @@ func (p *puller) handleBlock(b bqBlock) bool {
 	if of.err != nil {
 		// We have already failed this file.
 		if debug {
-			l.Debugf("pull: error: %q / %q has already failed: %v", p.repo, f.Name, of.err)
+			l.Debugf("pull: error: %q / %q has already failed: %v", p.repoCfg.ID, f.Name, of.err)
 		}
 		if b.last {
 			delete(p.openFiles, f.Name)
@@ -424,14 +422,14 @@ func (p *puller) handleCopyBlock(b bqBlock) {
 	of := p.openFiles[f.Name]

 	if debug {
-		l.Debugf("pull: copying %d blocks for %q / %q", len(b.copy), p.repo, f.Name)
+		l.Debugf("pull: copying %d blocks for %q / %q", len(b.copy), p.repoCfg.ID, f.Name)
 	}

 	var exfd *os.File
 	exfd, of.err = os.Open(of.filepath)
 	if of.err != nil {
 		if debug {
-			l.Debugf("pull: error: %q / %q: %v", p.repo, f.Name, of.err)
+			l.Debugf("pull: error: %q / %q: %v", p.repoCfg.ID, f.Name, of.err)
 		}
 		of.file.Close()
 		of.file = nil
@@ -450,7 +448,7 @@ func (p *puller) handleCopyBlock(b bqBlock) {
 		buffers.Put(bs)
 		if of.err != nil {
 			if debug {
-				l.Debugf("pull: error: %q / %q: %v", p.repo, f.Name, of.err)
+				l.Debugf("pull: error: %q / %q: %v", p.repoCfg.ID, f.Name, of.err)
 			}
 			exfd.Close()
 			of.file.Close()
@@ -493,10 +491,10 @@ func (p *puller) handleRequestBlock(b bqBlock) bool {

 	go func(node string, b bqBlock) {
 		if debug {
-			l.Debugf("pull: requesting %q / %q offset %d size %d from %q outstanding %d", p.repo, f.Name, b.block.Offset, b.block.Size, node, of.outstanding)
+			l.Debugf("pull: requesting %q / %q offset %d size %d from %q outstanding %d", p.repoCfg.ID, f.Name, b.block.Offset, b.block.Size, node, of.outstanding)
 		}

-		bs, err := p.model.requestGlobal(node, p.repo, f.Name, b.block.Offset, int(b.block.Size), nil)
+		bs, err := p.model.requestGlobal(node, p.repoCfg.ID, f.Name, b.block.Offset, int(b.block.Size), nil)
 		p.requestResults <- requestResult{
 			node: node,
 			file: f,
@@ -527,24 +525,24 @@ func (p *puller) handleEmptyBlock(b bqBlock) {
 		os.Remove(of.temp)
 		os.Chmod(of.filepath, 0666)
 		if err := os.Remove(of.filepath); err == nil || os.IsNotExist(err) {
-			p.model.updateLocal(p.repo, f)
+			p.model.updateLocal(p.repoCfg.ID, f)
 		}
 	} else {
 		if debug {
-			l.Debugf("pull: no blocks to fetch and nothing to copy for %q / %q", p.repo, f.Name)
+			l.Debugf("pull: no blocks to fetch and nothing to copy for %q / %q", p.repoCfg.ID, f.Name)
 		}
 		t := time.Unix(f.Modified, 0)
 		if os.Chtimes(of.temp, t, t) != nil {
 			delete(p.openFiles, f.Name)
 			return
 		}
-		if protocol.HasPermissionBits(f.Flags) && os.Chmod(of.temp, os.FileMode(f.Flags&0777)) != nil {
+		if !p.repoCfg.IgnorePerms && protocol.HasPermissionBits(f.Flags) && os.Chmod(of.temp, os.FileMode(f.Flags&0777)) != nil {
 			delete(p.openFiles, f.Name)
 			return
 		}
 		defTempNamer.Show(of.temp)
 		if Rename(of.temp, of.filepath) == nil {
-			p.model.updateLocal(p.repo, f)
+			p.model.updateLocal(p.repoCfg.ID, f)
 		}
 	}
 	delete(p.openFiles, f.Name)
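The effective rule the puller applies from here on: skip the chmod entirely when the repository ignores permissions, or when the sending side recorded no permission bits at all. A self-contained sketch of that gate; the constant below is a placeholder for the real protocol.FlagNoPermBits value, and the helper names are illustrative, not the project's API:

package main

import (
	"fmt"
	"os"
)

// Placeholder for protocol.FlagNoPermBits; the value here is illustrative only.
const flagNoPermBits uint32 = 1 << 31

func hasPermissionBits(flags uint32) bool {
	return flags&flagNoPermBits == 0
}

// applyPerms mirrors the gate added in this commit: chmod the temp file only
// when the repo does not ignore permissions and the flags carry real bits.
func applyPerms(ignorePerms bool, flags uint32, tempPath string) error {
	if ignorePerms || !hasPermissionBits(flags) {
		return nil // leave whatever the local umask produced
	}
	return os.Chmod(tempPath, os.FileMode(flags&0777))
}

func main() {
	// With IgnorePerms set, no chmod is attempted at all.
	fmt.Println(applyPerms(true, 0644, "ignored.tmp")) // <nil>
}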
@@ -552,8 +550,8 @@ func (p *puller) handleEmptyBlock(b bqBlock) {

 func (p *puller) queueNeededBlocks() {
 	queued := 0
-	for _, f := range p.model.NeedFilesRepo(p.repo) {
-		lf := p.model.CurrentRepoFile(p.repo, f.Name)
+	for _, f := range p.model.NeedFilesRepo(p.repoCfg.ID) {
+		lf := p.model.CurrentRepoFile(p.repoCfg.ID, f.Name)
 		have, need := scanner.BlockDiff(lf.Blocks, f.Blocks)
 		if debug {
 			l.Debugf("need:\n local: %v\n global: %v\n haveBlocks: %v\n needBlocks: %v", lf, f, have, need)
@@ -566,13 +564,13 @@ func (p *puller) queueNeededBlocks() {
 		})
 	}
 	if debug && queued > 0 {
-		l.Debugf("%q: queued %d blocks", p.repo, queued)
+		l.Debugf("%q: queued %d blocks", p.repoCfg.ID, queued)
 	}
 }

 func (p *puller) closeFile(f scanner.File) {
 	if debug {
-		l.Debugf("pull: closing %q / %q", p.repo, f.Name)
+		l.Debugf("pull: closing %q / %q", p.repoCfg.ID, f.Name)
 	}

 	of := p.openFiles[f.Name]
@@ -584,7 +582,7 @@ func (p *puller) closeFile(f scanner.File) {
 	fd, err := os.Open(of.temp)
 	if err != nil {
 		if debug {
-			l.Debugf("pull: error: %q / %q: %v", p.repo, f.Name, err)
+			l.Debugf("pull: error: %q / %q: %v", p.repoCfg.ID, f.Name, err)
 		}
 		return
 	}
@@ -593,31 +591,37 @@ func (p *puller) closeFile(f scanner.File) {

 	if l0, l1 := len(hb), len(f.Blocks); l0 != l1 {
 		if debug {
-			l.Debugf("pull: %q / %q: nblocks %d != %d", p.repo, f.Name, l0, l1)
+			l.Debugf("pull: %q / %q: nblocks %d != %d", p.repoCfg.ID, f.Name, l0, l1)
 		}
 		return
 	}

 	for i := range hb {
 		if bytes.Compare(hb[i].Hash, f.Blocks[i].Hash) != 0 {
-			l.Debugf("pull: %q / %q: block %d hash mismatch", p.repo, f.Name, i)
+			l.Debugf("pull: %q / %q: block %d hash mismatch", p.repoCfg.ID, f.Name, i)
 			return
 		}
 	}

 	t := time.Unix(f.Modified, 0)
-	os.Chtimes(of.temp, t, t)
-	if protocol.HasPermissionBits(f.Flags) {
-		os.Chmod(of.temp, os.FileMode(f.Flags&0777))
+	err = os.Chtimes(of.temp, t, t)
+	if debug && err != nil {
+		l.Debugf("pull: error: %q / %q: %v", p.repoCfg.ID, f.Name, err)
+	}
+	if !p.repoCfg.IgnorePerms && protocol.HasPermissionBits(f.Flags) {
+		err = os.Chmod(of.temp, os.FileMode(f.Flags&0777))
+		if debug && err != nil {
+			l.Debugf("pull: error: %q / %q: %v", p.repoCfg.ID, f.Name, err)
+		}
 	}
 	defTempNamer.Show(of.temp)
 	if debug {
-		l.Debugf("pull: rename %q / %q: %q", p.repo, f.Name, of.filepath)
+		l.Debugf("pull: rename %q / %q: %q", p.repoCfg.ID, f.Name, of.filepath)
 	}
 	if err := Rename(of.temp, of.filepath); err == nil {
-		p.model.updateLocal(p.repo, f)
+		p.model.updateLocal(p.repoCfg.ID, f)
 	} else {
-		l.Debugf("pull: error: %q / %q: %v", p.repo, f.Name, err)
+		l.Debugf("pull: error: %q / %q: %v", p.repoCfg.ID, f.Name, err)
 	}
 }

@@ -29,10 +29,10 @@ type Walker struct {
 	// Suppressed files will be returned with empty metadata and the Suppressed flag set.
 	// Requires CurrentFiler to be set.
 	Suppressor Suppressor
-	// If IgnorePermissions is true, changes to permission bits will not be
+	// If IgnorePerms is true, changes to permission bits will not be
 	// detected. Scanned files will get zero permission bits and the
 	// NoPermissionBits flag set.
-	IgnorePermissions bool
+	IgnorePerms bool
 }

 type TempNamer interface {
@@ -166,7 +166,7 @@ func (w *Walker) walkAndHashFiles(res *[]File, ign map[string][]string) filepath
 		if info.Mode().IsDir() {
 			if w.CurrentFiler != nil {
 				cf := w.CurrentFiler.CurrentFile(rn)
-				permUnchanged := w.IgnorePermissions || !protocol.HasPermissionBits(cf.Flags) || PermsEqual(cf.Flags, uint32(info.Mode()))
+				permUnchanged := w.IgnorePerms || !protocol.HasPermissionBits(cf.Flags) || PermsEqual(cf.Flags, uint32(info.Mode()))
 				if cf.Modified == info.ModTime().Unix() && protocol.IsDirectory(cf.Flags) && permUnchanged {
 					if debug {
 						l.Debugln("unchanged:", cf)
@@ -174,7 +174,7 @@ func (w *Walker) walkAndHashFiles(res *[]File, ign map[string][]string) filepath
 					*res = append(*res, cf)
 				} else {
 					var flags uint32 = protocol.FlagDirectory
-					if w.IgnorePermissions {
+					if w.IgnorePerms {
 						flags |= protocol.FlagNoPermBits
 					} else {
 						flags |= uint32(info.Mode() & os.ModePerm)
@@ -197,7 +197,7 @@ func (w *Walker) walkAndHashFiles(res *[]File, ign map[string][]string) filepath
 		if info.Mode().IsRegular() {
 			if w.CurrentFiler != nil {
 				cf := w.CurrentFiler.CurrentFile(rn)
-				permUnchanged := w.IgnorePermissions || !protocol.HasPermissionBits(cf.Flags) || PermsEqual(cf.Flags, uint32(info.Mode()))
+				permUnchanged := w.IgnorePerms || !protocol.HasPermissionBits(cf.Flags) || PermsEqual(cf.Flags, uint32(info.Mode()))
 				if !protocol.IsDeleted(cf.Flags) && cf.Modified == info.ModTime().Unix() && permUnchanged {
 					if debug {
 						l.Debugln("unchanged:", cf)
@@ -249,7 +249,7 @@ func (w *Walker) walkAndHashFiles(res *[]File, ign map[string][]string) filepath
 			}

 			var flags = uint32(info.Mode() & os.ModePerm)
-			if w.IgnorePermissions {
+			if w.IgnorePerms {
 				flags = protocol.FlagNoPermBits
 			}
 			f := File{
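On the scanning side the same switch works in the other direction: with IgnorePerms set, the walker records no permission bits at all and marks the entry accordingly, so permission differences can never surface as changes. A simplified, self-contained sketch of that flag computation; the constants are placeholders for the protocol package's values and the helper is illustrative, not the walker's actual code:

package main

import (
	"fmt"
	"os"
)

// Placeholder values; the real constants live in the protocol package.
const (
	flagDirectory  uint32 = 1 << 30
	flagNoPermBits uint32 = 1 << 31
)

// fileFlags sketches the walker's choice: either record the real permission
// bits, or record none and set the no-permission-bits marker instead.
func fileFlags(ignorePerms bool, mode os.FileMode, isDir bool) uint32 {
	var flags uint32
	if isDir {
		flags |= flagDirectory
	}
	if ignorePerms {
		flags |= flagNoPermBits
	} else {
		flags |= uint32(mode & os.ModePerm)
	}
	return flags
}

func main() {
	fmt.Printf("%o\n", fileFlags(false, 0755, false)) // permission bits recorded: 755
	fmt.Printf("%o\n", fileFlags(true, 0755, false))  // only the no-perm-bits marker, no mode bits
}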