package fzf

import (
	"fmt"
	"runtime"
	"sort"
	"sync"
	"time"

	"github.com/junegunn/fzf/src/util"
)
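
// Constants and events referenced below but not defined in this file
// (numPartitionsMultiplier, maxPartitions, slab16Size, slab32Size,
// progressMinDuration, EvtSearchProgress, EvtSearchFin) live elsewhere in
// this package.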

// MatchRequest represents a search request
type MatchRequest struct {
	chunks     []*Chunk
	pattern    *Pattern
	final      bool
	sort       bool
	clearCache bool
}

// Matcher is responsible for performing search
type Matcher struct {
	patternBuilder func([]rune) *Pattern
	sort           bool
	tac            bool
	eventBox       *util.EventBox
	reqBox         *util.EventBox
	partitions     int
	slab           []*util.Slab
	mergerCache    map[string]*Merger
}

const (
	reqRetry util.EventType = iota
	reqReset
)
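
// reqReset is posted by Reset when cancel is true; scan polls for it with
// reqBox.Peek and abandons the in-flight search. reqRetry queues a new
// request without interrupting the current one.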

// NewMatcher returns a new Matcher
func NewMatcher(patternBuilder func([]rune) *Pattern,
	sort bool, tac bool, eventBox *util.EventBox) *Matcher {
	partitions := util.Min(numPartitionsMultiplier*runtime.NumCPU(), maxPartitions)
	return &Matcher{
		patternBuilder: patternBuilder,
		sort:           sort,
		tac:            tac,
		eventBox:       eventBox,
		reqBox:         util.NewEventBox(),
		partitions:     partitions,
		slab:           make([]*util.Slab, partitions),
		mergerCache:    make(map[string]*Merger)}
}
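
// A rough sketch of how a caller might drive the matcher. The real wiring
// lives in the package's core loop; patternBuilder and chunkSnapshot are
// placeholders, not names defined in this file:
//
//	eventBox := util.NewEventBox()
//	matcher := NewMatcher(patternBuilder, true, false, eventBox)
//	go matcher.Loop()
//	matcher.Reset(chunkSnapshot, []rune("query"), false, false, true, false)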

// Loop puts Matcher in action
func (m *Matcher) Loop() {
	prevCount := 0

	for {
		var request MatchRequest

		m.reqBox.Wait(func(events *util.Events) {
			for _, val := range *events {
				switch val := val.(type) {
				case MatchRequest:
					request = val
				default:
					panic(fmt.Sprintf("Unexpected type: %T", val))
				}
			}
			events.Clear()
		})
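
		// Only the most recent MatchRequest matters: the loop above keeps
		// overwriting request, so earlier queued requests are dropped.
		// A change in the sort setting, or an explicit clearCache, invalidates
		// both the merger cache and the chunk cache before the new search.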
		if request.sort != m.sort || request.clearCache {
			m.sort = request.sort
			m.mergerCache = make(map[string]*Merger)
			clearChunkCache()
		}

		// Restart search
		patternString := request.pattern.AsString()
		var merger *Merger
		cancelled := false
		count := CountItems(request.chunks)
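
		// Merged results are cached per pattern string and trusted only while
		// the item count is unchanged; any items added by the reader since the
		// previous request invalidate the whole cache.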
		foundCache := false
		if count == prevCount {
			// Look up mergerCache
			if cached, found := m.mergerCache[patternString]; found {
				foundCache = true
				merger = cached
			}
		} else {
			// Invalidate mergerCache
			prevCount = count
			m.mergerCache = make(map[string]*Merger)
		}
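
		// On a cache miss, match from scratch. A scan may report that it was
		// cancelled by a pending reqReset, in which case nothing is published
		// for this request.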
		if !foundCache {
			merger, cancelled = m.scan(request)
		}

		if !cancelled {
			if merger.cacheable() {
				m.mergerCache[patternString] = merger
			}
			merger.final = request.final
			m.eventBox.Set(EvtSearchFin, merger)
		}
	}
}
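
// sliceChunks splits the chunk list into at most m.partitions roughly equal
// slices; the last slice absorbs any remainder.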
func (m *Matcher) sliceChunks(chunks []*Chunk) [][]*Chunk {
	partitions := m.partitions
	perSlice := len(chunks) / partitions

	if perSlice == 0 {
		partitions = len(chunks)
		perSlice = 1
	}

	slices := make([][]*Chunk, partitions)
	for i := 0; i < partitions; i++ {
		start := i * perSlice
		end := start + perSlice
		if i == partitions-1 {
			end = len(chunks)
		}
		slices[i] = chunks[start:end]
	}
	return slices
}

type partialResult struct {
	index   int
	matches []Result
}
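
// scan matches the request against every chunk in parallel and returns the
// resulting Merger along with a flag that is true when the scan was abandoned
// because a reqReset event arrived while it was running.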
func (m *Matcher) scan(request MatchRequest) (*Merger, bool) {
	startedAt := time.Now()

	numChunks := len(request.chunks)
	if numChunks == 0 {
		return EmptyMerger, false
	}
	pattern := request.pattern
	if pattern.IsEmpty() {
		return PassMerger(&request.chunks, m.tac), false
	}

	cancelled := util.NewAtomicBool(false)

	slices := m.sliceChunks(request.chunks)
	numSlices := len(slices)
	resultChan := make(chan partialResult, numSlices)
	countChan := make(chan int, numChunks)
	waitGroup := sync.WaitGroup{}
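
	// Fan out: one goroutine per slice. Each partition gets its own slab,
	// allocated lazily and reused across searches to limit allocations.
	// Per-chunk match counts are streamed over countChan so the main goroutine
	// can report progress and honor cancellation while matching is in flight.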
	for idx, chunks := range slices {
		waitGroup.Add(1)
		if m.slab[idx] == nil {
			m.slab[idx] = util.MakeSlab(slab16Size, slab32Size)
		}
		go func(idx int, slab *util.Slab, chunks []*Chunk) {
			defer func() { waitGroup.Done() }()
			count := 0
			allMatches := make([][]Result, len(chunks))
			for idx, chunk := range chunks {
				matches := request.pattern.Match(chunk, slab)
				allMatches[idx] = matches
				count += len(matches)
				if cancelled.Get() {
					return
				}
				countChan <- len(matches)
			}
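			// Flatten the per-chunk matches for this slice and, when sorting
			// is enabled, order them by relevance before handing them back.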
			sliceMatches := make([]Result, 0, count)
			for _, matches := range allMatches {
				sliceMatches = append(sliceMatches, matches...)
			}
			if m.sort {
				if m.tac {
					sort.Sort(ByRelevanceTac(sliceMatches))
				} else {
					sort.Sort(ByRelevance(sliceMatches))
				}
			}
			resultChan <- partialResult{idx, sliceMatches}
		}(idx, m.slab[idx], chunks)
	}
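
	// wait flags cancellation, blocks until every worker goroutine has
	// returned, and always reports true so callers can simply
	// `return nil, wait()`.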
	wait := func() bool {
		cancelled.Set(true)
		waitGroup.Wait()
		return true
	}
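
	// Consume per-chunk match counts as they arrive. This loop drives the
	// cancellation checks and progress events while the workers are running,
	// and ends once every chunk has been accounted for.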
	count := 0
	matchCount := 0
	for matchesInChunk := range countChan {
		count++
		matchCount += matchesInChunk

		if count == numChunks {
			break
		}

		if m.reqBox.Peek(reqReset) {
			return nil, wait()
		}

		if time.Since(startedAt) > progressMinDuration {
			m.eventBox.Set(EvtSearchProgress, float32(count)/float32(numChunks))
		}
	}
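
	// Fan in: collect one partial result per slice and index it by slice
	// position so the merger sees the slices in their original order.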
	partialResults := make([][]Result, numSlices)
	for range slices {
		partialResult := <-resultChan
		partialResults[partialResult.index] = partialResult.matches
	}
	return NewMerger(pattern, partialResults, m.sort, m.tac), false
}

// Reset is called to interrupt/signal the ongoing search
func (m *Matcher) Reset(chunks []*Chunk, patternRunes []rune, cancel bool, final bool, sort bool, clearCache bool) {
	pattern := m.patternBuilder(patternRunes)

	var event util.EventType
	if cancel {
		event = reqReset
	} else {
		event = reqRetry
	}
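	// Sorting is requested only when the pattern itself is sortable
	// (pattern.sortable); otherwise the results keep their input order.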
	m.reqBox.Set(event, MatchRequest{chunks, pattern, final, sort && pattern.sortable, clearCache})
}