package algo

/*

Algorithm
---------

FuzzyMatchV1 finds the first "fuzzy" occurrence of the pattern within the given
text in O(n) time where n is the length of the text. Once the position of the
last character is located, it traverses backwards to see if there's a shorter
substring that matches the pattern.

  a_____b___abc__  To find "abc"
  *-----*-----*>   1. Forward scan
           <***    2. Backward scan

The algorithm is simple and fast, but as it only sees the first occurrence,
it is not guaranteed to find the occurrence with the highest score.

  a_____b__c__abc
  *-----*--*  ***

FuzzyMatchV2 implements a modified version of the Smith-Waterman algorithm to
find the optimal solution (highest score) according to the scoring criteria.
Unlike the original algorithm, omission or mismatch of a character in the
pattern is not allowed.

Performance
-----------

The new V2 algorithm is slower than V1 as it examines all occurrences of the
pattern instead of stopping immediately after finding the first one. The time
complexity of the algorithm is O(nm) if a match is found and O(n) otherwise,
where n is the length of the item and m is the length of the pattern. Thus,
the performance overhead may not be noticeable for a query with high
selectivity. However, if performance matters more than the quality of the
result, you can still choose the V1 algorithm with --algo=v1.

Scoring criteria
----------------

- We prefer matches at special positions, such as the start of a word, or an
  uppercase character in camelCase words.

- That is, we prefer an occurrence of the pattern with more characters
  matching at special positions, even if the total match length is longer.
    e.g. "fuzzyfinder" vs. "fuzzy-finder" on "ff"
                            ````````````
- Also, if the first character in the pattern appears at one of the special
  positions, the bonus point for the position is multiplied by a constant
  as it is extremely likely that the first character in the typed pattern
  has more significance than the rest.
    e.g. "fo-bar" vs. "foob-r" on "br"
          ``````
- But since fzf is still a fuzzy finder, not an acronym finder, we should also
  consider the total length of the matched substring. This is why we have the
  gap penalty. The gap penalty increases as the length of the gap (distance
  between the matching characters) increases, so the effect of the bonus is
  eventually cancelled at some point.
    e.g. "fuzzyfinder" vs. "fuzzy-blurry-finder" on "ff"
          ```````````
- Consequently, it is crucial to find the right balance between the bonus
  and the gap penalty. The parameters were chosen so that the bonus is
  cancelled when the gap size increases beyond 8 characters.

- The bonus mechanism can have the undesirable side effect of ranking
  consecutive matches lower than ones with gaps.
    e.g. "foobar" vs. "foo-bar" on "foob"
                       ```````
- To correct this anomaly, we also give an extra bonus point to each character
  in a consecutive matching chunk.
    e.g. "foobar" vs. "foo-bar" on "foob"
          ``````
- The amount of the consecutive bonus is primarily determined by the bonus of
  the first character in the chunk.
    e.g. "foobar" vs. "out-of-bound" on "oob"
                       ````````````

*/

import (
	"bytes"
	"fmt"
	"strings"
	"unicode"
	"unicode/utf8"

	"github.com/junegunn/fzf/src/util"
)

var DEBUG bool

// indexAt maps a loop index to an actual character index, scanning from the
// end of the text when forward is false.
func indexAt(index int, max int, forward bool) int {
	if forward {
		return index
	}
	return max - index - 1
}

// Result contains the results of running a match function.
type Result struct {
	// TODO int32 should suffice
	Start int
	End   int
	Score int
}

const (
	scoreMatch        = 16
	scoreGapStart     = -3
	scoreGapExtension = -1

	// We prefer matches at the beginning of a word, but the bonus should not be
	// too great to prevent the longer acronym matches from always winning over
	// shorter fuzzy matches. The bonus point here was specifically chosen so
	// that the bonus is cancelled when the gap between the acronyms grows over
	// 8 characters, which is approximately the average length of the words
	// found in the web2 dictionary and my file system.
	bonusBoundary = scoreMatch / 2

	// Although the bonus point for non-word characters is non-contextual, we
	// need it for computing bonus points for consecutive chunks starting with
	// a non-word character.
	bonusNonWord = scoreMatch / 2

	// Edge-triggered bonus for matches in camelCase words.
	// Compared to the word-boundary case, they don't accompany single-character
	// gaps (e.g. FooBar vs. foo-bar), so we deduct the bonus point accordingly.
	bonusCamel123 = bonusBoundary + scoreGapExtension

	// Minimum bonus point given to characters in consecutive chunks.
	// Note that bonus points for consecutive matches wouldn't have been needed
	// if we used a fixed match score as in the original algorithm.
	bonusConsecutive = -(scoreGapStart + scoreGapExtension)

	// The first character in the typed pattern usually has more significance
	// than the rest so it's important that it appears at special positions
	// where bonus points are given, e.g. "to-go" vs. "ongoing" on "og" or on
	// "ogo". The amount of the extra bonus should be limited so that the gap
	// penalty is still respected.
	bonusFirstCharMultiplier = 2
)
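
// scoringSketch is an illustrative helper, not used by fzf itself. Under the
// simplifying assumption that only the alignments shown are considered, it
// spells out how the word-boundary bonus above interacts with the gap penalty
// for the "ff" example from the header comment: the two totals tie, i.e. the
// boundary bonus of the second 'f' is fully offset once the gap grows by
// 8 extra characters.
func scoringSketch() (int, int) {
	// "fuzzyfinder": 'f' at the start of a word (first pattern char, so the
	// bonus is multiplied), a 4-character gap "uzzy", then an 'f' with no bonus.
	fuzzyfinder := (scoreMatch + bonusBoundary*bonusFirstCharMultiplier) +
		(scoreGapStart + 3*scoreGapExtension) +
		scoreMatch // 32 - 6 + 16 = 42

	// "fuzzy-blurry-finder": the same first 'f', a 12-character gap
	// "uzzy-blurry-", then an 'f' that earns the word-boundary bonus.
	fuzzyBlurryFinder := (scoreMatch + bonusBoundary*bonusFirstCharMultiplier) +
		(scoreGapStart + 11*scoreGapExtension) +
		(scoreMatch + bonusBoundary) // 32 - 14 + 24 = 42

	return fuzzyfinder, fuzzyBlurryFinder
}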

type charClass int

const (
	charNonWord charClass = iota
	charLower
	charUpper
	charLetter
	charNumber
)

func posArray(withPos bool, len int) *[]int {
	if withPos {
		pos := make([]int, 0, len)
		return &pos
	}
	return nil
}

// alloc16 returns a 16-bit integer slice of the given size, carving it out of
// the pre-allocated slab when possible to avoid further allocations.
func alloc16(offset int, slab *util.Slab, size int) (int, []int16) {
	if slab != nil && cap(slab.I16) > offset+size {
		slice := slab.I16[offset : offset+size]
		return offset + size, slice
	}
	return offset, make([]int16, size)
}

// alloc32 is the 32-bit counterpart of alloc16.
func alloc32(offset int, slab *util.Slab, size int) (int, []int32) {
	if slab != nil && cap(slab.I32) > offset+size {
		slice := slab.I32[offset : offset+size]
		return offset + size, slice
	}
	return offset, make([]int32, size)
}
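
// exampleSlabReuse is illustrative only and not used by fzf: callers that run
// many matches are expected to pass a pre-allocated slab so that alloc16 and
// alloc32 can hand out sub-slices instead of allocating on every call.
// util.MakeSlab and the capacities below are assumptions here; see the util
// package for the real API.
func exampleSlabReuse() {
	slab := util.MakeSlab(100*1024, 2048) // assumed capacities for I16 / I32
	offset16, scores := alloc16(0, slab, 64)
	_, runes := alloc32(0, slab, 64)
	_, _, _ = offset16, scores, runes
}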

func charClassOfAscii(char rune) charClass {
	if char >= 'a' && char <= 'z' {
		return charLower
	} else if char >= 'A' && char <= 'Z' {
		return charUpper
	} else if char >= '0' && char <= '9' {
		return charNumber
	}
	return charNonWord
}

func charClassOfNonAscii(char rune) charClass {
	if unicode.IsLower(char) {
		return charLower
	} else if unicode.IsUpper(char) {
		return charUpper
	} else if unicode.IsNumber(char) {
		return charNumber
	} else if unicode.IsLetter(char) {
		return charLetter
	}
	return charNonWord
}

func charClassOf(char rune) charClass {
	if char <= unicode.MaxASCII {
		return charClassOfAscii(char)
	}
	return charClassOfNonAscii(char)
}
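
// charClassSketch is illustrative only: the character classes assigned to a
// few sample runes by the helpers above.
func charClassSketch() []charClass {
	return []charClass{
		charClassOf('f'), // charLower
		charClassOf('F'), // charUpper
		charClassOf('7'), // charNumber
		charClassOf('-'), // charNonWord
		charClassOf('ö'), // charLower (non-ASCII runes go through the unicode package)
	}
}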

// bonusFor returns the bonus point for a character of the given class
// depending on the class of the preceding character.
func bonusFor(prevClass charClass, class charClass) int16 {
	if prevClass == charNonWord && class != charNonWord {
		// Word boundary
		return bonusBoundary
	} else if prevClass == charLower && class == charUpper ||
		prevClass != charNumber && class == charNumber {
		// camelCase letter123
		return bonusCamel123
	} else if class == charNonWord {
		return bonusNonWord
	}
	return 0
}
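
// bonusSketch is another illustrative helper, not used by fzf: the bonus
// bonusFor assigns at a few representative transitions.
func bonusSketch() []int16 {
	return []int16{
		bonusFor(charNonWord, charLower), // "-b" in "foo-bar"  -> bonusBoundary
		bonusFor(charLower, charUpper),   // "oB" in "fooBar"   -> bonusCamel123
		bonusFor(charLower, charNumber),  // "r1" in "bar1"     -> bonusCamel123
		bonusFor(charLower, charNonWord), // "o-" in "foo-bar"  -> bonusNonWord
		bonusFor(charLower, charLower),   // "oo" in "foo"      -> 0
	}
}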

func bonusAt(input *util.Chars, idx int) int16 {
	if idx == 0 {
		return bonusBoundary
	}
	return bonusFor(charClassOf(input.Get(idx-1)), charClassOf(input.Get(idx)))
}

// normalizeRune converts a rune to its base form using the normalized table
// (e.g. 'é' to 'e'); runes outside the table's range are returned unchanged.
func normalizeRune(r rune) rune {
	if r < 0x00C0 || r > 0x2184 {
		return r
	}

	n := normalized[r]
	if n > 0 {
		return n
	}
	return r
}

// Algo functions make two assumptions
// 1. "pattern" is given in lowercase if "caseSensitive" is false
// 2. "pattern" is already normalized if "normalize" is true
type Algo func(caseSensitive bool, normalize bool, forward bool, input *util.Chars, pattern []rune, withPos bool, slab *util.Slab) (Result, *[]int)
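
// exampleInvocation is a minimal usage sketch, illustrative only and not used
// by fzf, honoring the two assumptions above: the caller lowercases the
// pattern for a case-insensitive match and wraps the input text with
// util.ToChars (assumed here to be the util package's constructor for Chars).
func exampleInvocation() {
	chars := util.ToChars([]byte("Hello World"))
	pattern := []rune("hw") // already lowercased since caseSensitive is false
	result, positions := FuzzyMatchV1(false, false, true, &chars, pattern, true, nil)
	fmt.Println(result.Start, result.End, result.Score) // match spans "Hello W"
	if positions != nil {
		fmt.Println(*positions) // indexes of the matched characters: 0 and 6
	}
}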

func trySkip(input *util.Chars, caseSensitive bool, b byte, from int) int {
	byteArray := input.Bytes()[from:]
	idx := bytes.IndexByte(byteArray, b)
	if idx == 0 {
		// Can't skip any further
		return from
	}
	// We may need to search for the uppercase letter again. We don't have to
	// consider normalization as we can be sure that this is an ASCII string.
	if !caseSensitive && b >= 'a' && b <= 'z' {
		if idx > 0 {
			byteArray = byteArray[:idx]
		}
		uidx := bytes.IndexByte(byteArray, b-32)
		if uidx >= 0 {
			idx = uidx
		}
	}
	if idx < 0 {
		return -1
	}
	return from + idx
}

func isAscii(runes []rune) bool {
	for _, r := range runes {
		if r >= utf8.RuneSelf {
			return false
		}
	}
	return true
}

// asciiFuzzyIndex quickly scans an ASCII input to check whether the pattern
// can possibly match, returning -1 when it cannot, or a position from which
// the full algorithm should start.
func asciiFuzzyIndex(input *util.Chars, pattern []rune, caseSensitive bool) int {
	// Can't determine; take the slow path
	if !input.IsBytes() {
		return 0
	}

	// A match is not possible
	if !isAscii(pattern) {
		return -1
	}

	firstIdx, idx := 0, 0
	for pidx := 0; pidx < len(pattern); pidx++ {
		idx = trySkip(input, caseSensitive, byte(pattern[pidx]), idx)
		if idx < 0 {
			return -1
		}
		if pidx == 0 && idx > 0 {
			// Step back to find the right bonus point
			firstIdx = idx - 1
		}
		idx++
	}
	return firstIdx
}
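
// exampleAsciiFuzzyIndex is illustrative only: asciiFuzzyIndex lets the match
// functions reject non-matching ASCII items cheaply before running the full
// algorithm. util.ToChars is assumed as in the earlier sketches.
func exampleAsciiFuzzyIndex() {
	chars := util.ToChars([]byte("algorithm.go"))
	fmt.Println(asciiFuzzyIndex(&chars, []rune("zzz"), true))  // -1: no possible match
	fmt.Println(asciiFuzzyIndex(&chars, []rune("algo"), true)) // starting index for the full scan
}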

func debugV2(T []rune, pattern []rune, F []int32, lastIdx int, H []int16, C []int16) {
	width := lastIdx - int(F[0]) + 1

	for i, f := range F {
		I := i * width
		if i == 0 {
			fmt.Print("  ")
			for j := int(f); j <= lastIdx; j++ {
				fmt.Printf(" " + string(T[j]) + " ")
			}
			fmt.Println()
		}
		fmt.Print(string(pattern[i]) + " ")
		for idx := int(F[0]); idx < int(f); idx++ {
			fmt.Print(" 0 ")
		}
		for idx := int(f); idx <= lastIdx; idx++ {
			fmt.Printf("%2d ", H[i*width+idx-int(F[0])])
		}
		fmt.Println()

		fmt.Print("  ")
		for idx, p := range C[I : I+width] {
			if idx+int(F[0]) < int(F[i]) {
				p = 0
			}
			if p > 0 {
				fmt.Printf("%2d ", p)
			} else {
				fmt.Print("   ")
			}
		}
		fmt.Println()
	}
}

func FuzzyMatchV2(caseSensitive bool, normalize bool, forward bool, input *util.Chars, pattern []rune, withPos bool, slab *util.Slab) (Result, *[]int) {
	// Assume that pattern is given in lowercase if case-insensitive.
	// First check if there's a match and calculate bonus for each position.
	// If the input string is too long, consider finding the matching chars in
	// this phase as well (non-optimal alignment).
	M := len(pattern)
	if M == 0 {
		return Result{0, 0, 0}, posArray(withPos, M)
	}
	N := input.Length()

	// Since the O(nm) algorithm can be prohibitively expensive for a large
	// input, we fall back to the greedy algorithm.
	if slab != nil && N*M > cap(slab.I16) {
		return FuzzyMatchV1(caseSensitive, normalize, forward, input, pattern, withPos, slab)
	}

	// Phase 1. Optimized search for ASCII string
	idx := asciiFuzzyIndex(input, pattern, caseSensitive)
	if idx < 0 {
		return Result{-1, -1, 0}, nil
	}

	// Reuse pre-allocated integer slices to avoid unnecessary allocation and
	// garbage collection
	offset16 := 0
	offset32 := 0
	offset16, H0 := alloc16(offset16, slab, N)
	offset16, C0 := alloc16(offset16, slab, N)
	// Bonus point for each position
	offset16, B := alloc16(offset16, slab, N)
	// The first occurrence of each character in the pattern
	offset32, F := alloc32(offset32, slab, M)
	// Rune array
	_, T := alloc32(offset32, slab, N)
	input.CopyRunes(T)

	// Phase 2. Calculate bonus for each point
	maxScore, maxScorePos := int16(0), 0
	pidx, lastIdx := 0, 0
	pchar0, pchar, prevH0, prevClass, inGap := pattern[0], pattern[0], int16(0), charNonWord, false
	Tsub := T[idx:]
	H0sub, C0sub, Bsub := H0[idx:][:len(Tsub)], C0[idx:][:len(Tsub)], B[idx:][:len(Tsub)]
	for off, char := range Tsub {
		var class charClass
		if char <= unicode.MaxASCII {
			class = charClassOfAscii(char)
			if !caseSensitive && class == charUpper {
				char += 32
			}
		} else {
			class = charClassOfNonAscii(char)
			if !caseSensitive && class == charUpper {
				char = unicode.To(unicode.LowerCase, char)
			}
			if normalize {
				char = normalizeRune(char)
			}
		}

		Tsub[off] = char
		bonus := bonusFor(prevClass, class)
		Bsub[off] = bonus
		prevClass = class

		if char == pchar {
			if pidx < M {
				F[pidx] = int32(idx + off)
				pidx++
				pchar = pattern[util.Min(pidx, M-1)]
			}
			lastIdx = idx + off
		}

		if char == pchar0 {
			score := scoreMatch + bonus*bonusFirstCharMultiplier
			H0sub[off] = score
			C0sub[off] = 1
			if M == 1 && (forward && score > maxScore || !forward && score >= maxScore) {
				maxScore, maxScorePos = score, idx+off
				if forward && bonus == bonusBoundary {
					break
				}
			}
			inGap = false
		} else {
			if inGap {
				H0sub[off] = util.Max16(prevH0+scoreGapExtension, 0)
			} else {
				H0sub[off] = util.Max16(prevH0+scoreGapStart, 0)
			}
			C0sub[off] = 0
			inGap = true
		}
		prevH0 = H0sub[off]
	}
	if pidx != M {
		return Result{-1, -1, 0}, nil
	}
	if M == 1 {
		result := Result{maxScorePos, maxScorePos + 1, int(maxScore)}
		if !withPos {
			return result, nil
		}
		pos := []int{maxScorePos}
		return result, &pos
	}

	// Phase 3. Fill in score matrix (H)
	// Unlike the original algorithm, we do not allow omission.
	f0 := int(F[0])
	width := lastIdx - f0 + 1
	offset16, H := alloc16(offset16, slab, width*M)
	copy(H, H0[f0:lastIdx+1])

	// Possible length of consecutive chunk at each position.
	_, C := alloc16(offset16, slab, width*M)
	copy(C, C0[f0:lastIdx+1])

	Fsub := F[1:]
	Psub := pattern[1:][:len(Fsub)]
	for off, f := range Fsub {
		f := int(f)
		pchar := Psub[off]
		pidx := off + 1
		row := pidx * width
		inGap := false
		Tsub := T[f : lastIdx+1]
		Bsub := B[f:][:len(Tsub)]
		Csub := C[row+f-f0:][:len(Tsub)]
		Cdiag := C[row+f-f0-1-width:][:len(Tsub)]
		Hsub := H[row+f-f0:][:len(Tsub)]
		Hdiag := H[row+f-f0-1-width:][:len(Tsub)]
		Hleft := H[row+f-f0-1:][:len(Tsub)]
		Hleft[0] = 0
		for off, char := range Tsub {
			col := off + f
			var s1, s2, consecutive int16

			if inGap {
				s2 = Hleft[off] + scoreGapExtension
			} else {
				s2 = Hleft[off] + scoreGapStart
			}

			if pchar == char {
				s1 = Hdiag[off] + scoreMatch
				b := Bsub[off]
				consecutive = Cdiag[off] + 1
				// Break consecutive chunk
				if b == bonusBoundary {
					consecutive = 1
				} else if consecutive > 1 {
					b = util.Max16(b, util.Max16(bonusConsecutive, B[col-int(consecutive)+1]))
				}
				if s1+b < s2 {
					s1 += Bsub[off]
					consecutive = 0
				} else {
					s1 += b
				}
			}
			Csub[off] = consecutive

			inGap = s1 < s2
			score := util.Max16(util.Max16(s1, s2), 0)
			if pidx == M-1 && (forward && score > maxScore || !forward && score >= maxScore) {
				maxScore, maxScorePos = score, col
			}
			Hsub[off] = score
		}
	}

	if DEBUG {
		debugV2(T, pattern, F, lastIdx, H, C)
	}

	// Phase 4. (Optional) Backtrace to find character positions
	pos := posArray(withPos, M)
	j := f0
	if withPos {
		i := M - 1
		j = maxScorePos
		preferMatch := true
		for {
			I := i * width
			j0 := j - f0
			s := H[I+j0]

			var s1, s2 int16
			if i > 0 && j >= int(F[i]) {
				s1 = H[I-width+j0-1]
			}
			if j > int(F[i]) {
				s2 = H[I+j0-1]
			}

			if s > s1 && (s > s2 || s == s2 && preferMatch) {
				*pos = append(*pos, j)
				if i == 0 {
					break
				}
				i--
			}
			preferMatch = C[I+j0] > 1 || I+width+j0+1 < len(C) && C[I+width+j0+1] > 0
			j--
		}
	}
	// The start offset we return here is only relevant when the begin tiebreak
	// is used. However, finding the accurate offset requires backtracking, and
	// we don't want to pay the extra cost for an option that has lost its
	// importance.
	return Result{j, maxScorePos + 1, int(maxScore)}, pos
}
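
// exampleV1vsV2 is illustrative only and not used by fzf: on the example from
// the header comment, FuzzyMatchV1 settles for the first, scattered occurrence
// of the pattern while FuzzyMatchV2 finds the consecutive occurrence with the
// higher score. util.ToChars is assumed as in the earlier sketches.
func exampleV1vsV2() {
	chars := util.ToChars([]byte("a_____b__c__abc"))
	pattern := []rune("abc")
	r1, _ := FuzzyMatchV1(true, false, true, &chars, pattern, false, nil)
	r2, pos := FuzzyMatchV2(true, false, true, &chars, pattern, true, nil)
	fmt.Println(r1.Start, r1.End, r1.Score)  // the scattered match starting at index 0
	fmt.Println(r2.Score, r2.Score > r1.Score) // V2's optimal alignment scores higher
	if pos != nil {
		fmt.Println(*pos) // positions of the trailing "abc"
	}
}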

// Implements the same scoring criteria as V2 so that the results of the
// different match functions are ranked consistently
func calculateScore(caseSensitive bool, normalize bool, text *util.Chars, pattern []rune, sidx int, eidx int, withPos bool) (int, *[]int) {
	pidx, score, inGap, consecutive, firstBonus := 0, 0, false, 0, int16(0)
	pos := posArray(withPos, len(pattern))
	prevClass := charNonWord
	if sidx > 0 {
		prevClass = charClassOf(text.Get(sidx - 1))
	}
	for idx := sidx; idx < eidx; idx++ {
		char := text.Get(idx)
		class := charClassOf(char)
		if !caseSensitive {
			if char >= 'A' && char <= 'Z' {
				char += 32
			} else if char > unicode.MaxASCII {
				char = unicode.To(unicode.LowerCase, char)
			}
		}
		// pattern is already normalized
		if normalize {
			char = normalizeRune(char)
		}
		if char == pattern[pidx] {
			if withPos {
				*pos = append(*pos, idx)
			}
			score += scoreMatch
			bonus := bonusFor(prevClass, class)
			if consecutive == 0 {
				firstBonus = bonus
			} else {
				// Break consecutive chunk
				if bonus == bonusBoundary {
					firstBonus = bonus
				}
				bonus = util.Max16(util.Max16(bonus, firstBonus), bonusConsecutive)
			}
			if pidx == 0 {
				score += int(bonus * bonusFirstCharMultiplier)
			} else {
				score += int(bonus)
			}
			inGap = false
			consecutive++
			pidx++
		} else {
			if inGap {
				score += scoreGapExtension
			} else {
				score += scoreGapStart
			}
			inGap = true
			consecutive = 0
			firstBonus = 0
		}
		prevClass = class
	}
	return score, pos
}

// FuzzyMatchV1 performs a fuzzy match using the greedy forward/backward scan
// described in the header comment.
func FuzzyMatchV1(caseSensitive bool, normalize bool, forward bool, text *util.Chars, pattern []rune, withPos bool, slab *util.Slab) (Result, *[]int) {
	if len(pattern) == 0 {
		return Result{0, 0, 0}, nil
	}
	if asciiFuzzyIndex(text, pattern, caseSensitive) < 0 {
		return Result{-1, -1, 0}, nil
	}

	pidx := 0
	sidx := -1
	eidx := -1

	lenRunes := text.Length()
	lenPattern := len(pattern)

	for index := 0; index < lenRunes; index++ {
		char := text.Get(indexAt(index, lenRunes, forward))
		// This is considerably faster than blindly applying strings.ToLower to the
		// whole string
		if !caseSensitive {
			// Partially inlining `unicode.ToLower`. Ugly, but makes a noticeable
			// difference in CPU cost. (Measured on Go 1.4.1. Also note that the Go
			// compiler as of now does not inline non-leaf functions.)
			if char >= 'A' && char <= 'Z' {
				char += 32
			} else if char > unicode.MaxASCII {
				char = unicode.To(unicode.LowerCase, char)
			}
		}
		if normalize {
			char = normalizeRune(char)
		}
		pchar := pattern[indexAt(pidx, lenPattern, forward)]
		if char == pchar {
			if sidx < 0 {
				sidx = index
			}
			if pidx++; pidx == lenPattern {
				eidx = index + 1
				break
			}
		}
	}

	if sidx >= 0 && eidx >= 0 {
		pidx--
		for index := eidx - 1; index >= sidx; index-- {
			tidx := indexAt(index, lenRunes, forward)
			char := text.Get(tidx)
			if !caseSensitive {
				if char >= 'A' && char <= 'Z' {
					char += 32
				} else if char > unicode.MaxASCII {
					char = unicode.To(unicode.LowerCase, char)
				}
			}

			pidx_ := indexAt(pidx, lenPattern, forward)
			pchar := pattern[pidx_]
			if char == pchar {
				if pidx--; pidx < 0 {
					sidx = index
					break
				}
			}
		}

		if !forward {
			sidx, eidx = lenRunes-eidx, lenRunes-sidx
		}

		score, pos := calculateScore(caseSensitive, normalize, text, pattern, sidx, eidx, withPos)
		return Result{sidx, eidx, score}, pos
	}
	return Result{-1, -1, 0}, nil
}

// ExactMatchNaive is a basic string searching algorithm that handles case
// sensitivity. Although naive, it still performs better than the combination
// of strings.ToLower + strings.Index for typical fzf use cases where input
// strings and patterns are not very long.
//
// Since 0.15.0, this function searches for the match with the highest
// bonus point, instead of stopping immediately after finding the first match.
// The solution is much cheaper since there is only one possible alignment of
// the pattern.
func ExactMatchNaive(caseSensitive bool, normalize bool, forward bool, text *util.Chars, pattern []rune, withPos bool, slab *util.Slab) (Result, *[]int) {
	if len(pattern) == 0 {
		return Result{0, 0, 0}, nil
	}

	lenRunes := text.Length()
	lenPattern := len(pattern)

	if lenRunes < lenPattern {
		return Result{-1, -1, 0}, nil
	}

	if asciiFuzzyIndex(text, pattern, caseSensitive) < 0 {
		return Result{-1, -1, 0}, nil
	}

	// For simplicity, only look at the bonus at the first character position
	pidx := 0
	bestPos, bonus, bestBonus := -1, int16(0), int16(-1)
	for index := 0; index < lenRunes; index++ {
		index_ := indexAt(index, lenRunes, forward)
		char := text.Get(index_)
		if !caseSensitive {
			if char >= 'A' && char <= 'Z' {
				char += 32
			} else if char > unicode.MaxASCII {
				char = unicode.To(unicode.LowerCase, char)
			}
		}
		if normalize {
			char = normalizeRune(char)
		}
		pidx_ := indexAt(pidx, lenPattern, forward)
		pchar := pattern[pidx_]
		if pchar == char {
			if pidx_ == 0 {
				bonus = bonusAt(text, index_)
			}
			pidx++
			if pidx == lenPattern {
				if bonus > bestBonus {
					bestPos, bestBonus = index, bonus
				}
				if bonus == bonusBoundary {
					break
				}
				index -= pidx - 1
				pidx, bonus = 0, 0
			}
		} else {
			index -= pidx
			pidx, bonus = 0, 0
		}
	}
	if bestPos >= 0 {
		var sidx, eidx int
		if forward {
			sidx = bestPos - lenPattern + 1
			eidx = bestPos + 1
		} else {
			sidx = lenRunes - (bestPos + 1)
			eidx = lenRunes - (bestPos - lenPattern + 1)
		}
		score, _ := calculateScore(caseSensitive, normalize, text, pattern, sidx, eidx, false)
		return Result{sidx, eidx, score}, nil
	}
	return Result{-1, -1, 0}, nil
}

// PrefixMatch performs prefix-match
func PrefixMatch(caseSensitive bool, normalize bool, forward bool, text *util.Chars, pattern []rune, withPos bool, slab *util.Slab) (Result, *[]int) {
	if len(pattern) == 0 {
		return Result{0, 0, 0}, nil
	}

	trimmedLen := 0
	if !unicode.IsSpace(pattern[0]) {
		trimmedLen = text.LeadingWhitespaces()
	}

	if text.Length()-trimmedLen < len(pattern) {
		return Result{-1, -1, 0}, nil
	}

	for index, r := range pattern {
		char := text.Get(trimmedLen + index)
		if !caseSensitive {
			char = unicode.ToLower(char)
		}
		if normalize {
			char = normalizeRune(char)
		}
		if char != r {
			return Result{-1, -1, 0}, nil
		}
	}
	lenPattern := len(pattern)
	score, _ := calculateScore(caseSensitive, normalize, text, pattern, trimmedLen, trimmedLen+lenPattern, false)
	return Result{trimmedLen, trimmedLen + lenPattern, score}, nil
}

// SuffixMatch performs suffix-match
func SuffixMatch(caseSensitive bool, normalize bool, forward bool, text *util.Chars, pattern []rune, withPos bool, slab *util.Slab) (Result, *[]int) {
	lenRunes := text.Length()
	trimmedLen := lenRunes
	if len(pattern) == 0 || !unicode.IsSpace(pattern[len(pattern)-1]) {
		trimmedLen -= text.TrailingWhitespaces()
	}
	if len(pattern) == 0 {
		return Result{trimmedLen, trimmedLen, 0}, nil
	}
	diff := trimmedLen - len(pattern)
	if diff < 0 {
		return Result{-1, -1, 0}, nil
	}

	for index, r := range pattern {
		char := text.Get(index + diff)
		if !caseSensitive {
			char = unicode.ToLower(char)
		}
		if normalize {
			char = normalizeRune(char)
		}
		if char != r {
			return Result{-1, -1, 0}, nil
		}
	}
	lenPattern := len(pattern)
	sidx := trimmedLen - lenPattern
	eidx := trimmedLen
	score, _ := calculateScore(caseSensitive, normalize, text, pattern, sidx, eidx, false)
	return Result{sidx, eidx, score}, nil
}

// EqualMatch performs equal-match
func EqualMatch(caseSensitive bool, normalize bool, forward bool, text *util.Chars, pattern []rune, withPos bool, slab *util.Slab) (Result, *[]int) {
	lenPattern := len(pattern)
	if lenPattern == 0 {
		return Result{-1, -1, 0}, nil
	}

	// Strip leading whitespaces
	trimmedLen := 0
	if !unicode.IsSpace(pattern[0]) {
		trimmedLen = text.LeadingWhitespaces()
	}

	// Strip trailing whitespaces
	trimmedEndLen := 0
	if !unicode.IsSpace(pattern[lenPattern-1]) {
		trimmedEndLen = text.TrailingWhitespaces()
	}

	if text.Length()-trimmedLen-trimmedEndLen != lenPattern {
		return Result{-1, -1, 0}, nil
	}
	match := true
	if normalize {
		runes := text.ToRunes()
		for idx, pchar := range pattern {
			char := runes[trimmedLen+idx]
			if !caseSensitive {
				char = unicode.To(unicode.LowerCase, char)
			}
			if normalizeRune(pchar) != normalizeRune(char) {
				match = false
				break
			}
		}
	} else {
		runes := text.ToRunes()
		runesStr := string(runes[trimmedLen : len(runes)-trimmedEndLen])
		if !caseSensitive {
			runesStr = strings.ToLower(runesStr)
		}
		match = runesStr == string(pattern)
	}
	if match {
		return Result{trimmedLen, trimmedLen + lenPattern, (scoreMatch+bonusBoundary)*lenPattern +
			(bonusFirstCharMultiplier-1)*bonusBoundary}, nil
	}
	return Result{-1, -1, 0}, nil
}
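
// exampleExactVariants is illustrative only and not used by fzf: a quick
// sketch of the non-fuzzy match functions above, which back fzf's anchored
// pattern syntax (e.g. a leading "^" for prefix match and a trailing "$" for
// suffix match). util.ToChars is assumed as in the earlier sketches; note how
// leading whitespace is skipped unless the pattern itself starts with
// whitespace.
func exampleExactVariants() {
	chars := util.ToChars([]byte("  src/algo/algo.go"))
	prefix, _ := PrefixMatch(true, false, true, &chars, []rune("src"), false, nil)
	suffix, _ := SuffixMatch(true, false, true, &chars, []rune(".go"), false, nil)
	fmt.Println(prefix.Start, prefix.End) // 2 5: the leading spaces are skipped
	fmt.Println(suffix.Start, suffix.End) // 15 18
}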