package fzf

import (
	"bytes"
	"fmt"
	"regexp"
	"strconv"
	"strings"

	"github.com/junegunn/fzf/src/util"
)

const rangeEllipsis = 0

// Range represents nth-expression
type Range struct {
	begin int
	end   int
}

// Token contains the tokenized part of the strings and its prefix length
type Token struct {
	text         *util.Chars
	prefixLength int32
}

// String returns the string representation of a Token.
func (t Token) String() string {
	return fmt.Sprintf("Token{text: %s, prefixLength: %d}", t.text, t.prefixLength)
}

// Delimiter for tokenizing the input
type Delimiter struct {
	regex *regexp.Regexp
	str   *string
}

// String returns the string representation of a Delimiter.
// Note: it dereferences str, so it must only be called when str is non-nil.
func (d Delimiter) String() string {
	return fmt.Sprintf("Delimiter{regex: %v, str: &%q}", d.regex, *d.str)
}

// newRange normalizes a Range: the first field (1) and the last field (-1)
// are both represented as rangeEllipsis, so that "1..-1" means the whole line.
func newRange(begin int, end int) Range {
	if begin == 1 {
		begin = rangeEllipsis
	}
	if end == -1 {
		end = rangeEllipsis
	}
	return Range{begin, end}
}
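
// A minimal sketch of the normalization above (illustrative, not part of the
// original source):
//
//	newRange(1, -1) // => Range{rangeEllipsis, rangeEllipsis}, i.e. ".."
//	newRange(2, 4)  // => Range{2, 4}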

// ParseRange parses nth-expression and returns the corresponding Range object
func ParseRange(str *string) (Range, bool) {
	if (*str) == ".." {
		return newRange(rangeEllipsis, rangeEllipsis), true
	} else if strings.HasPrefix(*str, "..") {
		end, err := strconv.Atoi((*str)[2:])
		if err != nil || end == 0 {
			return Range{}, false
		}
		return newRange(rangeEllipsis, end), true
	} else if strings.HasSuffix(*str, "..") {
		begin, err := strconv.Atoi((*str)[:len(*str)-2])
		if err != nil || begin == 0 {
			return Range{}, false
		}
		return newRange(begin, rangeEllipsis), true
	} else if strings.Contains(*str, "..") {
		ns := strings.Split(*str, "..")
		if len(ns) != 2 {
			return Range{}, false
		}
		begin, err1 := strconv.Atoi(ns[0])
		end, err2 := strconv.Atoi(ns[1])
		if err1 != nil || err2 != nil || begin == 0 || end == 0 {
			return Range{}, false
		}
		return newRange(begin, end), true
	}

	n, err := strconv.Atoi(*str)
	if err != nil || n == 0 {
		return Range{}, false
	}
	return newRange(n, n), true
}
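
// Illustrative calls (a sketch of expected results, not part of the original
// source; fields are 1-based and 0 is rejected):
//
//	s := "3"
//	ParseRange(&s) // => Range{3, 3}, true
//	s = "2..-1"
//	ParseRange(&s) // => Range{2, rangeEllipsis}, true (-1 denotes the last field)
//	s = "0"
//	ParseRange(&s) // => Range{}, false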

// withPrefixLengths converts substrings to Tokens, accumulating each token's
// character offset from the beginning of the original string.
func withPrefixLengths(tokens []string, begin int) []Token {
	ret := make([]Token, len(tokens))

	prefixLength := begin
	for idx := range tokens {
		chars := util.ToChars(stringBytes(tokens[idx]))
		ret[idx] = Token{&chars, int32(prefixLength)}
		prefixLength += chars.Length()
	}
	return ret
}
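
// Sketch of the offset accumulation (illustrative, not in the original
// source; assumes util.Chars counts characters):
//
//	withPrefixLengths([]string{"foo ", "bar"}, 0)
//	// => [Token{"foo ", 0}, Token{"bar", 4}]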

const (
	awkNil = iota
	awkBlack
	awkWhite
)

func awkTokenizer(input string) ([]string, int) {
	// Whitespace here means ASCII tab (9) or space (32)
	ret := []string{}
	prefixLength := 0
	state := awkNil
	begin := 0
	end := 0
	for idx := 0; idx < len(input); idx++ {
		r := input[idx]
		white := r == 9 || r == 32
		switch state {
		case awkNil:
			if white {
				prefixLength++
			} else {
				state, begin, end = awkBlack, idx, idx+1
			}
		case awkBlack:
			end = idx + 1
			if white {
				state = awkWhite
			}
		case awkWhite:
			if white {
				end = idx + 1
			} else {
				ret = append(ret, input[begin:end])
				state, begin, end = awkBlack, idx, idx+1
			}
		}
	}
	if begin < end {
		ret = append(ret, input[begin:end])
	}
	return ret, prefixLength
}
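
// Sketch of the state machine's output (illustrative, not in the original
// source): each token keeps its trailing whitespace, and the leading
// whitespace is reported separately as the prefix length.
//
//	awkTokenizer("  foo  bar") // => []string{"foo  ", "bar"}, 2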

// Tokenize tokenizes the given string with the delimiter
func Tokenize(text string, delimiter Delimiter) []Token {
	if delimiter.str == nil && delimiter.regex == nil {
		// AWK-style (\S+\s*)
		tokens, prefixLength := awkTokenizer(text)
		return withPrefixLengths(tokens, prefixLength)
	}

	if delimiter.str != nil {
		return withPrefixLengths(strings.SplitAfter(text, *delimiter.str), 0)
	}

	// FIXME performance
	var tokens []string
	if delimiter.regex != nil {
		locs := delimiter.regex.FindAllStringIndex(text, -1)
		begin := 0
		for _, loc := range locs {
			tokens = append(tokens, text[begin:loc[1]])
			begin = loc[1]
		}
		if begin < len(text) {
			tokens = append(tokens, text[begin:])
		}
	}
	return withPrefixLengths(tokens, 0)
}
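
// Illustrative usage (a sketch, not part of the original source):
//
//	comma := ","
//	Tokenize("a,b,c", Delimiter{str: &comma})
//	// => tokens "a,", "b,", "c" with prefix lengths 0, 2, 4
//	Tokenize("foo bar", Delimiter{}) // AWK-style: "foo ", "bar"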

// joinTokens concatenates the tokens back into a single string
func joinTokens(tokens []Token) string {
	var output bytes.Buffer
	for _, token := range tokens {
		output.WriteString(token.text.ToString())
	}
	return output.String()
}

// Transform is used to transform the input when --with-nth option is given
func Transform(tokens []Token, withNth []Range) []Token {
	transTokens := make([]Token, len(withNth))
	numTokens := len(tokens)
	for idx, r := range withNth {
		parts := []*util.Chars{}
		minIdx := 0
		if r.begin == r.end {
			idx := r.begin
			if idx == rangeEllipsis {
				chars := util.ToChars(stringBytes(joinTokens(tokens)))
				parts = append(parts, &chars)
			} else {
				if idx < 0 {
					idx += numTokens + 1
				}
				if idx >= 1 && idx <= numTokens {
					minIdx = idx - 1
					parts = append(parts, tokens[idx-1].text)
				}
			}
		} else {
			var begin, end int
			if r.begin == rangeEllipsis { // ..N
				begin, end = 1, r.end
				if end < 0 {
					end += numTokens + 1
				}
			} else if r.end == rangeEllipsis { // N..
				begin, end = r.begin, numTokens
				if begin < 0 {
					begin += numTokens + 1
				}
			} else {
				begin, end = r.begin, r.end
				if begin < 0 {
					begin += numTokens + 1
				}
				if end < 0 {
					end += numTokens + 1
				}
			}
			minIdx = util.Max(0, begin-1)
			for idx := begin; idx <= end; idx++ {
				if idx >= 1 && idx <= numTokens {
					parts = append(parts, tokens[idx-1].text)
				}
			}
		}
		// Merge multiple parts
		var merged util.Chars
		switch len(parts) {
		case 0:
			merged = util.ToChars([]byte{})
		case 1:
			merged = *parts[0]
		default:
			var output bytes.Buffer
			for _, part := range parts {
				output.WriteString(part.ToString())
			}
			merged = util.ToChars(output.Bytes())
		}

		var prefixLength int32
		if minIdx < numTokens {
			prefixLength = tokens[minIdx].prefixLength
		} else {
			prefixLength = 0
		}
		transTokens[idx] = Token{&merged, prefixLength}
	}
	return transTokens
}
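
// Illustrative result of Transform (a sketch, not in the original source):
// given tokens from Tokenize("a b c", Delimiter{}) and the range parsed from
// "2..", the output is a single merged token "b c" whose prefix length (2)
// points at the original position of "b".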