// tokenizer.go — tokenization and --with-nth transformation for fzf.
package fzf
import (
"regexp"
"strconv"
"strings"
)
// rangeEllipsis is the sentinel index meaning an open end of a Range
// (e.g. "..3" parses to begin == rangeEllipsis). Valid nth indexes are
// non-zero (ParseRange rejects 0), so 0 is free to serve as the sentinel.
const rangeEllipsis = 0
// Range represents nth-expression: a 1-based, inclusive range of token
// indexes. Negative values count from the end (-1 is the last token), and
// rangeEllipsis (0) leaves the corresponding side open, as in "3.." or "..3".
type Range struct {
	begin int
	end   int
}
// Transformed holds the result of tokenization and transformation
type Transformed struct {
	whole *string // concatenation of all extracted parts
	parts []Token // one Token per Range in the nth-expression
}
// Token contains the tokenized part of the strings and its prefix length
type Token struct {
	text         *string // token text; AWK-style tokens keep their trailing whitespace
	prefixLength int     // number of runes preceding this token in the original string
}
2015-01-11 18:01:24 +00:00
// ParseRange parses nth-expression and returns the corresponding Range object
2015-01-01 19:49:30 +00:00
func ParseRange(str *string) (Range, bool) {
if (*str) == ".." {
2015-01-11 18:01:24 +00:00
return Range{rangeEllipsis, rangeEllipsis}, true
2015-01-01 19:49:30 +00:00
} else if strings.HasPrefix(*str, "..") {
end, err := strconv.Atoi((*str)[2:])
if err != nil || end == 0 {
return Range{}, false
}
2015-01-11 18:01:24 +00:00
return Range{rangeEllipsis, end}, true
2015-01-01 19:49:30 +00:00
} else if strings.HasSuffix(*str, "..") {
begin, err := strconv.Atoi((*str)[:len(*str)-2])
if err != nil || begin == 0 {
return Range{}, false
}
2015-01-11 18:01:24 +00:00
return Range{begin, rangeEllipsis}, true
2015-01-01 19:49:30 +00:00
} else if strings.Contains(*str, "..") {
ns := strings.Split(*str, "..")
if len(ns) != 2 {
return Range{}, false
}
begin, err1 := strconv.Atoi(ns[0])
end, err2 := strconv.Atoi(ns[1])
if err1 != nil || err2 != nil {
return Range{}, false
}
return Range{begin, end}, true
}
n, err := strconv.Atoi(*str)
if err != nil || n == 0 {
return Range{}, false
}
return Range{n, n}, true
}
// withPrefixLengths wraps each token string in a Token, assigning to each
// the rune offset at which it starts. begin is the offset of the first
// token (i.e. the rune length of any leading whitespace).
func withPrefixLengths(tokens []string, begin int) []Token {
	ret := make([]Token, len(tokens))
	offset := begin
	for i := range tokens {
		// Copy the element into a fresh variable so the stored pointer does
		// not alias a value shared across iterations.
		text := tokens[i]
		ret[i] = Token{text: &text, prefixLength: offset}
		offset += len([]rune(text))
	}
	return ret
}
// States of the AWK-style tokenizer's scanner.
const (
	awkNil   = iota // before the first non-whitespace rune (leading whitespace)
	awkBlack        // inside a token
	awkWhite        // in whitespace that follows a token
)
// awkTokenizer splits the input AWK-style: each token is a run of
// non-whitespace runes together with the whitespace that follows it
// (\S+\s*), where only TAB (9) and SPACE (32) count as whitespace.
// It returns the tokens and the rune length of the leading whitespace,
// which belongs to no token.
func awkTokenizer(input *string) ([]string, int) {
	runes := []rune(*input)

	// Count (and skip) the leading whitespace
	prefixLength := 0
	for prefixLength < len(runes) && (runes[prefixLength] == 9 || runes[prefixLength] == 32) {
		prefixLength++
	}

	ret := []string{}
	begin := prefixLength
	for idx := prefixLength + 1; idx < len(runes); idx++ {
		white := runes[idx] == 9 || runes[idx] == 32
		prevWhite := runes[idx-1] == 9 || runes[idx-1] == 32
		// A non-whitespace rune right after whitespace starts a new token:
		// flush everything accumulated so far (token + trailing whitespace).
		if !white && prevWhite {
			ret = append(ret, string(runes[begin:idx]))
			begin = idx
		}
	}
	if begin < len(runes) {
		ret = append(ret, string(runes[begin:]))
	}
	return ret, prefixLength
}
2015-01-11 18:01:24 +00:00
// Tokenize tokenizes the given string with the delimiter
2015-01-01 19:49:30 +00:00
func Tokenize(str *string, delimiter *regexp.Regexp) []Token {
if delimiter == nil {
// AWK-style (\S+\s*)
tokens, prefixLength := awkTokenizer(str)
return withPrefixLengths(tokens, prefixLength)
}
2015-01-11 18:01:24 +00:00
tokens := delimiter.FindAllString(*str, -1)
return withPrefixLengths(tokens, 0)
2015-01-01 19:49:30 +00:00
}
// joinTokens concatenates the text of the given tokens into a single string.
// Collecting the parts and joining once avoids the quadratic cost of
// repeated string concatenation in a loop.
func joinTokens(tokens []Token) string {
	parts := make([]string, len(tokens))
	for idx, token := range tokens {
		parts[idx] = *token.text
	}
	return strings.Join(parts, "")
}
// Transform is used to transform the input when --with-nth option is given
//
// For each Range in withNth it extracts the selected tokens, concatenates
// them into one part, and returns the parts along with the concatenation
// of all parts. Indexes are 1-based; negative indexes count from the end;
// rangeEllipsis (0) stands for an open end of a range.
func Transform(tokens []Token, withNth []Range) *Transformed {
	transTokens := make([]Token, len(withNth))
	numTokens := len(tokens)
	whole := ""
	for idx, r := range withNth {
		part := ""
		// minIdx: 0-based index of the first token included in this part;
		// used below to pick the part's prefixLength.
		minIdx := 0
		if r.begin == r.end {
			// Single index, or ".." when both ends are rangeEllipsis.
			idx := r.begin // NOTE: shadows the loop variable idx
			if idx == rangeEllipsis {
				part += joinTokens(tokens)
			} else {
				if idx < 0 {
					// Negative index counts from the end (-1 == last token)
					idx += numTokens + 1
				}
				// Out-of-range indexes are silently ignored (empty part)
				if idx >= 1 && idx <= numTokens {
					minIdx = idx - 1
					part += *tokens[idx-1].text
				}
			}
		} else {
			// Proper range: resolve open and negative ends to 1-based bounds
			var begin, end int
			if r.begin == rangeEllipsis { // ..N
				begin, end = 1, r.end
				if end < 0 {
					end += numTokens + 1
				}
			} else if r.end == rangeEllipsis { // N..
				begin, end = r.begin, numTokens
				if begin < 0 {
					begin += numTokens + 1
				}
			} else { // M..N
				begin, end = r.begin, r.end
				if begin < 0 {
					begin += numTokens + 1
				}
				if end < 0 {
					end += numTokens + 1
				}
			}
			// Max is a helper defined elsewhere in this package.
			minIdx = Max(0, begin-1)
			for idx := begin; idx <= end; idx++ {
				// Silently skip indexes that fall outside the token list
				if idx >= 1 && idx <= numTokens {
					part += *tokens[idx-1].text
				}
			}
		}
		whole += part
		// The part inherits the prefix length of its first token —
		// presumably so positions within the part can be mapped back to
		// the original string; confirm against callers.
		var prefixLength int
		if minIdx < numTokens {
			prefixLength = tokens[minIdx].prefixLength
		} else {
			prefixLength = 0
		}
		transTokens[idx] = Token{&part, prefixLength}
	}
	return &Transformed{
		whole: &whole,
		parts: transTokens}
}