fzf/src/tokenizer_test.go
Junegunn Choi 2b584586ed Add --accept-nth option to transform the output
This option can be used to replace a sed or awk command in the post-processing step.

  ps -ef | fzf --multi --header-lines 1 | awk '{print $2}'
  ps -ef | fzf --multi --header-lines 1 --accept-nth 2

This may not be a very "Unix-y" thing to do, so I've always felt that fzf
shouldn't have such an option, but I've finally changed my mind because:

* fzf can be configured with a custom delimiter that is a fixed string
  or a regular expression.
* In such cases, you'd have to repeat the delimiter in the
  post-processing step.
* Also, tools like awk or sed may interpret the same regular expression
  differently, causing mismatches.

You can still use sed, cut, or awk if you prefer.

Close #3987
Close #1323
2025-02-09 11:53:35 +09:00

package fzf

import (
	"testing"
)
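
// TestParseRange checks how nth-range expressions are parsed: bare indexes,
// bounded and open-ended ranges ("3..", ".."), negative (from-the-end)
// indexes, and rejection of malformed expressions such as "1..3..5" or
// ranges that mix negative and positive indexes.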
func TestParseRange(t *testing.T) {
	{
		i := ".."
		r, _ := ParseRange(&i)
		if r.begin != rangeEllipsis || r.end != rangeEllipsis {
			t.Errorf("%v", r)
		}
	}
	{
		i := "3.."
		r, _ := ParseRange(&i)
		if r.begin != 3 || r.end != rangeEllipsis {
			t.Errorf("%v", r)
		}
	}
	{
		i := "3..5"
		r, _ := ParseRange(&i)
		if r.begin != 3 || r.end != 5 {
			t.Errorf("%v", r)
		}
	}
	{
		i := "-3..-5"
		r, _ := ParseRange(&i)
		if r.begin != -3 || r.end != -5 {
			t.Errorf("%v", r)
		}
	}
	{
		i := "3"
		r, _ := ParseRange(&i)
		if r.begin != 3 || r.end != 3 {
			t.Errorf("%v", r)
		}
	}
	{
		i := "1..3..5"
		if r, ok := ParseRange(&i); ok {
			t.Errorf("%v", r)
		}
	}
	{
		i := "-3..3"
		if r, ok := ParseRange(&i); ok {
			t.Errorf("%v", r)
		}
	}
}
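
// TestTokenize covers the three tokenization modes: default AWK-style
// whitespace splitting, a fixed-string delimiter, and a regex delimiter.
// prefixLength records the offset of each token in the original line.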
func TestTokenize(t *testing.T) {
	// AWK-style
	input := "  abc:  def:  ghi  "
	tokens := Tokenize(input, Delimiter{})
	if tokens[0].text.ToString() != "abc:  " || tokens[0].prefixLength != 2 {
		t.Errorf("%s", tokens)
	}

	// With delimiter
	tokens = Tokenize(input, delimiterRegexp(":"))
	if tokens[0].text.ToString() != "  abc:" || tokens[0].prefixLength != 0 {
		t.Error(tokens[0].text.ToString(), tokens[0].prefixLength)
	}

	// With delimiter regex
	tokens = Tokenize(input, delimiterRegexp("\\s+"))
	if tokens[0].text.ToString() != "  " || tokens[0].prefixLength != 0 ||
		tokens[1].text.ToString() != "abc:  " || tokens[1].prefixLength != 2 ||
		tokens[2].text.ToString() != "def:  " || tokens[2].prefixLength != 8 ||
		tokens[3].text.ToString() != "ghi  " || tokens[3].prefixLength != 14 {
		t.Errorf("%s", tokens)
	}
}
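
// TestTransform verifies that Transform selects, merges, and reorders token
// ranges and that JoinTokens reassembles the result, for both AWK-style and
// delimiter-based tokens; this is the machinery behind options like --nth
// and the new --accept-nth.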
func TestTransform(t *testing.T) {
	input := "  abc:  def:  ghi:  jkl"
	{
		tokens := Tokenize(input, Delimiter{})
		{
			ranges, _ := splitNth("1,2,3")
			tx := Transform(tokens, ranges)
			if JoinTokens(tx) != "abc:  def:  ghi:  " {
				t.Errorf("%s", tx)
			}
		}
		{
			ranges, _ := splitNth("1..2,3,2..,1")
			tx := Transform(tokens, ranges)
			if JoinTokens(tx) != "abc:  def:  ghi:  def:  ghi:  jklabc:  " ||
				len(tx) != 4 ||
				tx[0].text.ToString() != "abc:  def:  " || tx[0].prefixLength != 2 ||
				tx[1].text.ToString() != "ghi:  " || tx[1].prefixLength != 14 ||
				tx[2].text.ToString() != "def:  ghi:  jkl" || tx[2].prefixLength != 8 ||
				tx[3].text.ToString() != "abc:  " || tx[3].prefixLength != 2 {
				t.Errorf("%s", tx)
			}
		}
	}
	{
		tokens := Tokenize(input, delimiterRegexp(":"))
		{
			ranges, _ := splitNth("1..2,3,2..,1")
			tx := Transform(tokens, ranges)
			if JoinTokens(tx) != "  abc:  def:  ghi:  def:  ghi:  jkl  abc:" ||
				len(tx) != 4 ||
				tx[0].text.ToString() != "  abc:  def:" || tx[0].prefixLength != 0 ||
				tx[1].text.ToString() != "  ghi:" || tx[1].prefixLength != 12 ||
				tx[2].text.ToString() != "  def:  ghi:  jkl" || tx[2].prefixLength != 6 ||
				tx[3].text.ToString() != "  abc:" || tx[3].prefixLength != 0 {
				t.Errorf("%s", tx)
			}
		}
	}
}
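
// TestTransformIndexOutOfBounds ensures Transform tolerates a range that
// points past the end of an empty token list without panicking.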
func TestTransformIndexOutOfBounds(t *testing.T) {
	s, _ := splitNth("1")
	Transform([]Token{}, s)
}
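
A minimal sketch of how the primitives exercised above might compose to
implement --accept-nth; acceptNth is a hypothetical helper for illustration,
and fzf's actual implementation may differ (for example, in how trailing
whitespace is handled):

  // Hypothetical helper, not part of fzf: tokenize the line, apply the
  // parsed nth-expression, and join the selected tokens back together.
  func acceptNth(line string, delim Delimiter, expr string) string {
          ranges, _ := splitNth(expr)     // e.g. "1,2,3" parses to a list of Ranges
          tokens := Tokenize(line, delim) // AWK-style or custom delimiter
          return JoinTokens(Transform(tokens, ranges))
  }

  // acceptNth("  abc:  def:  ghi:  jkl", Delimiter{}, "1,2,3")
  // returns "abc:  def:  ghi:  " per TestTransform above.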