Mirror of https://github.com/junegunn/fzf.git (synced 2025-08-18 05:53:49 -07:00)
Add --accept-nth option to transform the output

This option can be used to replace a sed or awk in the post-processing step.

    ps -ef | fzf --multi --header-lines 1 | awk '{print $2}'
    ps -ef | fzf --multi --header-lines 1 --accept-nth 2

This may not be a very "Unix-y" thing to do, so I've always felt that fzf
shouldn't have such an option, but I've finally changed my mind because:

* fzf can be configured with a custom delimiter that is a fixed string or a
  regular expression.
* In such cases, you'd need to repeat the delimiter again in the
  post-processing step.
* Also, tools like awk or sed may interpret a regular expression differently,
  causing mismatches.

You can still use sed, cut, or awk if you prefer.

Close #3987
Close #1323
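For illustration, a minimal Go sketch of the kind of external post-processing step that --accept-nth makes unnecessary when a regular-expression delimiter is in play. The delimiter pattern and field index below are hypothetical; the point is only that the delimiter already given to fzf has to be spelled out a second time outside of it, possibly under different regex semantics:

    package main

    import (
        "bufio"
        "fmt"
        "os"
        "regexp"
    )

    func main() {
        // The same delimiter that was passed to fzf via --delimiter has to be
        // repeated here, and another tool's regex engine may not interpret it
        // identically.
        delim := regexp.MustCompile(`[:,]+`)
        scanner := bufio.NewScanner(os.Stdin)
        for scanner.Scan() {
            if fields := delim.Split(scanner.Text(), -1); len(fields) >= 2 {
                fmt.Println(fields[1]) // print the 2nd field, like --accept-nth 2
            }
        }
    }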
@@ -120,6 +120,10 @@ fields.
 .BI "\-\-with\-nth=" "N[,..]"
 Transform the presentation of each line using field index expressions
 .TP
+.BI "\-\-accept\-nth=" "N[,..]"
+Define which fields to print on accept. The last delimiter is stripped from the
+output.
+.TP
 .B "+s, \-\-no\-sort"
 Do not sort the result
 .TP
@@ -128,7 +128,7 @@ func Run(opts *Options) (int, error) {
             }
         }
         trans := Transform(tokens, opts.WithNth)
-        transformed := joinTokens(trans)
+        transformed := JoinTokens(trans)
         if len(header) < opts.HeaderLines {
             header = append(header, transformed)
             eventBox.Set(EvtHeader, header)
@@ -41,6 +41,7 @@ Usage: fzf [options]
                             integer or a range expression ([BEGIN]..[END]).
     --with-nth=N[,..]       Transform the presentation of each line using
                             field index expressions
+    --accept-nth=N[,..]     Define which fields to print on accept
     -d, --delimiter=STR     Field delimiter regex (default: AWK-style)
     +s, --no-sort           Do not sort the result
     --literal               Do not normalize latin script letters
@@ -544,6 +545,7 @@ type Options struct {
     Normalize bool
     Nth []Range
     WithNth []Range
+    AcceptNth []Range
     Delimiter Delimiter
     Sort int
     Track trackOption
@@ -666,6 +668,7 @@ func defaultOptions() *Options {
         Normalize: true,
         Nth: make([]Range, 0),
         WithNth: make([]Range, 0),
+        AcceptNth: make([]Range, 0),
         Delimiter: Delimiter{},
         Sort: 1000,
         Track: trackDisabled,
@@ -2383,6 +2386,14 @@ func parseOptions(index *int, opts *Options, allArgs []string) error {
             if opts.WithNth, err = splitNth(str); err != nil {
                 return err
             }
+        case "--accept-nth":
+            str, err := nextString("nth expression required")
+            if err != nil {
+                return err
+            }
+            if opts.AcceptNth, err = splitNth(str); err != nil {
+                return err
+            }
         case "-s", "--sort":
             if opts.Sort, err = optionalNumeric(1); err != nil {
                 return err
@@ -403,6 +403,8 @@ func (p *Pattern) transformInput(item *Item) []Token {

     tokens := Tokenize(item.text.ToString(), p.delimiter)
     ret := Transform(tokens, p.nth)
+    // TODO: We could apply StripLastDelimiter to exclude the last delimiter from
+    // the search allowing suffix match with a string or a regex delimiter.
     item.transformed = &transformed{p.revision, ret}
     return ret
 }
@@ -305,6 +305,7 @@ type Terminal struct {
     nthAttr tui.Attr
     nth []Range
     nthCurrent []Range
+    acceptNth []Range
     tabstop int
     margin [4]sizeSpec
     padding [4]sizeSpec
@@ -914,6 +915,7 @@ func NewTerminal(opts *Options, eventBox *util.EventBox, executor *util.Executor
         nthAttr: opts.Theme.Nth.Attr,
         nth: opts.Nth,
         nthCurrent: opts.Nth,
+        acceptNth: opts.AcceptNth,
         tabstop: opts.Tabstop,
         hasStartActions: false,
         hasResultActions: false,
@@ -1561,16 +1563,24 @@ func (t *Terminal) output() bool {
     for _, s := range t.printQueue {
         t.printer(s)
     }
+    transform := func(item *Item) string {
+        return item.AsString(t.ansi)
+    }
+    if len(t.acceptNth) > 0 {
+        transform = func(item *Item) string {
+            return JoinTokens(StripLastDelimiter(Transform(Tokenize(item.AsString(t.ansi), t.delimiter), t.acceptNth), t.delimiter))
+        }
+    }
     found := len(t.selected) > 0
     if !found {
         current := t.currentItem()
         if current != nil {
-            t.printer(current.AsString(t.ansi))
+            t.printer(transform(current))
             found = true
         }
     } else {
         for _, sel := range t.sortSelected() {
-            t.printer(sel.item.AsString(t.ansi))
+            t.printer(transform(sel.item))
         }
     }
     return found
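As a rough, self-contained sketch of what the new transform closure computes for a fixed string delimiter (the helper below is illustrative only; the real pipeline is Tokenize, Transform, StripLastDelimiter, and JoinTokens as chained above):

    package main

    import (
        "fmt"
        "strings"
    )

    // acceptNth approximates the --accept-nth output pipeline for a plain string
    // delimiter: tokenize with the delimiter kept attached to each field, pick
    // the requested 1-based fields, strip the delimiter and trailing whitespace
    // from the last picked field, and join the rest verbatim.
    func acceptNth(line, delim string, fields []int) string {
        tokens := strings.SplitAfter(line, delim) // "foo ,bar,baz" -> ["foo ,", "bar,", "baz"]
        picked := make([]string, 0, len(fields))
        for _, f := range fields {
            if f >= 1 && f <= len(tokens) {
                picked = append(picked, tokens[f-1])
            }
        }
        if n := len(picked); n > 0 {
            last := strings.TrimSuffix(picked[n-1], delim)
            picked[n-1] = strings.TrimRight(last, " \t")
        }
        return strings.Join(picked, "")
    }

    func main() {
        // Reproduces the expected value in test_accept_nth_string_delimiter below.
        fmt.Println(acceptNth("foo ,bar,baz", ",", []int{2, 2, 1, 3, 1})) // bar,bar,foo ,bazfoo
    }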
@@ -3847,7 +3857,7 @@ func replacePlaceholder(params replacePlaceholderParams) (string, []string) {
         elems, prefixLength := awkTokenizer(params.query)
         tokens := withPrefixLengths(elems, prefixLength)
         trans := Transform(tokens, nth)
-        result := joinTokens(trans)
+        result := JoinTokens(trans)
         if !flags.preserveSpace {
             result = strings.TrimSpace(result)
         }
@@ -3897,7 +3907,7 @@ func replacePlaceholder(params replacePlaceholderParams) (string, []string) {
     replace = func(item *Item) string {
         tokens := Tokenize(item.AsString(params.stripAnsi), params.delimiter)
         trans := Transform(tokens, ranges)
-        str := joinTokens(trans)
+        str := JoinTokens(trans)

         // trim the last delimiter
         if params.delimiter.str != nil {
@@ -211,7 +211,36 @@ func Tokenize(text string, delimiter Delimiter) []Token {
     return withPrefixLengths(tokens, 0)
 }

-func joinTokens(tokens []Token) string {
+// StripLastDelimiter removes the trailing delimiter and whitespaces from the
+// last token.
+func StripLastDelimiter(tokens []Token, delimiter Delimiter) []Token {
+    if len(tokens) == 0 {
+        return tokens
+    }
+
+    lastToken := tokens[len(tokens)-1]
+
+    if delimiter.str == nil && delimiter.regex == nil {
+        lastToken.text.TrimTrailingWhitespaces()
+    } else {
+        if delimiter.str != nil {
+            lastToken.text.TrimSuffix([]rune(*delimiter.str))
+        } else if delimiter.regex != nil {
+            str := lastToken.text.ToString()
+            locs := delimiter.regex.FindAllStringIndex(str, -1)
+            if len(locs) > 0 {
+                lastLoc := locs[len(locs)-1]
+                lastToken.text.SliceRight(lastLoc[0])
+            }
+        }
+        lastToken.text.TrimTrailingWhitespaces()
+    }
+
+    return tokens
+}
+
+// JoinTokens concatenates the tokens into a single string
+func JoinTokens(tokens []Token) string {
     var output bytes.Buffer
     for _, token := range tokens {
         output.WriteString(token.text.ToString())
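A standalone sketch of the regex branch above, operating on a plain string instead of a Chars value; the delimiter pattern is only an example:

    package main

    import (
        "fmt"
        "regexp"
        "strings"
    )

    // stripLastRegexDelimiter mirrors the regex branch of StripLastDelimiter:
    // find the last match of the delimiter in the final token, cut the token at
    // the start of that match, then drop any trailing whitespace.
    func stripLastRegexDelimiter(lastToken string, delim *regexp.Regexp) string {
        if locs := delim.FindAllStringIndex(lastToken, -1); len(locs) > 0 {
            lastToken = lastToken[:locs[len(locs)-1][0]]
        }
        return strings.TrimRight(lastToken, " \t")
    }

    func main() {
        delim := regexp.MustCompile(`[:,]+`)
        fmt.Println(stripLastRegexDelimiter("foo :,:", delim)) // foo
        fmt.Println(stripLastRegexDelimiter("baz", delim))     // baz (no delimiter; only whitespace trimming)
    }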
@@ -229,7 +258,7 @@ func Transform(tokens []Token, withNth []Range) []Token {
         if r.begin == r.end {
             idx := r.begin
             if idx == rangeEllipsis {
-                chars := util.ToChars(stringBytes(joinTokens(tokens)))
+                chars := util.ToChars(stringBytes(JoinTokens(tokens)))
                 parts = append(parts, &chars)
             } else {
                 if idx < 0 {
@@ -85,14 +85,14 @@ func TestTransform(t *testing.T) {
     {
         ranges, _ := splitNth("1,2,3")
         tx := Transform(tokens, ranges)
-        if joinTokens(tx) != "abc: def: ghi: " {
+        if JoinTokens(tx) != "abc: def: ghi: " {
             t.Errorf("%s", tx)
         }
     }
     {
         ranges, _ := splitNth("1..2,3,2..,1")
         tx := Transform(tokens, ranges)
-        if string(joinTokens(tx)) != "abc: def: ghi: def: ghi: jklabc: " ||
+        if string(JoinTokens(tx)) != "abc: def: ghi: def: ghi: jklabc: " ||
             len(tx) != 4 ||
             tx[0].text.ToString() != "abc: def: " || tx[0].prefixLength != 2 ||
             tx[1].text.ToString() != "ghi: " || tx[1].prefixLength != 14 ||
|
|||||||
{
|
{
|
||||||
ranges, _ := splitNth("1..2,3,2..,1")
|
ranges, _ := splitNth("1..2,3,2..,1")
|
||||||
tx := Transform(tokens, ranges)
|
tx := Transform(tokens, ranges)
|
||||||
if joinTokens(tx) != " abc: def: ghi: def: ghi: jkl abc:" ||
|
if JoinTokens(tx) != " abc: def: ghi: def: ghi: jkl abc:" ||
|
||||||
len(tx) != 4 ||
|
len(tx) != 4 ||
|
||||||
tx[0].text.ToString() != " abc: def:" || tx[0].prefixLength != 0 ||
|
tx[0].text.ToString() != " abc: def:" || tx[0].prefixLength != 0 ||
|
||||||
tx[1].text.ToString() != " ghi:" || tx[1].prefixLength != 12 ||
|
tx[1].text.ToString() != " ghi:" || tx[1].prefixLength != 12 ||
|
||||||
|
@@ -189,6 +189,27 @@ func (chars *Chars) TrimTrailingWhitespaces() {
     chars.slice = chars.slice[0 : len(chars.slice)-whitespaces]
 }

+func (chars *Chars) TrimSuffix(runes []rune) {
+    lastIdx := len(chars.slice)
+    firstIdx := lastIdx - len(runes)
+    if firstIdx < 0 {
+        return
+    }
+
+    for i := firstIdx; i < lastIdx; i++ {
+        char := chars.Get(i)
+        if char != runes[i-firstIdx] {
+            return
+        }
+    }
+
+    chars.slice = chars.slice[0:firstIdx]
+}
+
+func (chars *Chars) SliceRight(last int) {
+    chars.slice = chars.slice[:last]
+}
+
 func (chars *Chars) ToString() string {
     if runes := chars.optionalRunes(); runes != nil {
         return string(runes)
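The rune-wise semantics of the new Chars.TrimSuffix can be illustrated on a bare rune slice: the suffix is removed only when it matches in full, otherwise the slice is left untouched. This is a sketch, not the actual method:

    package main

    import "fmt"

    // trimSuffix mirrors Chars.TrimSuffix on a plain []rune: compare the
    // candidate suffix rune by rune and truncate only on a complete match.
    func trimSuffix(slice, suffix []rune) []rune {
        first := len(slice) - len(suffix)
        if first < 0 {
            return slice
        }
        for i := first; i < len(slice); i++ {
            if slice[i] != suffix[i-first] {
                return slice
            }
        }
        return slice[:first]
    }

    func main() {
        fmt.Println(string(trimSuffix([]rune("foo ,"), []rune(",")))) // "foo " (suffix removed)
        fmt.Println(string(trimSuffix([]rune("foo"), []rune(","))))   // "foo" (no match, unchanged)
    }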
@@ -1665,4 +1665,30 @@ class TestCore < TestInteractive
       assert_equal '', File.read(tempname).chomp
     end
   end
+
+  def test_accept_nth
+    tmux.send_keys %((echo "foo bar baz"; echo "bar baz foo") | #{FZF} --multi --accept-nth 2,2 --sync --bind start:select-all+accept > #{tempname}), :Enter
+    wait do
+      assert_path_exists tempname
+      assert_equal ['bar bar', 'baz baz'], File.readlines(tempname, chomp: true)
+    end
+  end
+
+  def test_accept_nth_string_delimiter
+    tmux.send_keys %(echo "foo ,bar,baz" | #{FZF} -d, --accept-nth 2,2,1,3,1 --sync --bind start:accept > #{tempname}), :Enter
+    wait do
+      assert_path_exists tempname
+      # Last delimiter and the whitespaces are removed
+      assert_equal ['bar,bar,foo ,bazfoo'], File.readlines(tempname, chomp: true)
+    end
+  end
+
+  def test_accept_nth_regex_delimiter
+    tmux.send_keys %(echo "foo :,:bar,baz" | #{FZF} --delimiter='[:,]+' --accept-nth 2,2,1,3,1 --sync --bind start:accept > #{tempname}), :Enter
+    wait do
+      assert_path_exists tempname
+      # Last delimiter and the whitespaces are removed
+      assert_equal ['bar,bar,foo :,:bazfoo'], File.readlines(tempname, chomp: true)
+    end
+  end
 end