Compare commits


23 Commits

Author SHA1 Message Date
Junegunn Choi
8156e9894e 0.10.3 2015-08-12 02:09:46 +09:00
Junegunn Choi
cacc212f12 [install] Prerelease of 0.10.3 2015-08-11 00:21:09 +09:00
Junegunn Choi
d0f2c00f9f Fix --with-nth performance; use simpler regular expression
Related #317
2015-08-11 00:15:41 +09:00
Junegunn Choi
766427de0c Fix --with-nth performance; avoid regex if possible
Close #317
2015-08-10 18:34:20 +09:00
Junegunn Choi
a7b75c99a5 [install] Stop the installer when it fails to download the binary
Close #312
2015-08-08 03:53:46 +09:00
Junegunn Choi
bae10a6582 [install] Add an extra newline character
so that it doesn't corrupt a file that doesn't end with a newline
character. Close #311.
2015-08-05 23:50:38 +09:00
Junegunn Choi
c4cf90a3d2 0.10.2 2015-08-03 00:21:21 +09:00
Junegunn Choi
15c49a3e08 Fix race condition 2015-08-03 00:14:34 +09:00
Junegunn Choi
ae87f6548a GoLint 2015-08-02 23:54:53 +09:00
Junegunn Choi
7833fa7396 [install] Always download binary when --pre is set 2015-08-02 15:09:57 +09:00
Junegunn Choi
9278f3acd2 [install] Add --pre option for downloading prerelease binary 2015-08-02 15:02:12 +09:00
Junegunn Choi
e83ae34a3b Update CHANGELOG - 0.10.2 2015-08-02 14:32:34 +09:00
Junegunn Choi
e13bafc1ab Performance fix - unnecessary rune conversion on --ansi
    > time cat /tmp/list | fzf-0.10.1-darwin_amd64 --ansi -fqwerty > /dev/null

    real    0m4.364s
    user    0m8.231s
    sys     0m0.820s

    > time cat /tmp/list | fzf --ansi -fqwerty > /dev/null

    real    0m4.624s
    user    0m5.755s
    sys     0m0.732s
2015-08-02 14:25:57 +09:00
Junegunn Choi
0ea66329b8 Performance tuning - eager rune array conversion
    > wc -l /tmp/list2
    2594098 /tmp/list2

    > time cat /tmp/list2 | fzf-0.10.1-darwin_amd64 -fqwerty > /dev/null

    real    0m5.418s
    user    0m10.990s
    sys     0m1.302s

    > time cat /tmp/list2 | fzf-head -fqwerty > /dev/null

    real    0m4.862s
    user    0m6.619s
    sys     0m0.982s
2015-08-02 14:00:18 +09:00
Junegunn Choi
634670e3ea Lint 2015-08-02 13:11:59 +09:00
Junegunn Choi
dea60b11bc Only consider the lengths of the relevant parts when --nth is set 2015-08-01 23:13:24 +09:00
Junegunn Choi
5e90f0a57b Fix default command so that it doesn't fail on dash-prefixed files
Close #310
2015-08-01 21:51:10 +09:00
Junegunn Choi
0b4542fcdf [vim] Temporarily disable &autochdir when opening files (#306) 2015-07-29 17:55:58 +09:00
Junegunn Choi
02bd2d2adf Do not proceed if $TERM is invalid
Related #305
2015-07-28 14:35:46 +09:00
Junegunn Choi
dce6fe6f2d [fzf-tmux] Ensure that the same $TERM value is used in the split
Fix #305. ncurses can crash on an invalid $TERM. fzf-tmux runs bash in
a new pane, so we have to make sure that $TERM is consistent with that
of the hosting shell.
2015-07-28 14:17:25 +09:00
Junegunn Choi
fcae99f09b No need to "tmux list-panes" when obviously not on tmux (#303) 2015-07-28 00:56:03 +09:00
Junegunn Choi
fb1b026d3d Always check if the pane is zoomed
Close #303
2015-07-28 00:30:17 +09:00
Junegunn Choi
9f953fc944 Do not use tmux pane if the current pane is zoomed
Close #303
2015-07-28 00:22:04 +09:00
31 changed files with 424 additions and 233 deletions

View File

@@ -1,6 +1,28 @@
CHANGELOG
=========
0.10.3
------
- Fixed slow performance of `--with-nth` when used with `--delimiter`
    - The regular expression engine of Go is currently very slow, so the fixed
      version treats the given delimiter pattern as a plain string instead of
      a regular expression unless it contains special characters and is
      a valid regular expression
    - Simpler regular expression for delimiter for better performance
0.10.2
------
### Fixes and improvements
- Improvement in perceived response time of queries
- Eager, efficient rune array conversion
- Graceful exit when ncurses fails to initialize (invalid $TERM)
- Improved ranking algorithm when `--nth` option is set
- Changed the default command not to fail when there are files whose names
  start with a dash
0.10.1
------
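
The `--with-nth` fix described above boils down to a three-way decision, which the `delimiterRegexp` change further down this page implements. Below is a condensed, standalone sketch of just that decision; the `classify` helper is illustrative and not part of the diff.

    package main

    import (
        "fmt"
        "regexp"
        "strings"
    )

    // classify mirrors the decision described in the changelog entry above:
    // a delimiter that survives regexp.QuoteMeta unchanged is treated as a plain
    // string, a broken pattern also falls back to a plain string, and only a
    // valid pattern containing special characters pays for the regex engine.
    func classify(delim string) string {
        delim = strings.Replace(delim, "\\t", "\t", -1) // special handling of \t, as in the diff
        if regexp.QuoteMeta(delim) == delim {
            return "plain string"
        }
        if _, err := regexp.Compile(delim); err != nil {
            return "plain string (invalid regex)"
        }
        return "regular expression (slow path)"
    }

    func main() {
        for _, d := range []string{":", "\\t", "[0-9]", "[0-9"} {
            fmt.Printf("%-8q -> %s\n", d, classify(d))
        }
    }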

View File

@@ -82,7 +82,7 @@ while [ $# -gt 0 ]; do
shift
done
if [ -z "$TMUX_PANE" ]; then
if [ -z "$TMUX_PANE" ] || tmux list-panes -F '#F' | grep -q Z; then
fzf "${args[@]}"
exit $?
fi
@@ -107,7 +107,7 @@ fail() {
fzf="$(which fzf 2> /dev/null)" || fzf="$(dirname "$0")/fzf"
[ -x "$fzf" ] || fail "fzf executable not found"
envs="env "
envs="env TERM=$TERM "
[ -n "$FZF_DEFAULT_OPTS" ] && envs="$envs FZF_DEFAULT_OPTS=$(printf %q "$FZF_DEFAULT_OPTS")"
[ -n "$FZF_DEFAULT_COMMAND" ] && envs="$envs FZF_DEFAULT_COMMAND=$(printf %q "$FZF_DEFAULT_COMMAND")"

install
View File

@@ -1,6 +1,7 @@
#!/usr/bin/env bash
version=0.10.1
[[ "$@" =~ --pre ]] && version=0.10.3 pre=1 ||
version=0.10.3 pre=0
cd $(dirname $BASH_SOURCE)
fzf_base=$(pwd)
@@ -45,11 +46,13 @@ symlink() {
download() {
echo "Downloading bin/fzf ..."
if [[ ! $1 =~ dev && -x "$fzf_base"/bin/fzf ]]; then
echo " - Already exists"
check_binary && return
elif [ -x "$fzf_base"/bin/$1 ]; then
symlink $1 && check_binary && return
if [ $pre = 0 ]; then
if [ -x "$fzf_base"/bin/fzf ]; then
echo " - Already exists"
check_binary && return
elif [ -x "$fzf_base"/bin/$1 ]; then
symlink $1 && check_binary && return
fi
fi
mkdir -p "$fzf_base"/bin && cd "$fzf_base"/bin
if [ $? -ne 0 ]; then
@@ -93,6 +96,7 @@ if [ -n "$binary_error" ]; then
echo "No prebuilt binary for $archi ... "
else
echo " - $binary_error !!!"
exit 1
fi
echo "Installing legacy Ruby version ..."
@@ -247,6 +251,7 @@ append_line() {
if [ -n "$line" ]; then
echo " - Already exists: line #$line"
else
echo >> "$2"
echo "$1" >> "$2"
echo " + Added"
fi

View File

@@ -21,7 +21,7 @@ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
..
.TH fzf 1 "July 2015" "fzf 0.10.1" "fzf - a command-line fuzzy finder"
.TH fzf 1 "Aug 2015" "fzf 0.10.3" "fzf - a command-line fuzzy finder"
.SH NAME
fzf - a command-line fuzzy finder

View File

@@ -54,13 +54,17 @@ function! s:fzf_exec()
return s:exec
endfunction
function! s:tmux_not_zoomed()
return system('tmux list-panes -F "#F"') !~# 'Z'
endfunction
function! s:tmux_enabled()
if has('gui_running')
return 0
endif
if exists('s:tmux')
return s:tmux
return s:tmux && s:tmux_not_zoomed()
endif
let s:tmux = 0
@@ -68,7 +72,7 @@ function! s:tmux_enabled()
let output = system('tmux -V')
let s:tmux = !v:shell_error && output >= 'tmux 1.7'
endif
return s:tmux
return s:tmux && s:tmux_not_zoomed()
endfunction
function! s:shellesc(arg)
@@ -364,9 +368,15 @@ function! s:cmd_callback(lines) abort
endif
let key = remove(a:lines, 0)
let cmd = get(s:action, key, 'e')
for item in a:lines
execute cmd s:escape(item)
endfor
try
let autochdir = &autochdir
set noautochdir
for item in a:lines
execute cmd s:escape(item)
endfor
finally
let &autochdir = autochdir
endtry
endfunction
function! s:cmd(bang, ...) abort

View File

@@ -16,7 +16,7 @@ import (
*/
// FuzzyMatch performs fuzzy-match
func FuzzyMatch(caseSensitive bool, runes *[]rune, pattern []rune) (int, int) {
func FuzzyMatch(caseSensitive bool, runes []rune, pattern []rune) (int, int) {
if len(pattern) == 0 {
return 0, 0
}
@@ -34,7 +34,7 @@ func FuzzyMatch(caseSensitive bool, runes *[]rune, pattern []rune) (int, int) {
sidx := -1
eidx := -1
for index, char := range *runes {
for index, char := range runes {
// This is considerably faster than blindly applying strings.ToLower to the
// whole string
if !caseSensitive {
@@ -61,7 +61,7 @@ func FuzzyMatch(caseSensitive bool, runes *[]rune, pattern []rune) (int, int) {
if sidx >= 0 && eidx >= 0 {
pidx--
for index := eidx - 1; index >= sidx; index-- {
char := (*runes)[index]
char := runes[index]
if !caseSensitive {
if char >= 'A' && char <= 'Z' {
char += 32
@@ -88,12 +88,12 @@ func FuzzyMatch(caseSensitive bool, runes *[]rune, pattern []rune) (int, int) {
//
// We might try to implement better algorithms in the future:
// http://en.wikipedia.org/wiki/String_searching_algorithm
func ExactMatchNaive(caseSensitive bool, runes *[]rune, pattern []rune) (int, int) {
func ExactMatchNaive(caseSensitive bool, runes []rune, pattern []rune) (int, int) {
if len(pattern) == 0 {
return 0, 0
}
numRunes := len(*runes)
numRunes := len(runes)
plen := len(pattern)
if numRunes < plen {
return -1, -1
@@ -101,7 +101,7 @@ func ExactMatchNaive(caseSensitive bool, runes *[]rune, pattern []rune) (int, in
pidx := 0
for index := 0; index < numRunes; index++ {
char := (*runes)[index]
char := runes[index]
if !caseSensitive {
if char >= 'A' && char <= 'Z' {
char += 32
@@ -123,13 +123,13 @@ func ExactMatchNaive(caseSensitive bool, runes *[]rune, pattern []rune) (int, in
}
// PrefixMatch performs prefix-match
func PrefixMatch(caseSensitive bool, runes *[]rune, pattern []rune) (int, int) {
if len(*runes) < len(pattern) {
func PrefixMatch(caseSensitive bool, runes []rune, pattern []rune) (int, int) {
if len(runes) < len(pattern) {
return -1, -1
}
for index, r := range pattern {
char := (*runes)[index]
char := runes[index]
if !caseSensitive {
char = unicode.ToLower(char)
}
@@ -141,7 +141,7 @@ func PrefixMatch(caseSensitive bool, runes *[]rune, pattern []rune) (int, int) {
}
// SuffixMatch performs suffix-match
func SuffixMatch(caseSensitive bool, input *[]rune, pattern []rune) (int, int) {
func SuffixMatch(caseSensitive bool, input []rune, pattern []rune) (int, int) {
runes := util.TrimRight(input)
trimmedLen := len(runes)
diff := trimmedLen - len(pattern)
@@ -161,11 +161,12 @@ func SuffixMatch(caseSensitive bool, input *[]rune, pattern []rune) (int, int) {
return trimmedLen - len(pattern), trimmedLen
}
func EqualMatch(caseSensitive bool, runes *[]rune, pattern []rune) (int, int) {
if len(*runes) != len(pattern) {
// EqualMatch performs equal-match
func EqualMatch(caseSensitive bool, runes []rune, pattern []rune) (int, int) {
if len(runes) != len(pattern) {
return -1, -1
}
runesStr := string(*runes)
runesStr := string(runes)
if !caseSensitive {
runesStr = strings.ToLower(runesStr)
}
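
The signature changes above (`*[]rune` to `[]rune`) do not start copying the character data: a Go slice value is just a small pointer/length/capacity header, so passing it by value is as cheap as passing a pointer to it while dropping the `(*runes)[index]` dereferences. A tiny standalone illustration, not code from the diff:

    package main

    import "fmt"

    // scanUpper walks a []rune passed by value; only the slice header is copied,
    // the underlying rune array is shared with the caller.
    func scanUpper(runes []rune) int {
        for i, r := range runes { // no (*runes)[i] dereference needed
            if r >= 'A' && r <= 'Z' {
                return i
            }
        }
        return -1
    }

    func main() {
        runes := []rune("hello World")
        fmt.Println(scanUpper(runes)) // 6
    }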

View File

@@ -5,12 +5,11 @@ import (
"testing"
)
func assertMatch(t *testing.T, fun func(bool, *[]rune, []rune) (int, int), caseSensitive bool, input string, pattern string, sidx int, eidx int) {
func assertMatch(t *testing.T, fun func(bool, []rune, []rune) (int, int), caseSensitive bool, input string, pattern string, sidx int, eidx int) {
if !caseSensitive {
pattern = strings.ToLower(pattern)
}
runes := []rune(input)
s, e := fun(caseSensitive, &runes, []rune(pattern))
s, e := fun(caseSensitive, []rune(input), []rune(pattern))
if s != sidx {
t.Errorf("Invalid start index: %d (expected: %d, %s / %s)", s, sidx, input, pattern)
}

View File

@@ -36,7 +36,7 @@ func init() {
ansiRegex = regexp.MustCompile("\x1b\\[[0-9;]*[mK]")
}
func extractColor(str *string, state *ansiState) (*string, []ansiOffset, *ansiState) {
func extractColor(str string, state *ansiState) (string, []ansiOffset, *ansiState) {
var offsets []ansiOffset
var output bytes.Buffer
@@ -45,9 +45,9 @@ func extractColor(str *string, state *ansiState) (*string, []ansiOffset, *ansiSt
}
idx := 0
for _, offset := range ansiRegex.FindAllStringIndex(*str, -1) {
output.WriteString((*str)[idx:offset[0]])
newState := interpretCode((*str)[offset[0]:offset[1]], state)
for _, offset := range ansiRegex.FindAllStringIndex(str, -1) {
output.WriteString(str[idx:offset[0]])
newState := interpretCode(str[offset[0]:offset[1]], state)
if !newState.equals(state) {
if state != nil {
@@ -69,7 +69,7 @@ func extractColor(str *string, state *ansiState) (*string, []ansiOffset, *ansiSt
idx = offset[1]
}
rest := (*str)[idx:]
rest := str[idx:]
if len(rest) > 0 {
output.WriteString(rest)
if state != nil {
@@ -77,8 +77,7 @@ func extractColor(str *string, state *ansiState) (*string, []ansiOffset, *ansiSt
(&offsets[len(offsets)-1]).offset[1] = int32(utf8.RuneCount(output.Bytes()))
}
}
outputStr := output.String()
return &outputStr, offsets, state
return output.String(), offsets, state
}
func interpretCode(ansiCode string, prevState *ansiState) *ansiState {

View File

@@ -17,9 +17,9 @@ func TestExtractColor(t *testing.T) {
var state *ansiState
clean := "\x1b[0m"
check := func(assertion func(ansiOffsets []ansiOffset, state *ansiState)) {
output, ansiOffsets, newState := extractColor(&src, state)
output, ansiOffsets, newState := extractColor(src, state)
state = newState
if *output != "hello world" {
if output != "hello world" {
t.Errorf("Invalid output: {}", output)
}
fmt.Println(src, ansiOffsets, clean)

View File

@@ -7,7 +7,7 @@ type Chunk []*Item // >>> []Item
// ItemBuilder is a closure type that builds Item object from a pointer to a
// string and an integer
type ItemBuilder func(*string, int) *Item
type ItemBuilder func([]byte, int) *Item
// ChunkList is a list of Chunks
type ChunkList struct {
@@ -26,7 +26,7 @@ func NewChunkList(trans ItemBuilder) *ChunkList {
trans: trans}
}
func (c *Chunk) push(trans ItemBuilder, data *string, index int) bool {
func (c *Chunk) push(trans ItemBuilder, data []byte, index int) bool {
item := trans(data, index)
if item != nil {
*c = append(*c, item)
@@ -53,7 +53,7 @@ func CountItems(cs []*Chunk) int {
}
// Push adds the item to the list
func (cl *ChunkList) Push(data string) bool {
func (cl *ChunkList) Push(data []byte) bool {
cl.mutex.Lock()
defer cl.mutex.Unlock()
@@ -62,7 +62,7 @@ func (cl *ChunkList) Push(data string) bool {
cl.chunks = append(cl.chunks, &newChunk)
}
if cl.lastChunk().push(cl.trans, &data, cl.count) {
if cl.lastChunk().push(cl.trans, data, cl.count) {
cl.count++
return true
}

View File

@@ -6,8 +6,8 @@ import (
)
func TestChunkList(t *testing.T) {
cl := NewChunkList(func(s *string, i int) *Item {
return &Item{text: s, rank: Rank{0, 0, uint32(i * 2)}}
cl := NewChunkList(func(s []byte, i int) *Item {
return &Item{text: []rune(string(s)), rank: Rank{0, 0, uint32(i * 2)}}
})
// Snapshot
@@ -17,8 +17,8 @@ func TestChunkList(t *testing.T) {
}
// Add some data
cl.Push("hello")
cl.Push("world")
cl.Push([]byte("hello"))
cl.Push([]byte("world"))
// Previously created snapshot should remain the same
if len(snapshot) > 0 {
@@ -36,8 +36,8 @@ func TestChunkList(t *testing.T) {
if len(*chunk1) != 2 {
t.Error("Snapshot should contain only two items")
}
if *(*chunk1)[0].text != "hello" || (*chunk1)[0].rank.index != 0 ||
*(*chunk1)[1].text != "world" || (*chunk1)[1].rank.index != 2 {
if string((*chunk1)[0].text) != "hello" || (*chunk1)[0].rank.index != 0 ||
string((*chunk1)[1].text) != "world" || (*chunk1)[1].rank.index != 2 {
t.Error("Invalid data")
}
if chunk1.IsFull() {
@@ -46,7 +46,7 @@ func TestChunkList(t *testing.T) {
// Add more data
for i := 0; i < chunkSize*2; i++ {
cl.Push(fmt.Sprintf("item %d", i))
cl.Push([]byte(fmt.Sprintf("item %d", i)))
}
// Previous snapshot should remain the same
@@ -64,8 +64,8 @@ func TestChunkList(t *testing.T) {
t.Error("Unexpected number of items")
}
cl.Push("hello")
cl.Push("world")
cl.Push([]byte("hello"))
cl.Push([]byte("world"))
lastChunkCount := len(*snapshot[len(snapshot)-1])
if lastChunkCount != 2 {

View File

@@ -8,14 +8,14 @@ import (
const (
// Current version
Version = "0.10.1"
version = "0.10.3"
// Core
coordinatorDelayMax time.Duration = 100 * time.Millisecond
coordinatorDelayStep time.Duration = 10 * time.Millisecond
// Reader
defaultCommand = `find * -path '*/\.*' -prune -o -type f -print -o -type l -print 2> /dev/null`
defaultCommand = `find . -path '*/\.*' -prune -o -type f -print -o -type l -print 2> /dev/null | sed s/^..//`
// Terminal
initialDelay = 100 * time.Millisecond

View File

@@ -55,7 +55,7 @@ func Run(opts *Options) {
rankTiebreak = opts.Tiebreak
if opts.Version {
fmt.Println(Version)
fmt.Println(version)
os.Exit(0)
}
@@ -63,62 +63,68 @@ func Run(opts *Options) {
eventBox := util.NewEventBox()
// ANSI code processor
ansiProcessor := func(data *string) (*string, []ansiOffset) {
// By default, we do nothing
ansiProcessor := func(data []byte) ([]rune, []ansiOffset) {
return util.BytesToRunes(data), nil
}
ansiProcessorRunes := func(data []rune) ([]rune, []ansiOffset) {
return data, nil
}
if opts.Ansi {
if opts.Theme != nil {
var state *ansiState
ansiProcessor = func(data *string) (*string, []ansiOffset) {
trimmed, offsets, newState := extractColor(data, state)
ansiProcessor = func(data []byte) ([]rune, []ansiOffset) {
trimmed, offsets, newState := extractColor(string(data), state)
state = newState
return trimmed, offsets
return []rune(trimmed), offsets
}
} else {
// When color is disabled but ansi option is given,
// we simply strip out ANSI codes from the input
ansiProcessor = func(data *string) (*string, []ansiOffset) {
trimmed, _, _ := extractColor(data, nil)
return trimmed, nil
ansiProcessor = func(data []byte) ([]rune, []ansiOffset) {
trimmed, _, _ := extractColor(string(data), nil)
return []rune(trimmed), nil
}
}
ansiProcessorRunes = func(data []rune) ([]rune, []ansiOffset) {
return ansiProcessor([]byte(string(data)))
}
}
// Chunk list
var chunkList *ChunkList
header := make([]string, 0, opts.HeaderLines)
if len(opts.WithNth) == 0 {
chunkList = NewChunkList(func(data *string, index int) *Item {
chunkList = NewChunkList(func(data []byte, index int) *Item {
if len(header) < opts.HeaderLines {
header = append(header, *data)
header = append(header, string(data))
eventBox.Set(EvtHeader, header)
return nil
}
data, colors := ansiProcessor(data)
runes, colors := ansiProcessor(data)
return &Item{
text: data,
text: runes,
index: uint32(index),
colors: colors,
rank: Rank{0, 0, uint32(index)}}
})
} else {
chunkList = NewChunkList(func(data *string, index int) *Item {
tokens := Tokenize(data, opts.Delimiter)
chunkList = NewChunkList(func(data []byte, index int) *Item {
runes := util.BytesToRunes(data)
tokens := Tokenize(runes, opts.Delimiter)
trans := Transform(tokens, opts.WithNth)
if len(header) < opts.HeaderLines {
header = append(header, *joinTokens(trans))
header = append(header, string(joinTokens(trans)))
eventBox.Set(EvtHeader, header)
return nil
}
item := Item{
text: joinTokens(trans),
origText: data,
origText: &runes,
index: uint32(index),
colors: nil,
rank: Rank{0, 0, uint32(index)}}
trimmed, colors := ansiProcessor(item.text)
trimmed, colors := ansiProcessorRunes(item.text)
item.text = trimmed
item.colors = colors
return &item
@@ -128,8 +134,8 @@ func Run(opts *Options) {
// Reader
streamingFilter := opts.Filter != nil && !sort && !opts.Tac && !opts.Sync
if !streamingFilter {
reader := Reader{func(str string) bool {
return chunkList.Push(str)
reader := Reader{func(data []byte) bool {
return chunkList.Push(data)
}, eventBox, opts.ReadZero}
go reader.ReadSource()
}
@@ -151,10 +157,10 @@ func Run(opts *Options) {
if streamingFilter {
reader := Reader{
func(str string) bool {
item := chunkList.trans(&str, 0)
func(runes []byte) bool {
item := chunkList.trans(runes, 0)
if item != nil && pattern.MatchItem(item) {
fmt.Println(*item.text)
fmt.Println(string(item.text))
}
return false
}, eventBox, opts.ReadZero}

View File

@@ -8,6 +8,7 @@ package curses
import "C"
import (
"fmt"
"os"
"os/signal"
"syscall"
@@ -258,6 +259,10 @@ func Init(theme *ColorTheme, black bool, mouse bool) {
C.setlocale(C.LC_ALL, C.CString(""))
_screen = C.newterm(nil, C.stderr, C.stdin)
if _screen == nil {
fmt.Println("Invalid $TERM: " + os.Getenv("TERM"))
os.Exit(1)
}
C.set_term(_screen)
if mouse {
C.mousemask(C.ALL_MOUSE_EVENTS, nil)

View File

@@ -7,6 +7,7 @@ import (
"strings"
)
// History struct represents input history
type History struct {
path string
lines []string
@@ -15,6 +16,7 @@ type History struct {
cursor int
}
// NewHistory returns the pointer to a new History struct
func NewHistory(path string, maxSize int) (*History, error) {
fmtError := func(e error) error {
if os.IsPermission(e) {

View File

@@ -17,9 +17,9 @@ type colorOffset struct {
// Item represents each input line
type Item struct {
text *string
origText *string
transformed *[]Token
text []rune
origText *[]rune
transformed []Token
index uint32
offsets []Offset
colors []ansiOffset
@@ -37,14 +37,14 @@ type Rank struct {
var rankTiebreak tiebreak
// Rank calculates rank of the Item
func (i *Item) Rank(cache bool) Rank {
if cache && (i.rank.matchlen > 0 || i.rank.tiebreak > 0) {
return i.rank
func (item *Item) Rank(cache bool) Rank {
if cache && (item.rank.matchlen > 0 || item.rank.tiebreak > 0) {
return item.rank
}
matchlen := 0
prevEnd := 0
minBegin := math.MaxUint16
for _, offset := range i.offsets {
for _, offset := range item.offsets {
begin := int(offset[0])
end := int(offset[1])
if prevEnd > begin {
@@ -63,13 +63,22 @@ func (i *Item) Rank(cache bool) Rank {
var tiebreak uint16
switch rankTiebreak {
case byLength:
tiebreak = uint16(len(*i.text))
// It is guaranteed that .transformed is not nil in normal execution
if item.transformed != nil {
lenSum := 0
for _, token := range item.transformed {
lenSum += len(token.text)
}
tiebreak = uint16(lenSum)
} else {
tiebreak = uint16(len(item.text))
}
case byBegin:
// We can't just look at i.offsets[0][0] because it can be an inverse term
// We can't just look at item.offsets[0][0] because it can be an inverse term
tiebreak = uint16(minBegin)
case byEnd:
if prevEnd > 0 {
tiebreak = uint16(1 + len(*i.text) - prevEnd)
tiebreak = uint16(1 + len(item.text) - prevEnd)
} else {
// Empty offsets due to inverse terms.
tiebreak = 1
@@ -77,24 +86,26 @@ func (i *Item) Rank(cache bool) Rank {
case byIndex:
tiebreak = 1
}
rank := Rank{uint16(matchlen), tiebreak, i.index}
rank := Rank{uint16(matchlen), tiebreak, item.index}
if cache {
i.rank = rank
item.rank = rank
}
return rank
}
// AsString returns the original string
func (i *Item) AsString() string {
return *i.StringPtr()
func (item *Item) AsString() string {
return *item.StringPtr()
}
// StringPtr returns the pointer to the original string
func (i *Item) StringPtr() *string {
if i.origText != nil {
return i.origText
func (item *Item) StringPtr() *string {
runes := item.text
if item.origText != nil {
runes = *item.origText
}
return i.text
str := string(runes)
return &str
}
func (item *Item) colorOffsets(color int, bold bool, current bool) []colorOffset {
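
The `byLength` branch above is what the changelog means by considering only the lengths of the relevant parts when `--nth` is set: with `-n2 -d:`, the tiebreak for `123:hello` is the length of `hello`, not of the whole line (this matches the `test_tiebreak_length_with_nth` case added at the end of this compare). A small standalone illustration; the `token` shape mirrors the tokenizer change later on this page, and the helper is illustrative:

    package main

    import "fmt"

    // token mirrors the shape of fzf's Token after this change: a rune slice
    // plus a prefix length (unused here).
    type token struct {
        text         []rune
        prefixLength int
    }

    // lengthTiebreak sums only the transformed (--nth) token lengths, falling
    // back to the full text when no transformation was applied.
    func lengthTiebreak(text []rune, transformed []token) int {
        if transformed == nil {
            return len(text)
        }
        sum := 0
        for _, t := range transformed {
            sum += len(t.text)
        }
        return sum
    }

    func main() {
        line := []rune("123:hello")
        field2 := []token{{text: []rune("hello")}}
        fmt.Println(lengthTiebreak(line, nil))    // 9 (whole line)
        fmt.Println(lengthTiebreak(line, field2)) // 5 (second field only)
    }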

View File

@@ -39,14 +39,14 @@ func TestRankComparison(t *testing.T) {
// Match length, string length, index
func TestItemRank(t *testing.T) {
strs := []string{"foo", "foobar", "bar", "baz"}
item1 := Item{text: &strs[0], index: 1, offsets: []Offset{}}
strs := [][]rune{[]rune("foo"), []rune("foobar"), []rune("bar"), []rune("baz")}
item1 := Item{text: strs[0], index: 1, offsets: []Offset{}}
rank1 := item1.Rank(true)
if rank1.matchlen != 0 || rank1.tiebreak != 3 || rank1.index != 1 {
t.Error(item1.Rank(true))
}
// Only differ in index
item2 := Item{text: &strs[0], index: 0, offsets: []Offset{}}
item2 := Item{text: strs[0], index: 0, offsets: []Offset{}}
items := []*Item{&item1, &item2}
sort.Sort(ByRelevance(items))
@@ -62,10 +62,10 @@ func TestItemRank(t *testing.T) {
}
// Sort by relevance
item3 := Item{text: &strs[1], rank: Rank{0, 0, 2}, offsets: []Offset{Offset{1, 3}, Offset{5, 7}}}
item4 := Item{text: &strs[1], rank: Rank{0, 0, 2}, offsets: []Offset{Offset{1, 2}, Offset{6, 7}}}
item5 := Item{text: &strs[2], rank: Rank{0, 0, 2}, offsets: []Offset{Offset{1, 3}, Offset{5, 7}}}
item6 := Item{text: &strs[2], rank: Rank{0, 0, 2}, offsets: []Offset{Offset{1, 2}, Offset{6, 7}}}
item3 := Item{text: strs[1], rank: Rank{0, 0, 2}, offsets: []Offset{Offset{1, 3}, Offset{5, 7}}}
item4 := Item{text: strs[1], rank: Rank{0, 0, 2}, offsets: []Offset{Offset{1, 2}, Offset{6, 7}}}
item5 := Item{text: strs[2], rank: Rank{0, 0, 2}, offsets: []Offset{Offset{1, 3}, Offset{5, 7}}}
item6 := Item{text: strs[2], rank: Rank{0, 0, 2}, offsets: []Offset{Offset{1, 2}, Offset{6, 7}}}
items = []*Item{&item1, &item2, &item3, &item4, &item5, &item6}
sort.Sort(ByRelevance(items))
if items[0] != &item2 || items[1] != &item1 ||

View File

@@ -96,7 +96,7 @@ func (m *Matcher) Loop() {
}
if !cancelled {
if merger.Cacheable() {
if merger.cacheable() {
m.mergerCache[patternString] = merger
}
merger.final = request.final

View File

@@ -82,7 +82,7 @@ func (mg *Merger) Get(idx int) *Item {
panic(fmt.Sprintf("Index out of bounds (unsorted, %d/%d)", idx, mg.count))
}
func (mg *Merger) Cacheable() bool {
func (mg *Merger) cacheable() bool {
return mg.count < mergerCacheMax
}

View File

@@ -22,7 +22,7 @@ func randItem() *Item {
offsets[idx] = Offset{sidx, eidx}
}
return &Item{
text: &str,
text: []rune(str),
index: rand.Uint32(),
offsets: offsets}
}

View File

@@ -1,7 +1,6 @@
package fzf
import (
"fmt"
"io/ioutil"
"os"
"regexp"
@@ -104,7 +103,7 @@ type Options struct {
Case Case
Nth []Range
WithNth []Range
Delimiter *regexp.Regexp
Delimiter Delimiter
Sort int
Tac bool
Tiebreak tiebreak
@@ -149,7 +148,7 @@ func defaultOptions() *Options {
Case: CaseSmart,
Nth: make([]Range, 0),
WithNth: make([]Range, 0),
Delimiter: nil,
Delimiter: Delimiter{},
Sort: 1000,
Tac: false,
Tiebreak: byLength,
@@ -268,17 +267,23 @@ func splitNth(str string) []Range {
return ranges
}
func delimiterRegexp(str string) *regexp.Regexp {
rx, e := regexp.Compile(str)
if e != nil {
str = regexp.QuoteMeta(str)
func delimiterRegexp(str string) Delimiter {
// Special handling of \t
str = strings.Replace(str, "\\t", "\t", -1)
// 1. Pattern does not contain any special character
if regexp.QuoteMeta(str) == str {
return Delimiter{str: &str}
}
rx, e = regexp.Compile(fmt.Sprintf("(?:.*?%s)|(?:.+?$)", str))
rx, e := regexp.Compile(str)
// 2. Pattern is not a valid regular expression
if e != nil {
errorExit("invalid regular expression: " + e.Error())
return Delimiter{str: &str}
}
return rx
// 3. Pattern as regular expression. Slow.
return Delimiter{regex: rx}
}
func isAlphabet(char uint8) bool {
@@ -513,7 +518,7 @@ func parseKeymap(keymap map[int]actionType, execmap map[int]string, toggleSort b
case "delete-char":
keymap[key] = actDeleteChar
case "delete-char/eof":
keymap[key] = actDeleteCharEof
keymap[key] = actDeleteCharEOF
case "end-of-line":
keymap[key] = actEndOfLine
case "cancel":

View File

@@ -8,11 +8,59 @@ import (
)
func TestDelimiterRegex(t *testing.T) {
rx := delimiterRegexp("*")
tokens := rx.FindAllString("-*--*---**---", -1)
if tokens[0] != "-*" || tokens[1] != "--*" || tokens[2] != "---*" ||
tokens[3] != "*" || tokens[4] != "---" {
t.Errorf("%s %s %d", rx, tokens, len(tokens))
// Valid regex
delim := delimiterRegexp(".")
if delim.regex == nil || delim.str != nil {
t.Error(delim)
}
// Broken regex -> string
delim = delimiterRegexp("[0-9")
if delim.regex != nil || *delim.str != "[0-9" {
t.Error(delim)
}
// Valid regex
delim = delimiterRegexp("[0-9]")
if delim.regex.String() != "[0-9]" || delim.str != nil {
t.Error(delim)
}
// Tab character
delim = delimiterRegexp("\t")
if delim.regex != nil || *delim.str != "\t" {
t.Error(delim)
}
// Tab expression
delim = delimiterRegexp("\\t")
if delim.regex != nil || *delim.str != "\t" {
t.Error(delim)
}
// Tabs -> regex
delim = delimiterRegexp("\t+")
if delim.regex == nil || delim.str != nil {
t.Error(delim)
}
}
func TestDelimiterRegexString(t *testing.T) {
delim := delimiterRegexp("*")
tokens := Tokenize([]rune("-*--*---**---"), delim)
if delim.regex != nil ||
string(tokens[0].text) != "-*" ||
string(tokens[1].text) != "--*" ||
string(tokens[2].text) != "---*" ||
string(tokens[3].text) != "*" ||
string(tokens[4].text) != "---" {
t.Errorf("%s %s %d", delim, tokens, len(tokens))
}
}
func TestDelimiterRegexRegex(t *testing.T) {
delim := delimiterRegexp("--\\*")
tokens := Tokenize([]rune("-*--*---**---"), delim)
if delim.str != nil ||
string(tokens[0].text) != "-*--*" ||
string(tokens[1].text) != "---*" ||
string(tokens[2].text) != "*---" {
t.Errorf("%s %d", tokens, len(tokens))
}
}

View File

@@ -42,9 +42,9 @@ type Pattern struct {
text []rune
terms []term
hasInvTerm bool
delimiter *regexp.Regexp
delimiter Delimiter
nth []Range
procFun map[termType]func(bool, *[]rune, []rune) (int, int)
procFun map[termType]func(bool, []rune, []rune) (int, int)
}
var (
@@ -71,7 +71,7 @@ func clearChunkCache() {
// BuildPattern builds Pattern object from the given arguments
func BuildPattern(mode Mode, caseMode Case,
nth []Range, delimiter *regexp.Regexp, runes []rune) *Pattern {
nth []Range, delimiter Delimiter, runes []rune) *Pattern {
var asString string
switch mode {
@@ -114,7 +114,7 @@ func BuildPattern(mode Mode, caseMode Case,
hasInvTerm: hasInvTerm,
nth: nth,
delimiter: delimiter,
procFun: make(map[termType]func(bool, *[]rune, []rune) (int, int))}
procFun: make(map[termType]func(bool, []rune, []rune) (int, int))}
ptr.procFun[termFuzzy] = algo.FuzzyMatch
ptr.procFun[termEqual] = algo.EqualMatch
@@ -305,27 +305,25 @@ func (p *Pattern) extendedMatch(item *Item) []Offset {
return offsets
}
func (p *Pattern) prepareInput(item *Item) *[]Token {
func (p *Pattern) prepareInput(item *Item) []Token {
if item.transformed != nil {
return item.transformed
}
var ret *[]Token
var ret []Token
if len(p.nth) > 0 {
tokens := Tokenize(item.text, p.delimiter)
ret = Transform(tokens, p.nth)
} else {
runes := []rune(*item.text)
trans := []Token{Token{text: &runes, prefixLength: 0}}
ret = &trans
ret = []Token{Token{text: item.text, prefixLength: 0}}
}
item.transformed = ret
return ret
}
func (p *Pattern) iter(pfun func(bool, *[]rune, []rune) (int, int),
tokens *[]Token, caseSensitive bool, pattern []rune) (int, int) {
for _, part := range *tokens {
func (p *Pattern) iter(pfun func(bool, []rune, []rune) (int, int),
tokens []Token, caseSensitive bool, pattern []rune) (int, int) {
for _, part := range tokens {
prefixLength := part.prefixLength
if sidx, eidx := pfun(caseSensitive, part.text, pattern); sidx >= 0 {
return sidx + prefixLength, eidx + prefixLength

View File

@@ -1,6 +1,7 @@
package fzf
import (
"reflect"
"testing"
"github.com/junegunn/fzf/src/algo"
@@ -58,9 +59,9 @@ func TestExact(t *testing.T) {
defer clearPatternCache()
clearPatternCache()
pattern := BuildPattern(ModeExtended, CaseSmart,
[]Range{}, nil, []rune("'abc"))
runes := []rune("aabbcc abc")
sidx, eidx := algo.ExactMatchNaive(pattern.caseSensitive, &runes, pattern.terms[0].text)
[]Range{}, Delimiter{}, []rune("'abc"))
sidx, eidx := algo.ExactMatchNaive(
pattern.caseSensitive, []rune("aabbcc abc"), pattern.terms[0].text)
if sidx != 7 || eidx != 10 {
t.Errorf("%s / %d / %d", pattern.terms, sidx, eidx)
}
@@ -69,11 +70,11 @@ func TestExact(t *testing.T) {
func TestEqual(t *testing.T) {
defer clearPatternCache()
clearPatternCache()
pattern := BuildPattern(ModeExtended, CaseSmart, []Range{}, nil, []rune("^AbC$"))
pattern := BuildPattern(ModeExtended, CaseSmart, []Range{}, Delimiter{}, []rune("^AbC$"))
match := func(str string, sidxExpected int, eidxExpected int) {
runes := []rune(str)
sidx, eidx := algo.EqualMatch(pattern.caseSensitive, &runes, pattern.terms[0].text)
sidx, eidx := algo.EqualMatch(
pattern.caseSensitive, []rune(str), pattern.terms[0].text)
if sidx != sidxExpected || eidx != eidxExpected {
t.Errorf("%s / %d / %d", pattern.terms, sidx, eidx)
}
@@ -85,17 +86,17 @@ func TestEqual(t *testing.T) {
func TestCaseSensitivity(t *testing.T) {
defer clearPatternCache()
clearPatternCache()
pat1 := BuildPattern(ModeFuzzy, CaseSmart, []Range{}, nil, []rune("abc"))
pat1 := BuildPattern(ModeFuzzy, CaseSmart, []Range{}, Delimiter{}, []rune("abc"))
clearPatternCache()
pat2 := BuildPattern(ModeFuzzy, CaseSmart, []Range{}, nil, []rune("Abc"))
pat2 := BuildPattern(ModeFuzzy, CaseSmart, []Range{}, Delimiter{}, []rune("Abc"))
clearPatternCache()
pat3 := BuildPattern(ModeFuzzy, CaseIgnore, []Range{}, nil, []rune("abc"))
pat3 := BuildPattern(ModeFuzzy, CaseIgnore, []Range{}, Delimiter{}, []rune("abc"))
clearPatternCache()
pat4 := BuildPattern(ModeFuzzy, CaseIgnore, []Range{}, nil, []rune("Abc"))
pat4 := BuildPattern(ModeFuzzy, CaseIgnore, []Range{}, Delimiter{}, []rune("Abc"))
clearPatternCache()
pat5 := BuildPattern(ModeFuzzy, CaseRespect, []Range{}, nil, []rune("abc"))
pat5 := BuildPattern(ModeFuzzy, CaseRespect, []Range{}, Delimiter{}, []rune("abc"))
clearPatternCache()
pat6 := BuildPattern(ModeFuzzy, CaseRespect, []Range{}, nil, []rune("Abc"))
pat6 := BuildPattern(ModeFuzzy, CaseRespect, []Range{}, Delimiter{}, []rune("Abc"))
if string(pat1.text) != "abc" || pat1.caseSensitive != false ||
string(pat2.text) != "Abc" || pat2.caseSensitive != true ||
@@ -108,25 +109,23 @@ func TestCaseSensitivity(t *testing.T) {
}
func TestOrigTextAndTransformed(t *testing.T) {
strptr := func(str string) *string {
return &str
}
pattern := BuildPattern(ModeExtended, CaseSmart, []Range{}, nil, []rune("jg"))
tokens := Tokenize(strptr("junegunn"), nil)
pattern := BuildPattern(ModeExtended, CaseSmart, []Range{}, Delimiter{}, []rune("jg"))
tokens := Tokenize([]rune("junegunn"), Delimiter{})
trans := Transform(tokens, []Range{Range{1, 1}})
origRunes := []rune("junegunn.choi")
for _, mode := range []Mode{ModeFuzzy, ModeExtended} {
chunk := Chunk{
&Item{
text: strptr("junegunn"),
origText: strptr("junegunn.choi"),
text: []rune("junegunn"),
origText: &origRunes,
transformed: trans},
}
pattern.mode = mode
matches := pattern.matchChunk(&chunk)
if *matches[0].text != "junegunn" || *matches[0].origText != "junegunn.choi" ||
if string(matches[0].text) != "junegunn" || string(*matches[0].origText) != "junegunn.choi" ||
matches[0].offsets[0][0] != 0 || matches[0].offsets[0][1] != 5 ||
matches[0].transformed != trans {
!reflect.DeepEqual(matches[0].transformed, trans) {
t.Error("Invalid match result", matches)
}
}

View File

@@ -11,7 +11,7 @@ import (
// Reader reads from command or standard input
type Reader struct {
pusher func(string) bool
pusher func([]byte) bool
eventBox *util.EventBox
delimNil bool
}
@@ -37,13 +37,14 @@ func (r *Reader) feed(src io.Reader) {
}
reader := bufio.NewReader(src)
for {
line, err := reader.ReadString(delim)
if line != "" {
// "ReadString returns err != nil if and only if the returned data does not end in delim."
// ReadBytes returns err != nil if and only if the returned data does not
// end in delim.
bytea, err := reader.ReadBytes(delim)
if len(bytea) > 0 {
if err == nil {
line = line[:len(line)-1]
bytea = bytea[:len(bytea)-1]
}
if r.pusher(line) {
if r.pusher(bytea) {
r.eventBox.Set(EvtReadNew, nil)
}
}
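
The read loop above leans on the documented `bufio.Reader.ReadBytes` contract quoted in the diff: the error is non-nil exactly when the returned data does not end in the delimiter, so the trailing delimiter is trimmed only when the error is nil. A minimal standalone version of that loop; the delimiter is `'\n'` here, while fzf switches to NUL when the ReadZero option is set:

    package main

    import (
        "bufio"
        "fmt"
        "strings"
    )

    func main() {
        // Feed records where the last one has no trailing delimiter.
        src := strings.NewReader("foo\nbar\nbaz")
        reader := bufio.NewReader(src)
        var delim byte = '\n'

        for {
            bytea, err := reader.ReadBytes(delim)
            if len(bytea) > 0 {
                if err == nil {
                    // err == nil guarantees the data ends in delim; drop it.
                    bytea = bytea[:len(bytea)-1]
                }
                fmt.Printf("record: %q\n", bytea)
            }
            if err != nil {
                break // io.EOF, or a real error
            }
        }
    }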

View File

@@ -10,7 +10,7 @@ func TestReadFromCommand(t *testing.T) {
strs := []string{}
eb := util.NewEventBox()
reader := Reader{
pusher: func(s string) bool { strs = append(strs, s); return true },
pusher: func(s []byte) bool { strs = append(strs, string(s)); return true },
eventBox: eb}
// Check EventBox

View File

@@ -106,7 +106,7 @@ const (
actCancel
actClearScreen
actDeleteChar
actDeleteCharEof
actDeleteCharEOF
actEndOfLine
actForwardChar
actForwardWord
@@ -141,7 +141,7 @@ func defaultKeymap() map[int]actionType {
keymap[C.CtrlG] = actAbort
keymap[C.CtrlQ] = actAbort
keymap[C.ESC] = actAbort
keymap[C.CtrlD] = actDeleteCharEof
keymap[C.CtrlD] = actDeleteCharEOF
keymap[C.CtrlE] = actEndOfLine
keymap[C.CtrlF] = actForwardChar
keymap[C.CtrlH] = actBackwardDeleteChar
@@ -436,15 +436,15 @@ func (t *Terminal) printHeader() {
}
line := idx + 2
if t.inlineInfo {
line -= 1
line--
}
if line >= max {
continue
}
trimmed, colors, newState := extractColor(&lineStr, state)
trimmed, colors, newState := extractColor(lineStr, state)
state = newState
item := &Item{
text: trimmed,
text: []rune(trimmed),
index: 0,
colors: colors,
rank: Rank{0, 0, 0}}
@@ -462,7 +462,7 @@ func (t *Terminal) printList() {
for i := 0; i < maxy; i++ {
line := i + 2 + len(t.header)
if t.inlineInfo {
line -= 1
line--
}
t.move(line, 0, true)
if i < count {
@@ -537,7 +537,8 @@ func (t *Terminal) printHighlighted(item *Item, bold bool, col1 int, col2 int, c
}
// Overflow
text := []rune(*item.text)
text := make([]rune, len(item.text))
copy(text, item.text)
offsets := item.colorOffsets(col2, bold, current)
maxWidth := C.MaxX() - 3 - t.marginInt[1] - t.marginInt[3]
fullWidth := displayWidth(text)
@@ -862,7 +863,7 @@ func (t *Terminal) Loop() {
req(reqQuit)
case actDeleteChar:
t.delChar()
case actDeleteCharEof:
case actDeleteCharEOF:
if !t.delChar() && t.cx == 0 {
req(reqQuit)
}
@@ -1013,7 +1014,7 @@ func (t *Terminal) Loop() {
}
min := 2 + len(t.header)
if t.inlineInfo {
min -= 1
min--
}
if me.Double {
// Double-click
@@ -1099,7 +1100,7 @@ func (t *Terminal) vset(o int) bool {
func (t *Terminal) maxItems() int {
max := t.maxHeight() - 2 - len(t.header)
if t.inlineInfo {
max += 1
max++
}
return util.Max(max, 0)
}

View File

@@ -18,10 +18,16 @@ type Range struct {
// Token contains the tokenized part of the strings and its prefix length
type Token struct {
text *[]rune
text []rune
prefixLength int
}
// Delimiter for tokenizing the input
type Delimiter struct {
regex *regexp.Regexp
str *string
}
func newRange(begin int, end int) Range {
if begin == 1 {
begin = rangeEllipsis
@@ -68,16 +74,15 @@ func ParseRange(str *string) (Range, bool) {
return newRange(n, n), true
}
func withPrefixLengths(tokens []string, begin int) []Token {
func withPrefixLengths(tokens [][]rune, begin int) []Token {
ret := make([]Token, len(tokens))
prefixLength := begin
for idx, token := range tokens {
// Need to define a new local variable instead of the reused token to take
// the pointer to it
runes := []rune(token)
ret[idx] = Token{text: &runes, prefixLength: prefixLength}
prefixLength += len([]rune(token))
ret[idx] = Token{text: token, prefixLength: prefixLength}
prefixLength += len(token)
}
return ret
}
@@ -88,13 +93,13 @@ const (
awkWhite
)
func awkTokenizer(input *string) ([]string, int) {
func awkTokenizer(input []rune) ([][]rune, int) {
// 9, 32
ret := []string{}
ret := [][]rune{}
str := []rune{}
prefixLength := 0
state := awkNil
for _, r := range []rune(*input) {
for _, r := range input {
white := r == 9 || r == 32
switch state {
case awkNil:
@@ -113,47 +118,69 @@ func awkTokenizer(input *string) ([]string, int) {
if white {
str = append(str, r)
} else {
ret = append(ret, string(str))
ret = append(ret, str)
state = awkBlack
str = []rune{r}
}
}
}
if len(str) > 0 {
ret = append(ret, string(str))
ret = append(ret, str)
}
return ret, prefixLength
}
// Tokenize tokenizes the given string with the delimiter
func Tokenize(str *string, delimiter *regexp.Regexp) []Token {
if delimiter == nil {
func Tokenize(runes []rune, delimiter Delimiter) []Token {
if delimiter.str == nil && delimiter.regex == nil {
// AWK-style (\S+\s*)
tokens, prefixLength := awkTokenizer(str)
tokens, prefixLength := awkTokenizer(runes)
return withPrefixLengths(tokens, prefixLength)
}
tokens := delimiter.FindAllString(*str, -1)
return withPrefixLengths(tokens, 0)
}
func joinTokens(tokens *[]Token) *string {
ret := ""
for _, token := range *tokens {
ret += string(*token.text)
var tokens []string
if delimiter.str != nil {
tokens = strings.Split(string(runes), *delimiter.str)
for i := 0; i < len(tokens)-1; i++ {
tokens[i] = tokens[i] + *delimiter.str
}
} else if delimiter.regex != nil {
str := string(runes)
for len(str) > 0 {
loc := delimiter.regex.FindStringIndex(str)
if loc == nil {
loc = []int{0, len(str)}
}
last := util.Max(loc[1], 1)
tokens = append(tokens, str[:last])
str = str[last:]
}
}
return &ret
asRunes := make([][]rune, len(tokens))
for i, token := range tokens {
asRunes[i] = []rune(token)
}
return withPrefixLengths(asRunes, 0)
}
func joinTokensAsRunes(tokens *[]Token) *[]rune {
func joinTokens(tokens []Token) []rune {
ret := []rune{}
for _, token := range *tokens {
ret = append(ret, *token.text...)
for _, token := range tokens {
ret = append(ret, token.text...)
}
return &ret
return ret
}
func joinTokensAsRunes(tokens []Token) []rune {
ret := []rune{}
for _, token := range tokens {
ret = append(ret, token.text...)
}
return ret
}
// Transform is used to transform the input when --with-nth option is given
func Transform(tokens []Token, withNth []Range) *[]Token {
func Transform(tokens []Token, withNth []Range) []Token {
transTokens := make([]Token, len(withNth))
numTokens := len(tokens)
for idx, r := range withNth {
@@ -162,14 +189,14 @@ func Transform(tokens []Token, withNth []Range) *[]Token {
if r.begin == r.end {
idx := r.begin
if idx == rangeEllipsis {
part = append(part, *joinTokensAsRunes(&tokens)...)
part = append(part, joinTokensAsRunes(tokens)...)
} else {
if idx < 0 {
idx += numTokens + 1
}
if idx >= 1 && idx <= numTokens {
minIdx = idx - 1
part = append(part, *tokens[idx-1].text...)
part = append(part, tokens[idx-1].text...)
}
}
} else {
@@ -196,7 +223,7 @@ func Transform(tokens []Token, withNth []Range) *[]Token {
minIdx = util.Max(0, begin-1)
for idx := begin; idx <= end; idx++ {
if idx >= 1 && idx <= numTokens {
part = append(part, *tokens[idx-1].text...)
part = append(part, tokens[idx-1].text...)
}
}
}
@@ -206,7 +233,7 @@ func Transform(tokens []Token, withNth []Range) *[]Token {
} else {
prefixLength = 0
}
transTokens[idx] = Token{&part, prefixLength}
transTokens[idx] = Token{part, prefixLength}
}
return &transTokens
return transTokens
}

View File

@@ -43,14 +43,23 @@ func TestParseRange(t *testing.T) {
func TestTokenize(t *testing.T) {
// AWK-style
input := " abc: def: ghi "
tokens := Tokenize(&input, nil)
if string(*tokens[0].text) != "abc: " || tokens[0].prefixLength != 2 {
tokens := Tokenize([]rune(input), Delimiter{})
if string(tokens[0].text) != "abc: " || tokens[0].prefixLength != 2 {
t.Errorf("%s", tokens)
}
// With delimiter
tokens = Tokenize(&input, delimiterRegexp(":"))
if string(*tokens[0].text) != " abc:" || tokens[0].prefixLength != 0 {
tokens = Tokenize([]rune(input), delimiterRegexp(":"))
if string(tokens[0].text) != " abc:" || tokens[0].prefixLength != 0 {
t.Errorf("%s", tokens)
}
// With delimiter regex
tokens = Tokenize([]rune(input), delimiterRegexp("\\s+"))
if string(tokens[0].text) != " " || tokens[0].prefixLength != 0 ||
string(tokens[1].text) != "abc: " || tokens[1].prefixLength != 2 ||
string(tokens[2].text) != "def: " || tokens[2].prefixLength != 8 ||
string(tokens[3].text) != "ghi " || tokens[3].prefixLength != 14 {
t.Errorf("%s", tokens)
}
}
@@ -58,39 +67,39 @@ func TestTokenize(t *testing.T) {
func TestTransform(t *testing.T) {
input := " abc: def: ghi: jkl"
{
tokens := Tokenize(&input, nil)
tokens := Tokenize([]rune(input), Delimiter{})
{
ranges := splitNth("1,2,3")
tx := Transform(tokens, ranges)
if *joinTokens(tx) != "abc: def: ghi: " {
t.Errorf("%s", *tx)
if string(joinTokens(tx)) != "abc: def: ghi: " {
t.Errorf("%s", tx)
}
}
{
ranges := splitNth("1..2,3,2..,1")
tx := Transform(tokens, ranges)
if *joinTokens(tx) != "abc: def: ghi: def: ghi: jklabc: " ||
len(*tx) != 4 ||
string(*(*tx)[0].text) != "abc: def: " || (*tx)[0].prefixLength != 2 ||
string(*(*tx)[1].text) != "ghi: " || (*tx)[1].prefixLength != 14 ||
string(*(*tx)[2].text) != "def: ghi: jkl" || (*tx)[2].prefixLength != 8 ||
string(*(*tx)[3].text) != "abc: " || (*tx)[3].prefixLength != 2 {
t.Errorf("%s", *tx)
if string(joinTokens(tx)) != "abc: def: ghi: def: ghi: jklabc: " ||
len(tx) != 4 ||
string(tx[0].text) != "abc: def: " || tx[0].prefixLength != 2 ||
string(tx[1].text) != "ghi: " || tx[1].prefixLength != 14 ||
string(tx[2].text) != "def: ghi: jkl" || tx[2].prefixLength != 8 ||
string(tx[3].text) != "abc: " || tx[3].prefixLength != 2 {
t.Errorf("%s", tx)
}
}
}
{
tokens := Tokenize(&input, delimiterRegexp(":"))
tokens := Tokenize([]rune(input), delimiterRegexp(":"))
{
ranges := splitNth("1..2,3,2..,1")
tx := Transform(tokens, ranges)
if *joinTokens(tx) != " abc: def: ghi: def: ghi: jkl abc:" ||
len(*tx) != 4 ||
string(*(*tx)[0].text) != " abc: def:" || (*tx)[0].prefixLength != 0 ||
string(*(*tx)[1].text) != " ghi:" || (*tx)[1].prefixLength != 12 ||
string(*(*tx)[2].text) != " def: ghi: jkl" || (*tx)[2].prefixLength != 6 ||
string(*(*tx)[3].text) != " abc:" || (*tx)[3].prefixLength != 0 {
t.Errorf("%s", *tx)
if string(joinTokens(tx)) != " abc: def: ghi: def: ghi: jkl abc:" ||
len(tx) != 4 ||
string(tx[0].text) != " abc: def:" || tx[0].prefixLength != 0 ||
string(tx[1].text) != " ghi:" || tx[1].prefixLength != 12 ||
string(tx[2].text) != " def: ghi: jkl" || tx[2].prefixLength != 6 ||
string(tx[3].text) != " abc:" || tx[3].prefixLength != 0 {
t.Errorf("%s", tx)
}
}
}

View File

@@ -6,6 +6,7 @@ import "C"
import (
"os"
"time"
"unicode/utf8"
)
// Max returns the largest integer
@@ -19,7 +20,7 @@ func Max(first int, items ...int) int {
return max
}
// Max32 returns the smallest 32-bit integer
// Min32 returns the smallest 32-bit integer
func Min32(first int32, second int32) int32 {
if first <= second {
return first
@@ -69,22 +70,33 @@ func DurWithin(
return val
}
func Between(val int, min int, max int) bool {
return val >= min && val <= max
}
// IsTty returns true if stdin is a terminal
func IsTty() bool {
return int(C.isatty(C.int(os.Stdin.Fd()))) != 0
}
func TrimRight(runes *[]rune) []rune {
func TrimRight(runes []rune) []rune {
var i int
for i = len(*runes) - 1; i >= 0; i-- {
char := (*runes)[i]
for i = len(runes) - 1; i >= 0; i-- {
char := runes[i]
if char != ' ' && char != '\t' {
break
}
}
return (*runes)[0 : i+1]
return runes[0 : i+1]
}
func BytesToRunes(bytea []byte) []rune {
runes := make([]rune, 0, len(bytea))
for i := 0; i < len(bytea); {
if bytea[i] < utf8.RuneSelf {
runes = append(runes, rune(bytea[i]))
i++
} else {
r, sz := utf8.DecodeRune(bytea[i:])
i += sz
runes = append(runes, r)
}
}
return runes
}
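
`BytesToRunes` above decodes UTF-8 once, up front, with a single-byte fast path for ASCII, so later matching works on `[]rune` without re-decoding. A short standalone usage sketch; the function body is copied from the hunk above:

    package main

    import (
        "fmt"
        "unicode/utf8"
    )

    // bytesToRunes is a copy of the conversion shown above: ASCII bytes are
    // appended directly, everything else goes through utf8.DecodeRune.
    func bytesToRunes(bytea []byte) []rune {
        runes := make([]rune, 0, len(bytea))
        for i := 0; i < len(bytea); {
            if bytea[i] < utf8.RuneSelf {
                runes = append(runes, rune(bytea[i]))
                i++
            } else {
                r, sz := utf8.DecodeRune(bytea[i:])
                i += sz
                runes = append(runes, r)
            }
        }
        return runes
    }

    func main() {
        runes := bytesToRunes([]byte("héllo"))
        fmt.Println(len(runes), string(runes)) // 5 héllo
    }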

View File

@@ -501,6 +501,32 @@ class TestGoFZF < TestBase
assert_equal input, `cat #{tempname} | #{FZF} -f"!z" -x --tiebreak end`.split($/)
end
def test_tiebreak_length_with_nth
input = %w[
1:hell
123:hello
12345:he
1234567:h
]
writelines tempname, input
output = %w[
1:hell
12345:he
123:hello
1234567:h
]
assert_equal output, `cat #{tempname} | #{FZF} -fh`.split($/)
output = %w[
1234567:h
12345:he
1:hell
123:hello
]
assert_equal output, `cat #{tempname} | #{FZF} -fh -n2 -d:`.split($/)
end
def test_invalid_cache
tmux.send_keys "(echo d; echo D; echo x) | #{fzf '-q d'}", :Enter
tmux.until { |lines| lines[-2].include? '2/3' }
@@ -743,6 +769,11 @@ class TestGoFZF < TestBase
tmux.send_keys :Enter
end
def test_invalid_term
tmux.send_keys "TERM=xxx fzf", :Enter
tmux.until { |lines| lines.any? { |l| l.include? 'Invalid $TERM: xxx' } }
end
private
def writelines path, lines
File.unlink path while File.exists? path