mirror of https://github.com/junegunn/fzf.git
synced 2025-07-31 20:22:01 -07:00

Compare commits
18 Commits
Author | SHA1 | Date
---|---|---
 | 37f43fbb35 |
 | 401a5fd5ff |
 | 1854922f0c |
 | 2fc7c18747 |
 | 8ef2420677 |
 | cf6f4d74c4 |
 | f44d40f6b4 |
 | 1c81a58127 |
 | 9baf7c4874 |
 | 22b089e47e |
 | b166f18220 |
 | 68600f6ecf |
 | 4d4447779f |
 | 639de4c27b |
 | d87390934e |
 | 411ec2e557 |
 | f025602841 |
 | f958c9daf5 |
@@ -1,6 +1,15 @@
CHANGELOG
=========

0.15.0
------
- Improved fuzzy search algorithm
- Added `--algo=[v1|v2]` option so one can still choose the old algorithm
  which values the search performance over the quality of the result
- Advanced scoring criteria
- `--read0` to read input delimited by ASCII NUL character
- `--print0` to print output delimited by ASCII NUL character

0.13.5
------
- Memory and performance optimization
37  README.md

@@ -10,18 +10,15 @@ Pros

- No dependencies
- Blazingly fast
  - e.g. `locate / | fzf`
- Flexible layout
  - Runs in fullscreen or in horizontal/vertical split using tmux
- The most comprehensive feature set
  - Try `fzf --help` and be surprised
- Flexible layout using tmux panes
- Batteries included
  - Vim/Neovim plugin, key bindings and fuzzy auto-completion

Installation
------------

fzf project consists of the following:
fzf project consists of the following components:

- `fzf` executable
- `fzf-tmux` script for launching fzf in a tmux pane
@@ -30,12 +27,12 @@ fzf project consists of the following:
- Fuzzy auto-completion (bash, zsh)
- Vim/Neovim plugin

You can [download fzf executable][bin] alone, but it's recommended that you
install the extra stuff using the attached install script.
You can [download fzf executable][bin] alone if you don't need the extra
stuff.

[bin]: https://github.com/junegunn/fzf-bin/releases

#### Using git (recommended)
### Using git

Clone this repository and run
[install](https://github.com/junegunn/fzf/blob/master/install) script.

@@ -45,7 +42,7 @@ git clone --depth 1 https://github.com/junegunn/fzf.git ~/.fzf
~/.fzf/install
```

#### Using Homebrew
### Using Homebrew

On OS X, you can use [Homebrew](http://brew.sh/) to install fzf.

@@ -56,26 +53,30 @@ brew install fzf
/usr/local/opt/fzf/install
```
#### Install as Vim plugin
### Vim plugin

Once you have cloned the repository, add the following line to your .vimrc.
You can manually add the directory to `&runtimepath` as follows,

```vim
" If installed using git
set rtp+=~/.fzf

" If installed using Homebrew
set rtp+=/usr/local/opt/fzf
```

Or you can have [vim-plug](https://github.com/junegunn/vim-plug) manage fzf
(recommended):
But it's recommended that you use a plugin manager like
[vim-plug](https://github.com/junegunn/vim-plug).

```vim
Plug 'junegunn/fzf', { 'dir': '~/.fzf', 'do': './install --all' }
```

#### Upgrading fzf
### Upgrading fzf

fzf is being actively developed and you might want to upgrade it once in a
while. Please follow the instruction below depending on the installation
method.
method used.

- git: `cd ~/.fzf && git pull && ./install`
- brew: `brew update; brew reinstall fzf`
@@ -390,6 +391,12 @@ fzf
export FZF_CTRL_T_COMMAND="$FZF_DEFAULT_COMMAND"
```

If you don't want to exclude hidden files, use the following command:

```sh
export FZF_DEFAULT_COMMAND='ag --hidden --ignore .git -g ""'
```

#### `git ls-tree` for fast traversal

If you're running fzf in a large git repository, `git ls-tree` can boost up the
@@ -161,14 +161,14 @@ done

if [[ -n "$term" ]] || [[ -t 0 ]]; then
  cat <<< "\"$fzf\" $opts > $fifo2; echo \$? > $fifo3 $close" > $argsf
  tmux set-window-option synchronize-panes off \;\
  TMUX=$(echo $TMUX | cut -d , -f 1,2) tmux set-window-option synchronize-panes off \;\
    set-window-option remain-on-exit off \;\
    split-window $opt "cd $(printf %q "$PWD");$envs bash $argsf" $swap \
    > /dev/null 2>&1
else
  mkfifo $fifo1
  cat <<< "\"$fzf\" $opts < $fifo1 > $fifo2; echo \$? > $fifo3 $close" > $argsf
  tmux set-window-option synchronize-panes off \;\
  TMUX=$(echo $TMUX | cut -d , -f 1,2) tmux set-window-option synchronize-panes off \;\
    set-window-option remain-on-exit off \;\
    split-window $opt "$envs bash $argsf" $swap \
    > /dev/null 2>&1
4  install

@@ -2,8 +2,8 @@

set -u

[[ "$@" =~ --pre ]] && version=0.13.5 pre=1 ||
                       version=0.13.5 pre=0
[[ "$@" =~ --pre ]] && version=0.15.0 pre=1 ||
                       version=0.15.0 pre=0

auto_completion=
key_bindings=
@@ -21,7 +21,7 @@ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
..
.TH fzf-tmux 1 "Aug 2016" "fzf 0.13.5" "fzf-tmux - open fzf in tmux split pane"
.TH fzf-tmux 1 "Sep 2016" "fzf 0.15.0" "fzf-tmux - open fzf in tmux split pane"

.SH NAME
fzf-tmux - open fzf in tmux split pane
@@ -21,7 +21,7 @@ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
..
.TH fzf 1 "Aug 2016" "fzf 0.13.5" "fzf - a command-line fuzzy finder"
.TH fzf 1 "Sep 2016" "fzf 0.15.0" "fzf - a command-line fuzzy finder"

.SH NAME
fzf - a command-line fuzzy finder
@@ -47,6 +47,16 @@ Case-insensitive match (default: smart-case match)
.TP
.B "+i"
Case-sensitive match
.TP
.BI "--algo=" TYPE
Fuzzy matching algorithm (default: v2)

.br
.BR v2 " Optimal scoring algorithm (quality)"
.br
.BR v1 " Faster but not guaranteed to find the optimal result (performance)"
.br

.TP
.BI "-n, --nth=" "N[,..]"
Comma-separated list of field index expressions for limiting search scope.
@@ -275,6 +285,12 @@ with the default enter key.
e.g. \fBfzf --expect=ctrl-v,ctrl-t,alt-s,f1,f2,~,@\fR
.RE
.TP
.B "--read0"
Read input delimited by ASCII NUL character instead of newline character
.TP
.B "--print0"
Print output delimited by ASCII NUL character instead of newline character
.TP
.B "--sync"
Synchronous search for multi-staged filtering. If specified, fzf will launch
ncurses finder only after the input stream is complete.
@@ -276,10 +276,10 @@ function! s:fzf_tmux(dict)
if s:present(a:dict, o)
let spec = a:dict[o]
if (o == 'up' || o == 'down') && spec[0] == '~'
let size = '-'.o[0].s:calc_size(&lines, spec[1:], a:dict)
let size = '-'.o[0].s:calc_size(&lines, spec, a:dict)
else
" Legacy boolean option
let size = '-'.o[0].(spec == 1 ? '' : spec)
let size = '-'.o[0].(spec == 1 ? '' : substitute(spec, '^\~', '', ''))
endif
break
endif
@@ -375,10 +375,11 @@ function! s:execute_tmux(dict, command, temps) abort
endfunction

function! s:calc_size(max, val, dict)
if a:val =~ '%$'
let size = a:max * str2nr(a:val[:-2]) / 100
let val = substitute(a:val, '^\~', '', '')
if val =~ '%$'
let size = a:max * str2nr(val[:-2]) / 100
else
let size = min([a:max, str2nr(a:val)])
let size = min([a:max, str2nr(val)])
endif

let srcsz = -1
@@ -409,7 +410,7 @@ function! s:split(dict)
if !empty(val)
let [cmd, resz, max] = triple
if (dir == 'up' || dir == 'down') && val[0] == '~'
let sz = s:calc_size(max, val[1:], a:dict)
let sz = s:calc_size(max, val, a:dict)
else
let sz = s:calc_size(max, val, {})
endif
@@ -430,9 +431,11 @@ function! s:split(dict)
endfunction

function! s:execute_term(dict, command, temps) abort
let winrest = winrestcmd()
let [ppos, winopts] = s:split(a:dict)
let fzf = { 'buf': bufnr('%'), 'ppos': ppos, 'dict': a:dict, 'temps': a:temps,
\ 'winopts': winopts, 'command': a:command }
\ 'winopts': winopts, 'winrest': winrest, 'lines': &lines,
\ 'columns': &columns, 'command': a:command }
function! fzf.switch_back(inplace)
if a:inplace && bufnr('') == self.buf
" FIXME: Can't re-enter normal mode from terminal mode
@@ -464,6 +467,10 @@ function! s:execute_term(dict, command, temps) abort
execute 'bd!' self.buf
endif

if &lines == self.lines && &columns == self.columns && s:getpos() == self.ppos
execute self.winrest
endif

if !s:exit_handler(a:code, self.command, 1)
return
endif
@@ -186,7 +186,7 @@ fzf-completion() {

[ -z "$fzf_default_completion" ] && {
binding=$(bindkey '^I')
[[ $binding =~ 'undefined-key' ]] || fzf_default_completion=$binding[(w)2]
[[ $binding =~ 'undefined-key' ]] || fzf_default_completion=$binding[(s: :w)2]
unset binding
}
@@ -47,33 +47,6 @@ proportional to the number of CPU cores. On my MacBook Pro (Mid 2012), the new
version was shown to be an order of magnitude faster on certain cases. It also
starts much faster though the difference may not be noticeable.

Differences with Ruby version
-----------------------------

The Go version is designed to be perfectly compatible with the previous Ruby
version. The only behavioral difference is that the new version ignores the
numeric argument to `--sort=N` option and always sorts the result regardless
of the number of matches. The value was introduced to limit the response time
of the query, but the Go version is blazingly fast (almost instant response
even for 1M+ items) so I decided that it's no longer required.

System requirements
-------------------

Currently, prebuilt binaries are provided only for OS X and Linux. The install
script will fall back to the legacy Ruby version on the other systems, but if
you have Go 1.4 installed, you can try building it yourself.

However, as pointed out in [golang.org/doc/install][req], the Go version may
not run on CentOS/RHEL 5.x, and if that's the case, the install script will
choose the Ruby version instead.

The Go version depends on [ncurses][ncurses] and some Unix system calls, so it
shouldn't run natively on Windows at the moment. But it won't be impossible to
support Windows by falling back to a cross-platform alternative such as
[termbox][termbox] only on Windows. If you're interested in making fzf work on
Windows, please let me know.

Build
-----
@@ -88,16 +61,22 @@ make install
make linux
```

Contribution
------------
Test
----

For the time being, I will not add or accept any new features until we can be
sure that the implementation is stable and we have a sufficient number of test
cases. However, fixes for obvious bugs and new test cases are welcome.
Unit tests can be run with `make test`. Integration tests are written in Ruby
script that should be run on tmux.

I also care much about the performance of the implementation, so please make
sure that your change does not result in performance regression. And please be
noted that we don't have a quantitative measure of the performance yet.
```sh
# Unit tests
make test

# Install the executable to ../bin directory
make install

# Integration tests
ruby ../test/test_go.rb
```

Third-party libraries used
--------------------------

655  src/algo/algo.go
@@ -1,19 +1,91 @@
package algo

/*

Algorithm
---------

FuzzyMatchV1 finds the first "fuzzy" occurrence of the pattern within the given
text in O(n) time where n is the length of the text. Once the position of the
last character is located, it traverses backwards to see if there's a shorter
substring that matches the pattern.

    a_____b___abc__  To find "abc"
    *-----*-----*>   1. Forward scan
              <***   2. Backward scan

The algorithm is simple and fast, but as it only sees the first occurrence,
it is not guaranteed to find the occurrence with the highest score.

    a_____b__c__abc
    *-----*--*  ***

FuzzyMatchV2 implements a modified version of Smith-Waterman algorithm to find
the optimal solution (highest score) according to the scoring criteria. Unlike
the original algorithm, omission or mismatch of a character in the pattern is
not allowed.

Performance
-----------

The new V2 algorithm is slower than V1 as it examines all occurrences of the
pattern instead of stopping immediately after finding the first one. The time
complexity of the algorithm is O(nm) if a match is found and O(n) otherwise
where n is the length of the item and m is the length of the pattern. Thus, the
performance overhead may not be noticeable for a query with high selectivity.
However, if the performance is more important than the quality of the result,
you can still choose v1 algorithm with --algo=v1.

Scoring criteria
----------------

- We prefer matches at special positions, such as the start of a word, or
  uppercase character in camelCase words.

- That is, we prefer an occurrence of the pattern with more characters
  matching at special positions, even if the total match length is longer.
    e.g. "fuzzyfinder" vs. "fuzzy-finder" on "ff"
                            ````````````
- Also, if the first character in the pattern appears at one of the special
  positions, the bonus point for the position is multiplied by a constant
  as it is extremely likely that the first character in the typed pattern
  has more significance than the rest.
    e.g. "fo-bar" vs. "foob-r" on "br"
          ``````
- But since fzf is still a fuzzy finder, not an acronym finder, we should also
  consider the total length of the matched substring. This is why we have the
  gap penalty. The gap penalty increases as the length of the gap (distance
  between the matching characters) increases, so the effect of the bonus is
  eventually cancelled at some point.
    e.g. "fuzzyfinder" vs. "fuzzy-blurry-finder" on "ff"
          ```````````
- Consequently, it is crucial to find the right balance between the bonus
  and the gap penalty. The parameters were chosen that the bonus is cancelled
  when the gap size increases beyond 8 characters.

- The bonus mechanism can have the undesirable side effect where consecutive
  matches are ranked lower than the ones with gaps.
    e.g. "foobar" vs. "foo-bar" on "foob"
                       ```````
- To correct this anomaly, we also give extra bonus point to each character
  in a consecutive matching chunk.
    e.g. "foobar" vs. "foo-bar" on "foob"
          ``````
- The amount of consecutive bonus is primarily determined by the bonus of the
  first character in the chunk.
    e.g. "foobar" vs. "out-of-bound" on "oob"
                       ````````````
*/

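// ---------------------------------------------------------------------------
// Illustrative sketch (added for this write-up, not part of the fzf source):
// a stripped-down version of the V1 greedy scan described above. It works on
// a plain []rune instead of util.Chars and skips case folding and scoring; it
// only shows the forward scan followed by the backward scan that shrinks the
// match to a shorter substring.
//
//	func greedyScanSketch(text, pattern []rune) (int, int) {
//		if len(pattern) == 0 {
//			return 0, 0
//		}
//		pidx, sidx, eidx := 0, -1, -1
//		// 1. Forward scan: find the pattern characters in order
//		for idx, char := range text {
//			if char == pattern[pidx] {
//				if sidx < 0 {
//					sidx = idx
//				}
//				if pidx++; pidx == len(pattern) {
//					eidx = idx + 1
//					break
//				}
//			}
//		}
//		if eidx < 0 {
//			return -1, -1 // no match
//		}
//		// 2. Backward scan: from the last matched character, walk backwards
//		//    to see if a shorter substring still contains the whole pattern
//		pidx--
//		for idx := eidx - 1; idx >= sidx; idx-- {
//			if text[idx] == pattern[pidx] {
//				if pidx--; pidx < 0 {
//					sidx = idx
//					break
//				}
//			}
//		}
//		return sidx, eidx // e.g. "a_____b___abc__" / "abc" -> (10, 13)
//	}
// ---------------------------------------------------------------------------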
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"unicode"
|
||||
|
||||
"github.com/junegunn/fzf/src/util"
|
||||
)
|
||||
|
||||
/*
|
||||
* String matching algorithms here do not use strings.ToLower to avoid
|
||||
* performance penalty. And they assume pattern runes are given in lowercase
|
||||
* letters when caseSensitive is false.
|
||||
*
|
||||
* In short: They try to do as little work as possible.
|
||||
*/
|
||||
var DEBUG bool
|
||||
|
||||
func indexAt(index int, max int, forward bool) int {
|
||||
if forward {
|
||||
@@ -22,16 +94,50 @@ func indexAt(index int, max int, forward bool) int {
|
||||
return max - index - 1
|
||||
}
|
||||
|
||||
// Result conatins the results of running a match function.
|
||||
// Result contains the results of running a match function.
|
||||
type Result struct {
|
||||
// TODO int32 should suffice
|
||||
Start int
|
||||
End int
|
||||
|
||||
// Items are basically sorted by the lengths of matched substrings.
|
||||
// But we slightly adjust the score with bonus for better results.
|
||||
Bonus int
|
||||
Score int
|
||||
}
|
||||
|
||||
const (
|
||||
scoreMatch = 16
|
||||
scoreGapStart = -3
|
||||
scoreGapExtention = -1
|
||||
|
||||
// We prefer matches at the beginning of a word, but the bonus should not be
|
||||
// too great to prevent the longer acronym matches from always winning over
|
||||
// shorter fuzzy matches. The bonus point here was specifically chosen that
|
||||
// the bonus is cancelled when the gap between the acronyms grows over
|
||||
// 8 characters, which is approximately the average length of the words found
|
||||
// in web2 dictionary and my file system.
|
||||
bonusBoundary = scoreMatch / 2
|
||||
|
||||
// Although bonus point for non-word characters is non-contextual, we need it
|
||||
// for computing bonus points for consecutive chunks starting with a non-word
|
||||
// character.
|
||||
bonusNonWord = scoreMatch / 2
|
||||
|
||||
// Edge-triggered bonus for matches in camelCase words.
|
||||
// Compared to word-boundary case, they don't accompany single-character gaps
|
||||
// (e.g. FooBar vs. foo-bar), so we deduct bonus point accordingly.
|
||||
bonusCamel123 = bonusBoundary + scoreGapExtention
|
||||
|
||||
// Minimum bonus point given to characters in consecutive chunks.
|
||||
// Note that bonus points for consecutive matches shouldn't have needed if we
|
||||
// used fixed match score as in the original algorithm.
|
||||
bonusConsecutive = -(scoreGapStart + scoreGapExtention)
|
||||
|
||||
// The first character in the typed pattern usually has more significance
|
||||
// than the rest so it's important that it appears at special positions where
|
||||
// bonus points are given. e.g. "to-go" vs. "ongoing" on "og" or on "ogo".
|
||||
// The amount of the extra bonus should be limited so that the gap penalty is
|
||||
// still respected.
|
||||
bonusFirstCharMultiplier = 2
|
||||
)
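// Worked example (illustration added here, not part of the original diff):
// with the values above, scoreMatch=16, scoreGapStart=-3, scoreGapExtention=-1,
// bonusBoundary=bonusNonWord=8, bonusCamel123=7, bonusConsecutive=4 and
// bonusFirstCharMultiplier=2. Matching the query "foo" as a prefix of
// "fooBarbaz" therefore scores
//
//	(scoreMatch+bonusBoundary)*3 + bonusBoundary*(bonusFirstCharMultiplier-1)
//	= 24*3 + 8 = 80
//
// which is the same expression TestPrefixMatch uses further down in this diff.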
|
||||
|
||||
type charClass int
|
||||
|
||||
const (
|
||||
@@ -42,85 +148,350 @@ const (
|
||||
charNumber
|
||||
)
|
||||
|
||||
func evaluateBonus(caseSensitive bool, text util.Chars, pattern []rune, sidx int, eidx int) int {
|
||||
var bonus int
|
||||
pidx := 0
|
||||
lenPattern := len(pattern)
|
||||
consecutive := false
|
||||
prevClass := charNonWord
|
||||
for index := util.Max(0, sidx-1); index < eidx; index++ {
|
||||
char := text.Get(index)
|
||||
var class charClass
|
||||
if unicode.IsLower(char) {
|
||||
class = charLower
|
||||
} else if unicode.IsUpper(char) {
|
||||
class = charUpper
|
||||
} else if unicode.IsLetter(char) {
|
||||
class = charLetter
|
||||
} else if unicode.IsNumber(char) {
|
||||
class = charNumber
|
||||
} else {
|
||||
class = charNonWord
|
||||
}
|
||||
|
||||
var point int
|
||||
if prevClass == charNonWord && class != charNonWord {
|
||||
// Word boundary
|
||||
point = 2
|
||||
} else if prevClass == charLower && class == charUpper ||
|
||||
prevClass != charNumber && class == charNumber {
|
||||
// camelCase letter123
|
||||
point = 1
|
||||
}
|
||||
prevClass = class
|
||||
|
||||
if index >= sidx {
|
||||
if !caseSensitive {
|
||||
if char >= 'A' && char <= 'Z' {
|
||||
char += 32
|
||||
} else if char > unicode.MaxASCII {
|
||||
char = unicode.To(unicode.LowerCase, char)
|
||||
}
|
||||
}
|
||||
pchar := pattern[pidx]
|
||||
if pchar == char {
|
||||
// Boost bonus for the first character in the pattern
|
||||
if pidx == 0 {
|
||||
point *= 2
|
||||
}
|
||||
// Bonus to consecutive matching chars
|
||||
if consecutive {
|
||||
point++
|
||||
}
|
||||
bonus += point
|
||||
|
||||
if pidx++; pidx == lenPattern {
|
||||
break
|
||||
}
|
||||
consecutive = true
|
||||
} else {
|
||||
consecutive = false
|
||||
}
|
||||
}
|
||||
func posArray(withPos bool, len int) *[]int {
|
||||
if withPos {
|
||||
pos := make([]int, 0, len)
|
||||
return &pos
|
||||
}
|
||||
return bonus
|
||||
return nil
|
||||
}
|
||||
|
||||
// FuzzyMatch performs fuzzy-match
|
||||
func FuzzyMatch(caseSensitive bool, forward bool, text util.Chars, pattern []rune) Result {
|
||||
if len(pattern) == 0 {
|
||||
return Result{0, 0, 0}
|
||||
func alloc16(offset int, slab *util.Slab, size int, clear bool) (int, []int16) {
|
||||
if slab != nil && cap(slab.I16) > offset+size {
|
||||
slice := slab.I16[offset : offset+size]
|
||||
if clear {
|
||||
for idx := range slice {
|
||||
slice[idx] = 0
|
||||
}
|
||||
}
|
||||
return offset + size, slice
|
||||
}
|
||||
return offset, make([]int16, size)
|
||||
}
|
||||
|
||||
func alloc32(offset int, slab *util.Slab, size int, clear bool) (int, []int32) {
|
||||
if slab != nil && cap(slab.I32) > offset+size {
|
||||
slice := slab.I32[offset : offset+size]
|
||||
if clear {
|
||||
for idx := range slice {
|
||||
slice[idx] = 0
|
||||
}
|
||||
}
|
||||
return offset + size, slice
|
||||
}
|
||||
return offset, make([]int32, size)
|
||||
}
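// Usage sketch (illustration, not part of the original diff): callers thread
// the returned offset through successive allocations so that several working
// slices are carved out of a single pre-allocated slab, e.g.
//
//	offset, B := alloc16(0, slab, N, false)      // bonus point per position
//	offset, F := alloc16(offset, slab, M, false) // first occurrence of each pattern char
//
// which is how FuzzyMatchV2 below avoids per-match allocations and the
// associated garbage-collection pressure.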
|
||||
|
||||
func charClassOfAscii(char rune) charClass {
|
||||
if char >= 'a' && char <= 'z' {
|
||||
return charLower
|
||||
} else if char >= 'A' && char <= 'Z' {
|
||||
return charUpper
|
||||
} else if char >= '0' && char <= '9' {
|
||||
return charNumber
|
||||
}
|
||||
return charNonWord
|
||||
}
|
||||
|
||||
func charClassOfNonAscii(char rune) charClass {
|
||||
if unicode.IsLower(char) {
|
||||
return charLower
|
||||
} else if unicode.IsUpper(char) {
|
||||
return charUpper
|
||||
} else if unicode.IsNumber(char) {
|
||||
return charNumber
|
||||
} else if unicode.IsLetter(char) {
|
||||
return charLetter
|
||||
}
|
||||
return charNonWord
|
||||
}
|
||||
|
||||
func charClassOf(char rune) charClass {
|
||||
if char <= unicode.MaxASCII {
|
||||
return charClassOfAscii(char)
|
||||
}
|
||||
return charClassOfNonAscii(char)
|
||||
}
|
||||
|
||||
func bonusFor(prevClass charClass, class charClass) int16 {
|
||||
if prevClass == charNonWord && class != charNonWord {
|
||||
// Word boundary
|
||||
return bonusBoundary
|
||||
} else if prevClass == charLower && class == charUpper ||
|
||||
prevClass != charNumber && class == charNumber {
|
||||
// camelCase letter123
|
||||
return bonusCamel123
|
||||
} else if class == charNonWord {
|
||||
return bonusNonWord
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func bonusAt(input util.Chars, idx int) int16 {
|
||||
if idx == 0 {
|
||||
return bonusBoundary
|
||||
}
|
||||
return bonusFor(charClassOf(input.Get(idx-1)), charClassOf(input.Get(idx)))
|
||||
}
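// Example (illustration, not part of the original diff): in "foo-bar" the 'b'
// at index 4 follows the non-word character '-', so
// bonusFor(charNonWord, charLower) yields bonusBoundary, whereas the second
// 'o' at index 2 follows a lowercase letter and gets no bonus. bonusAt also
// treats index 0 itself as a word boundary.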
|
||||
|
||||
type Algo func(caseSensitive bool, forward bool, input util.Chars, pattern []rune, withPos bool, slab *util.Slab) (Result, *[]int)
|
||||
|
||||
func FuzzyMatchV2(caseSensitive bool, forward bool, input util.Chars, pattern []rune, withPos bool, slab *util.Slab) (Result, *[]int) {
|
||||
// Assume that pattern is given in lowercase if case-insensitive.
|
||||
// First check if there's a match and calculate bonus for each position.
|
||||
// If the input string is too long, consider finding the matching chars in
|
||||
// this phase as well (non-optimal alignment).
|
||||
N := input.Length()
|
||||
M := len(pattern)
|
||||
switch M {
|
||||
case 0:
|
||||
return Result{0, 0, 0}, posArray(withPos, M)
|
||||
case 1:
|
||||
return ExactMatchNaive(caseSensitive, forward, input, pattern[0:1], withPos, slab)
|
||||
}
|
||||
|
||||
// Since O(nm) algorithm can be prohibitively expensive for large input,
|
||||
// we fall back to the greedy algorithm.
|
||||
if slab != nil && N*M > cap(slab.I16) {
|
||||
return FuzzyMatchV1(caseSensitive, forward, input, pattern, withPos, slab)
|
||||
}
|
||||
|
||||
// Reuse pre-allocated integer slice to avoid unnecessary sweeping of garbages
|
||||
offset := 0
|
||||
// Bonus point for each position
|
||||
offset, B := alloc16(offset, slab, N, false)
|
||||
// The first occurrence of each character in the pattern
|
||||
offset, F := alloc16(offset, slab, M, false)
|
||||
// Rune array
|
||||
_, T := alloc32(0, slab, N, false)
|
||||
|
||||
// Phase 1. Check if there's a match and calculate bonus for each point
|
||||
pidx, lastIdx, prevClass := 0, 0, charNonWord
|
||||
for idx := 0; idx < N; idx++ {
|
||||
char := input.Get(idx)
|
||||
var class charClass
|
||||
if char <= unicode.MaxASCII {
|
||||
class = charClassOfAscii(char)
|
||||
} else {
|
||||
class = charClassOfNonAscii(char)
|
||||
}
|
||||
|
||||
if !caseSensitive && class == charUpper {
|
||||
if char <= unicode.MaxASCII {
|
||||
char += 32
|
||||
} else {
|
||||
char = unicode.To(unicode.LowerCase, char)
|
||||
}
|
||||
}
|
||||
|
||||
T[idx] = char
|
||||
B[idx] = bonusFor(prevClass, class)
|
||||
prevClass = class
|
||||
|
||||
if pidx < M {
|
||||
if char == pattern[pidx] {
|
||||
lastIdx = idx
|
||||
F[pidx] = int16(idx)
|
||||
pidx++
|
||||
}
|
||||
} else {
|
||||
if char == pattern[M-1] {
|
||||
lastIdx = idx
|
||||
}
|
||||
}
|
||||
}
|
||||
if pidx != M {
|
||||
return Result{-1, -1, 0}, nil
|
||||
}
|
||||
|
||||
// Phase 2. Fill in score matrix (H)
|
||||
// Unlike the original algorithm, we do not allow omission.
|
||||
width := lastIdx - int(F[0]) + 1
|
||||
offset, H := alloc16(offset, slab, width*M, false)
|
||||
|
||||
// Possible length of consecutive chunk at each position.
|
||||
offset, C := alloc16(offset, slab, width*M, false)
|
||||
|
||||
maxScore, maxScorePos := int16(0), 0
|
||||
for i := 0; i < M; i++ {
|
||||
I := i * width
|
||||
inGap := false
|
||||
for j := int(F[i]); j <= lastIdx; j++ {
|
||||
j0 := j - int(F[0])
|
||||
var s1, s2, consecutive int16
|
||||
|
||||
if j > int(F[i]) {
|
||||
if inGap {
|
||||
s2 = H[I+j0-1] + scoreGapExtention
|
||||
} else {
|
||||
s2 = H[I+j0-1] + scoreGapStart
|
||||
}
|
||||
}
|
||||
|
||||
if pattern[i] == T[j] {
|
||||
var diag int16
|
||||
if i > 0 && j0 > 0 {
|
||||
diag = H[I-width+j0-1]
|
||||
}
|
||||
s1 = diag + scoreMatch
|
||||
b := B[j]
|
||||
if i > 0 {
|
||||
// j > 0 if i > 0
|
||||
consecutive = C[I-width+j0-1] + 1
|
||||
// Break consecutive chunk
|
||||
if b == bonusBoundary {
|
||||
consecutive = 1
|
||||
} else if consecutive > 1 {
|
||||
b = util.Max16(b, util.Max16(bonusConsecutive, B[j-int(consecutive)+1]))
|
||||
}
|
||||
} else {
|
||||
consecutive = 1
|
||||
b *= bonusFirstCharMultiplier
|
||||
}
|
||||
if s1+b < s2 {
|
||||
s1 += B[j]
|
||||
consecutive = 0
|
||||
} else {
|
||||
s1 += b
|
||||
}
|
||||
}
|
||||
C[I+j0] = consecutive
|
||||
|
||||
inGap = s1 < s2
|
||||
score := util.Max16(util.Max16(s1, s2), 0)
|
||||
if i == M-1 && (forward && score > maxScore || !forward && score >= maxScore) {
|
||||
maxScore, maxScorePos = score, j
|
||||
}
|
||||
H[I+j0] = score
|
||||
}
|
||||
|
||||
if DEBUG {
|
||||
if i == 0 {
|
||||
fmt.Print(" ")
|
||||
for j := int(F[i]); j <= lastIdx; j++ {
|
||||
fmt.Printf(" " + string(input.Get(j)) + " ")
|
||||
}
|
||||
fmt.Println()
|
||||
}
|
||||
fmt.Print(string(pattern[i]) + " ")
|
||||
for idx := int(F[0]); idx < int(F[i]); idx++ {
|
||||
fmt.Print(" 0 ")
|
||||
}
|
||||
for idx := int(F[i]); idx <= lastIdx; idx++ {
|
||||
fmt.Printf("%2d ", H[i*width+idx-int(F[0])])
|
||||
}
|
||||
fmt.Println()
|
||||
|
||||
fmt.Print(" ")
|
||||
for idx, p := range C[I : I+width] {
|
||||
if idx+int(F[0]) < int(F[i]) {
|
||||
p = 0
|
||||
}
|
||||
fmt.Printf("%2d ", p)
|
||||
}
|
||||
fmt.Println()
|
||||
}
|
||||
}
|
||||
|
||||
// Phase 3. (Optional) Backtrace to find character positions
|
||||
pos := posArray(withPos, M)
|
||||
j := int(F[0])
|
||||
if withPos {
|
||||
i := M - 1
|
||||
j = maxScorePos
|
||||
preferMatch := true
|
||||
for {
|
||||
I := i * width
|
||||
j0 := j - int(F[0])
|
||||
s := H[I+j0]
|
||||
|
||||
var s1, s2 int16
|
||||
if i > 0 && j >= int(F[i]) {
|
||||
s1 = H[I-width+j0-1]
|
||||
}
|
||||
if j > int(F[i]) {
|
||||
s2 = H[I+j0-1]
|
||||
}
|
||||
|
||||
if s > s1 && (s > s2 || s == s2 && preferMatch) {
|
||||
*pos = append(*pos, j)
|
||||
if i == 0 {
|
||||
break
|
||||
}
|
||||
i--
|
||||
}
|
||||
preferMatch = C[I+j0] > 1 || I+width+j0+1 < len(C) && C[I+width+j0+1] > 0
|
||||
j--
|
||||
}
|
||||
}
|
||||
// Start offset we return here is only relevant when begin tiebreak is used.
|
||||
// However finding the accurate offset requires backtracking, and we don't
|
||||
// want to pay extra cost for the option that has lost its importance.
|
||||
return Result{j, maxScorePos + 1, int(maxScore)}, pos
|
||||
}
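// Illustration (added here, not part of the original diff): Phase 2 above
// fills the score matrix with a Smith-Waterman style recurrence in which
// omission of a pattern character is not allowed. Roughly,
//
//	H[i][j] = max(0,
//	              H[i-1][j-1] + scoreMatch + bonus,            // pattern[i] matches T[j]
//	              H[i][j-1]   + scoreGapStart or scoreGapExtention)
//
// and the maximum score in the last row (i == M-1) determines where the match
// ends; the optional Phase 3 then backtracks from that cell to recover the
// matched positions.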
|
||||
|
||||
// Implement the same sorting criteria as V2
|
||||
func calculateScore(caseSensitive bool, text util.Chars, pattern []rune, sidx int, eidx int, withPos bool) (int, *[]int) {
|
||||
pidx, score, inGap, consecutive, firstBonus := 0, 0, false, 0, int16(0)
|
||||
pos := posArray(withPos, len(pattern))
|
||||
prevClass := charNonWord
|
||||
if sidx > 0 {
|
||||
prevClass = charClassOf(text.Get(sidx - 1))
|
||||
}
|
||||
for idx := sidx; idx < eidx; idx++ {
|
||||
char := text.Get(idx)
|
||||
class := charClassOf(char)
|
||||
if !caseSensitive {
|
||||
if char >= 'A' && char <= 'Z' {
|
||||
char += 32
|
||||
} else if char > unicode.MaxASCII {
|
||||
char = unicode.To(unicode.LowerCase, char)
|
||||
}
|
||||
}
|
||||
if char == pattern[pidx] {
|
||||
if withPos {
|
||||
*pos = append(*pos, idx)
|
||||
}
|
||||
score += scoreMatch
|
||||
bonus := bonusFor(prevClass, class)
|
||||
if consecutive == 0 {
|
||||
firstBonus = bonus
|
||||
} else {
|
||||
// Break consecutive chunk
|
||||
if bonus == bonusBoundary {
|
||||
firstBonus = bonus
|
||||
}
|
||||
bonus = util.Max16(util.Max16(bonus, firstBonus), bonusConsecutive)
|
||||
}
|
||||
if pidx == 0 {
|
||||
score += int(bonus * bonusFirstCharMultiplier)
|
||||
} else {
|
||||
score += int(bonus)
|
||||
}
|
||||
inGap = false
|
||||
consecutive++
|
||||
pidx++
|
||||
} else {
|
||||
if inGap {
|
||||
score += scoreGapExtention
|
||||
} else {
|
||||
score += scoreGapStart
|
||||
}
|
||||
inGap = true
|
||||
consecutive = 0
|
||||
firstBonus = 0
|
||||
}
|
||||
prevClass = class
|
||||
}
|
||||
return score, pos
|
||||
}
|
||||
|
||||
// FuzzyMatchV1 performs fuzzy-match
|
||||
func FuzzyMatchV1(caseSensitive bool, forward bool, text util.Chars, pattern []rune, withPos bool, slab *util.Slab) (Result, *[]int) {
|
||||
if len(pattern) == 0 {
|
||||
return Result{0, 0, 0}, nil
|
||||
}
|
||||
|
||||
// 0. (FIXME) How to find the shortest match?
|
||||
// a_____b__c__abc
|
||||
// ^^^^^^^^^^ ^^^
|
||||
// 1. forward scan (abc)
|
||||
// *-----*-----*>
|
||||
// a_____b___abc__
|
||||
// 2. reverse scan (cba)
|
||||
// a_____b___abc__
|
||||
// <***
|
||||
pidx := 0
|
||||
sidx := -1
|
||||
eidx := -1
|
||||
@@ -157,7 +528,8 @@ func FuzzyMatch(caseSensitive bool, forward bool, text util.Chars, pattern []run
|
||||
if sidx >= 0 && eidx >= 0 {
|
||||
pidx--
|
||||
for index := eidx - 1; index >= sidx; index-- {
|
||||
char := text.Get(indexAt(index, lenRunes, forward))
|
||||
tidx := indexAt(index, lenRunes, forward)
|
||||
char := text.Get(tidx)
|
||||
if !caseSensitive {
|
||||
if char >= 'A' && char <= 'Z' {
|
||||
char += 32
|
||||
@@ -166,7 +538,8 @@ func FuzzyMatch(caseSensitive bool, forward bool, text util.Chars, pattern []run
|
||||
}
|
||||
}
|
||||
|
||||
pchar := pattern[indexAt(pidx, lenPattern, forward)]
|
||||
pidx_ := indexAt(pidx, lenPattern, forward)
|
||||
pchar := pattern[pidx_]
|
||||
if char == pchar {
|
||||
if pidx--; pidx < 0 {
|
||||
sidx = index
|
||||
@@ -175,16 +548,14 @@ func FuzzyMatch(caseSensitive bool, forward bool, text util.Chars, pattern []run
|
||||
}
|
||||
}
|
||||
|
||||
// Calculate the bonus. This can't be done at the same time as the
|
||||
// pattern scan above because 'forward' may be false.
|
||||
if !forward {
|
||||
sidx, eidx = lenRunes-eidx, lenRunes-sidx
|
||||
}
|
||||
|
||||
return Result{sidx, eidx,
|
||||
evaluateBonus(caseSensitive, text, pattern, sidx, eidx)}
|
||||
score, pos := calculateScore(caseSensitive, text, pattern, sidx, eidx, withPos)
|
||||
return Result{sidx, eidx, score}, pos
|
||||
}
|
||||
return Result{-1, -1, 0}
|
||||
return Result{-1, -1, 0}, nil
|
||||
}
|
||||
|
||||
// ExactMatchNaive is a basic string searching algorithm that handles case
|
||||
@@ -192,23 +563,28 @@ func FuzzyMatch(caseSensitive bool, forward bool, text util.Chars, pattern []run
|
||||
// of strings.ToLower + strings.Index for typical fzf use cases where input
|
||||
// strings and patterns are not very long.
|
||||
//
|
||||
// We might try to implement better algorithms in the future:
|
||||
// http://en.wikipedia.org/wiki/String_searching_algorithm
|
||||
func ExactMatchNaive(caseSensitive bool, forward bool, text util.Chars, pattern []rune) Result {
|
||||
// Since 0.15.0, this function searches for the match with the highest
|
||||
// bonus point, instead of stopping immediately after finding the first match.
|
||||
// The solution is much cheaper since there is only one possible alignment of
|
||||
// the pattern.
|
||||
func ExactMatchNaive(caseSensitive bool, forward bool, text util.Chars, pattern []rune, withPos bool, slab *util.Slab) (Result, *[]int) {
|
||||
if len(pattern) == 0 {
|
||||
return Result{0, 0, 0}
|
||||
return Result{0, 0, 0}, nil
|
||||
}
|
||||
|
||||
lenRunes := text.Length()
|
||||
lenPattern := len(pattern)
|
||||
|
||||
if lenRunes < lenPattern {
|
||||
return Result{-1, -1, 0}
|
||||
return Result{-1, -1, 0}, nil
|
||||
}
|
||||
|
||||
// For simplicity, only look at the bonus at the first character position
|
||||
pidx := 0
|
||||
bestPos, bonus, bestBonus := -1, int16(0), int16(-1)
|
||||
for index := 0; index < lenRunes; index++ {
|
||||
char := text.Get(indexAt(index, lenRunes, forward))
|
||||
index_ := indexAt(index, lenRunes, forward)
|
||||
char := text.Get(index_)
|
||||
if !caseSensitive {
|
||||
if char >= 'A' && char <= 'Z' {
|
||||
char += 32
|
||||
@@ -216,33 +592,51 @@ func ExactMatchNaive(caseSensitive bool, forward bool, text util.Chars, pattern
|
||||
char = unicode.To(unicode.LowerCase, char)
|
||||
}
|
||||
}
|
||||
pchar := pattern[indexAt(pidx, lenPattern, forward)]
|
||||
pidx_ := indexAt(pidx, lenPattern, forward)
|
||||
pchar := pattern[pidx_]
|
||||
if pchar == char {
|
||||
if pidx_ == 0 {
|
||||
bonus = bonusAt(text, index_)
|
||||
}
|
||||
pidx++
|
||||
if pidx == lenPattern {
|
||||
var sidx, eidx int
|
||||
if forward {
|
||||
sidx = index - lenPattern + 1
|
||||
eidx = index + 1
|
||||
} else {
|
||||
sidx = lenRunes - (index + 1)
|
||||
eidx = lenRunes - (index - lenPattern + 1)
|
||||
if bonus > bestBonus {
|
||||
bestPos, bestBonus = index, bonus
|
||||
}
|
||||
return Result{sidx, eidx,
|
||||
evaluateBonus(caseSensitive, text, pattern, sidx, eidx)}
|
||||
if bonus == bonusBoundary {
|
||||
break
|
||||
}
|
||||
index -= pidx - 1
|
||||
pidx, bonus = 0, 0
|
||||
}
|
||||
} else {
|
||||
index -= pidx
|
||||
pidx = 0
|
||||
pidx, bonus = 0, 0
|
||||
}
|
||||
}
|
||||
return Result{-1, -1, 0}
|
||||
if bestPos >= 0 {
|
||||
var sidx, eidx int
|
||||
if forward {
|
||||
sidx = bestPos - lenPattern + 1
|
||||
eidx = bestPos + 1
|
||||
} else {
|
||||
sidx = lenRunes - (bestPos + 1)
|
||||
eidx = lenRunes - (bestPos - lenPattern + 1)
|
||||
}
|
||||
score, _ := calculateScore(caseSensitive, text, pattern, sidx, eidx, false)
|
||||
return Result{sidx, eidx, score}, nil
|
||||
}
|
||||
return Result{-1, -1, 0}, nil
|
||||
}
|
||||
|
||||
// PrefixMatch performs prefix-match
|
||||
func PrefixMatch(caseSensitive bool, forward bool, text util.Chars, pattern []rune) Result {
|
||||
func PrefixMatch(caseSensitive bool, forward bool, text util.Chars, pattern []rune, withPos bool, slab *util.Slab) (Result, *[]int) {
|
||||
if len(pattern) == 0 {
|
||||
return Result{0, 0, 0}, nil
|
||||
}
|
||||
|
||||
if text.Length() < len(pattern) {
|
||||
return Result{-1, -1, 0}
|
||||
return Result{-1, -1, 0}, nil
|
||||
}
|
||||
|
||||
for index, r := range pattern {
|
||||
@@ -251,20 +645,24 @@ func PrefixMatch(caseSensitive bool, forward bool, text util.Chars, pattern []ru
|
||||
char = unicode.ToLower(char)
|
||||
}
|
||||
if char != r {
|
||||
return Result{-1, -1, 0}
|
||||
return Result{-1, -1, 0}, nil
|
||||
}
|
||||
}
|
||||
lenPattern := len(pattern)
|
||||
return Result{0, lenPattern,
|
||||
evaluateBonus(caseSensitive, text, pattern, 0, lenPattern)}
|
||||
score, _ := calculateScore(caseSensitive, text, pattern, 0, lenPattern, false)
|
||||
return Result{0, lenPattern, score}, nil
|
||||
}
|
||||
|
||||
// SuffixMatch performs suffix-match
|
||||
func SuffixMatch(caseSensitive bool, forward bool, text util.Chars, pattern []rune) Result {
|
||||
trimmedLen := text.Length() - text.TrailingWhitespaces()
|
||||
func SuffixMatch(caseSensitive bool, forward bool, text util.Chars, pattern []rune, withPos bool, slab *util.Slab) (Result, *[]int) {
|
||||
lenRunes := text.Length()
|
||||
trimmedLen := lenRunes - text.TrailingWhitespaces()
|
||||
if len(pattern) == 0 {
|
||||
return Result{trimmedLen, trimmedLen, 0}, nil
|
||||
}
|
||||
diff := trimmedLen - len(pattern)
|
||||
if diff < 0 {
|
||||
return Result{-1, -1, 0}
|
||||
return Result{-1, -1, 0}, nil
|
||||
}
|
||||
|
||||
for index, r := range pattern {
|
||||
@@ -273,28 +671,29 @@ func SuffixMatch(caseSensitive bool, forward bool, text util.Chars, pattern []ru
|
||||
char = unicode.ToLower(char)
|
||||
}
|
||||
if char != r {
|
||||
return Result{-1, -1, 0}
|
||||
return Result{-1, -1, 0}, nil
|
||||
}
|
||||
}
|
||||
lenPattern := len(pattern)
|
||||
sidx := trimmedLen - lenPattern
|
||||
eidx := trimmedLen
|
||||
return Result{sidx, eidx,
|
||||
evaluateBonus(caseSensitive, text, pattern, sidx, eidx)}
|
||||
score, _ := calculateScore(caseSensitive, text, pattern, sidx, eidx, false)
|
||||
return Result{sidx, eidx, score}, nil
|
||||
}
|
||||
|
||||
// EqualMatch performs equal-match
|
||||
func EqualMatch(caseSensitive bool, forward bool, text util.Chars, pattern []rune) Result {
|
||||
// Note: EqualMatch always return a zero bonus.
|
||||
if text.Length() != len(pattern) {
|
||||
return Result{-1, -1, 0}
|
||||
func EqualMatch(caseSensitive bool, forward bool, text util.Chars, pattern []rune, withPos bool, slab *util.Slab) (Result, *[]int) {
|
||||
lenPattern := len(pattern)
|
||||
if text.Length() != lenPattern {
|
||||
return Result{-1, -1, 0}, nil
|
||||
}
|
||||
runesStr := text.ToString()
|
||||
if !caseSensitive {
|
||||
runesStr = strings.ToLower(runesStr)
|
||||
}
|
||||
if runesStr == string(pattern) {
|
||||
return Result{0, len(pattern), 0}
|
||||
return Result{0, lenPattern, (scoreMatch+bonusBoundary)*lenPattern +
|
||||
(bonusFirstCharMultiplier-1)*bonusBoundary}, nil
|
||||
}
|
||||
return Result{-1, -1, 0}
|
||||
return Result{-1, -1, 0}, nil
|
||||
}
|
||||
|
@@ -1,95 +1,154 @@
|
||||
package algo
|
||||
|
||||
import (
|
||||
"sort"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/junegunn/fzf/src/util"
|
||||
)
|
||||
|
||||
func assertMatch(t *testing.T, fun func(bool, bool, util.Chars, []rune) Result, caseSensitive, forward bool, input, pattern string, sidx int, eidx int, bonus int) {
|
||||
func assertMatch(t *testing.T, fun Algo, caseSensitive, forward bool, input, pattern string, sidx int, eidx int, score int) {
|
||||
if !caseSensitive {
|
||||
pattern = strings.ToLower(pattern)
|
||||
}
|
||||
res := fun(caseSensitive, forward, util.RunesToChars([]rune(input)), []rune(pattern))
|
||||
if res.Start != sidx {
|
||||
t.Errorf("Invalid start index: %d (expected: %d, %s / %s)", res.Start, sidx, input, pattern)
|
||||
res, pos := fun(caseSensitive, forward, util.RunesToChars([]rune(input)), []rune(pattern), true, nil)
|
||||
var start, end int
|
||||
if pos == nil || len(*pos) == 0 {
|
||||
start = res.Start
|
||||
end = res.End
|
||||
} else {
|
||||
sort.Ints(*pos)
|
||||
start = (*pos)[0]
|
||||
end = (*pos)[len(*pos)-1] + 1
|
||||
}
|
||||
if res.End != eidx {
|
||||
t.Errorf("Invalid end index: %d (expected: %d, %s / %s)", res.End, eidx, input, pattern)
|
||||
if start != sidx {
|
||||
t.Errorf("Invalid start index: %d (expected: %d, %s / %s)", start, sidx, input, pattern)
|
||||
}
|
||||
if res.Bonus != bonus {
|
||||
t.Errorf("Invalid bonus: %d (expected: %d, %s / %s)", res.Bonus, bonus, input, pattern)
|
||||
if end != eidx {
|
||||
t.Errorf("Invalid end index: %d (expected: %d, %s / %s)", end, eidx, input, pattern)
|
||||
}
|
||||
if res.Score != score {
|
||||
t.Errorf("Invalid score: %d (expected: %d, %s / %s)", res.Score, score, input, pattern)
|
||||
}
|
||||
}
|
||||
|
||||
func TestFuzzyMatch(t *testing.T) {
|
||||
assertMatch(t, FuzzyMatch, false, true, "fooBarbaz", "oBZ", 2, 9, 2)
|
||||
assertMatch(t, FuzzyMatch, false, true, "foo bar baz", "fbb", 0, 9, 8)
|
||||
assertMatch(t, FuzzyMatch, false, true, "/AutomatorDocument.icns", "rdoc", 9, 13, 4)
|
||||
assertMatch(t, FuzzyMatch, false, true, "/man1/zshcompctl.1", "zshc", 6, 10, 7)
|
||||
assertMatch(t, FuzzyMatch, false, true, "/.oh-my-zsh/cache", "zshc", 8, 13, 8)
|
||||
assertMatch(t, FuzzyMatch, false, true, "ab0123 456", "12356", 3, 10, 3)
|
||||
assertMatch(t, FuzzyMatch, false, true, "abc123 456", "12356", 3, 10, 5)
|
||||
for _, fn := range []Algo{FuzzyMatchV1, FuzzyMatchV2} {
|
||||
for _, forward := range []bool{true, false} {
|
||||
assertMatch(t, fn, false, forward, "fooBarbaz1", "oBZ", 2, 9,
|
||||
scoreMatch*3+bonusCamel123+scoreGapStart+scoreGapExtention*3)
|
||||
assertMatch(t, fn, false, forward, "foo bar baz", "fbb", 0, 9,
|
||||
scoreMatch*3+bonusBoundary*bonusFirstCharMultiplier+
|
||||
bonusBoundary*2+2*scoreGapStart+4*scoreGapExtention)
|
||||
assertMatch(t, fn, false, forward, "/AutomatorDocument.icns", "rdoc", 9, 13,
|
||||
scoreMatch*4+bonusCamel123+bonusConsecutive*2)
|
||||
assertMatch(t, fn, false, forward, "/man1/zshcompctl.1", "zshc", 6, 10,
|
||||
scoreMatch*4+bonusBoundary*bonusFirstCharMultiplier+bonusBoundary*3)
|
||||
assertMatch(t, fn, false, forward, "/.oh-my-zsh/cache", "zshc", 8, 13,
|
||||
scoreMatch*4+bonusBoundary*bonusFirstCharMultiplier+bonusBoundary*3+scoreGapStart)
|
||||
assertMatch(t, fn, false, forward, "ab0123 456", "12356", 3, 10,
|
||||
scoreMatch*5+bonusConsecutive*3+scoreGapStart+scoreGapExtention)
|
||||
assertMatch(t, fn, false, forward, "abc123 456", "12356", 3, 10,
|
||||
scoreMatch*5+bonusCamel123*bonusFirstCharMultiplier+bonusCamel123*2+bonusConsecutive+scoreGapStart+scoreGapExtention)
|
||||
assertMatch(t, fn, false, forward, "foo/bar/baz", "fbb", 0, 9,
|
||||
scoreMatch*3+bonusBoundary*bonusFirstCharMultiplier+
|
||||
bonusBoundary*2+2*scoreGapStart+4*scoreGapExtention)
|
||||
assertMatch(t, fn, false, forward, "fooBarBaz", "fbb", 0, 7,
|
||||
scoreMatch*3+bonusBoundary*bonusFirstCharMultiplier+
|
||||
bonusCamel123*2+2*scoreGapStart+2*scoreGapExtention)
|
||||
assertMatch(t, fn, false, forward, "foo barbaz", "fbb", 0, 8,
|
||||
scoreMatch*3+bonusBoundary*bonusFirstCharMultiplier+bonusBoundary+
|
||||
scoreGapStart*2+scoreGapExtention*3)
|
||||
assertMatch(t, fn, false, forward, "fooBar Baz", "foob", 0, 4,
|
||||
scoreMatch*4+bonusBoundary*bonusFirstCharMultiplier+bonusBoundary*3)
|
||||
assertMatch(t, fn, false, forward, "xFoo-Bar Baz", "foo-b", 1, 6,
|
||||
scoreMatch*5+bonusCamel123*bonusFirstCharMultiplier+bonusCamel123*2+
|
||||
bonusNonWord+bonusBoundary)
|
||||
|
||||
assertMatch(t, FuzzyMatch, false, true, "foo/bar/baz", "fbb", 0, 9, 8)
|
||||
assertMatch(t, FuzzyMatch, false, true, "fooBarBaz", "fbb", 0, 7, 6)
|
||||
assertMatch(t, FuzzyMatch, false, true, "foo barbaz", "fbb", 0, 8, 6)
|
||||
assertMatch(t, FuzzyMatch, false, true, "fooBar Baz", "foob", 0, 4, 8)
|
||||
assertMatch(t, FuzzyMatch, true, true, "fooBarbaz", "oBZ", -1, -1, 0)
|
||||
assertMatch(t, FuzzyMatch, true, true, "fooBarbaz", "oBz", 2, 9, 2)
|
||||
assertMatch(t, FuzzyMatch, true, true, "Foo Bar Baz", "fbb", -1, -1, 0)
|
||||
assertMatch(t, FuzzyMatch, true, true, "Foo/Bar/Baz", "FBB", 0, 9, 8)
|
||||
assertMatch(t, FuzzyMatch, true, true, "FooBarBaz", "FBB", 0, 7, 6)
|
||||
assertMatch(t, FuzzyMatch, true, true, "foo BarBaz", "fBB", 0, 8, 7)
|
||||
assertMatch(t, FuzzyMatch, true, true, "FooBar Baz", "FooB", 0, 4, 8)
|
||||
assertMatch(t, FuzzyMatch, true, true, "fooBarbaz", "fooBarbazz", -1, -1, 0)
|
||||
assertMatch(t, fn, true, forward, "fooBarbaz", "oBz", 2, 9,
|
||||
scoreMatch*3+bonusCamel123+scoreGapStart+scoreGapExtention*3)
|
||||
assertMatch(t, fn, true, forward, "Foo/Bar/Baz", "FBB", 0, 9,
|
||||
scoreMatch*3+bonusBoundary*(bonusFirstCharMultiplier+2)+
|
||||
scoreGapStart*2+scoreGapExtention*4)
|
||||
assertMatch(t, fn, true, forward, "FooBarBaz", "FBB", 0, 7,
|
||||
scoreMatch*3+bonusBoundary*bonusFirstCharMultiplier+bonusCamel123*2+
|
||||
scoreGapStart*2+scoreGapExtention*2)
|
||||
assertMatch(t, fn, true, forward, "FooBar Baz", "FooB", 0, 4,
|
||||
scoreMatch*4+bonusBoundary*bonusFirstCharMultiplier+bonusBoundary*2+
|
||||
util.Max(bonusCamel123, bonusBoundary))
|
||||
|
||||
// Consecutive bonus updated
|
||||
assertMatch(t, fn, true, forward, "foo-bar", "o-ba", 2, 6,
|
||||
scoreMatch*4+bonusBoundary*3)
|
||||
|
||||
// Non-match
|
||||
assertMatch(t, fn, true, forward, "fooBarbaz", "oBZ", -1, -1, 0)
|
||||
assertMatch(t, fn, true, forward, "Foo Bar Baz", "fbb", -1, -1, 0)
|
||||
assertMatch(t, fn, true, forward, "fooBarbaz", "fooBarbazz", -1, -1, 0)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestFuzzyMatchBackward(t *testing.T) {
|
||||
assertMatch(t, FuzzyMatch, false, true, "foobar fb", "fb", 0, 4, 4)
|
||||
assertMatch(t, FuzzyMatch, false, false, "foobar fb", "fb", 7, 9, 5)
|
||||
assertMatch(t, FuzzyMatchV1, false, true, "foobar fb", "fb", 0, 4,
|
||||
scoreMatch*2+bonusBoundary*bonusFirstCharMultiplier+
|
||||
scoreGapStart+scoreGapExtention)
|
||||
assertMatch(t, FuzzyMatchV1, false, false, "foobar fb", "fb", 7, 9,
|
||||
scoreMatch*2+bonusBoundary*bonusFirstCharMultiplier+bonusBoundary)
|
||||
}
|
||||
|
||||
func TestExactMatchNaive(t *testing.T) {
|
||||
for _, dir := range []bool{true, false} {
|
||||
assertMatch(t, ExactMatchNaive, false, dir, "fooBarbaz", "oBA", 2, 5, 3)
|
||||
assertMatch(t, ExactMatchNaive, true, dir, "fooBarbaz", "oBA", -1, -1, 0)
|
||||
assertMatch(t, ExactMatchNaive, true, dir, "fooBarbaz", "fooBarbazz", -1, -1, 0)
|
||||
|
||||
assertMatch(t, ExactMatchNaive, false, dir, "/AutomatorDocument.icns", "rdoc", 9, 13, 4)
|
||||
assertMatch(t, ExactMatchNaive, false, dir, "/man1/zshcompctl.1", "zshc", 6, 10, 7)
|
||||
assertMatch(t, ExactMatchNaive, false, dir, "/.oh-my-zsh/cache", "zsh/c", 8, 13, 10)
|
||||
assertMatch(t, ExactMatchNaive, false, dir, "fooBarbaz", "oBA", 2, 5,
|
||||
scoreMatch*3+bonusCamel123+bonusConsecutive)
|
||||
assertMatch(t, ExactMatchNaive, false, dir, "/AutomatorDocument.icns", "rdoc", 9, 13,
|
||||
scoreMatch*4+bonusCamel123+bonusConsecutive*2)
|
||||
assertMatch(t, ExactMatchNaive, false, dir, "/man1/zshcompctl.1", "zshc", 6, 10,
|
||||
scoreMatch*4+bonusBoundary*(bonusFirstCharMultiplier+3))
|
||||
assertMatch(t, ExactMatchNaive, false, dir, "/.oh-my-zsh/cache", "zsh/c", 8, 13,
|
||||
scoreMatch*5+bonusBoundary*(bonusFirstCharMultiplier+4))
|
||||
}
|
||||
}
|
||||
|
||||
func TestExactMatchNaiveBackward(t *testing.T) {
|
||||
assertMatch(t, ExactMatchNaive, false, true, "foobar foob", "oo", 1, 3, 1)
|
||||
assertMatch(t, ExactMatchNaive, false, false, "foobar foob", "oo", 8, 10, 1)
|
||||
assertMatch(t, ExactMatchNaive, false, true, "foobar foob", "oo", 1, 3,
|
||||
scoreMatch*2+bonusConsecutive)
|
||||
assertMatch(t, ExactMatchNaive, false, false, "foobar foob", "oo", 8, 10,
|
||||
scoreMatch*2+bonusConsecutive)
|
||||
}
|
||||
|
||||
func TestPrefixMatch(t *testing.T) {
|
||||
score := (scoreMatch+bonusBoundary)*3 + bonusBoundary*(bonusFirstCharMultiplier-1)
|
||||
|
||||
for _, dir := range []bool{true, false} {
|
||||
assertMatch(t, PrefixMatch, true, dir, "fooBarbaz", "Foo", -1, -1, 0)
|
||||
assertMatch(t, PrefixMatch, false, dir, "fooBarBaz", "baz", -1, -1, 0)
|
||||
assertMatch(t, PrefixMatch, false, dir, "fooBarbaz", "Foo", 0, 3, 6)
|
||||
assertMatch(t, PrefixMatch, false, dir, "foOBarBaZ", "foo", 0, 3, 7)
|
||||
assertMatch(t, PrefixMatch, false, dir, "f-oBarbaz", "f-o", 0, 3, 8)
|
||||
assertMatch(t, PrefixMatch, false, dir, "fooBarbaz", "Foo", 0, 3, score)
|
||||
assertMatch(t, PrefixMatch, false, dir, "foOBarBaZ", "foo", 0, 3, score)
|
||||
assertMatch(t, PrefixMatch, false, dir, "f-oBarbaz", "f-o", 0, 3, score)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSuffixMatch(t *testing.T) {
|
||||
for _, dir := range []bool{true, false} {
|
||||
assertMatch(t, SuffixMatch, false, dir, "fooBarbaz", "Foo", -1, -1, 0)
|
||||
assertMatch(t, SuffixMatch, false, dir, "fooBarbaz", "baz", 6, 9, 2)
|
||||
assertMatch(t, SuffixMatch, false, dir, "fooBarBaZ", "baz", 6, 9, 5)
|
||||
assertMatch(t, SuffixMatch, true, dir, "fooBarbaz", "Baz", -1, -1, 0)
|
||||
assertMatch(t, SuffixMatch, false, dir, "fooBarbaz", "Foo", -1, -1, 0)
|
||||
|
||||
assertMatch(t, SuffixMatch, false, dir, "fooBarbaz", "baz", 6, 9,
|
||||
scoreMatch*3+bonusConsecutive*2)
|
||||
assertMatch(t, SuffixMatch, false, dir, "fooBarBaZ", "baz", 6, 9,
|
||||
(scoreMatch+bonusCamel123)*3+bonusCamel123*(bonusFirstCharMultiplier-1))
|
||||
}
|
||||
}
|
||||
|
||||
func TestEmptyPattern(t *testing.T) {
|
||||
for _, dir := range []bool{true, false} {
|
||||
assertMatch(t, FuzzyMatch, true, dir, "foobar", "", 0, 0, 0)
|
||||
assertMatch(t, FuzzyMatchV1, true, dir, "foobar", "", 0, 0, 0)
|
||||
assertMatch(t, FuzzyMatchV2, true, dir, "foobar", "", 0, 0, 0)
|
||||
assertMatch(t, ExactMatchNaive, true, dir, "foobar", "", 0, 0, 0)
|
||||
assertMatch(t, PrefixMatch, true, dir, "foobar", "", 0, 0, 0)
|
||||
assertMatch(t, SuffixMatch, true, dir, "foobar", "", 6, 6, 0)
|
||||
|
@@ -9,7 +9,7 @@ import (
|
||||
|
||||
func TestChunkList(t *testing.T) {
|
||||
// FIXME global
|
||||
sortCriteria = []criterion{byMatchLen, byLength}
|
||||
sortCriteria = []criterion{byScore, byLength}
|
||||
|
||||
cl := NewChunkList(func(s []byte, i int) *Item {
|
||||
return &Item{text: util.ToChars(s), index: int32(i * 2)}
|
||||
|
@@ -8,7 +8,7 @@ import (
|
||||
|
||||
const (
|
||||
// Current version
|
||||
version = "0.13.5"
|
||||
version = "0.15.0"
|
||||
|
||||
// Core
|
||||
coordinatorDelayMax time.Duration = 100 * time.Millisecond
|
||||
@@ -19,16 +19,23 @@ const (
|
||||
readerBufferSize = 64 * 1024
|
||||
|
||||
// Terminal
|
||||
initialDelay = 20 * time.Millisecond
|
||||
initialDelayTac = 100 * time.Millisecond
|
||||
spinnerDuration = 200 * time.Millisecond
|
||||
initialDelay = 20 * time.Millisecond
|
||||
initialDelayTac = 100 * time.Millisecond
|
||||
spinnerDuration = 200 * time.Millisecond
|
||||
maxPatternLength = 100
|
||||
|
||||
// Matcher
|
||||
progressMinDuration = 200 * time.Millisecond
|
||||
numPartitionsMultiplier = 8
|
||||
maxPartitions = 32
|
||||
progressMinDuration = 200 * time.Millisecond
|
||||
|
||||
// Capacity of each chunk
|
||||
chunkSize int = 100
|
||||
|
||||
// Pre-allocated memory slices to minimize GC
|
||||
slab16Size int = 100 * 1024 // 200KB * 32 = 12.8MB
|
||||
slab32Size int = 2048 // 8KB * 32 = 256KB
|
||||
|
||||
// Do not cache results of low selectivity queries
|
||||
queryCacheMax int = chunkSize / 5
|
||||
|
||||
|
19
src/core.go
19
src/core.go
@@ -143,27 +143,28 @@ func Run(opts *Options) {
|
||||
}
|
||||
patternBuilder := func(runes []rune) *Pattern {
|
||||
return BuildPattern(
|
||||
opts.Fuzzy, opts.Extended, opts.Case, forward, opts.Filter == nil,
|
||||
opts.Nth, opts.Delimiter, runes)
|
||||
opts.Fuzzy, opts.FuzzyAlgo, opts.Extended, opts.Case, forward,
|
||||
opts.Filter == nil, opts.Nth, opts.Delimiter, runes)
|
||||
}
|
||||
matcher := NewMatcher(patternBuilder, sort, opts.Tac, eventBox)
|
||||
|
||||
// Filtering mode
|
||||
if opts.Filter != nil {
|
||||
if opts.PrintQuery {
|
||||
fmt.Println(*opts.Filter)
|
||||
opts.Printer(*opts.Filter)
|
||||
}
|
||||
|
||||
pattern := patternBuilder([]rune(*opts.Filter))
|
||||
|
||||
found := false
|
||||
if streamingFilter {
|
||||
slab := util.MakeSlab(slab16Size, slab32Size)
|
||||
reader := Reader{
|
||||
func(runes []byte) bool {
|
||||
item := chunkList.trans(runes, 0)
|
||||
if item != nil {
|
||||
if result, _ := pattern.MatchItem(item); result != nil {
|
||||
fmt.Println(item.text.ToString())
|
||||
if result, _, _ := pattern.MatchItem(item, false, slab); result != nil {
|
||||
opts.Printer(item.text.ToString())
|
||||
found = true
|
||||
}
|
||||
}
|
||||
@@ -179,7 +180,7 @@ func Run(opts *Options) {
|
||||
chunks: snapshot,
|
||||
pattern: pattern})
|
||||
for i := 0; i < merger.Length(); i++ {
|
||||
fmt.Println(merger.Get(i).item.AsString(opts.Ansi))
|
||||
opts.Printer(merger.Get(i).item.AsString(opts.Ansi))
|
||||
found = true
|
||||
}
|
||||
}
|
||||
@@ -253,13 +254,13 @@ func Run(opts *Options) {
|
||||
} else if val.final {
|
||||
if opts.Exit0 && count == 0 || opts.Select1 && count == 1 {
|
||||
if opts.PrintQuery {
|
||||
fmt.Println(opts.Query)
|
||||
opts.Printer(opts.Query)
|
||||
}
|
||||
if len(opts.Expect) > 0 {
|
||||
fmt.Println()
|
||||
opts.Printer("")
|
||||
}
|
||||
for i := 0; i < count; i++ {
|
||||
fmt.Println(val.Get(i).item.AsString(opts.Ansi))
|
||||
opts.Printer(val.Get(i).item.AsString(opts.Ansi))
|
||||
}
|
||||
if count > 0 {
|
||||
os.Exit(exitOk)
|
||||
|
@@ -26,6 +26,7 @@ type Matcher struct {
|
||||
eventBox *util.EventBox
|
||||
reqBox *util.EventBox
|
||||
partitions int
|
||||
slab []*util.Slab
|
||||
mergerCache map[string]*Merger
|
||||
}
|
||||
|
||||
@@ -37,13 +38,15 @@ const (
|
||||
// NewMatcher returns a new Matcher
|
||||
func NewMatcher(patternBuilder func([]rune) *Pattern,
|
||||
sort bool, tac bool, eventBox *util.EventBox) *Matcher {
|
||||
partitions := util.Min(numPartitionsMultiplier*runtime.NumCPU(), maxPartitions)
|
||||
return &Matcher{
|
||||
patternBuilder: patternBuilder,
|
||||
sort: sort,
|
||||
tac: tac,
|
||||
eventBox: eventBox,
|
||||
reqBox: util.NewEventBox(),
|
||||
partitions: util.Min(8*runtime.NumCPU(), 32),
|
||||
partitions: partitions,
|
||||
slab: make([]*util.Slab, partitions),
|
||||
mergerCache: make(map[string]*Merger)}
|
||||
}
|
||||
|
||||
@@ -153,12 +156,15 @@ func (m *Matcher) scan(request MatchRequest) (*Merger, bool) {
|
||||
|
||||
for idx, chunks := range slices {
|
||||
waitGroup.Add(1)
|
||||
go func(idx int, chunks []*Chunk) {
|
||||
if m.slab[idx] == nil {
|
||||
m.slab[idx] = util.MakeSlab(slab16Size, slab32Size)
|
||||
}
|
||||
go func(idx int, slab *util.Slab, chunks []*Chunk) {
|
||||
defer func() { waitGroup.Done() }()
|
||||
count := 0
|
||||
allMatches := make([][]*Result, len(chunks))
|
||||
for idx, chunk := range chunks {
|
||||
matches := request.pattern.Match(chunk)
|
||||
matches := request.pattern.Match(chunk, slab)
|
||||
allMatches[idx] = matches
|
||||
count += len(matches)
|
||||
if cancelled.Get() {
|
||||
@@ -178,7 +184,7 @@ func (m *Matcher) scan(request MatchRequest) (*Merger, bool) {
|
||||
}
|
||||
}
|
||||
resultChan <- partialResult{idx, sliceMatches}
|
||||
}(idx, chunks)
|
||||
}(idx, m.slab[idx], chunks)
|
||||
}
|
||||
|
||||
wait := func() bool {
|
||||
|
@@ -8,6 +8,7 @@ import (
     "strings"
     "unicode/utf8"

+    "github.com/junegunn/fzf/src/algo"
     "github.com/junegunn/fzf/src/curses"

     "github.com/junegunn/go-shellwords"
@@ -19,6 +20,7 @@ const usage = `usage: fzf [options]
     -x, --extended        Extended-search mode
                           (enabled by default; +x or --no-extended to disable)
     -e, --exact           Enable Exact-match
+    --algo=TYPE           Fuzzy matching algorithm: [v1|v2] (default: v2)
     -i                    Case-insensitive match (default: smart-case match)
     +i                    Case-sensitive match
     -n, --nth=N[,..]      Comma-separated list of field index expressions
@@ -94,8 +96,7 @@ const (
 type criterion int

 const (
-    byMatchLen criterion = iota
-    byBonus
+    byScore criterion = iota
     byLength
     byBegin
     byEnd
@@ -129,6 +130,7 @@ type previewOpts struct {
 // Options stores the values of command-line options
 type Options struct {
     Fuzzy      bool
+    FuzzyAlgo  algo.Algo
     Extended   bool
     Case       Case
     Nth        []Range
@@ -160,6 +162,7 @@ type Options struct {
     Preview    previewOpts
     PrintQuery bool
     ReadZero   bool
+    Printer    func(string)
     Sync       bool
     History    *History
     Header     []string
@@ -172,6 +175,7 @@ type Options struct {
 func defaultOptions() *Options {
     return &Options{
         Fuzzy:      true,
+        FuzzyAlgo:  algo.FuzzyMatchV2,
         Extended:   true,
         Case:       CaseSmart,
         Nth:        make([]Range, 0),
@@ -179,7 +183,7 @@ func defaultOptions() *Options {
         Delimiter:  Delimiter{},
         Sort:       1000,
         Tac:        false,
-        Criteria:   []criterion{byMatchLen, byBonus, byLength},
+        Criteria:   []criterion{byScore, byLength},
         Multi:      false,
         Ansi:       false,
         Mouse:      true,
@@ -203,6 +207,7 @@ func defaultOptions() *Options {
         Preview:    previewOpts{"", posRight, sizeSpec{50, true}, false},
         PrintQuery: false,
         ReadZero:   false,
+        Printer:    func(str string) { fmt.Println(str) },
         Sync:       false,
         History:    nil,
         Header:     make([]string, 0),
@@ -322,6 +327,18 @@ func isAlphabet(char uint8) bool {
     return char >= 'a' && char <= 'z'
 }

+func parseAlgo(str string) algo.Algo {
+    switch str {
+    case "v1":
+        return algo.FuzzyMatchV1
+    case "v2":
+        return algo.FuzzyMatchV2
+    default:
+        errorExit("invalid algorithm (expected: v1 or v2)")
+    }
+    return algo.FuzzyMatchV2
+}
+
 func parseKeyChords(str string, message string) map[int]string {
     if len(str) == 0 {
         errorExit(message)
@@ -407,7 +424,7 @@ func parseKeyChords(str string, message string) map[int]string {
 }

 func parseTiebreak(str string) []criterion {
-    criteria := []criterion{byMatchLen, byBonus}
+    criteria := []criterion{byScore}
     hasIndex := false
     hasLength := false
     hasBegin := false
@@ -834,6 +851,8 @@ func parseOptions(opts *Options, allArgs []string) {
         case "-f", "--filter":
             filter := nextString(allArgs, &i, "query string required")
             opts.Filter = &filter
+        case "--algo":
+            opts.FuzzyAlgo = parseAlgo(nextString(allArgs, &i, "algorithm required (v1|v2)"))
         case "--expect":
             opts.Expect = parseKeyChords(nextString(allArgs, &i, "key names required"), "key names required")
         case "--tiebreak":
@@ -918,6 +937,10 @@ func parseOptions(opts *Options, allArgs []string) {
             opts.ReadZero = true
         case "--no-read0":
             opts.ReadZero = false
+        case "--print0":
+            opts.Printer = func(str string) { fmt.Print(str, "\x00") }
+        case "--no-print0":
+            opts.Printer = func(str string) { fmt.Println(str) }
         case "--print-query":
             opts.PrintQuery = true
         case "--no-print-query":
@@ -962,7 +985,9 @@ func parseOptions(opts *Options, allArgs []string) {
         case "--version":
             opts.Version = true
         default:
-            if match, value := optString(arg, "-q", "--query="); match {
+            if match, value := optString(arg, "--algo="); match {
+                opts.FuzzyAlgo = parseAlgo(value)
+            } else if match, value := optString(arg, "-q", "--query="); match {
                 opts.Query = value
             } else if match, value := optString(arg, "-f", "--filter="); match {
                 opts.Filter = &value
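Routing all output through the new `Printer` closure is what makes `--print0` a one-line change: the flag only swaps the closure, while every call site keeps calling `opts.Printer`. Below is a minimal standalone sketch of that idea; the option handling and type names here are illustrative, not the actual fzf API.

```go
// Sketch: a swappable printer closure; --print0 only replaces the closure,
// call sites stay untouched.
package main

import (
	"fmt"
	"os"
)

type options struct {
	printer func(string)
}

func parse(args []string) *options {
	opts := &options{printer: func(s string) { fmt.Println(s) }} // default: newline-delimited
	for _, arg := range args {
		switch arg {
		case "--print0":
			opts.printer = func(s string) { fmt.Print(s, "\x00") } // NUL-delimited
		case "--no-print0":
			opts.printer = func(s string) { fmt.Println(s) }
		}
	}
	return opts
}

func main() {
	opts := parse(os.Args[1:])
	for _, item := range []string{"foo bar", "baz"} {
		opts.printer(item) // callers never know which delimiter is active
	}
}
```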
@@ -40,6 +40,7 @@ type termSet []term
|
||||
// Pattern represents search pattern
|
||||
type Pattern struct {
|
||||
fuzzy bool
|
||||
fuzzyAlgo algo.Algo
|
||||
extended bool
|
||||
caseSensitive bool
|
||||
forward bool
|
||||
@@ -48,7 +49,7 @@ type Pattern struct {
|
||||
cacheable bool
|
||||
delimiter Delimiter
|
||||
nth []Range
|
||||
procFun map[termType]func(bool, bool, util.Chars, []rune) algo.Result
|
||||
procFun map[termType]algo.Algo
|
||||
}
|
||||
|
||||
var (
|
||||
@@ -74,7 +75,7 @@ func clearChunkCache() {
|
||||
}
|
||||
|
||||
// BuildPattern builds Pattern object from the given arguments
|
||||
func BuildPattern(fuzzy bool, extended bool, caseMode Case, forward bool,
|
||||
func BuildPattern(fuzzy bool, fuzzyAlgo algo.Algo, extended bool, caseMode Case, forward bool,
|
||||
cacheable bool, nth []Range, delimiter Delimiter, runes []rune) *Pattern {
|
||||
|
||||
var asString string
|
||||
@@ -116,6 +117,7 @@ func BuildPattern(fuzzy bool, extended bool, caseMode Case, forward bool,
|
||||
|
||||
ptr := &Pattern{
|
||||
fuzzy: fuzzy,
|
||||
fuzzyAlgo: fuzzyAlgo,
|
||||
extended: extended,
|
||||
caseSensitive: caseSensitive,
|
||||
forward: forward,
|
||||
@@ -124,9 +126,9 @@ func BuildPattern(fuzzy bool, extended bool, caseMode Case, forward bool,
|
||||
cacheable: cacheable,
|
||||
nth: nth,
|
||||
delimiter: delimiter,
|
||||
procFun: make(map[termType]func(bool, bool, util.Chars, []rune) algo.Result)}
|
||||
procFun: make(map[termType]algo.Algo)}
|
||||
|
||||
ptr.procFun[termFuzzy] = algo.FuzzyMatch
|
||||
ptr.procFun[termFuzzy] = fuzzyAlgo
|
||||
ptr.procFun[termEqual] = algo.EqualMatch
|
||||
ptr.procFun[termExact] = algo.ExactMatchNaive
|
||||
ptr.procFun[termPrefix] = algo.PrefixMatch
|
||||
@@ -234,7 +236,7 @@ func (p *Pattern) CacheKey() string {
|
||||
}
|
||||
|
||||
// Match returns the list of matches Items in the given Chunk
|
||||
func (p *Pattern) Match(chunk *Chunk) []*Result {
|
||||
func (p *Pattern) Match(chunk *Chunk, slab *util.Slab) []*Result {
|
||||
// ChunkCache: Exact match
|
||||
cacheKey := p.CacheKey()
|
||||
if p.cacheable {
|
||||
@@ -260,7 +262,7 @@ Loop:
|
||||
}
|
||||
}
|
||||
|
||||
matches := p.matchChunk(chunk, space)
|
||||
matches := p.matchChunk(chunk, space, slab)
|
||||
|
||||
if p.cacheable {
|
||||
_cache.Add(chunk, cacheKey, matches)
|
||||
@@ -268,18 +270,18 @@ Loop:
|
||||
return matches
|
||||
}
|
||||
|
||||
func (p *Pattern) matchChunk(chunk *Chunk, space []*Result) []*Result {
|
||||
func (p *Pattern) matchChunk(chunk *Chunk, space []*Result, slab *util.Slab) []*Result {
|
||||
matches := []*Result{}
|
||||
|
||||
if space == nil {
|
||||
for _, item := range *chunk {
|
||||
if match, _ := p.MatchItem(item); match != nil {
|
||||
if match, _, _ := p.MatchItem(item, false, slab); match != nil {
|
||||
matches = append(matches, match)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
for _, result := range space {
|
||||
if match, _ := p.MatchItem(result.item); match != nil {
|
||||
if match, _, _ := p.MatchItem(result.item, false, slab); match != nil {
|
||||
matches = append(matches, match)
|
||||
}
|
||||
}
|
||||
@@ -288,62 +290,75 @@ func (p *Pattern) matchChunk(chunk *Chunk, space []*Result) []*Result {
|
||||
}
|
||||
|
||||
// MatchItem returns true if the Item is a match
|
||||
func (p *Pattern) MatchItem(item *Item) (*Result, []Offset) {
|
||||
func (p *Pattern) MatchItem(item *Item, withPos bool, slab *util.Slab) (*Result, []Offset, *[]int) {
|
||||
if p.extended {
|
||||
if offsets, bonus, trimLen := p.extendedMatch(item); len(offsets) == len(p.termSets) {
|
||||
return buildResult(item, offsets, bonus, trimLen), offsets
|
||||
if offsets, bonus, trimLen, pos := p.extendedMatch(item, withPos, slab); len(offsets) == len(p.termSets) {
|
||||
return buildResult(item, offsets, bonus, trimLen), offsets, pos
|
||||
}
|
||||
return nil, nil
|
||||
return nil, nil, nil
|
||||
}
|
||||
offset, bonus, trimLen := p.basicMatch(item)
|
||||
offset, bonus, trimLen, pos := p.basicMatch(item, withPos, slab)
|
||||
if sidx := offset[0]; sidx >= 0 {
|
||||
offsets := []Offset{offset}
|
||||
return buildResult(item, offsets, bonus, trimLen), offsets
|
||||
return buildResult(item, offsets, bonus, trimLen), offsets, pos
|
||||
}
|
||||
return nil, nil
|
||||
return nil, nil, nil
|
||||
}
|
||||
|
||||
func (p *Pattern) basicMatch(item *Item) (Offset, int, int) {
|
||||
func (p *Pattern) basicMatch(item *Item, withPos bool, slab *util.Slab) (Offset, int, int, *[]int) {
|
||||
input := p.prepareInput(item)
|
||||
if p.fuzzy {
|
||||
return p.iter(algo.FuzzyMatch, input, p.caseSensitive, p.forward, p.text)
|
||||
return p.iter(p.fuzzyAlgo, input, p.caseSensitive, p.forward, p.text, withPos, slab)
|
||||
}
|
||||
return p.iter(algo.ExactMatchNaive, input, p.caseSensitive, p.forward, p.text)
|
||||
return p.iter(algo.ExactMatchNaive, input, p.caseSensitive, p.forward, p.text, withPos, slab)
|
||||
}
|
||||
|
||||
func (p *Pattern) extendedMatch(item *Item) ([]Offset, int, int) {
|
||||
func (p *Pattern) extendedMatch(item *Item, withPos bool, slab *util.Slab) ([]Offset, int, int, *[]int) {
|
||||
input := p.prepareInput(item)
|
||||
offsets := []Offset{}
|
||||
var totalBonus int
|
||||
var totalScore int
|
||||
var totalTrimLen int
|
||||
var allPos *[]int
|
||||
if withPos {
|
||||
allPos = &[]int{}
|
||||
}
|
||||
for _, termSet := range p.termSets {
|
||||
var offset Offset
|
||||
var bonus int
|
||||
var currentScore int
|
||||
var trimLen int
|
||||
matched := false
|
||||
for _, term := range termSet {
|
||||
pfun := p.procFun[term.typ]
|
||||
off, pen, tLen := p.iter(pfun, input, term.caseSensitive, p.forward, term.text)
|
||||
off, score, tLen, pos := p.iter(pfun, input, term.caseSensitive, p.forward, term.text, withPos, slab)
|
||||
if sidx := off[0]; sidx >= 0 {
|
||||
if term.inv {
|
||||
continue
|
||||
}
|
||||
offset, bonus, trimLen = off, pen, tLen
|
||||
offset, currentScore, trimLen = off, score, tLen
|
||||
matched = true
|
||||
if withPos {
|
||||
if pos != nil {
|
||||
*allPos = append(*allPos, *pos...)
|
||||
} else {
|
||||
for idx := off[0]; idx < off[1]; idx++ {
|
||||
*allPos = append(*allPos, int(idx))
|
||||
}
|
||||
}
|
||||
}
|
||||
break
|
||||
} else if term.inv {
|
||||
offset, bonus, trimLen = Offset{0, 0}, 0, 0
|
||||
offset, currentScore, trimLen = Offset{0, 0}, 0, 0
|
||||
matched = true
|
||||
continue
|
||||
}
|
||||
}
|
||||
if matched {
|
||||
offsets = append(offsets, offset)
|
||||
totalBonus += bonus
|
||||
totalScore += currentScore
|
||||
totalTrimLen += trimLen
|
||||
}
|
||||
}
|
||||
return offsets, totalBonus, totalTrimLen
|
||||
return offsets, totalScore, totalTrimLen, allPos
|
||||
}
|
||||
|
||||
func (p *Pattern) prepareInput(item *Item) []Token {
|
||||
@@ -362,14 +377,18 @@ func (p *Pattern) prepareInput(item *Item) []Token {
|
||||
return ret
|
||||
}
|
||||
|
||||
func (p *Pattern) iter(pfun func(bool, bool, util.Chars, []rune) algo.Result,
|
||||
tokens []Token, caseSensitive bool, forward bool, pattern []rune) (Offset, int, int) {
|
||||
func (p *Pattern) iter(pfun algo.Algo, tokens []Token, caseSensitive bool, forward bool, pattern []rune, withPos bool, slab *util.Slab) (Offset, int, int, *[]int) {
|
||||
for _, part := range tokens {
|
||||
if res := pfun(caseSensitive, forward, *part.text, pattern); res.Start >= 0 {
|
||||
if res, pos := pfun(caseSensitive, forward, *part.text, pattern, withPos, slab); res.Start >= 0 {
|
||||
sidx := int32(res.Start) + part.prefixLength
|
||||
eidx := int32(res.End) + part.prefixLength
|
||||
return Offset{sidx, eidx}, res.Bonus, int(part.trimLength)
|
||||
if pos != nil {
|
||||
for idx := range *pos {
|
||||
(*pos)[idx] += int(part.prefixLength)
|
||||
}
|
||||
}
|
||||
return Offset{sidx, eidx}, res.Score, int(part.trimLength), pos
|
||||
}
|
||||
}
|
||||
return Offset{-1, -1}, 0, -1
|
||||
return Offset{-1, -1}, 0, -1, nil
|
||||
}
|
||||
|
@@ -8,6 +8,12 @@ import (
|
||||
"github.com/junegunn/fzf/src/util"
|
||||
)
|
||||
|
||||
var slab *util.Slab
|
||||
|
||||
func init() {
|
||||
slab = util.MakeSlab(slab16Size, slab32Size)
|
||||
}
|
||||
|
||||
func TestParseTermsExtended(t *testing.T) {
|
||||
terms := parseTerms(true, CaseSmart,
|
||||
"| aaa 'bbb ^ccc ddd$ !eee !'fff !^ggg !hhh$ | ^iii$ ^xxx | 'yyy | | zzz$ | !ZZZ |")
|
||||
@@ -69,26 +75,32 @@ func TestParseTermsEmpty(t *testing.T) {
|
||||
func TestExact(t *testing.T) {
|
||||
defer clearPatternCache()
|
||||
clearPatternCache()
|
||||
pattern := BuildPattern(true, true, CaseSmart, true, true,
|
||||
pattern := BuildPattern(true, algo.FuzzyMatchV2, true, CaseSmart, true, true,
|
||||
[]Range{}, Delimiter{}, []rune("'abc"))
|
||||
res := algo.ExactMatchNaive(
|
||||
pattern.caseSensitive, pattern.forward, util.RunesToChars([]rune("aabbcc abc")), pattern.termSets[0][0].text)
|
||||
res, pos := algo.ExactMatchNaive(
|
||||
pattern.caseSensitive, pattern.forward, util.RunesToChars([]rune("aabbcc abc")), pattern.termSets[0][0].text, true, nil)
|
||||
if res.Start != 7 || res.End != 10 {
|
||||
t.Errorf("%s / %d / %d", pattern.termSets, res.Start, res.End)
|
||||
}
|
||||
if pos != nil {
|
||||
t.Errorf("pos is expected to be nil")
|
||||
}
|
||||
}
|
||||
|
||||
func TestEqual(t *testing.T) {
|
||||
defer clearPatternCache()
|
||||
clearPatternCache()
|
||||
pattern := BuildPattern(true, true, CaseSmart, true, true, []Range{}, Delimiter{}, []rune("^AbC$"))
|
||||
pattern := BuildPattern(true, algo.FuzzyMatchV2, true, CaseSmart, true, true, []Range{}, Delimiter{}, []rune("^AbC$"))
|
||||
|
||||
match := func(str string, sidxExpected int, eidxExpected int) {
|
||||
res := algo.EqualMatch(
|
||||
pattern.caseSensitive, pattern.forward, util.RunesToChars([]rune(str)), pattern.termSets[0][0].text)
|
||||
res, pos := algo.EqualMatch(
|
||||
pattern.caseSensitive, pattern.forward, util.RunesToChars([]rune(str)), pattern.termSets[0][0].text, true, nil)
|
||||
if res.Start != sidxExpected || res.End != eidxExpected {
|
||||
t.Errorf("%s / %d / %d", pattern.termSets, res.Start, res.End)
|
||||
}
|
||||
if pos != nil {
|
||||
t.Errorf("pos is expected to be nil")
|
||||
}
|
||||
}
|
||||
match("ABC", -1, -1)
|
||||
match("AbC", 0, 3)
|
||||
@@ -97,17 +109,17 @@ func TestEqual(t *testing.T) {
|
||||
func TestCaseSensitivity(t *testing.T) {
|
||||
defer clearPatternCache()
|
||||
clearPatternCache()
|
||||
pat1 := BuildPattern(true, false, CaseSmart, true, true, []Range{}, Delimiter{}, []rune("abc"))
|
||||
pat1 := BuildPattern(true, algo.FuzzyMatchV2, false, CaseSmart, true, true, []Range{}, Delimiter{}, []rune("abc"))
|
||||
clearPatternCache()
|
||||
pat2 := BuildPattern(true, false, CaseSmart, true, true, []Range{}, Delimiter{}, []rune("Abc"))
|
||||
pat2 := BuildPattern(true, algo.FuzzyMatchV2, false, CaseSmart, true, true, []Range{}, Delimiter{}, []rune("Abc"))
|
||||
clearPatternCache()
|
||||
pat3 := BuildPattern(true, false, CaseIgnore, true, true, []Range{}, Delimiter{}, []rune("abc"))
|
||||
pat3 := BuildPattern(true, algo.FuzzyMatchV2, false, CaseIgnore, true, true, []Range{}, Delimiter{}, []rune("abc"))
|
||||
clearPatternCache()
|
||||
pat4 := BuildPattern(true, false, CaseIgnore, true, true, []Range{}, Delimiter{}, []rune("Abc"))
|
||||
pat4 := BuildPattern(true, algo.FuzzyMatchV2, false, CaseIgnore, true, true, []Range{}, Delimiter{}, []rune("Abc"))
|
||||
clearPatternCache()
|
||||
pat5 := BuildPattern(true, false, CaseRespect, true, true, []Range{}, Delimiter{}, []rune("abc"))
|
||||
pat5 := BuildPattern(true, algo.FuzzyMatchV2, false, CaseRespect, true, true, []Range{}, Delimiter{}, []rune("abc"))
|
||||
clearPatternCache()
|
||||
pat6 := BuildPattern(true, false, CaseRespect, true, true, []Range{}, Delimiter{}, []rune("Abc"))
|
||||
pat6 := BuildPattern(true, algo.FuzzyMatchV2, false, CaseRespect, true, true, []Range{}, Delimiter{}, []rune("Abc"))
|
||||
|
||||
if string(pat1.text) != "abc" || pat1.caseSensitive != false ||
|
||||
string(pat2.text) != "Abc" || pat2.caseSensitive != true ||
|
||||
@@ -120,7 +132,7 @@ func TestCaseSensitivity(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestOrigTextAndTransformed(t *testing.T) {
|
||||
pattern := BuildPattern(true, true, CaseSmart, true, true, []Range{}, Delimiter{}, []rune("jg"))
|
||||
pattern := BuildPattern(true, algo.FuzzyMatchV2, true, CaseSmart, true, true, []Range{}, Delimiter{}, []rune("jg"))
|
||||
tokens := Tokenize(util.RunesToChars([]rune("junegunn")), Delimiter{})
|
||||
trans := Transform(tokens, []Range{Range{1, 1}})
|
||||
|
||||
@@ -133,24 +145,29 @@ func TestOrigTextAndTransformed(t *testing.T) {
|
||||
transformed: trans},
|
||||
}
|
||||
pattern.extended = extended
|
||||
matches := pattern.matchChunk(&chunk, nil) // No cache
|
||||
if matches[0].item.text.ToString() != "junegunn" || string(*matches[0].item.origText) != "junegunn.choi" ||
|
||||
!reflect.DeepEqual(matches[0].item.transformed, trans) {
|
||||
matches := pattern.matchChunk(&chunk, nil, slab) // No cache
|
||||
if !(matches[0].item.text.ToString() == "junegunn" &&
|
||||
string(*matches[0].item.origText) == "junegunn.choi" &&
|
||||
reflect.DeepEqual(matches[0].item.transformed, trans)) {
|
||||
t.Error("Invalid match result", matches)
|
||||
}
|
||||
|
||||
match, offsets := pattern.MatchItem(chunk[0])
|
||||
if match.item.text.ToString() != "junegunn" || string(*match.item.origText) != "junegunn.choi" ||
|
||||
offsets[0][0] != 0 || offsets[0][1] != 5 ||
|
||||
!reflect.DeepEqual(match.item.transformed, trans) {
|
||||
t.Error("Invalid match result", match)
|
||||
match, offsets, pos := pattern.MatchItem(chunk[0], true, slab)
|
||||
if !(match.item.text.ToString() == "junegunn" &&
|
||||
string(*match.item.origText) == "junegunn.choi" &&
|
||||
offsets[0][0] == 0 && offsets[0][1] == 5 &&
|
||||
reflect.DeepEqual(match.item.transformed, trans)) {
|
||||
t.Error("Invalid match result", match, offsets, extended)
|
||||
}
|
||||
if !((*pos)[0] == 4 && (*pos)[1] == 0) {
|
||||
t.Error("Invalid pos array", *pos)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestCacheKey(t *testing.T) {
|
||||
test := func(extended bool, patStr string, expected string, cacheable bool) {
|
||||
pat := BuildPattern(true, extended, CaseSmart, true, true, []Range{}, Delimiter{}, []rune(patStr))
|
||||
pat := BuildPattern(true, algo.FuzzyMatchV2, extended, CaseSmart, true, true, []Range{}, Delimiter{}, []rune(patStr))
|
||||
if pat.CacheKey() != expected {
|
||||
t.Errorf("Expected: %s, actual: %s", expected, pat.CacheKey())
|
||||
}
|
||||
|
@@ -19,8 +19,7 @@ type colorOffset struct {
|
||||
}
|
||||
|
||||
type rank struct {
|
||||
// byMatchLen, byBonus, ...
|
||||
points [5]uint16
|
||||
points [4]uint16
|
||||
index int32
|
||||
}
|
||||
|
||||
@@ -29,66 +28,49 @@ type Result struct {
|
||||
rank rank
|
||||
}
|
||||
|
||||
func buildResult(item *Item, offsets []Offset, bonus int, trimLen int) *Result {
|
||||
func buildResult(item *Item, offsets []Offset, score int, trimLen int) *Result {
|
||||
if len(offsets) > 1 {
|
||||
sort.Sort(ByOrder(offsets))
|
||||
}
|
||||
|
||||
result := Result{item: item, rank: rank{index: item.index}}
|
||||
|
||||
matchlen := 0
|
||||
prevEnd := 0
|
||||
minBegin := math.MaxInt32
|
||||
numChars := item.text.Length()
|
||||
minBegin := math.MaxUint16
|
||||
maxEnd := 0
|
||||
validOffsetFound := false
|
||||
for _, offset := range offsets {
|
||||
begin := int(offset[0])
|
||||
end := int(offset[1])
|
||||
if prevEnd > begin {
|
||||
begin = prevEnd
|
||||
}
|
||||
if end > prevEnd {
|
||||
prevEnd = end
|
||||
}
|
||||
if end > begin {
|
||||
if begin < minBegin {
|
||||
minBegin = begin
|
||||
}
|
||||
matchlen += end - begin
|
||||
b, e := int(offset[0]), int(offset[1])
|
||||
if b < e {
|
||||
minBegin = util.Min(b, minBegin)
|
||||
maxEnd = util.Max(e, maxEnd)
|
||||
validOffsetFound = true
|
||||
}
|
||||
}
|
||||
|
||||
for idx, criterion := range sortCriteria {
|
||||
var val uint16
|
||||
val := uint16(math.MaxUint16)
|
||||
switch criterion {
|
||||
case byMatchLen:
|
||||
if matchlen == 0 {
|
||||
val = math.MaxUint16
|
||||
} else {
|
||||
val = util.AsUint16(matchlen)
|
||||
}
|
||||
case byBonus:
|
||||
case byScore:
|
||||
// Higher is better
|
||||
val = math.MaxUint16 - util.AsUint16(bonus)
|
||||
val = math.MaxUint16 - util.AsUint16(score)
|
||||
case byLength:
|
||||
// If offsets is empty, trimLen will be 0, but we don't care
|
||||
val = util.AsUint16(trimLen)
|
||||
case byBegin:
|
||||
// We can't just look at item.offsets[0][0] because it can be an inverse term
|
||||
whitePrefixLen := 0
|
||||
for idx := 0; idx < numChars; idx++ {
|
||||
r := item.text.Get(idx)
|
||||
whitePrefixLen = idx
|
||||
if idx == minBegin || r != ' ' && r != '\t' {
|
||||
break
|
||||
if validOffsetFound {
|
||||
whitePrefixLen := 0
|
||||
for idx := 0; idx < numChars; idx++ {
|
||||
r := item.text.Get(idx)
|
||||
whitePrefixLen = idx
|
||||
if idx == minBegin || r != ' ' && r != '\t' {
|
||||
break
|
||||
}
|
||||
}
|
||||
val = util.AsUint16(minBegin - whitePrefixLen)
|
||||
}
|
||||
val = util.AsUint16(minBegin - whitePrefixLen)
|
||||
case byEnd:
|
||||
if prevEnd > 0 {
|
||||
val = util.AsUint16(1 + numChars - prevEnd)
|
||||
} else {
|
||||
// Empty offsets due to inverse terms.
|
||||
val = 1
|
||||
if validOffsetFound {
|
||||
val = util.AsUint16(1 + numChars - maxEnd)
|
||||
}
|
||||
}
|
||||
result.rank.points[idx] = val
|
||||
@@ -106,7 +88,7 @@ func (result *Result) Index() int32 {
|
||||
}
|
||||
|
||||
func minRank() rank {
|
||||
return rank{index: 0, points: [5]uint16{0, math.MaxUint16, 0, 0, 0}}
|
||||
return rank{index: 0, points: [4]uint16{math.MaxUint16, 0, 0, 0}}
|
||||
}
|
||||
|
||||
func (result *Result) colorOffsets(matchOffsets []Offset, color int, bold bool, current bool) []colorOffset {
|
||||
@@ -245,7 +227,7 @@ func (a ByRelevanceTac) Less(i, j int) bool {
|
||||
}
|
||||
|
||||
func compareRanks(irank rank, jrank rank, tac bool) bool {
|
||||
for idx := 0; idx < 5; idx++ {
|
||||
for idx := 0; idx < 4; idx++ {
|
||||
left := irank.points[idx]
|
||||
right := jrank.points[idx]
|
||||
if left < right {
|
||||
|
@@ -26,7 +26,7 @@ func TestOffsetSort(t *testing.T) {
|
||||
func TestRankComparison(t *testing.T) {
|
||||
rank := func(vals ...uint16) rank {
|
||||
return rank{
|
||||
points: [5]uint16{vals[0], 0, vals[1], vals[2], vals[3]},
|
||||
points: [4]uint16{vals[0], vals[1], vals[2], vals[3]},
|
||||
index: int32(vals[4])}
|
||||
}
|
||||
if compareRanks(rank(3, 0, 0, 0, 5), rank(2, 0, 0, 0, 7), false) ||
|
||||
@@ -47,11 +47,15 @@ func TestRankComparison(t *testing.T) {
|
||||
// Match length, string length, index
|
||||
func TestResultRank(t *testing.T) {
|
||||
// FIXME global
|
||||
sortCriteria = []criterion{byMatchLen, byBonus, byLength}
|
||||
sortCriteria = []criterion{byScore, byLength}
|
||||
|
||||
strs := [][]rune{[]rune("foo"), []rune("foobar"), []rune("bar"), []rune("baz")}
|
||||
item1 := buildResult(&Item{text: util.RunesToChars(strs[0]), index: 1}, []Offset{}, 2, 3)
|
||||
if item1.rank.points[0] != math.MaxUint16 || item1.rank.points[1] != math.MaxUint16-2 || item1.rank.points[2] != 3 || item1.item.index != 1 {
|
||||
if item1.rank.points[0] != math.MaxUint16-2 || // Bonus
|
||||
item1.rank.points[1] != 3 || // Length
|
||||
item1.rank.points[2] != 0 || // Unused
|
||||
item1.rank.points[3] != 0 || // Unused
|
||||
item1.item.index != 1 {
|
||||
t.Error(item1.rank)
|
||||
}
|
||||
// Only differ in index
|
||||
@@ -71,16 +75,16 @@ func TestResultRank(t *testing.T) {
|
||||
}
|
||||
|
||||
// Sort by relevance
|
||||
item3 := buildResult(&Item{index: 2}, []Offset{Offset{1, 3}, Offset{5, 7}}, 0, 0)
|
||||
item4 := buildResult(&Item{index: 2}, []Offset{Offset{1, 2}, Offset{6, 7}}, 0, 0)
|
||||
item5 := buildResult(&Item{index: 2}, []Offset{Offset{1, 3}, Offset{5, 7}}, 0, 0)
|
||||
item6 := buildResult(&Item{index: 2}, []Offset{Offset{1, 2}, Offset{6, 7}}, 0, 0)
|
||||
item3 := buildResult(&Item{index: 2}, []Offset{Offset{1, 3}, Offset{5, 7}}, 3, 0)
|
||||
item4 := buildResult(&Item{index: 2}, []Offset{Offset{1, 2}, Offset{6, 7}}, 4, 0)
|
||||
item5 := buildResult(&Item{index: 2}, []Offset{Offset{1, 3}, Offset{5, 7}}, 5, 0)
|
||||
item6 := buildResult(&Item{index: 2}, []Offset{Offset{1, 2}, Offset{6, 7}}, 6, 0)
|
||||
items = []*Result{item1, item2, item3, item4, item5, item6}
|
||||
sort.Sort(ByRelevance(items))
|
||||
if items[0] != item6 || items[1] != item4 ||
|
||||
items[2] != item5 || items[3] != item3 ||
|
||||
items[4] != item2 || items[5] != item1 {
|
||||
t.Error(items)
|
||||
if !(items[0] == item6 && items[1] == item5 &&
|
||||
items[2] == item4 && items[3] == item3 &&
|
||||
items[4] == item2 && items[5] == item1) {
|
||||
t.Error(items, item1, item2, item3, item4, item5, item6)
|
||||
}
|
||||
}
|
||||
|
||||
|
@@ -18,6 +18,8 @@ import (
|
||||
"github.com/junegunn/go-runewidth"
|
||||
)
|
||||
|
||||
// import "github.com/pkg/profile"
|
||||
|
||||
type jumpMode int
|
||||
|
||||
const (
|
||||
@@ -61,6 +63,7 @@ type Terminal struct {
|
||||
reading bool
|
||||
jumping jumpMode
|
||||
jumpLabels string
|
||||
printer func(string)
|
||||
merger *Merger
|
||||
selected map[int32]selectedItem
|
||||
reqBox *util.EventBox
|
||||
@@ -73,6 +76,7 @@ type Terminal struct {
|
||||
initFunc func()
|
||||
suppress bool
|
||||
startChan chan bool
|
||||
slab *util.Slab
|
||||
}
|
||||
|
||||
type selectedItem struct {
|
||||
@@ -266,6 +270,7 @@ func NewTerminal(opts *Options, eventBox *util.EventBox) *Terminal {
|
||||
reading: true,
|
||||
jumping: jumpDisabled,
|
||||
jumpLabels: opts.JumpLabels,
|
||||
printer: opts.Printer,
|
||||
merger: EmptyMerger,
|
||||
selected: make(map[int32]selectedItem),
|
||||
reqBox: util.NewEventBox(),
|
||||
@@ -276,6 +281,7 @@ func NewTerminal(opts *Options, eventBox *util.EventBox) *Terminal {
|
||||
eventBox: eventBox,
|
||||
mutex: sync.Mutex{},
|
||||
suppress: true,
|
||||
slab: util.MakeSlab(slab16Size, slab32Size),
|
||||
startChan: make(chan bool, 1),
|
||||
initFunc: func() {
|
||||
C.Init(opts.Theme, opts.Black, opts.Mouse)
|
||||
@@ -343,21 +349,21 @@ func (t *Terminal) UpdateList(merger *Merger) {
|
||||
|
||||
func (t *Terminal) output() bool {
|
||||
if t.printQuery {
|
||||
fmt.Println(string(t.input))
|
||||
t.printer(string(t.input))
|
||||
}
|
||||
if len(t.expect) > 0 {
|
||||
fmt.Println(t.pressed)
|
||||
t.printer(t.pressed)
|
||||
}
|
||||
found := len(t.selected) > 0
|
||||
if !found {
|
||||
cnt := t.merger.Length()
|
||||
if cnt > 0 && cnt > t.cy {
|
||||
fmt.Println(t.current())
|
||||
t.printer(t.current())
|
||||
found = true
|
||||
}
|
||||
} else {
|
||||
for _, sel := range t.sortSelected() {
|
||||
fmt.Println(sel.text)
|
||||
t.printer(sel.text)
|
||||
}
|
||||
}
|
||||
return found
|
||||
@@ -674,14 +680,25 @@ func (t *Terminal) printHighlighted(result *Result, bold bool, col1 int, col2 in
|
||||
text := make([]rune, item.text.Length())
|
||||
copy(text, item.text.ToRunes())
|
||||
matchOffsets := []Offset{}
|
||||
var pos *[]int
|
||||
if t.merger.pattern != nil {
|
||||
_, matchOffsets = t.merger.pattern.MatchItem(item)
|
||||
_, matchOffsets, pos = t.merger.pattern.MatchItem(item, true, t.slab)
|
||||
}
|
||||
charOffsets := matchOffsets
|
||||
if pos != nil {
|
||||
charOffsets = make([]Offset, len(*pos))
|
||||
for idx, p := range *pos {
|
||||
offset := Offset{int32(p), int32(p + 1)}
|
||||
charOffsets[idx] = offset
|
||||
}
|
||||
sort.Sort(ByOrder(charOffsets))
|
||||
}
|
||||
var maxe int
|
||||
for _, offset := range matchOffsets {
|
||||
for _, offset := range charOffsets {
|
||||
maxe = util.Max(maxe, int(offset[1]))
|
||||
}
|
||||
offsets := result.colorOffsets(matchOffsets, col2, bold, current)
|
||||
|
||||
offsets := result.colorOffsets(charOffsets, col2, bold, current)
|
||||
maxWidth := t.window.Width - 3
|
||||
maxe = util.Constrain(maxe+util.Min(maxWidth/2-2, t.hscrollOff), 0, len(text))
|
||||
if overflow(text, maxWidth) {
|
||||
@@ -876,6 +893,7 @@ func (t *Terminal) current() string {
|
||||
|
||||
// Loop is called to start Terminal I/O
|
||||
func (t *Terminal) Loop() {
|
||||
// prof := profile.Start(profile.ProfilePath("/tmp/"))
|
||||
<-t.startChan
|
||||
{ // Late initialization
|
||||
intChan := make(chan os.Signal, 1)
|
||||
@@ -953,6 +971,7 @@ func (t *Terminal) Loop() {
|
||||
if code <= exitNoMatch && t.history != nil {
|
||||
t.history.append(string(t.input))
|
||||
}
|
||||
// prof.Stop()
|
||||
os.Exit(code)
|
||||
}
|
||||
|
||||
@@ -1011,7 +1030,7 @@ func (t *Terminal) Loop() {
|
||||
t.printPreview()
|
||||
case reqPrintQuery:
|
||||
C.Close()
|
||||
fmt.Println(string(t.input))
|
||||
t.printer(string(t.input))
|
||||
exit(exitOk)
|
||||
case reqQuit:
|
||||
C.Close()
|
||||
@@ -1062,8 +1081,9 @@ func (t *Terminal) Loop() {
|
||||
for key, ret := range t.expect {
|
||||
if keyMatch(key, event) {
|
||||
t.pressed = ret
|
||||
req(reqClose)
|
||||
break
|
||||
t.reqBox.Set(reqClose, nil)
|
||||
t.mutex.Unlock()
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1320,6 +1340,11 @@ func (t *Terminal) Loop() {
|
||||
if !doAction(action, mapkey) {
|
||||
continue
|
||||
}
|
||||
// Truncate the query if it's too long
|
||||
if len(t.input) > maxPatternLength {
|
||||
t.input = t.input[:maxPatternLength]
|
||||
t.cx = util.Constrain(t.cx, 0, maxPatternLength)
|
||||
}
|
||||
changed = string(previousInput) != string(t.input)
|
||||
} else {
|
||||
if mapkey == C.Rune {
|
||||
|
 src/util/slab.go (new file) | 12
@@ -0,0 +1,12 @@
+package util
+
+type Slab struct {
+    I16 []int16
+    I32 []int32
+}
+
+func MakeSlab(size16 int, size32 int) *Slab {
+    return &Slab{
+        I16: make([]int16, size16),
+        I32: make([]int32, size32)}
+}
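`Slab` itself is nothing more than two preallocated scratch arrays; the point is that matching code can slice what it needs from them on each call instead of allocating fresh buffers. A small sketch of how such a slab might be consumed follows; the `scoreRow` helper is hypothetical and not part of this diff.

```go
// Sketch: borrow a row of scratch cells from a preallocated Slab instead of
// allocating on every call; fall back to a fresh slice only when it is too small.
package main

import "fmt"

type Slab struct {
	I16 []int16
	I32 []int32
}

func MakeSlab(size16 int, size32 int) *Slab {
	return &Slab{I16: make([]int16, size16), I32: make([]int32, size32)}
}

// scoreRow is hypothetical; it stands in for a scoring routine that needs one
// int16 cell per pattern character.
func scoreRow(slab *Slab, pattern string) []int16 {
	if slab != nil && len(slab.I16) >= len(pattern) {
		return slab.I16[:len(pattern)] // no allocation on the hot path
	}
	return make([]int16, len(pattern))
}

func main() {
	slab := MakeSlab(100*1024, 2048)
	for _, p := range []string{"core", "printer", "slab"} {
		fmt.Println(len(scoreRow(slab, p)))
	}
}
```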
@@ -18,6 +18,22 @@ func Max(first int, second int) int {
     return second
 }

+// Max16 returns the largest integer
+func Max16(first int16, second int16) int16 {
+    if first >= second {
+        return first
+    }
+    return second
+}
+
+// Max32 returns the largest 32-bit integer
+func Max32(first int32, second int32) int32 {
+    if first > second {
+        return first
+    }
+    return second
+}
+
 // Min returns the smallest integer
 func Min(first int, second int) int {
     if first <= second {
@@ -34,14 +50,6 @@ func Min32(first int32, second int32) int32 {
     return second
 }

-// Max32 returns the largest 32-bit integer
-func Max32(first int32, second int32) int32 {
-    if first > second {
-        return first
-    }
-    return second
-}
-
 // Constrain32 limits the given 32-bit integer with the upper and lower bounds
 func Constrain32(val int32, min int32, max int32) int32 {
     if val < min {
 test/test_go.rb | 231
@@ -452,6 +452,15 @@ class TestGoFZF < TestBase
|
||||
assert_equal ['55', 'alt-z', '55'], readonce.split($/)
|
||||
end
|
||||
|
||||
def test_expect_printable_character_print_query
|
||||
tmux.send_keys "seq 1 100 | #{fzf '--expect=z --print-query'}", :Enter
|
||||
tmux.until { |lines| lines[-2].include? '100/100' }
|
||||
tmux.send_keys '55'
|
||||
tmux.until { |lines| lines[-2].include? '1/100' }
|
||||
tmux.send_keys 'z'
|
||||
assert_equal ['55', 'z', '55'], readonce.split($/)
|
||||
end
|
||||
|
||||
def test_expect_print_query_select_1
|
||||
tmux.send_keys "seq 1 100 | #{fzf '-q55 -1 --expect=alt-z --print-query'}", :Enter
|
||||
assert_equal ['55', '', '55'], readonce.split($/)
|
||||
@@ -517,162 +526,91 @@ class TestGoFZF < TestBase
|
||||
assert_equal input, `#{FZF} -f"!z" -x --tiebreak end < #{tempname}`.split($/)
|
||||
end
|
||||
|
||||
# Since 0.11.2
|
||||
def test_tiebreak_list
|
||||
input = %w[
|
||||
f-o-o-b-a-r
|
||||
foobar----
|
||||
--foobar
|
||||
----foobar
|
||||
foobar--
|
||||
--foobar--
|
||||
foobar
|
||||
def test_tiebreak_index_begin
|
||||
writelines tempname, [
|
||||
'xoxxxxxoxx',
|
||||
'xoxxxxxox',
|
||||
'xxoxxxoxx',
|
||||
'xxxoxoxxx',
|
||||
'xxxxoxox',
|
||||
' xxoxoxxx',
|
||||
]
|
||||
writelines tempname, input
|
||||
|
||||
assert_equal %w[
|
||||
foobar----
|
||||
--foobar
|
||||
----foobar
|
||||
foobar--
|
||||
--foobar--
|
||||
foobar
|
||||
f-o-o-b-a-r
|
||||
], `#{FZF} -ffb --tiebreak=index < #{tempname}`.split($/)
|
||||
assert_equal [
|
||||
'xxxxoxox',
|
||||
' xxoxoxxx',
|
||||
'xxxoxoxxx',
|
||||
'xxoxxxoxx',
|
||||
'xoxxxxxox',
|
||||
'xoxxxxxoxx',
|
||||
], `#{FZF} -foo < #{tempname}`.split($/)
|
||||
|
||||
by_length = %w[
|
||||
foobar
|
||||
--foobar
|
||||
foobar--
|
||||
foobar----
|
||||
----foobar
|
||||
--foobar--
|
||||
f-o-o-b-a-r
|
||||
]
|
||||
assert_equal by_length, `#{FZF} -ffb < #{tempname}`.split($/)
|
||||
assert_equal by_length, `#{FZF} -ffb --tiebreak=length < #{tempname}`.split($/)
|
||||
assert_equal [
|
||||
'xxxoxoxxx',
|
||||
'xxxxoxox',
|
||||
' xxoxoxxx',
|
||||
'xxoxxxoxx',
|
||||
'xoxxxxxoxx',
|
||||
'xoxxxxxox',
|
||||
], `#{FZF} -foo --tiebreak=index < #{tempname}`.split($/)
|
||||
|
||||
assert_equal %w[
|
||||
foobar
|
||||
foobar--
|
||||
--foobar
|
||||
foobar----
|
||||
--foobar--
|
||||
----foobar
|
||||
f-o-o-b-a-r
|
||||
], `#{FZF} -ffb --tiebreak=length,begin < #{tempname}`.split($/)
|
||||
# Note that --tiebreak=begin is now based on the first occurrence of the
|
||||
# first character on the pattern
|
||||
assert_equal [
|
||||
' xxoxoxxx',
|
||||
'xxxoxoxxx',
|
||||
'xxxxoxox',
|
||||
'xxoxxxoxx',
|
||||
'xoxxxxxoxx',
|
||||
'xoxxxxxox',
|
||||
], `#{FZF} -foo --tiebreak=begin < #{tempname}`.split($/)
|
||||
|
||||
assert_equal %w[
|
||||
foobar
|
||||
--foobar
|
||||
foobar--
|
||||
----foobar
|
||||
--foobar--
|
||||
foobar----
|
||||
f-o-o-b-a-r
|
||||
], `#{FZF} -ffb --tiebreak=length,end < #{tempname}`.split($/)
|
||||
|
||||
assert_equal %w[
|
||||
foobar----
|
||||
foobar--
|
||||
foobar
|
||||
--foobar
|
||||
--foobar--
|
||||
----foobar
|
||||
f-o-o-b-a-r
|
||||
], `#{FZF} -ffb --tiebreak=begin < #{tempname}`.split($/)
|
||||
|
||||
by_begin_end = %w[
|
||||
foobar
|
||||
foobar--
|
||||
foobar----
|
||||
--foobar
|
||||
--foobar--
|
||||
----foobar
|
||||
f-o-o-b-a-r
|
||||
]
|
||||
assert_equal by_begin_end, `#{FZF} -ffb --tiebreak=begin,length < #{tempname}`.split($/)
|
||||
assert_equal by_begin_end, `#{FZF} -ffb --tiebreak=begin,end < #{tempname}`.split($/)
|
||||
|
||||
assert_equal %w[
|
||||
--foobar
|
||||
----foobar
|
||||
foobar
|
||||
foobar--
|
||||
--foobar--
|
||||
foobar----
|
||||
f-o-o-b-a-r
|
||||
], `#{FZF} -ffb --tiebreak=end < #{tempname}`.split($/)
|
||||
|
||||
by_begin_end = %w[
|
||||
foobar
|
||||
--foobar
|
||||
----foobar
|
||||
foobar--
|
||||
--foobar--
|
||||
foobar----
|
||||
f-o-o-b-a-r
|
||||
]
|
||||
assert_equal by_begin_end, `#{FZF} -ffb --tiebreak=end,begin < #{tempname}`.split($/)
|
||||
assert_equal by_begin_end, `#{FZF} -ffb --tiebreak=end,length < #{tempname}`.split($/)
|
||||
assert_equal [
|
||||
' xxoxoxxx',
|
||||
'xxxoxoxxx',
|
||||
'xxxxoxox',
|
||||
'xxoxxxoxx',
|
||||
'xoxxxxxox',
|
||||
'xoxxxxxoxx',
|
||||
], `#{FZF} -foo --tiebreak=begin,length < #{tempname}`.split($/)
|
||||
end
|
||||
|
||||
def test_tiebreak_white_prefix
|
||||
def test_tiebreak_end
|
||||
writelines tempname, [
|
||||
'f o o b a r',
|
||||
' foo bar',
|
||||
' foobar',
|
||||
'----foo bar',
|
||||
'----foobar',
|
||||
' foo bar',
|
||||
' foobar--',
|
||||
' foobar',
|
||||
'--foo bar',
|
||||
'--foobar',
|
||||
'foobar',
|
||||
'xoxxxxxxxx',
|
||||
'xxoxxxxxxx',
|
||||
'xxxoxxxxxx',
|
||||
'xxxxoxxxx',
|
||||
'xxxxxoxxx',
|
||||
' xxxxoxxx',
|
||||
]
|
||||
|
||||
assert_equal [
|
||||
' foobar',
|
||||
' foobar',
|
||||
'foobar',
|
||||
' foobar--',
|
||||
'--foobar',
|
||||
'----foobar',
|
||||
' foo bar',
|
||||
' foo bar',
|
||||
'--foo bar',
|
||||
'----foo bar',
|
||||
'f o o b a r',
|
||||
], `#{FZF} -ffb < #{tempname}`.split($/)
|
||||
' xxxxoxxx',
|
||||
'xxxxoxxxx',
|
||||
'xxxxxoxxx',
|
||||
'xoxxxxxxxx',
|
||||
'xxoxxxxxxx',
|
||||
'xxxoxxxxxx',
|
||||
], `#{FZF} -fo < #{tempname}`.split($/)
|
||||
|
||||
assert_equal [
|
||||
' foobar',
|
||||
' foobar--',
|
||||
' foobar',
|
||||
'foobar',
|
||||
'--foobar',
|
||||
'----foobar',
|
||||
' foo bar',
|
||||
' foo bar',
|
||||
'--foo bar',
|
||||
'----foo bar',
|
||||
'f o o b a r',
|
||||
], `#{FZF} -ffb --tiebreak=begin < #{tempname}`.split($/)
|
||||
'xxxxxoxxx',
|
||||
' xxxxoxxx',
|
||||
'xxxxoxxxx',
|
||||
'xxxoxxxxxx',
|
||||
'xxoxxxxxxx',
|
||||
'xoxxxxxxxx',
|
||||
], `#{FZF} -fo --tiebreak=end < #{tempname}`.split($/)
|
||||
|
||||
assert_equal [
|
||||
' foobar',
|
||||
' foobar',
|
||||
'foobar',
|
||||
' foobar--',
|
||||
'--foobar',
|
||||
'----foobar',
|
||||
' foo bar',
|
||||
' foo bar',
|
||||
'--foo bar',
|
||||
'----foo bar',
|
||||
'f o o b a r',
|
||||
], `#{FZF} -ffb --tiebreak=begin,length < #{tempname}`.split($/)
|
||||
' xxxxoxxx',
|
||||
'xxxxxoxxx',
|
||||
'xxxxoxxxx',
|
||||
'xxxoxxxxxx',
|
||||
'xxoxxxxxxx',
|
||||
'xoxxxxxxxx',
|
||||
], `#{FZF} -fo --tiebreak=end,length,begin < #{tempname}`.split($/)
|
||||
end
|
||||
|
||||
def test_tiebreak_length_with_nth
|
||||
@@ -748,17 +686,6 @@ class TestGoFZF < TestBase
|
||||
assert_equal output, `#{FZF} -fi -n2,1..2 < #{tempname}`.split($/)
|
||||
end
|
||||
|
||||
def test_tiebreak_end_backward_scan
|
||||
input = %w[
|
||||
foobar-fb
|
||||
fubar
|
||||
]
|
||||
writelines tempname, input
|
||||
|
||||
assert_equal input.reverse, `#{FZF} -f fb < #{tempname}`.split($/)
|
||||
assert_equal input, `#{FZF} -f fb --tiebreak=end < #{tempname}`.split($/)
|
||||
end
|
||||
|
||||
def test_invalid_cache
|
||||
tmux.send_keys "(echo d; echo D; echo x) | #{fzf '-q d'}", :Enter
|
||||
tmux.until { |lines| lines[-2].include? '2/3' }