Mirror of https://github.com/junegunn/fzf.git (synced 2025-08-01 12:42:01 -07:00)

Compare commits (190 commits)
.github/ISSUE_TEMPLATE.md (vendored, new file, 29 lines)

@@ -0,0 +1,29 @@
+<!-- Check all that apply [x] -->
+- Category
+    - [ ] fzf binary
+    - [ ] fzf-tmux script
+    - [ ] Key bindings
+    - [ ] Completion
+    - [ ] Vim
+    - [ ] Neovim
+    - [ ] Etc.
+- OS
+    - [ ] Linux
+    - [ ] Mac OS X
+    - [ ] Windows
+    - [ ] Etc.
+- Shell
+    - [ ] bash
+    - [ ] zsh
+    - [ ] fish
+
+<!--
+### Before submitting
+
+- Make sure that you have the latest version of fzf
+- If you use tmux, make sure $TERM is set to screen or screen-256color
+- For more Vim stuff, check out https://github.com/junegunn/fzf.vim
+
+Describe your problem or suggestion from here ...
+-->
+
CHANGELOG.md (104 changed lines)

@@ -1,6 +1,110 @@
 CHANGELOG
 =========
 
+0.15.4
+------
+- Added support for range expression in preview and execute action
+  - e.g. `ls -l | fzf --preview="echo user={3} when={-4..-2}; cat {-1}" --header-lines=1`
+  - `{q}` will be replaced to the single-quoted string of the current query
+- Fixed to properly handle unicode whitespace characters
+- Display scroll indicator in preview window
+- Inverse search term will use exact matcher by default
+  - This is a breaking change, but I believe it makes much more sense. It is
+    almost impossible to predict which entries will be filtered out due to
+    a fuzzy inverse term. You can still perform inverse-fuzzy-match by
+    prepending `!'` to the term.
+
+0.15.3
+------
+- Added support for more ANSI attributes: dim, underline, blink, and reverse
+- Fixed race condition in `toggle-preview`
+
+0.15.2
+------
+- Preview window is now scrollable
+  - With mouse scroll or with bindable actions
+    - `preview-up`
+    - `preview-down`
+    - `preview-page-up`
+    - `preview-page-down`
+- Updated ANSI processor to support high intensity colors and ignore
+  some VT100-related escape sequences
+
+0.15.1
+------
+- Fixed panic when the pattern occurs after 2^15-th column
+- Fixed rendering delay when displaying extremely long lines
+
+0.15.0
+------
+- Improved fuzzy search algorithm
+  - Added `--algo=[v1|v2]` option so one can still choose the old algorithm
+    which values the search performance over the quality of the result
+- Advanced scoring criteria
+- `--read0` to read input delimited by ASCII NUL character
+- `--print0` to print output delimited by ASCII NUL character
+
+0.13.5
+------
+- Memory and performance optimization
+  - Up to 2x performance with half the amount of memory
+
+0.13.4
+------
+- Performance optimization
+  - Memory footprint for ascii string is reduced by 60%
+  - 15 to 20% improvement of query performance
+  - Up to 45% better performance of `--nth` with non-regex delimiters
+- Fixed invalid handling of `hidden` property of `--preview-window`
+
+0.13.3
+------
+- Fixed duplicate rendering of the last line in preview window
+
+0.13.2
+------
+- Fixed race condition where preview window is not properly cleared
+
+0.13.1
+------
+- Fixed UI issue with large `--preview` output with many ANSI codes
+
+0.13.0
+------
+- Added preview feature
+  - `--preview CMD`
+  - `--preview-window POS[:SIZE][:hidden]`
+- `{}` in execute action is now replaced to the single-quoted (instead of
+  double-quoted) string of the current line
+- Fixed to ignore control characters for bracketed paste mode
+
+0.12.2
+------
+
+- 256-color capability detection does not require `256` in `$TERM`
+- Added `print-query` action
+- More named keys for binding; <kbd>F1</kbd> ~ <kbd>F10</kbd>,
+  <kbd>ALT-/</kbd>, <kbd>ALT-space</kbd>, and <kbd>ALT-enter</kbd>
+- Added `jump` and `jump-accept` actions that implement [EasyMotion][em]-like
+  movement
+  ![][jump]
+
+[em]: https://github.com/easymotion/vim-easymotion
+[jump]: https://cloud.githubusercontent.com/assets/700826/15367574/b3999dc4-1d64-11e6-85da-28ceeb1a9bc2.png
+
+0.12.1
+------
+
+- Ranking algorithm introduced in 0.12.0 is now universally applied
+- Fixed invalid cache reference in exact mode
+- Fixes and improvements in Vim plugin and shell extensions
+
+0.12.0
+------
+
+- Enhanced ranking algorithm
+- Minor bug fixes
+
 0.11.4
 ------
 
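The 0.15.4 entries above introduce range expressions and the `{q}` placeholder for `--preview` and `execute`. A minimal sketch of how they combine in a single invocation, using only options named in this changelog (the input command and field positions are purely illustrative):

```sh
# Keep the first line of `ls -l` as a sticky header, preview the last field
# ({-1}) as a file, and echo the 3rd field plus the current query ({q}).
ls -l | fzf --header-lines=1 \
            --preview='echo user={3} query={q}; cat {-1}'
```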
README.md (122 changed lines)

@@ -10,18 +10,15 @@ Pros
 
 - No dependencies
 - Blazingly fast
-    - e.g. `locate / | fzf`
-- Flexible layout
-    - Runs in fullscreen or in horizontal/vertical split using tmux
 - The most comprehensive feature set
-    - Try `fzf --help` and be surprised
+- Flexible layout using tmux panes
 - Batteries included
     - Vim/Neovim plugin, key bindings and fuzzy auto-completion
 
 Installation
 ------------
 
-fzf project consists of the followings:
+fzf project consists of the following components:
 
 - `fzf` executable
 - `fzf-tmux` script for launching fzf in a tmux pane
@@ -30,12 +27,12 @@ fzf project consists of the followings:
 - Fuzzy auto-completion (bash, zsh)
 - Vim/Neovim plugin
 
-You can [download fzf executable][bin] alone, but it's recommended that you
-install the extra stuff using the attached install script.
+You can [download fzf executable][bin] alone if you don't need the extra
+stuff.
 
 [bin]: https://github.com/junegunn/fzf-bin/releases
 
-#### Using git (recommended)
+### Using git
 
 Clone this repository and run
 [install](https://github.com/junegunn/fzf/blob/master/install) script.
@@ -45,7 +42,7 @@ git clone --depth 1 https://github.com/junegunn/fzf.git ~/.fzf
 ~/.fzf/install
 ```
 
-#### Using Homebrew
+### Using Homebrew
 
 On OS X, you can use [Homebrew](http://brew.sh/) to install fzf.
 
@@ -56,26 +53,30 @@ brew install fzf
 /usr/local/opt/fzf/install
 ```
 
-#### Install as Vim plugin
+### Vim plugin
 
-Once you have cloned the repository, add the following line to your .vimrc.
+You can manually add the directory to `&runtimepath` as follows,
 
 ```vim
+" If installed using git
 set rtp+=~/.fzf
+
+" If installed using Homebrew
+set rtp+=/usr/local/opt/fzf
 ```
 
-Or you can have [vim-plug](https://github.com/junegunn/vim-plug) manage fzf
-(recommended):
+But it's recommended that you use a plugin manager like
+[vim-plug](https://github.com/junegunn/vim-plug).
 
 ```vim
 Plug 'junegunn/fzf', { 'dir': '~/.fzf', 'do': './install --all' }
 ```
 
-#### Upgrading fzf
+### Upgrading fzf
 
 fzf is being actively developed and you might want to upgrade it once in a
 while. Please follow the instruction below depending on the installation
-method.
+method used.
 
 - git: `cd ~/.fzf && git pull && ./install`
 - brew: `brew update; brew reinstall fzf`
@@ -112,16 +113,16 @@ vim $(fzf)
 
 Unless otherwise specified, fzf starts in "extended-search mode" where you can
 type in multiple search terms delimited by spaces. e.g. `^music .mp3$ sbtrkt
-!rmx`
+!fire`
 
 | Token    | Match type                 | Description                          |
-| -------- | -------------------- | -------------------------------- |
+| -------- | -------------------------- | ------------------------------------ |
 | `sbtrkt` | fuzzy-match                | Items that match `sbtrkt`            |
 | `^music` | prefix-exact-match         | Items that start with `music`        |
 | `.mp3$`  | suffix-exact-match         | Items that end with `.mp3`           |
 | `'wild`  | exact-match (quoted)       | Items that include `wild`            |
-| `!rmx`   | inverse-fuzzy-match        | Items that do not match `rmx`        |
-| `!'fire` | inverse-exact-match        | Items that do not include `fire`     |
+| `!fire`  | inverse-exact-match        | Items that do not include `fire`     |
+| `!.mp3$` | inverse-suffix-exact-match | Items that do not end with `.mp3`    |
 
 If you don't prefer fuzzy matching and do not wish to "quote" every word,
 start fzf with `-e` or `--exact` option. Note that when `--exact` is set,
@@ -151,27 +152,6 @@ Many useful examples can be found on [the wiki
 page](https://github.com/junegunn/fzf/wiki/examples). Feel free to add your
 own as well.
 
-Key bindings for command line
------------------------------
-
-The install script will setup the following key bindings for bash, zsh, and
-fish.
-
-- `CTRL-T` - Paste the selected files and directories onto the command line
-    - Set `FZF_CTRL_T_COMMAND` to override the default command
-- `CTRL-R` - Paste the selected command from history onto the command line
-    - Sort is disabled by default to respect chronological ordering
-    - Press `CTRL-R` again to toggle sort
-- `ALT-C` - cd into the selected directory
-
-If you're on a tmux session, fzf will start in a split pane. You may disable
-this tmux integration by setting `FZF_TMUX` to 0, or change the height of the
-pane with `FZF_TMUX_HEIGHT` (e.g. `20`, `50%`).
-
-If you use vi mode on bash, you need to add `set -o vi` *before* `source
-~/.fzf.bash` in your .bashrc, so that it correctly sets up key bindings for vi
-mode.
-
 `fzf-tmux` script
 -----------------
 
@@ -191,6 +171,31 @@ cat /usr/share/dict/words | fzf-tmux -l 20% --multi --reverse
 It will still work even when you're not on tmux, silently ignoring `-[udlr]`
 options, so you can invariably use `fzf-tmux` in your scripts.
 
+Key bindings for command line
+-----------------------------
+
+The install script will setup the following key bindings for bash, zsh, and
+fish.
+
+- `CTRL-T` - Paste the selected files and directories onto the command line
+    - Set `FZF_CTRL_T_COMMAND` to override the default command
+    - Set `FZF_CTRL_T_OPTS` to pass additional options
+- `CTRL-R` - Paste the selected command from history onto the command line
+    - Sort is disabled by default to respect chronological ordering
+    - Press `CTRL-R` again to toggle sort
+    - Set `FZF_CTRL_R_OPTS` to pass additional options
+- `ALT-C` - cd into the selected directory
+    - Set `FZF_ALT_C_COMMAND` to override the default command
+    - Set `FZF_ALT_C_OPTS` to pass additional options
+
+If you're on a tmux session, fzf will start in a split pane. You may disable
+this tmux integration by setting `FZF_TMUX` to 0, or change the height of the
+pane with `FZF_TMUX_HEIGHT` (e.g. `20`, `50%`).
+
+If you use vi mode on bash, you need to add `set -o vi` *before* `source
+~/.fzf.bash` in your .bashrc, so that it correctly sets up key bindings for vi
+mode.
+
 Fuzzy completion for bash and zsh
 ---------------------------------
 
@@ -300,7 +305,7 @@ If you have set up fzf for Vim, `:FZF` command will be added.
 :FZF ~
 
 " With options
-:FZF --no-sort -m /tmp
+:FZF --no-sort --reverse --inline-info /tmp
 
 " Bang version starts in fullscreen instead of using tmux pane or Neovim split
 :FZF!
@@ -316,10 +321,10 @@ customization.
 
 [fzf-config]: https://github.com/junegunn/fzf/wiki/Configuring-FZF-command-(vim)
 
-#### `fzf#run([options])`
+#### `fzf#run`
 
-For more advanced uses, you can use `fzf#run()` function with the following
-options.
+For more advanced uses, you can use `fzf#run([options])` function with the
+following options.
 
 | Option name                | Type          | Description                                                      |
 | -------------------------- | ------------- | ---------------------------------------------------------------- |
@@ -338,12 +343,23 @@ options.
 Examples can be found on [the wiki
 page](https://github.com/junegunn/fzf/wiki/Examples-(vim)).
 
+#### `fzf#wrap`
+
+`fzf#wrap([name string,] [opts dict,] [fullscreen boolean])` is a helper
+function that decorates the options dictionary so that it understands
+`g:fzf_layout`, `g:fzf_action`, and `g:fzf_history_dir` like `:FZF`.
+
+```vim
+command! -bang MyStuff
+  \ call fzf#run(fzf#wrap('my-stuff', {'dir': '~/my-stuff'}, <bang>0))
+```
+
 Tips
 ----
 
 #### Rendering issues
 
-If you have any rendering issues, check the followings:
+If you have any rendering issues, check the following:
 
 1. Make sure `$TERM` is correctly set. fzf will use 256-color only if it
    contains `256` (e.g. `xterm-256color`)
@@ -375,6 +391,12 @@ fzf
 export FZF_CTRL_T_COMMAND="$FZF_DEFAULT_COMMAND"
 ```
 
+If you don't want to exclude hidden files, use the following command:
+
+```sh
+export FZF_DEFAULT_COMMAND='ag --hidden --ignore .git -g ""'
+```
+
 #### `git ls-tree` for fast traversal
 
 If you're running fzf in a large git repository, `git ls-tree` can boost up the
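The README changes above add several environment variables for the shell key bindings (`FZF_CTRL_T_COMMAND`, `FZF_CTRL_T_OPTS`, `FZF_CTRL_R_OPTS`, `FZF_ALT_C_COMMAND`, `FZF_ALT_C_OPTS`, plus `FZF_TMUX` and `FZF_TMUX_HEIGHT`). A sketch of how they might be set in `.bashrc`; the specific values are only examples, not recommendations from the README:

```sh
# ~/.bashrc -- illustrative values only
export FZF_DEFAULT_COMMAND='ag -g ""'              # default input source for fzf
export FZF_CTRL_T_COMMAND="$FZF_DEFAULT_COMMAND"   # CTRL-T uses the same source
export FZF_CTRL_T_OPTS='--preview "head {}"'       # extra options for CTRL-T
export FZF_CTRL_R_OPTS='--exact'                   # extra options for CTRL-R
export FZF_TMUX=1                                  # run in a tmux split pane
export FZF_TMUX_HEIGHT='40%'                       # height of that pane
[ -f ~/.fzf.bash ] && source ~/.fzf.bash
```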
bin/fzf-tmux (101 changed lines)

@@ -2,24 +2,51 @@
 # fzf-tmux: starts fzf in a tmux pane
 # usage: fzf-tmux [-u|-d [HEIGHT[%]]] [-l|-r [WIDTH[%]]] [--] [FZF OPTIONS]
 
+fail() {
+  >&2 echo "$1"
+  exit 2
+}
+
+fzf="$(command -v fzf 2> /dev/null)" || fzf="$(dirname "$0")/fzf"
+[[ -x "$fzf" ]] || fail 'fzf executable not found'
+
 args=()
 opt=""
 skip=""
 swap=""
 close=""
 term=""
-while [ $# -gt 0 ]; do
+[[ -n "$LINES" ]] && lines=$LINES || lines=$(tput lines)
+
+help() {
+  >&2 echo 'usage: fzf-tmux [-u|-d [HEIGHT[%]]] [-l|-r [WIDTH[%]]] [--] [FZF OPTIONS]
+
+  Layout
+    -u [HEIGHT[%]]  Split above (up)
+    -d [HEIGHT[%]]  Split below (down)
+    -l [WIDTH[%]]   Split left
+    -r [WIDTH[%]]   Split right
+
+    (default: -d 50%)
+'
+  exit
+}
+
+while [[ $# -gt 0 ]]; do
   arg="$1"
-  case "$arg" in
+  shift
+  [[ -z "$skip" ]] && case "$arg" in
     -)
       term=1
       ;;
+    --help)
+      help
+      ;;
+    --version)
+      echo "fzf-tmux (with fzf $("$fzf" --version))"
+      exit
+      ;;
     -w*|-h*|-d*|-u*|-r*|-l*)
-      if [ -n "$skip" ]; then
-        args+=("$1")
-        shift
-        continue
-      fi
       if [[ "$arg" =~ ^.[lrw] ]]; then
         opt="-h"
         if [[ "$arg" =~ ^.l ]]; then
@@ -35,35 +62,33 @@
          close="; tmux swap-pane -D"
        fi
      fi
-     if [ ${#arg} -gt 2 ]; then
+     if [[ ${#arg} -gt 2 ]]; then
        size="${arg:2}"
      else
-       shift
        if [[ "$1" =~ ^[0-9]+%?$ ]]; then
          size="$1"
-       else
-         [ -n "$1" -a "$1" != "--" ] && args+=("$1")
          shift
+       else
          continue
        fi
      fi
 
      if [[ "$size" =~ %$ ]]; then
        size=${size:0:((${#size}-1))}
-       if [ -n "$swap" ]; then
+       if [[ -n "$swap" ]]; then
          opt="$opt -p $(( 100 - size ))"
        else
          opt="$opt -p $size"
        fi
      else
-       if [ -n "$swap" ]; then
+       if [[ -n "$swap" ]]; then
          if [[ "$arg" =~ ^.l ]]; then
-           [ -n "$COLUMNS" ] && max=$COLUMNS || max=$(tput cols)
+           [[ -n "$COLUMNS" ]] && max=$COLUMNS || max=$(tput cols)
          else
-           [ -n "$LINES" ] && max=$LINES || max=$(tput lines)
+           max=$lines
          fi
          size=$(( max - size ))
-         [ $size -lt 0 ] && size=0
+         [[ $size -lt 0 ]] && size=0
          opt="$opt -l $size"
        else
          opt="$opt -l $size"
@@ -74,16 +99,17 @@
       # "--" can be used to separate fzf-tmux options from fzf options to
       # avoid conflicts
       skip=1
+      continue
       ;;
     *)
-      args+=("$1")
+      args+=("$arg")
       ;;
   esac
-  shift
+  [[ -n "$skip" ]] && args+=("$arg")
 done
 
-if [ -z "$TMUX" ]; then
-  fzf "${args[@]}"
+if [[ -z "$TMUX" ]] || [[ "$lines" -le 15 ]]; then
+  "$fzf" "${args[@]}"
   exit $?
 fi
 
@@ -107,7 +133,7 @@ cleanup() {
   rm -f $argsf $fifo1 $fifo2 $fifo3
 
   # Remove temp window if we were zoomed
-  if [ -n "$zoomed" ]; then
+  if [[ -n "$zoomed" ]]; then
     tmux swap-pane -t $original_window \; \
       select-window -t $original_window \; \
       kill-window -t $tmp_window \; \
@@ -116,16 +142,9 @@ cleanup() {
 }
 trap cleanup EXIT SIGINT SIGTERM
 
-fail() {
-  >&2 echo "$1"
-  exit 2
-}
-fzf="$(which fzf 2> /dev/null)" || fzf="$(dirname "$0")/fzf"
-[ -x "$fzf" ] || fail "fzf executable not found"
-
 envs="env TERM=$TERM "
-[ -n "$FZF_DEFAULT_OPTS" ] && envs="$envs FZF_DEFAULT_OPTS=$(printf %q "$FZF_DEFAULT_OPTS")"
-[ -n "$FZF_DEFAULT_COMMAND" ] && envs="$envs FZF_DEFAULT_COMMAND=$(printf %q "$FZF_DEFAULT_COMMAND")"
+[[ -n "$FZF_DEFAULT_OPTS" ]] && envs="$envs FZF_DEFAULT_OPTS=$(printf %q "$FZF_DEFAULT_OPTS")"
+[[ -n "$FZF_DEFAULT_COMMAND" ]] && envs="$envs FZF_DEFAULT_COMMAND=$(printf %q "$FZF_DEFAULT_COMMAND")"
 
 mkfifo -m o+w $fifo2
 mkfifo -m o+w $fifo3
@@ -133,22 +152,26 @@ mkfifo -m o+w $fifo3
 # Build arguments to fzf
 opts=""
 for arg in "${args[@]}"; do
+  arg="${arg//\\/\\\\}"
   arg="${arg//\"/\\\"}"
   arg="${arg//\`/\\\`}"
+  arg="${arg//$/\\$}"
   opts="$opts \"$arg\""
 done
 
-if [ -n "$term" -o -t 0 ]; then
-  cat <<< "$fzf $opts > $fifo2; echo \$? > $fifo3 $close" > $argsf
-  tmux set-window-option -q synchronize-panes off \;\
-    set-window-option -q remain-on-exit off \;\
-    split-window $opt "cd $(printf %q "$PWD");$envs bash $argsf" $swap
+if [[ -n "$term" ]] || [[ -t 0 ]]; then
+  cat <<< "\"$fzf\" $opts > $fifo2; echo \$? > $fifo3 $close" > $argsf
+  TMUX=$(echo $TMUX | cut -d , -f 1,2) tmux set-window-option synchronize-panes off \;\
+    set-window-option remain-on-exit off \;\
+    split-window $opt "cd $(printf %q "$PWD");$envs bash $argsf" $swap \
+    > /dev/null 2>&1
 else
   mkfifo $fifo1
-  cat <<< "$fzf $opts < $fifo1 > $fifo2; echo \$? > $fifo3 $close" > $argsf
-  tmux set-window-option -q synchronize-panes off \;\
-    set-window-option -q remain-on-exit off \;\
-    split-window $opt "$envs bash $argsf" $swap
+  cat <<< "\"$fzf\" $opts < $fifo1 > $fifo2; echo \$? > $fifo3 $close" > $argsf
+  TMUX=$(echo $TMUX | cut -d , -f 1,2) tmux set-window-option synchronize-panes off \;\
+    set-window-option remain-on-exit off \;\
+    split-window $opt "$envs bash $argsf" $swap \
+    > /dev/null 2>&1
  cat <&0 > $fifo1 &
 fi
 cat $fifo2
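The fzf-tmux script shown above accepts its own layout flags before `--` and passes everything after it to fzf. A small usage sketch (the flag values and the surrounding command are only illustrative):

```sh
# Open fzf in a pane 30% of the window height below the current pane;
# outside tmux the layout flag is silently ignored and plain fzf runs instead.
selected=$(fzf-tmux -d 30% -- --reverse --inline-info) && vim "$selected"
```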
install (121 changed lines)

@@ -2,13 +2,14 @@
 
 set -u
 
-[[ "$@" =~ --pre ]] && version=0.11.4 pre=1 ||
-  version=0.11.4 pre=0
+[[ "$@" =~ --pre ]] && version=0.15.4 pre=1 ||
+  version=0.15.4 pre=0
 
 auto_completion=
 key_bindings=
-update_config=1
+update_config=2
 binary_arch=
+allow_legacy=
 
 help() {
   cat << EOF
@@ -27,7 +28,7 @@ usage: $0 [OPTIONS]
 EOF
 }
 
-for opt in $@; do
+for opt in "$@"; do
   case $opt in
     --help)
       help
@@ -37,6 +38,7 @@ for opt in $@; do
       auto_completion=1
       key_bindings=1
       update_config=1
+      allow_legacy=1
      ;;
     --key-bindings) key_bindings=1 ;;
     --no-key-bindings) key_bindings=0 ;;
@@ -46,7 +48,7 @@ for opt in $@; do
     --no-update-rc) update_config=0 ;;
     --32) binary_arch=386 ;;
     --64) binary_arch=amd64 ;;
-    --bin) ;;
+    --bin|--pre) ;;
     *)
       echo "unknown option: $opt"
       help
@@ -55,17 +57,14 @@ for opt in $@; do
   esac
 done
 
-cd $(dirname $BASH_SOURCE)
+cd "$(dirname "${BASH_SOURCE[0]}")"
 fzf_base="$(pwd)"
 
-# If stdin is a tty, we are "interactive".
-interactive=
-[ -t 0 ] && interactive=yes
-
 ask() {
+  # If stdin is a tty, we are "interactive".
   # non-interactive shell: wait for a linefeed
   # interactive shell: continue after a single keypress
-  [ -n "$interactive" ] && read_n='-n 1' || read_n=
+  read_n=$([ -t 0 ] && echo "-n 1")
 
   read -p "$1 ([y]/n) " $read_n -r
   echo
@@ -103,7 +102,7 @@ symlink() {
 }
 
 link_fzf_in_path() {
-  if which_fzf="$(which fzf 2> /dev/null)"; then
+  if which_fzf="$(command -v fzf)"; then
     echo "  - Found in \$PATH"
     echo "  - Creating symlink: $which_fzf -> bin/fzf"
     (cd "$fzf_base"/bin && rm -f fzf && ln -sf "$which_fzf" fzf)
@@ -112,6 +111,14 @@ link_fzf_in_path() {
   return 1
 }
 
+try_curl() {
+  command -v curl > /dev/null && curl -fL $1 | tar -xz
+}
+
+try_wget() {
+  command -v wget > /dev/null && wget -O - $1 | tar -xz
+}
+
 download() {
   echo "Downloading bin/fzf ..."
   if [ $pre = 0 ]; then
@@ -131,14 +138,13 @@ download() {
   fi
 
   local url=https://github.com/junegunn/fzf-bin/releases/download/$version/${1}.tgz
-  if which curl > /dev/null; then
-    curl -fL $url | tar -xz
-  elif which wget > /dev/null; then
-    wget -O - $url | tar -xz
-  else
-    binary_error="curl or wget not found"
+  set -o pipefail
+  if ! (try_curl $url || try_wget $url); then
+    set +o pipefail
+    binary_error="Failed to download with curl and wget"
     return
   fi
+  set +o pipefail
 
   if [ ! -f $1 ]; then
     binary_error="Failed to download ${1}"
@@ -161,11 +167,14 @@ case "$archi" in
 esac
 
 install_ruby_fzf() {
+  if [ -z "$allow_legacy" ]; then
+    ask "Do you want to install legacy Ruby version instead?" && exit 1
+  fi
   echo "Installing legacy Ruby version ..."
 
   # ruby executable
   echo -n "Checking Ruby executable ... "
-  ruby=`which ruby`
+  ruby=$(command -v ruby)
   if [ $? -ne 0 ]; then
     echo "ruby executable not found !!!"
     exit 1
@@ -173,7 +182,7 @@ install_ruby_fzf() {
 
   # System ruby is preferred
   system_ruby=/usr/bin/ruby
-  if [ -x $system_ruby -a $system_ruby != "$ruby" ]; then
+  if [ -x $system_ruby ] && [ $system_ruby != "$ruby" ]; then
     $system_ruby --disable-gems -rcurses -e0 2> /dev/null
     [ $? -eq 0 ] && ruby=$system_ruby
   fi
@@ -232,22 +241,25 @@ cd "$fzf_base"
 if [ -n "$binary_error" ]; then
   if [ $binary_available -eq 0 ]; then
     echo "No prebuilt binary for $archi ..."
-    if which go > /dev/null 2>&1; then
-      echo -n "Building binary (go get github.com/junegunn/fzf/src/fzf) ... "
-      if go get github.com/junegunn/fzf/src/fzf; then
-        echo "OK"
-        link_fzf_in_path
-      else
-        echo "Failed to build binary ..."
-        install_ruby_fzf
+  else
+    echo "  - $binary_error !!!"
+  fi
+  if command -v go > /dev/null; then
+    echo -n "Building binary (go get -u github.com/junegunn/fzf/src/fzf) ... "
+    if [ -z "${GOPATH-}" ]; then
+      export GOPATH="${TMPDIR:-/tmp}/fzf-gopath"
+      mkdir -p "$GOPATH"
      fi
+    if go get -u github.com/junegunn/fzf/src/fzf; then
+      echo "OK"
+      cp "$GOPATH/bin/fzf" "$fzf_base/bin/"
     else
-      echo "go executable not found. Cannot build binary ..."
+      echo "Failed to build binary ..."
       install_ruby_fzf
     fi
   else
-    echo "  - $binary_error !!!"
-    exit 1
+    echo "go executable not found. Cannot build binary ..."
+    install_ruby_fzf
   fi
 fi
 
@@ -266,7 +278,9 @@ if [ -z "$key_bindings" ]; then
 fi
 
 echo
-for shell in bash zsh; do
+has_zsh=$(command -v zsh > /dev/null && echo 1 || echo 0)
+shells=$([ $has_zsh -eq 1 ] && echo "bash zsh" || echo "bash")
+for shell in $shells; do
   echo -n "Generate ~/.fzf.$shell ... "
   src=~/.fzf.${shell}
 
@@ -306,9 +320,8 @@ EOF
 done
 
 # fish
-has_fish=0
-if [ -n "$(which fish 2> /dev/null)" ]; then
-  has_fish=1
+has_fish=$(command -v fish > /dev/null && echo 1 || echo 0)
+if [ $has_fish -eq 1 ]; then
   echo -n "Update fish_user_paths ... "
   fish << EOF
   echo \$fish_user_paths | grep $fzf_base/bin > /dev/null
@@ -337,8 +350,8 @@ fi
 append_line() {
   set -e
 
-  local skip line file pat lno
-  skip="$1"
+  local update line file pat lno
+  update="$1"
   line="$2"
   file="$3"
   pat="${4:-}"
@@ -354,7 +367,7 @@ append_line() {
   if [ -n "$lno" ]; then
     echo "    - Already exists: line #$lno"
   else
-    if [ $skip -eq 1 ]; then
+    if [ $update -eq 1 ]; then
       echo >> "$file"
       echo "$line" >> "$file"
       echo "    + Added"
@@ -366,26 +379,30 @@ append_line() {
   set +e
 }
 
+if [ $update_config -eq 2 ]; then
+  echo
+  ask "Do you want to update your shell configuration files?"
+  update_config=$?
+fi
 echo
-for shell in bash zsh; do
+for shell in $shells; do
   [ $shell = zsh ] && dest=${ZDOTDIR:-~}/.zshrc || dest=~/.bashrc
   append_line $update_config "[ -f ~/.fzf.${shell} ] && source ~/.fzf.${shell}" "$dest" "~/.fzf.${shell}"
 done
 
-if [ $key_bindings -eq 1 -a $has_fish -eq 1 ]; then
+if [ $key_bindings -eq 1 ] && [ $has_fish -eq 1 ]; then
   bind_file=~/.config/fish/functions/fish_user_key_bindings.fish
   append_line $update_config "fzf_key_bindings" "$bind_file"
 fi
 
-cat << EOF
-Finished. Restart your shell or reload config file.
-   source ~/.bashrc  # bash
-   source ${ZDOTDIR:-~}/.zshrc  # zsh
-EOF
-[ $has_fish -eq 1 ] && echo "   fzf_key_bindings  # fish"; cat << EOF
-
-Use uninstall script to remove fzf.
-
-For more information, see: https://github.com/junegunn/fzf
-EOF
+if [ $update_config -eq 1 ]; then
+  echo 'Finished. Restart your shell or reload config file.'
+  echo '   source ~/.bashrc  # bash'
+  [ $has_zsh -eq 1 ] && echo "   source ${ZDOTDIR:-~}/.zshrc  # zsh"
+  [ $has_fish -eq 1 ] && [ $key_bindings -eq 1 ] && echo '   fzf_key_bindings  # fish'
+  echo
+  echo 'Use uninstall script to remove fzf.'
+  echo
+fi
+echo 'For more information, see: https://github.com/junegunn/fzf'
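The `try_curl`/`try_wget` helpers added to the install script implement a simple downloader fallback: try curl, fall back to wget, and fail if neither succeeds. A standalone sketch of the same pattern, with a placeholder URL (not a real release artifact):

```sh
#!/usr/bin/env bash
set -o pipefail

try_curl() { command -v curl > /dev/null && curl -fL "$1" | tar -xz; }
try_wget() { command -v wget > /dev/null && wget -O - "$1" | tar -xz; }

url=https://example.com/archive.tgz   # placeholder URL
if ! (try_curl "$url" || try_wget "$url"); then
  echo "Failed to download with curl and wget" >&2
  exit 1
fi
```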
man/man1/fzf-tmux.1 (new file, 54 lines)

@@ -0,0 +1,54 @@
+.ig
+The MIT License (MIT)
+
+Copyright (c) 2016 Junegunn Choi
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+..
+.TH fzf-tmux 1 "Oct 2016" "fzf 0.15.4" "fzf-tmux - open fzf in tmux split pane"
+
+.SH NAME
+fzf-tmux - open fzf in tmux split pane
+
+.SH SYNOPSIS
+.B fzf-tmux [-u|-d [HEIGHT[%]]] [-l|-r [WIDTH[%]]] [--] [FZF OPTIONS]
+
+.SH DESCRIPTION
+fzf-tmux is a wrapper script for fzf that opens fzf in a tmux split pane. It is
+designed to work just like fzf except that it does not take up the whole
+screen. You can safely use fzf-tmux instead of fzf in your scripts as the extra
+options will be silently ignored if you're not on tmux.
+
+.SH OPTIONS
+.SS Layout
+
+(default: \fB-d 50%\fR)
+
+.TP
+.B "-u [height[%]]"
+Split above (up)
+.TP
+.B "-d [height[%]]"
+Split below (down)
+.TP
+.B "-l [width[%]]"
+Split left
+.TP
+.B "-r [width[%]]"
+Split right
442
man/man1/fzf.1
442
man/man1/fzf.1
@@ -21,7 +21,7 @@ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|||||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||||
THE SOFTWARE.
|
THE SOFTWARE.
|
||||||
..
|
..
|
||||||
.TH fzf 1 "Mar 2016" "fzf 0.11.4" "fzf - a command-line fuzzy finder"
|
.TH fzf 1 "Oct 2016" "fzf 0.15.4" "fzf - a command-line fuzzy finder"
|
||||||
|
|
||||||
.SH NAME
|
.SH NAME
|
||||||
fzf - a command-line fuzzy finder
|
fzf - a command-line fuzzy finder
|
||||||
@@ -47,13 +47,23 @@ Case-insensitive match (default: smart-case match)
|
|||||||
.TP
|
.TP
|
||||||
.B "+i"
|
.B "+i"
|
||||||
Case-sensitive match
|
Case-sensitive match
|
||||||
|
.TP
|
||||||
|
.BI "--algo=" TYPE
|
||||||
|
Fuzzy matching algorithm (default: v2)
|
||||||
|
|
||||||
|
.br
|
||||||
|
.BR v2 " Optimal scoring algorithm (quality)"
|
||||||
|
.br
|
||||||
|
.BR v1 " Faster but not guaranteed to find the optimal result (performance)"
|
||||||
|
.br
|
||||||
|
|
||||||
.TP
|
.TP
|
||||||
.BI "-n, --nth=" "N[,..]"
|
.BI "-n, --nth=" "N[,..]"
|
||||||
Comma-separated list of field index expressions for limiting search scope.
|
Comma-separated list of field index expressions for limiting search scope.
|
||||||
See \fBFIELD INDEX EXPRESSION\fR for details.
|
See \fBFIELD INDEX EXPRESSION\fR for the details.
|
||||||
.TP
|
.TP
|
||||||
.BI "--with-nth=" "N[,..]"
|
.BI "--with-nth=" "N[,..]"
|
||||||
Transform each item using index expressions within finder
|
Transform the presentation of each line using field index expressions
|
||||||
.TP
|
.TP
|
||||||
.BI "-d, --delimiter=" "STR"
|
.BI "-d, --delimiter=" "STR"
|
||||||
Field delimiter regex for \fB--nth\fR and \fB--with-nth\fR (default: AWK-style)
|
Field delimiter regex for \fB--nth\fR and \fB--with-nth\fR (default: AWK-style)
|
||||||
@@ -64,6 +74,7 @@ Do not sort the result
|
|||||||
.TP
|
.TP
|
||||||
.B "--tac"
|
.B "--tac"
|
||||||
Reverse the order of the input
|
Reverse the order of the input
|
||||||
|
|
||||||
.RS
|
.RS
|
||||||
e.g. \fBhistory | fzf --tac --no-sort\fR
|
e.g. \fBhistory | fzf --tac --no-sort\fR
|
||||||
.RE
|
.RE
|
||||||
@@ -73,13 +84,13 @@ Comma-separated list of sort criteria to apply when the scores are tied.
|
|||||||
.br
|
.br
|
||||||
|
|
||||||
.br
|
.br
|
||||||
.BR length " Prefers item with shorter length"
|
.BR length " Prefers line with shorter length"
|
||||||
.br
|
.br
|
||||||
.BR begin " Prefers item with matched substring closer to the beginning"
|
.BR begin " Prefers line with matched substring closer to the beginning"
|
||||||
.br
|
.br
|
||||||
.BR end " Prefers item with matched substring closer to the end"
|
.BR end " Prefers line with matched substring closer to the end"
|
||||||
.br
|
.br
|
||||||
.BR index " Prefers item that appeared earlier in the input stream"
|
.BR index " Prefers line that appeared earlier in the input stream"
|
||||||
.br
|
.br
|
||||||
|
|
||||||
.br
|
.br
|
||||||
@@ -90,16 +101,86 @@ Comma-separated list of sort criteria to apply when the scores are tied.
|
|||||||
- \fBindex\fR is implicitly appended to the list when not specified
|
- \fBindex\fR is implicitly appended to the list when not specified
|
||||||
.br
|
.br
|
||||||
- Default is \fBlength\fR (or equivalently \fBlength\fR,index)
|
- Default is \fBlength\fR (or equivalently \fBlength\fR,index)
|
||||||
|
.br
|
||||||
|
- If \fBend\fR is found in the list, fzf will scan each line backwards
|
||||||
.SS Interface
|
.SS Interface
|
||||||
.TP
|
.TP
|
||||||
.B "-m, --multi"
|
.B "-m, --multi"
|
||||||
Enable multi-select with tab/shift-tab
|
Enable multi-select with tab/shift-tab
|
||||||
.TP
|
.TP
|
||||||
|
.B "--no-mouse"
|
||||||
|
Disable mouse
|
||||||
|
.TP
|
||||||
|
.BI "--bind=" "KEYBINDS"
|
||||||
|
Comma-separated list of custom key bindings. See \fBKEY BINDINGS\fR for the
|
||||||
|
details.
|
||||||
|
.TP
|
||||||
|
.B "--cycle"
|
||||||
|
Enable cyclic scroll
|
||||||
|
.TP
|
||||||
|
.B "--no-hscroll"
|
||||||
|
Disable horizontal scroll
|
||||||
|
.TP
|
||||||
|
.BI "--hscroll-off=" "COL"
|
||||||
|
Number of screen columns to keep to the right of the highlighted substring
|
||||||
|
(default: 10). Setting it to a large value will cause the text to be positioned
|
||||||
|
on the center of the screen.
|
||||||
|
.TP
|
||||||
|
.BI "--jump-labels=" "CHARS"
|
||||||
|
Label characters for \fBjump\fR and \fBjump-accept\fR
|
||||||
|
.SS Layout
|
||||||
|
.TP
|
||||||
|
.B "--reverse"
|
||||||
|
Reverse orientation
|
||||||
|
.TP
|
||||||
|
.BI "--margin=" MARGIN
|
||||||
|
Comma-separated expression for margins around the finder.
|
||||||
|
.br
|
||||||
|
|
||||||
|
.br
|
||||||
|
.RS
|
||||||
|
.BR TRBL " Same margin for top, right, bottom, and left"
|
||||||
|
.br
|
||||||
|
.BR TB,RL " Vertical, horizontal margin"
|
||||||
|
.br
|
||||||
|
.BR T,RL,B " Top, horizontal, bottom margin"
|
||||||
|
.br
|
||||||
|
.BR T,R,B,L " Top, right, bottom, left margin"
|
||||||
|
.br
|
||||||
|
|
||||||
|
.br
|
||||||
|
Each part can be given in absolute number or in percentage relative to the
|
||||||
|
terminal size with \fB%\fR suffix.
|
||||||
|
.br
|
||||||
|
|
||||||
|
.br
|
||||||
|
e.g. \fBfzf --margin 10%\fR
|
||||||
|
\fBfzf --margin 1,5%\fR
|
||||||
|
.RE
|
||||||
|
.TP
|
||||||
|
.B "--inline-info"
|
||||||
|
Display finder info inline with the query
|
||||||
|
.TP
|
||||||
|
.BI "--prompt=" "STR"
|
||||||
|
Input prompt (default: '> ')
|
||||||
|
.TP
|
||||||
|
.BI "--header=" "STR"
|
||||||
|
The given string will be printed as the sticky header. The lines are displayed
|
||||||
|
in the given order from top to bottom regardless of \fB--reverse\fR option, and
|
||||||
|
are not affected by \fB--with-nth\fR. ANSI color codes are processed even when
|
||||||
|
\fB--ansi\fR is not set.
|
||||||
|
.TP
|
||||||
|
.BI "--header-lines=" "N"
|
||||||
|
The first N lines of the input are treated as the sticky header. When
|
||||||
|
\fB--with-nth\fR is set, the lines are transformed just like the other
|
||||||
|
lines that follow.
|
||||||
|
.SS Display
|
||||||
|
.TP
|
||||||
.B "--ansi"
|
.B "--ansi"
|
||||||
Enable processing of ANSI color codes
|
Enable processing of ANSI color codes
|
||||||
.TP
|
.TP
|
||||||
.B "--no-mouse"
|
.BI "--tabstop=" SPACES
|
||||||
Disable mouse
|
Number of spaces for a tab character (default: 8)
|
||||||
.TP
|
.TP
|
||||||
.BI "--color=" "[BASE_SCHEME][,COLOR:ANSI]"
|
.BI "--color=" "[BASE_SCHEME][,COLOR:ANSI]"
|
||||||
Color configuration. The name of the base color scheme is followed by custom
|
Color configuration. The name of the base color scheme is followed by custom
|
||||||
@@ -137,172 +218,7 @@ e.g. \fBfzf --color=bg+:24\fR
|
|||||||
.TP
|
.TP
|
||||||
.B "--black"
|
.B "--black"
|
||||||
Use black background
|
Use black background
|
||||||
.TP
|
.SS History
|
||||||
.B "--reverse"
|
|
||||||
Reverse orientation
|
|
||||||
.TP
|
|
||||||
.BI "--margin=" MARGIN
|
|
||||||
Comma-separated expression for margins around the finder.
|
|
||||||
.br
|
|
||||||
|
|
||||||
.br
|
|
||||||
.RS
|
|
||||||
.BR TRBL " Same margin for top, right, bottom, and left"
|
|
||||||
.br
|
|
||||||
.BR TB,RL " Vertical, horizontal margin"
|
|
||||||
.br
|
|
||||||
.BR T,RL,B " Top, horizontal, bottom margin"
|
|
||||||
.br
|
|
||||||
.BR T,R,B,L " Top, right, bottom, left margin"
|
|
||||||
.br
|
|
||||||
|
|
||||||
.br
|
|
||||||
Each part can be given in absolute number or in percentage relative to the
|
|
||||||
terminal size with \fB%\fR suffix.
|
|
||||||
.br
|
|
||||||
|
|
||||||
.br
|
|
||||||
e.g. \fBfzf --margin 10%\fR
|
|
||||||
\fBfzf --margin 1,5%\fR
|
|
||||||
.RE
|
|
||||||
.TP
|
|
||||||
.BI "--tabstop=" SPACES
|
|
||||||
Number of spaces for a tab character (default: 8)
|
|
||||||
.TP
|
|
||||||
.B "--cycle"
|
|
||||||
Enable cyclic scroll
|
|
||||||
.TP
|
|
||||||
.B "--no-hscroll"
|
|
||||||
Disable horizontal scroll
|
|
||||||
.TP
|
|
||||||
.BI "--hscroll-off=" "COL"
|
|
||||||
Number of screen columns to keep to the right of the highlighted substring
|
|
||||||
(default: 10). Setting it to a large value will cause the text to be positioned
|
|
||||||
on the center of the screen.
|
|
||||||
.TP
|
|
||||||
.B "--inline-info"
|
|
||||||
Display finder info inline with the query
|
|
||||||
.TP
|
|
||||||
.BI "--prompt=" "STR"
|
|
||||||
Input prompt (default: '> ')
|
|
||||||
.TP
|
|
||||||
.BI "--toggle-sort=" "KEY"
|
|
||||||
Key to toggle sort. For the list of the allowed key names, see \fB--bind\fR.
|
|
||||||
.TP
|
|
||||||
.BI "--bind=" "KEYBINDS"
|
|
||||||
Comma-separated list of custom key bindings. Each key binding expression
|
|
||||||
follows the following format: \fBKEY:ACTION\fR
|
|
||||||
.RS
|
|
||||||
e.g. \fBfzf --bind=ctrl-j:accept,ctrl-k:kill-line\fR
|
|
||||||
.RE
|
|
||||||
|
|
||||||
.RS
|
|
||||||
.B AVAILABLE KEYS: (SYNONYMS)
|
|
||||||
\fIctrl-[a-z]\fR
|
|
||||||
\fIalt-[a-z]\fR
|
|
||||||
\fIf[1-4]\fR
|
|
||||||
\fIenter\fR (\fIreturn\fR \fIctrl-m\fR)
|
|
||||||
\fIspace\fR
|
|
||||||
\fIbspace\fR (\fIbs\fR)
|
|
||||||
\fIalt-bspace\fR (\fIalt-bs\fR)
|
|
||||||
\fItab\fR
|
|
||||||
\fIbtab\fR (\fIshift-tab\fR)
|
|
||||||
\fIesc\fR
|
|
||||||
\fIdel\fR
|
|
||||||
\fIup\fR
|
|
||||||
\fIdown\fR
|
|
||||||
\fIleft\fR
|
|
||||||
\fIright\fR
|
|
||||||
\fIhome\fR
|
|
||||||
\fIend\fR
|
|
||||||
\fIpgup\fR (\fIpage-up\fR)
|
|
||||||
\fIpgdn\fR (\fIpage-down\fR)
|
|
||||||
\fIshift-left\fR
|
|
||||||
\fIshift-right\fR
|
|
||||||
\fIdouble-click\fR
|
|
||||||
or any single character
|
|
||||||
.RE
|
|
||||||
|
|
||||||
.RS
|
|
||||||
\fBACTION: DEFAULT BINDINGS (NOTES):
|
|
||||||
\fBabort\fR \fIctrl-c ctrl-g ctrl-q esc\fR
|
|
||||||
\fBaccept\fR \fIenter double-click\fR
|
|
||||||
\fBbackward-char\fR \fIctrl-b left\fR
|
|
||||||
\fBbackward-delete-char\fR \fIctrl-h bspace\fR
|
|
||||||
\fBbackward-kill-word\fR \fIalt-bs\fR
|
|
||||||
\fBbackward-word\fR \fIalt-b shift-left\fR
|
|
||||||
\fBbeginning-of-line\fR \fIctrl-a home\fR
|
|
||||||
\fBcancel\fR
|
|
||||||
\fBclear-screen\fR \fIctrl-l\fR
|
|
||||||
\fBdelete-char\fR \fIdel\fR
|
|
||||||
\fBdelete-char/eof\fR \fIctrl-d\fR
|
|
||||||
\fBdeselect-all\fR
|
|
||||||
\fBdown\fR \fIctrl-j ctrl-n down\fR
|
|
||||||
\fBend-of-line\fR \fIctrl-e end\fR
|
|
||||||
\fBexecute(...)\fR (see below for the details)
|
|
||||||
\fBexecute-multi(...)\fR (see below for the details)
|
|
||||||
\fBforward-char\fR \fIctrl-f right\fR
|
|
||||||
\fBforward-word\fR \fIalt-f shift-right\fR
|
|
||||||
\fBignore\fR
|
|
||||||
\fBkill-line\fR
|
|
||||||
\fBkill-word\fR \fIalt-d\fR
|
|
||||||
\fBnext-history\fR (\fIctrl-n\fR on \fB--history\fR)
|
|
||||||
\fBpage-down\fR \fIpgdn\fR
|
|
||||||
\fBpage-up\fR \fIpgup\fR
|
|
||||||
\fBprevious-history\fR (\fIctrl-p\fR on \fB--history\fR)
|
|
||||||
\fBselect-all\fR
|
|
||||||
\fBtoggle\fR
|
|
||||||
\fBtoggle-all\fR
|
|
||||||
\fBtoggle-down\fR \fIctrl-i (tab)\fR
|
|
||||||
\fBtoggle-in\fR (\fB--reverse\fR ? \fBtoggle-up\fR : \fBtoggle-down\fR)
|
|
||||||
\fBtoggle-out\fR (\fB--reverse\fR ? \fBtoggle-down\fR : \fBtoggle-up\fR)
|
|
||||||
\fBtoggle-sort\fR (equivalent to \fB--toggle-sort\fR)
|
|
||||||
\fBtoggle-up\fR \fIbtab (shift-tab)\fR
|
|
||||||
\fBunix-line-discard\fR \fIctrl-u\fR
|
|
||||||
\fBunix-word-rubout\fR \fIctrl-w\fR
|
|
||||||
\fBup\fR \fIctrl-k ctrl-p up\fR
|
|
||||||
\fByank\fR \fIctrl-y\fR
|
|
||||||
.RE
|
|
||||||
|
|
||||||
.RS
|
|
||||||
With \fBexecute(...)\fR action, you can execute arbitrary commands without
|
|
||||||
leaving fzf. For example, you can turn fzf into a simple file browser by
|
|
||||||
binding \fBenter\fR key to \fBless\fR command like follows.
|
|
||||||
|
|
||||||
.RS
|
|
||||||
\fBfzf --bind "enter:execute(less {})"\fR
|
|
||||||
.RE
|
|
||||||
|
|
||||||
\fB{}\fR is the placeholder for the double-quoted string of the current line.
|
|
||||||
If the command contains parentheses, you can use any of the following
|
|
||||||
alternative notations to avoid parse errors.
|
|
||||||
|
|
||||||
\fBexecute[...]\fR
|
|
||||||
\fBexecute~...~\fR
|
|
||||||
\fBexecute!...!\fR
|
|
||||||
\fBexecute@...@\fR
|
|
||||||
\fBexecute#...#\fR
|
|
||||||
\fBexecute$...$\fR
|
|
||||||
\fBexecute%...%\fR
|
|
||||||
\fBexecute^...^\fR
|
|
||||||
\fBexecute&...&\fR
|
|
||||||
\fBexecute*...*\fR
|
|
||||||
\fBexecute;...;\fR
|
|
||||||
\fBexecute/.../\fR
|
|
||||||
\fBexecute|...|\fR
|
|
||||||
\fBexecute:...\fR
|
|
||||||
.RS
|
|
||||||
This is the special form that frees you from parse errors as it does not expect
|
|
||||||
the closing character. The catch is that it should be the last one in the
|
|
||||||
comma-separated list.
|
|
||||||
.RE
|
|
||||||
|
|
||||||
\fBexecute-multi(...)\fR is an alternative action that executes the command
|
|
||||||
with the selected entries when multi-select is enabled (\fB--multi\fR). With
|
|
||||||
this action, \fB{}\fR is replaced with the double-quoted strings of the
|
|
||||||
selected entries separated by spaces.
|
|
||||||
|
|
||||||
.RE
|
|
||||||
.TP
|
.TP
|
||||||
.BI "--history=" "HISTORY_FILE"
|
.BI "--history=" "HISTORY_FILE"
|
||||||
Load search history from the specified file and update the file on completion.
|
Load search history from the specified file and update the file on completion.
|
||||||
@@ -312,17 +228,40 @@ When enabled, \fBCTRL-N\fR and \fBCTRL-P\fR are automatically remapped to
|
|||||||
.BI "--history-size=" "N"
|
.BI "--history-size=" "N"
|
||||||
Maximum number of entries in the history file (default: 1000). The file is
|
Maximum number of entries in the history file (default: 1000). The file is
|
||||||
automatically truncated when the number of the lines exceeds the value.
|
automatically truncated when the number of the lines exceeds the value.
|
||||||
|
.SS Preview
|
||||||
.TP
|
.TP
|
||||||
.BI "--header=" "STR"
|
.BI "--preview=" "COMMAND"
|
||||||
The given string will be printed as the sticky header. The lines are displayed
|
Execute the given command for the current line and display the result on the
|
||||||
in the given order from top to bottom regardless of \fB--reverse\fR option, and
|
preview window. \fB{}\fR in the command is the placeholder that is replaced to
|
||||||
are not affected by \fB--with-nth\fR. ANSI color codes are processed even when
|
the single-quoted string of the current line. To transform the replacement
|
||||||
\fB--ansi\fR is not set.
|
string, specify field index expressions between the braces (See \fBFIELD INDEX
|
||||||
|
EXPRESSION\fR for the details). Also, \fB{q}\fR is replaced to the current
|
||||||
|
query string.
|
||||||
|
|
||||||
|
.RS
|
||||||
|
e.g. \fBfzf --preview="head -$LINES {}"\fR
|
||||||
|
\fBls -l | fzf --preview="echo user={3} when={-4..-2}; cat {-1}" --header-lines=1\fR
|
||||||
|
|
||||||
|
Note that you can escape a placeholder pattern by prepending a backslash.
|
||||||
|
.RE
|
||||||
.TP
|
.TP
|
||||||
.BI "--header-lines=" "N"
|
.BI "--preview-window=" "[POSITION][:SIZE[%]][:hidden]"
|
||||||
The first N lines of the input are treated as the sticky header. When
|
Determine the layout of the preview window. If the argument ends with
|
||||||
\fB--with-nth\fR is set, the lines are transformed just like the other
|
\fB:hidden\fR, the preview window will be hidden by default until
|
||||||
lines that follow.
|
\fBtoggle-preview\fR action is triggered.
|
||||||
|
|
||||||
|
.RS
|
||||||
|
.B POSITION: (default: right)
|
||||||
|
\fBup
|
||||||
|
\fBdown
|
||||||
|
\fBleft
|
||||||
|
\fBright
|
||||||
|
.RE
|
||||||
|
|
||||||
|
.RS
|
||||||
|
e.g. \fBfzf --preview="head {}" --preview-window=up:30%\fR
|
||||||
|
\fBfzf --preview="file {}" --preview-window=down:1\fR
|
||||||
|
.RE
|
||||||
.SS Scripting
|
.SS Scripting
|
||||||
.TP
|
.TP
|
||||||
.BI "-q, --query=" "STR"
|
.BI "-q, --query=" "STR"
|
||||||
@@ -347,18 +286,26 @@ the default enter key. When this option is set, fzf will print the name of the
|
|||||||
key pressed as the first line of its output (or as the second line if
|
key pressed as the first line of its output (or as the second line if
|
||||||
\fB--print-query\fR is also used). The line will be empty if fzf is completed
|
\fB--print-query\fR is also used). The line will be empty if fzf is completed
|
||||||
with the default enter key.
|
with the default enter key.
|
||||||
|
|
||||||
.RS
|
.RS
|
||||||
e.g. \fBfzf --expect=ctrl-v,ctrl-t,alt-s,f1,f2,~,@\fR
|
e.g. \fBfzf --expect=ctrl-v,ctrl-t,alt-s,f1,f2,~,@\fR
|
||||||
.RE
|
.RE
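A minimal bash sketch of consuming this output (not from the man page; the variable names are illustrative):

\fBout=$(fzf --expect=ctrl-v,ctrl-t)\fR

\fBkey=$(head -1 <<< "$out")\fR

\fBselection=$(tail -n +2 <<< "$out")\fR

\fB[ "$key" = ctrl-t ] && echo "open $selection in a new tab"\fR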
|
||||||
.TP
|
.TP
|
||||||
|
.B "--read0"
|
||||||
|
Read input delimited by ASCII NUL characters instead of newline characters
|
||||||
|
.TP
|
||||||
|
.B "--print0"
|
||||||
|
Print output delimited by ASCII NUL characters instead of newline characters
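A usage sketch combining the two flags with NUL-aware tools:

e.g. \fBfind . -type f -print0 | fzf --read0 --multi --print0 | xargs -0 ls -l\fR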
|
||||||
|
.TP
|
||||||
.B "--sync"
|
.B "--sync"
|
||||||
Synchronous search for multi-staged filtering. If specified, fzf will launch
|
Synchronous search for multi-staged filtering. If specified, fzf will launch
|
||||||
ncurses finder only after the input stream is complete.
|
ncurses finder only after the input stream is complete.
|
||||||
|
|
||||||
.RS
|
.RS
|
||||||
e.g. \fBfzf --multi | fzf --sync\fR
|
e.g. \fBfzf --multi | fzf --sync\fR
|
||||||
.RE
|
.RE
|
||||||
|
|
||||||
.SH ENVIRONMENT
|
.SH ENVIRONMENT VARIABLES
|
||||||
.TP
|
.TP
|
||||||
.B FZF_DEFAULT_COMMAND
|
.B FZF_DEFAULT_COMMAND
|
||||||
Default command to use when input is tty
|
Default command to use when input is tty
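For example (an illustrative setting, not a recommendation from the page):

\fBexport FZF_DEFAULT_COMMAND='find . -type f'\fR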
|
||||||
@@ -412,12 +359,12 @@ occurrences of the string.
|
|||||||
|
|
||||||
.SS Anchored-match
|
.SS Anchored-match
|
||||||
A term can be prefixed by \fB^\fR, or suffixed by \fB$\fR to become an
|
A term can be prefixed by \fB^\fR, or suffixed by \fB$\fR to become an
|
||||||
anchored-match term. Then fzf will search for the items that start with or end
|
anchored-match term. Then fzf will search for the lines that start with or end
|
||||||
with the given string. An anchored-match term is also an exact-match term.
|
with the given string. An anchored-match term is also an exact-match term.
|
||||||
|
|
||||||
.SS Negation
|
.SS Negation
|
||||||
If a term is prefixed by \fB!\fR, fzf will exclude the items that satisfy the
|
If a term is prefixed by \fB!\fR, fzf will exclude the lines that satisfy the
|
||||||
term from the result.
|
term from the result. In this case, fzf performs exact match by default.
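For example (an illustrative query), the following selects lines that match \fBcore\fR but do not end with \fBgo\fR:

e.g. \fBcore !go$\fR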
|
||||||
|
|
||||||
.SS Exact-match by default
|
.SS Exact-match by default
|
||||||
If you don't prefer fuzzy matching and do not wish to "quote" (prefixing with
|
If you don't prefer fuzzy matching and do not wish to "quote" (prefixing with
|
||||||
@@ -431,6 +378,125 @@ query matches entries that start with \fBcore\fR and end with either \fBgo\fR,
|
|||||||
|
|
||||||
e.g. \fB^core go$ | rb$ | py$\fR
|
e.g. \fB^core go$ | rb$ | py$\fR
|
||||||
|
|
||||||
|
.SH KEY BINDINGS
|
||||||
|
You can customize key bindings of fzf with the \fB--bind\fR option, which takes
|
||||||
|
a comma-separated list of key binding expressions. Each key binding expression
|
||||||
|
is in the following format: \fBKEY:ACTION\fR
|
||||||
|
|
||||||
|
e.g. \fBfzf --bind=ctrl-j:accept,ctrl-k:kill-line\fR
|
||||||
|
|
||||||
|
.B AVAILABLE KEYS: (SYNONYMS)
|
||||||
|
\fIctrl-[a-z]\fR
|
||||||
|
\fIalt-[a-z]\fR
|
||||||
|
\fIf[1-10]\fR
|
||||||
|
\fIenter\fR (\fIreturn\fR \fIctrl-m\fR)
|
||||||
|
\fIspace\fR
|
||||||
|
\fIbspace\fR (\fIbs\fR)
|
||||||
|
\fIalt-enter\fR
|
||||||
|
\fIalt-space\fR
|
||||||
|
\fIalt-bspace\fR (\fIalt-bs\fR)
|
||||||
|
\fIalt-/\fR
|
||||||
|
\fItab\fR
|
||||||
|
\fIbtab\fR (\fIshift-tab\fR)
|
||||||
|
\fIesc\fR
|
||||||
|
\fIdel\fR
|
||||||
|
\fIup\fR
|
||||||
|
\fIdown\fR
|
||||||
|
\fIleft\fR
|
||||||
|
\fIright\fR
|
||||||
|
\fIhome\fR
|
||||||
|
\fIend\fR
|
||||||
|
\fIpgup\fR (\fIpage-up\fR)
|
||||||
|
\fIpgdn\fR (\fIpage-down\fR)
|
||||||
|
\fIshift-left\fR
|
||||||
|
\fIshift-right\fR
|
||||||
|
\fIdouble-click\fR
|
||||||
|
or any single character
|
||||||
|
|
||||||
|
\fBACTION: DEFAULT BINDINGS (NOTES):
|
||||||
|
\fBabort\fR \fIctrl-c ctrl-g ctrl-q esc\fR
|
||||||
|
\fBaccept\fR \fIenter double-click\fR
|
||||||
|
\fBbackward-char\fR \fIctrl-b left\fR
|
||||||
|
\fBbackward-delete-char\fR \fIctrl-h bspace\fR
|
||||||
|
\fBbackward-kill-word\fR \fIalt-bs\fR
|
||||||
|
\fBbackward-word\fR \fIalt-b shift-left\fR
|
||||||
|
\fBbeginning-of-line\fR \fIctrl-a home\fR
|
||||||
|
\fBcancel\fR
|
||||||
|
\fBclear-screen\fR \fIctrl-l\fR
|
||||||
|
\fBdelete-char\fR \fIdel\fR
|
||||||
|
\fBdelete-char/eof\fR \fIctrl-d\fR
|
||||||
|
\fBdeselect-all\fR
|
||||||
|
\fBdown\fR \fIctrl-j ctrl-n down\fR
|
||||||
|
\fBend-of-line\fR \fIctrl-e end\fR
|
||||||
|
\fBexecute(...)\fR (see below for the details)
|
||||||
|
\fBexecute-multi(...)\fR (see below for the details)
|
||||||
|
\fBforward-char\fR \fIctrl-f right\fR
|
||||||
|
\fBforward-word\fR \fIalt-f shift-right\fR
|
||||||
|
\fBignore\fR
|
||||||
|
\fBjump\fR (EasyMotion-like 2-keystroke movement)
|
||||||
|
\fBjump-accept\fR (jump and accept)
|
||||||
|
\fBkill-line\fR
|
||||||
|
\fBkill-word\fR \fIalt-d\fR
|
||||||
|
\fBnext-history\fR (\fIctrl-n\fR on \fB--history\fR)
|
||||||
|
\fBpage-down\fR \fIpgdn\fR
|
||||||
|
\fBpage-up\fR \fIpgup\fR
|
||||||
|
\fBpreview-down\fR
|
||||||
|
\fBpreview-up\fR
|
||||||
|
\fBpreview-page-down\fR
|
||||||
|
\fBpreview-page-up\fR
|
||||||
|
\fBprevious-history\fR (\fIctrl-p\fR on \fB--history\fR)
|
||||||
|
\fBprint-query\fR (print query and exit)
|
||||||
|
\fBselect-all\fR
|
||||||
|
\fBtoggle\fR
|
||||||
|
\fBtoggle-all\fR
|
||||||
|
\fBtoggle-down\fR \fIctrl-i (tab)\fR
|
||||||
|
\fBtoggle-in\fR (\fB--reverse\fR ? \fBtoggle-up\fR : \fBtoggle-down\fR)
|
||||||
|
\fBtoggle-out\fR (\fB--reverse\fR ? \fBtoggle-down\fR : \fBtoggle-up\fR)
|
||||||
|
\fBtoggle-preview\fR
|
||||||
|
\fBtoggle-sort\fR (equivalent to \fB--toggle-sort\fR)
|
||||||
|
\fBtoggle-up\fR \fIbtab (shift-tab)\fR
|
||||||
|
\fBunix-line-discard\fR \fIctrl-u\fR
|
||||||
|
\fBunix-word-rubout\fR \fIctrl-w\fR
|
||||||
|
\fBup\fR \fIctrl-k ctrl-p up\fR
|
||||||
|
\fByank\fR \fIctrl-y\fR
|
||||||
|
|
||||||
|
With the \fBexecute(...)\fR action, you can execute arbitrary commands without
|
||||||
|
leaving fzf. For example, you can turn fzf into a simple file browser by
|
||||||
|
binding the \fBenter\fR key to the \fBless\fR command as follows.
|
||||||
|
|
||||||
|
\fBfzf --bind "enter:execute(less {})"\fR
|
||||||
|
|
||||||
|
You can use the same placeholder expressions as in \fB--preview\fR.
|
||||||
|
|
||||||
|
If the command contains parentheses, fzf may fail to parse the expression. In
|
||||||
|
that case, you can use any of the following alternative notations to avoid
|
||||||
|
parse errors.
|
||||||
|
|
||||||
|
\fBexecute[...]\fR
|
||||||
|
\fBexecute~...~\fR
|
||||||
|
\fBexecute!...!\fR
|
||||||
|
\fBexecute@...@\fR
|
||||||
|
\fBexecute#...#\fR
|
||||||
|
\fBexecute$...$\fR
|
||||||
|
\fBexecute%...%\fR
|
||||||
|
\fBexecute^...^\fR
|
||||||
|
\fBexecute&...&\fR
|
||||||
|
\fBexecute*...*\fR
|
||||||
|
\fBexecute;...;\fR
|
||||||
|
\fBexecute/.../\fR
|
||||||
|
\fBexecute|...|\fR
|
||||||
|
\fBexecute:...\fR
|
||||||
|
.RS
|
||||||
|
This is the special form that frees you from parse errors as it does not expect
|
||||||
|
the closing character. The catch is that it should be the last one in the
|
||||||
|
comma-separated list of key-action pairs.
|
||||||
|
.RE
|
||||||
|
|
||||||
|
\fBexecute-multi(...)\fR is an alternative action that executes the command
|
||||||
|
with the selected entries when multi-select is enabled (\fB--multi\fR). With
|
||||||
|
this action, \fB{}\fR is replaced with the quoted strings of the selected
|
||||||
|
entries separated by spaces.
|
||||||
|
|
||||||
.SH AUTHOR
|
.SH AUTHOR
|
||||||
Junegunn Choi (\fIjunegunn.c@gmail.com\fR)
|
Junegunn Choi (\fIjunegunn.c@gmail.com\fR)
|
||||||
|
|
||||||
|
plugin/fzf.vim
@@ -21,7 +21,8 @@
|
|||||||
" OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
|
" OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
|
||||||
" WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
" WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||||
|
|
||||||
let s:default_height = '40%'
|
let s:default_layout = { 'down': '~40%' }
|
||||||
|
let s:layout_keys = ['window', 'up', 'down', 'left', 'right']
|
||||||
let s:fzf_go = expand('<sfile>:h:h').'/bin/fzf'
|
let s:fzf_go = expand('<sfile>:h:h').'/bin/fzf'
|
||||||
let s:install = expand('<sfile>:h:h').'/install'
|
let s:install = expand('<sfile>:h:h').'/install'
|
||||||
let s:installed = 0
|
let s:installed = 0
|
||||||
@@ -49,7 +50,7 @@ function! s:fzf_exec()
|
|||||||
throw 'fzf executable not found'
|
throw 'fzf executable not found'
|
||||||
endif
|
endif
|
||||||
endif
|
endif
|
||||||
return s:exec
|
return s:shellesc(s:exec)
|
||||||
endfunction
|
endfunction
|
||||||
|
|
||||||
function! s:tmux_enabled()
|
function! s:tmux_enabled()
|
||||||
@@ -74,7 +75,7 @@ function! s:shellesc(arg)
|
|||||||
endfunction
|
endfunction
|
||||||
|
|
||||||
function! s:escape(path)
|
function! s:escape(path)
|
||||||
return escape(a:path, ' %#''"\')
|
return escape(a:path, ' $%#''"\')
|
||||||
endfunction
|
endfunction
|
||||||
|
|
||||||
" Upgrade legacy options
|
" Upgrade legacy options
|
||||||
@@ -104,11 +105,114 @@ function! s:warn(msg)
|
|||||||
echohl None
|
echohl None
|
||||||
endfunction
|
endfunction
|
||||||
|
|
||||||
|
function! s:has_any(dict, keys)
|
||||||
|
for key in a:keys
|
||||||
|
if has_key(a:dict, key)
|
||||||
|
return 1
|
||||||
|
endif
|
||||||
|
endfor
|
||||||
|
return 0
|
||||||
|
endfunction
|
||||||
|
|
||||||
|
function! s:open(cmd, target)
|
||||||
|
if stridx('edit', a:cmd) == 0 && fnamemodify(a:target, ':p') ==# expand('%:p')
|
||||||
|
return
|
||||||
|
endif
|
||||||
|
execute a:cmd s:escape(a:target)
|
||||||
|
endfunction
|
||||||
|
|
||||||
|
function! s:common_sink(action, lines) abort
|
||||||
|
if len(a:lines) < 2
|
||||||
|
return
|
||||||
|
endif
|
||||||
|
let key = remove(a:lines, 0)
|
||||||
|
let cmd = get(a:action, key, 'e')
|
||||||
|
if len(a:lines) > 1
|
||||||
|
augroup fzf_swap
|
||||||
|
autocmd SwapExists * let v:swapchoice='o'
|
||||||
|
\| call s:warn('fzf: E325: swap file exists: '.expand('<afile>'))
|
||||||
|
augroup END
|
||||||
|
endif
|
||||||
|
try
|
||||||
|
let empty = empty(expand('%')) && line('$') == 1 && empty(getline(1)) && !&modified
|
||||||
|
let autochdir = &autochdir
|
||||||
|
set noautochdir
|
||||||
|
for item in a:lines
|
||||||
|
if empty
|
||||||
|
execute 'e' s:escape(item)
|
||||||
|
let empty = 0
|
||||||
|
else
|
||||||
|
call s:open(cmd, item)
|
||||||
|
endif
|
||||||
|
if exists('#BufEnter') && isdirectory(item)
|
||||||
|
doautocmd BufEnter
|
||||||
|
endif
|
||||||
|
endfor
|
||||||
|
finally
|
||||||
|
let &autochdir = autochdir
|
||||||
|
silent! autocmd! fzf_swap
|
||||||
|
endtry
|
||||||
|
endfunction
|
||||||
|
|
||||||
|
" [name string,] [opts dict,] [fullscreen boolean]
|
||||||
|
function! fzf#wrap(...)
|
||||||
|
let args = ['', {}, 0]
|
||||||
|
let expects = map(copy(args), 'type(v:val)')
|
||||||
|
let tidx = 0
|
||||||
|
for arg in copy(a:000)
|
||||||
|
let tidx = index(expects, type(arg), tidx)
|
||||||
|
if tidx < 0
|
||||||
|
throw 'invalid arguments (expected: [name string] [opts dict] [fullscreen boolean])'
|
||||||
|
endif
|
||||||
|
let args[tidx] = arg
|
||||||
|
let tidx += 1
|
||||||
|
unlet arg
|
||||||
|
endfor
|
||||||
|
let [name, opts, bang] = args
|
||||||
|
|
||||||
|
" Layout: g:fzf_layout (and deprecated g:fzf_height)
|
||||||
|
if bang
|
||||||
|
for key in s:layout_keys
|
||||||
|
if has_key(opts, key)
|
||||||
|
call remove(opts, key)
|
||||||
|
endif
|
||||||
|
endfor
|
||||||
|
elseif !s:has_any(opts, s:layout_keys)
|
||||||
|
if !exists('g:fzf_layout') && exists('g:fzf_height')
|
||||||
|
let opts.down = g:fzf_height
|
||||||
|
else
|
||||||
|
let opts = extend(opts, get(g:, 'fzf_layout', s:default_layout))
|
||||||
|
endif
|
||||||
|
endif
|
||||||
|
|
||||||
|
" History: g:fzf_history_dir
|
||||||
|
let opts.options = get(opts, 'options', '')
|
||||||
|
if len(name) && len(get(g:, 'fzf_history_dir', ''))
|
||||||
|
let dir = expand(g:fzf_history_dir)
|
||||||
|
if !isdirectory(dir)
|
||||||
|
call mkdir(dir, 'p')
|
||||||
|
endif
|
||||||
|
let opts.options = join(['--history', s:escape(dir.'/'.name), opts.options])
|
||||||
|
endif
|
||||||
|
|
||||||
|
" Action: g:fzf_action
|
||||||
|
if !s:has_any(opts, ['sink', 'sink*'])
|
||||||
|
let opts._action = get(g:, 'fzf_action', s:default_action)
|
||||||
|
let opts.options .= ' --expect='.join(keys(opts._action), ',')
|
||||||
|
function! opts.sink(lines) abort
|
||||||
|
return s:common_sink(self._action, a:lines)
|
||||||
|
endfunction
|
||||||
|
let opts['sink*'] = remove(opts, 'sink')
|
||||||
|
endif
|
||||||
|
|
||||||
|
return opts
|
||||||
|
endfunction
|
||||||
|
|
||||||
function! fzf#run(...) abort
|
function! fzf#run(...) abort
|
||||||
try
|
try
|
||||||
let oshell = &shell
|
let oshell = &shell
|
||||||
set shell=sh
|
set shell=sh
|
||||||
if has('nvim') && bufexists('term://*:FZF')
|
if has('nvim') && len(filter(range(1, bufnr('$')), 'bufname(v:val) =~# ";#FZF"'))
|
||||||
call s:warn('FZF is already running!')
|
call s:warn('FZF is already running!')
|
||||||
return []
|
return []
|
||||||
endif
|
endif
|
||||||
@@ -121,6 +225,12 @@ try
|
|||||||
throw v:exception
|
throw v:exception
|
||||||
endtry
|
endtry
|
||||||
|
|
||||||
|
if !has_key(dict, 'source') && !empty($FZF_DEFAULT_COMMAND)
|
||||||
|
let temps.source = tempname()
|
||||||
|
call writefile(split($FZF_DEFAULT_COMMAND, "\n"), temps.source)
|
||||||
|
let dict.source = (empty($SHELL) ? 'sh' : $SHELL) . ' ' . s:shellesc(temps.source)
|
||||||
|
endif
|
||||||
|
|
||||||
if has_key(dict, 'source')
|
if has_key(dict, 'source')
|
||||||
let source = dict.source
|
let source = dict.source
|
||||||
let type = type(source)
|
let type = type(source)
|
||||||
@@ -131,21 +241,21 @@ try
|
|||||||
call writefile(source, temps.input)
|
call writefile(source, temps.input)
|
||||||
let prefix = 'cat '.s:shellesc(temps.input).'|'
|
let prefix = 'cat '.s:shellesc(temps.input).'|'
|
||||||
else
|
else
|
||||||
throw 'Invalid source type'
|
throw 'invalid source type'
|
||||||
endif
|
endif
|
||||||
else
|
else
|
||||||
let prefix = ''
|
let prefix = ''
|
||||||
endif
|
endif
|
||||||
let tmux = !has('nvim') && s:tmux_enabled() && s:splittable(dict)
|
let tmux = (!has('nvim') || get(g:, 'fzf_prefer_tmux', 0)) && s:tmux_enabled() && s:splittable(dict)
|
||||||
let command = prefix.(tmux ? s:fzf_tmux(dict) : fzf_exec).' '.optstr.' > '.temps.result
|
let command = prefix.(tmux ? s:fzf_tmux(dict) : fzf_exec).' '.optstr.' > '.temps.result
|
||||||
|
|
||||||
if has('nvim')
|
if has('nvim') && !tmux
|
||||||
return s:execute_term(dict, command, temps)
|
return s:execute_term(dict, command, temps)
|
||||||
endif
|
endif
|
||||||
|
|
||||||
let ret = tmux ? s:execute_tmux(dict, command, temps) : s:execute(dict, command, temps)
|
let lines = tmux ? s:execute_tmux(dict, command, temps) : s:execute(dict, command, temps)
|
||||||
call s:popd(dict, ret)
|
call s:callback(dict, lines)
|
||||||
return ret
|
return lines
|
||||||
finally
|
finally
|
||||||
let &shell = oshell
|
let &shell = oshell
|
||||||
endtry
|
endtry
|
||||||
@@ -166,16 +276,16 @@ function! s:fzf_tmux(dict)
|
|||||||
if s:present(a:dict, o)
|
if s:present(a:dict, o)
|
||||||
let spec = a:dict[o]
|
let spec = a:dict[o]
|
||||||
if (o == 'up' || o == 'down') && spec[0] == '~'
|
if (o == 'up' || o == 'down') && spec[0] == '~'
|
||||||
let size = '-'.o[0].s:calc_size(&lines, spec[1:], a:dict)
|
let size = '-'.o[0].s:calc_size(&lines, spec, a:dict)
|
||||||
else
|
else
|
||||||
" Legacy boolean option
|
" Legacy boolean option
|
||||||
let size = '-'.o[0].(spec == 1 ? '' : spec)
|
let size = '-'.o[0].(spec == 1 ? '' : substitute(spec, '^\~', '', ''))
|
||||||
endif
|
endif
|
||||||
break
|
break
|
||||||
endif
|
endif
|
||||||
endfor
|
endfor
|
||||||
return printf('LINES=%d COLUMNS=%d %s %s %s --',
|
return printf('LINES=%d COLUMNS=%d %s %s %s --',
|
||||||
\ &lines, &columns, s:fzf_tmux, size, (has_key(a:dict, 'source') ? '' : '-'))
|
\ &lines, &columns, s:shellesc(s:fzf_tmux), size, (has_key(a:dict, 'source') ? '' : '-'))
|
||||||
endfunction
|
endfunction
|
||||||
|
|
||||||
function! s:splittable(dict)
|
function! s:splittable(dict)
|
||||||
@@ -189,29 +299,24 @@ function! s:pushd(dict)
|
|||||||
return 1
|
return 1
|
||||||
endif
|
endif
|
||||||
let a:dict.prev_dir = cwd
|
let a:dict.prev_dir = cwd
|
||||||
execute 'chdir' s:escape(a:dict.dir)
|
execute 'lcd' s:escape(a:dict.dir)
|
||||||
let a:dict.dir = getcwd()
|
let a:dict.dir = getcwd()
|
||||||
return 1
|
return 1
|
||||||
endif
|
endif
|
||||||
return 0
|
return 0
|
||||||
endfunction
|
endfunction
|
||||||
|
|
||||||
function! s:popd(dict, lines)
|
augroup fzf_popd
|
||||||
" Since anything can be done in the sink function, there is no telling that
|
autocmd!
|
||||||
" the change of the working directory was made by &autochdir setting.
|
autocmd WinEnter * call s:dopopd()
|
||||||
"
|
augroup END
|
||||||
" We use the following heuristic to determine whether to restore CWD:
|
|
||||||
" - Always restore the current directory when &autochdir is disabled.
|
function! s:dopopd()
|
||||||
" FIXME This makes it impossible to change directory from inside the sink
|
if !exists('w:fzf_prev_dir') || exists('*haslocaldir') && !haslocaldir()
|
||||||
" function when &autochdir is not used.
|
return
|
||||||
" - In case of an error or an interrupt, a:lines will be empty.
|
|
||||||
" And it will be an array of a single empty string when fzf was finished
|
|
||||||
" without a match. In these cases, we presume that the change of the
|
|
||||||
" directory is not expected and should be undone.
|
|
||||||
if has_key(a:dict, 'prev_dir') &&
|
|
||||||
\ (!&autochdir || (empty(a:lines) || len(a:lines) == 1 && empty(a:lines[0])))
|
|
||||||
execute 'chdir' s:escape(remove(a:dict, 'prev_dir'))
|
|
||||||
endif
|
endif
|
||||||
|
execute 'lcd' s:escape(w:fzf_prev_dir)
|
||||||
|
unlet w:fzf_prev_dir
|
||||||
endfunction
|
endfunction
|
||||||
|
|
||||||
function! s:xterm_launcher()
|
function! s:xterm_launcher()
|
||||||
@@ -251,8 +356,9 @@ function! s:execute(dict, command, temps) abort
|
|||||||
let command = escaped
|
let command = escaped
|
||||||
endif
|
endif
|
||||||
execute 'silent !'.command
|
execute 'silent !'.command
|
||||||
|
let exit_status = v:shell_error
|
||||||
redraw!
|
redraw!
|
||||||
return s:exit_handler(v:shell_error, command) ? s:callback(a:dict, a:temps) : []
|
return s:exit_handler(exit_status, command) ? s:collect(a:temps) : []
|
||||||
endfunction
|
endfunction
|
||||||
|
|
||||||
function! s:execute_tmux(dict, command, temps) abort
|
function! s:execute_tmux(dict, command, temps) abort
|
||||||
@@ -263,15 +369,17 @@ function! s:execute_tmux(dict, command, temps) abort
|
|||||||
endif
|
endif
|
||||||
|
|
||||||
call system(command)
|
call system(command)
|
||||||
|
let exit_status = v:shell_error
|
||||||
redraw!
|
redraw!
|
||||||
return s:exit_handler(v:shell_error, command) ? s:callback(a:dict, a:temps) : []
|
return s:exit_handler(exit_status, command) ? s:collect(a:temps) : []
|
||||||
endfunction
|
endfunction
|
||||||
|
|
||||||
function! s:calc_size(max, val, dict)
|
function! s:calc_size(max, val, dict)
|
||||||
if a:val =~ '%$'
|
let val = substitute(a:val, '^\~', '', '')
|
||||||
let size = a:max * str2nr(a:val[:-2]) / 100
|
if val =~ '%$'
|
||||||
|
let size = a:max * str2nr(val[:-2]) / 100
|
||||||
else
|
else
|
||||||
let size = min([a:max, str2nr(a:val)])
|
let size = min([a:max, str2nr(val)])
|
||||||
endif
|
endif
|
||||||
|
|
||||||
let srcsz = -1
|
let srcsz = -1
|
||||||
@@ -281,11 +389,12 @@ function! s:calc_size(max, val, dict)
|
|||||||
|
|
||||||
let opts = get(a:dict, 'options', '').$FZF_DEFAULT_OPTS
|
let opts = get(a:dict, 'options', '').$FZF_DEFAULT_OPTS
|
||||||
let margin = stridx(opts, '--inline-info') > stridx(opts, '--no-inline-info') ? 1 : 2
|
let margin = stridx(opts, '--inline-info') > stridx(opts, '--no-inline-info') ? 1 : 2
|
||||||
|
let margin += stridx(opts, '--header') > stridx(opts, '--no-header')
|
||||||
return srcsz >= 0 ? min([srcsz + margin, size]) : size
|
return srcsz >= 0 ? min([srcsz + margin, size]) : size
|
||||||
endfunction
|
endfunction
|
||||||
|
|
||||||
function! s:getpos()
|
function! s:getpos()
|
||||||
return {'tab': tabpagenr(), 'win': winnr(), 'cnt': winnr('$')}
|
return {'tab': tabpagenr(), 'win': winnr(), 'cnt': winnr('$'), 'tcnt': tabpagenr('$')}
|
||||||
endfunction
|
endfunction
|
||||||
|
|
||||||
function! s:split(dict)
|
function! s:split(dict)
|
||||||
@@ -294,88 +403,131 @@ function! s:split(dict)
|
|||||||
\ 'down': ['botright', 'resize', &lines],
|
\ 'down': ['botright', 'resize', &lines],
|
||||||
\ 'left': ['vertical topleft', 'vertical resize', &columns],
|
\ 'left': ['vertical topleft', 'vertical resize', &columns],
|
||||||
\ 'right': ['vertical botright', 'vertical resize', &columns] }
|
\ 'right': ['vertical botright', 'vertical resize', &columns] }
|
||||||
let s:ppos = s:getpos()
|
let ppos = s:getpos()
|
||||||
try
|
try
|
||||||
for [dir, triple] in items(directions)
|
for [dir, triple] in items(directions)
|
||||||
let val = get(a:dict, dir, '')
|
let val = get(a:dict, dir, '')
|
||||||
if !empty(val)
|
if !empty(val)
|
||||||
let [cmd, resz, max] = triple
|
let [cmd, resz, max] = triple
|
||||||
if (dir == 'up' || dir == 'down') && val[0] == '~'
|
if (dir == 'up' || dir == 'down') && val[0] == '~'
|
||||||
let sz = s:calc_size(max, val[1:], a:dict)
|
let sz = s:calc_size(max, val, a:dict)
|
||||||
else
|
else
|
||||||
let sz = s:calc_size(max, val, {})
|
let sz = s:calc_size(max, val, {})
|
||||||
endif
|
endif
|
||||||
execute cmd sz.'new'
|
execute cmd sz.'new'
|
||||||
execute resz sz
|
execute resz sz
|
||||||
return
|
return [ppos, {}]
|
||||||
endif
|
endif
|
||||||
endfor
|
endfor
|
||||||
if s:present(a:dict, 'window')
|
if s:present(a:dict, 'window')
|
||||||
execute a:dict.window
|
execute a:dict.window
|
||||||
else
|
else
|
||||||
tabnew
|
execute (tabpagenr()-1).'tabnew'
|
||||||
endif
|
endif
|
||||||
|
return [ppos, { '&l:wfw': &l:wfw, '&l:wfh': &l:wfh }]
|
||||||
finally
|
finally
|
||||||
setlocal winfixwidth winfixheight buftype=nofile bufhidden=wipe nobuflisted
|
setlocal winfixwidth winfixheight
|
||||||
endtry
|
endtry
|
||||||
endfunction
|
endfunction
|
||||||
|
|
||||||
function! s:execute_term(dict, command, temps) abort
|
function! s:execute_term(dict, command, temps) abort
|
||||||
call s:split(a:dict)
|
let winrest = winrestcmd()
|
||||||
|
let [ppos, winopts] = s:split(a:dict)
|
||||||
let fzf = { 'buf': bufnr('%'), 'dict': a:dict, 'temps': a:temps, 'name': 'FZF' }
|
let fzf = { 'buf': bufnr('%'), 'ppos': ppos, 'dict': a:dict, 'temps': a:temps,
|
||||||
let s:command = a:command
|
\ 'winopts': winopts, 'winrest': winrest, 'lines': &lines,
|
||||||
|
\ 'columns': &columns, 'command': a:command }
|
||||||
|
function! fzf.switch_back(inplace)
|
||||||
|
if a:inplace && bufnr('') == self.buf
|
||||||
|
" FIXME: Can't re-enter normal mode from terminal mode
|
||||||
|
" execute "normal! \<c-^>"
|
||||||
|
b #
|
||||||
|
" No other listed buffer
|
||||||
|
if bufnr('') == self.buf
|
||||||
|
enew
|
||||||
|
endif
|
||||||
|
endif
|
||||||
|
endfunction
|
||||||
function! fzf.on_exit(id, code)
|
function! fzf.on_exit(id, code)
|
||||||
let pos = s:getpos()
|
if s:getpos() == self.ppos " {'window': 'enew'}
|
||||||
let inplace = pos == s:ppos " {'window': 'enew'}
|
for [opt, val] in items(self.winopts)
|
||||||
if !inplace
|
execute 'let' opt '=' val
|
||||||
|
endfor
|
||||||
|
call self.switch_back(1)
|
||||||
|
else
|
||||||
if bufnr('') == self.buf
|
if bufnr('') == self.buf
|
||||||
" We use close instead of bd! since Vim does not close the split when
|
" We use close instead of bd! since Vim does not close the split when
|
||||||
" there's no other listed buffer (nvim +'set nobuflisted')
|
" there's no other listed buffer (nvim +'set nobuflisted')
|
||||||
close
|
close
|
||||||
endif
|
endif
|
||||||
if pos.tab == s:ppos.tab
|
execute 'tabnext' self.ppos.tab
|
||||||
wincmd p
|
execute self.ppos.win.'wincmd w'
|
||||||
endif
|
|
||||||
endif
|
endif
|
||||||
|
|
||||||
if !s:exit_handler(a:code, s:command, 1)
|
if bufexists(self.buf)
|
||||||
|
execute 'bd!' self.buf
|
||||||
|
endif
|
||||||
|
|
||||||
|
if &lines == self.lines && &columns == self.columns && s:getpos() == self.ppos
|
||||||
|
execute self.winrest
|
||||||
|
endif
|
||||||
|
|
||||||
|
if !s:exit_handler(a:code, self.command, 1)
|
||||||
return
|
return
|
||||||
endif
|
endif
|
||||||
|
|
||||||
call s:pushd(self.dict)
|
call s:pushd(self.dict)
|
||||||
let ret = []
|
let lines = s:collect(self.temps)
|
||||||
try
|
call s:callback(self.dict, lines)
|
||||||
let ret = s:callback(self.dict, self.temps)
|
call self.switch_back(s:getpos() == self.ppos)
|
||||||
|
|
||||||
if inplace && bufnr('') == self.buf
|
|
||||||
execute "normal! \<c-^>"
|
|
||||||
" No other listed buffer
|
|
||||||
if bufnr('') == self.buf
|
|
||||||
bd!
|
|
||||||
endif
|
|
||||||
endif
|
|
||||||
finally
|
|
||||||
call s:popd(self.dict, ret)
|
|
||||||
endtry
|
|
||||||
endfunction
|
endfunction
|
||||||
|
|
||||||
call s:pushd(a:dict)
|
try
|
||||||
call termopen(a:command, fzf)
|
if s:present(a:dict, 'dir')
|
||||||
call s:popd(a:dict, [])
|
execute 'lcd' s:escape(a:dict.dir)
|
||||||
setlocal nospell
|
endif
|
||||||
|
call termopen(a:command . ';#FZF', fzf)
|
||||||
|
finally
|
||||||
|
if s:present(a:dict, 'dir')
|
||||||
|
lcd -
|
||||||
|
endif
|
||||||
|
endtry
|
||||||
|
setlocal nospell bufhidden=wipe nobuflisted
|
||||||
setf fzf
|
setf fzf
|
||||||
startinsert
|
startinsert
|
||||||
return []
|
return []
|
||||||
endfunction
|
endfunction
|
||||||
|
|
||||||
function! s:callback(dict, temps) abort
|
function! s:collect(temps) abort
|
||||||
let lines = []
|
try
|
||||||
try
|
return filereadable(a:temps.result) ? readfile(a:temps.result) : []
|
||||||
if filereadable(a:temps.result)
|
finally
|
||||||
let lines = readfile(a:temps.result)
|
for tf in values(a:temps)
|
||||||
|
silent! call delete(tf)
|
||||||
|
endfor
|
||||||
|
endtry
|
||||||
|
endfunction
|
||||||
|
|
||||||
|
function! s:callback(dict, lines) abort
|
||||||
|
" Since anything can be done in the sink function, there is no telling that
|
||||||
|
" the change of the working directory was made by &autochdir setting.
|
||||||
|
"
|
||||||
|
" We use the following heuristic to determine whether to restore CWD:
|
||||||
|
" - Always restore the current directory when &autochdir is disabled.
|
||||||
|
" FIXME This makes it impossible to change directory from inside the sink
|
||||||
|
" function when &autochdir is not used.
|
||||||
|
" - In case of an error or an interrupt, a:lines will be empty.
|
||||||
|
" And it will be an array of a single empty string when fzf was finished
|
||||||
|
" without a match. In these cases, we presume that the change of the
|
||||||
|
" directory is not expected and should be undone.
|
||||||
|
let popd = has_key(a:dict, 'prev_dir') &&
|
||||||
|
\ (!&autochdir || (empty(a:lines) || len(a:lines) == 1 && empty(a:lines[0])))
|
||||||
|
if popd
|
||||||
|
let w:fzf_prev_dir = a:dict.prev_dir
|
||||||
|
endif
|
||||||
|
|
||||||
|
try
|
||||||
if has_key(a:dict, 'sink')
|
if has_key(a:dict, 'sink')
|
||||||
for line in lines
|
for line in a:lines
|
||||||
if type(a:dict.sink) == 2
|
if type(a:dict.sink) == 2
|
||||||
call a:dict.sink(line)
|
call a:dict.sink(line)
|
||||||
else
|
else
|
||||||
@@ -384,76 +536,40 @@ try
|
|||||||
endfor
|
endfor
|
||||||
endif
|
endif
|
||||||
if has_key(a:dict, 'sink*')
|
if has_key(a:dict, 'sink*')
|
||||||
call a:dict['sink*'](lines)
|
call a:dict['sink*'](a:lines)
|
||||||
endif
|
endif
|
||||||
endif
|
catch
|
||||||
|
if stridx(v:exception, ':E325:') < 0
|
||||||
|
echoerr v:exception
|
||||||
|
endif
|
||||||
|
endtry
|
||||||
|
|
||||||
for tf in values(a:temps)
|
" We may have opened a new window or tab
|
||||||
silent! call delete(tf)
|
if popd
|
||||||
endfor
|
let w:fzf_prev_dir = a:dict.prev_dir
|
||||||
catch
|
call s:dopopd()
|
||||||
if stridx(v:exception, ':E325:') < 0
|
|
||||||
echoerr v:exception
|
|
||||||
endif
|
endif
|
||||||
finally
|
|
||||||
return lines
|
|
||||||
endtry
|
|
||||||
endfunction
|
endfunction
|
||||||
|
|
||||||
let s:default_action = {
|
let s:default_action = {
|
||||||
\ 'ctrl-m': 'e',
|
|
||||||
\ 'ctrl-t': 'tab split',
|
\ 'ctrl-t': 'tab split',
|
||||||
\ 'ctrl-x': 'split',
|
\ 'ctrl-x': 'split',
|
||||||
\ 'ctrl-v': 'vsplit' }
|
\ 'ctrl-v': 'vsplit' }
|
||||||
|
|
||||||
function! s:cmd_callback(lines) abort
|
|
||||||
if empty(a:lines)
|
|
||||||
return
|
|
||||||
endif
|
|
||||||
let key = remove(a:lines, 0)
|
|
||||||
let cmd = get(s:action, key, 'e')
|
|
||||||
if len(a:lines) > 1
|
|
||||||
augroup fzf_swap
|
|
||||||
autocmd SwapExists * let v:swapchoice='o'
|
|
||||||
\| call s:warn('fzf: E325: swap file exists: '.expand('<afile>'))
|
|
||||||
augroup END
|
|
||||||
endif
|
|
||||||
try
|
|
||||||
let empty = empty(expand('%')) && line('$') == 1 && empty(getline(1)) && !&modified
|
|
||||||
let autochdir = &autochdir
|
|
||||||
set noautochdir
|
|
||||||
for item in a:lines
|
|
||||||
if empty
|
|
||||||
execute 'e' s:escape(item)
|
|
||||||
let empty = 0
|
|
||||||
else
|
|
||||||
execute cmd s:escape(item)
|
|
||||||
endif
|
|
||||||
if exists('#BufEnter') && isdirectory(item)
|
|
||||||
doautocmd BufEnter
|
|
||||||
endif
|
|
||||||
endfor
|
|
||||||
finally
|
|
||||||
let &autochdir = autochdir
|
|
||||||
silent! autocmd! fzf_swap
|
|
||||||
endtry
|
|
||||||
endfunction
|
|
||||||
|
|
||||||
function! s:cmd(bang, ...) abort
|
function! s:cmd(bang, ...) abort
|
||||||
let s:action = get(g:, 'fzf_action', s:default_action)
|
let args = copy(a:000)
|
||||||
let args = extend(['--expect='.join(keys(s:action), ',')], a:000)
|
let opts = { 'options': '--multi ' }
|
||||||
let opts = {}
|
if len(args) && isdirectory(expand(args[-1]))
|
||||||
if len(args) > 0 && isdirectory(expand(args[-1]))
|
let opts.dir = substitute(substitute(remove(args, -1), '\\\(["'']\)', '\1', 'g'), '/*$', '/', '')
|
||||||
let opts.dir = substitute(remove(args, -1), '\\\(["'']\)', '\1', 'g')
|
let opts.options .= ' --prompt '.shellescape(opts.dir)
|
||||||
|
else
|
||||||
|
let opts.options .= ' --prompt '.shellescape(pathshorten(getcwd()).'/')
|
||||||
endif
|
endif
|
||||||
if !a:bang
|
let opts.options .= ' '.join(args)
|
||||||
let opts.down = get(g:, 'fzf_height', get(g:, 'fzf_tmux_height', s:default_height))
|
call fzf#run(fzf#wrap('FZF', opts, a:bang))
|
||||||
endif
|
|
||||||
call fzf#run(extend({'options': join(args), 'sink*': function('<sid>cmd_callback')}, opts))
|
|
||||||
endfunction
|
endfunction
|
||||||
|
|
||||||
command! -nargs=* -complete=dir -bang FZF call s:cmd(<bang>0, <f-args>)
|
command! -nargs=* -complete=dir -bang FZF call s:cmd(<bang>0, <f-args>)
|
||||||
|
|
||||||
let &cpo = s:cpo_save
|
let &cpo = s:cpo_save
|
||||||
unlet s:cpo_save
|
unlet s:cpo_save
|
||||||
|
|
||||||
|
@@ -14,7 +14,7 @@
|
|||||||
if ! declare -f _fzf_compgen_path > /dev/null; then
|
if ! declare -f _fzf_compgen_path > /dev/null; then
|
||||||
_fzf_compgen_path() {
|
_fzf_compgen_path() {
|
||||||
echo "$1"
|
echo "$1"
|
||||||
\find -L "$1" \
|
command find -L "$1" \
|
||||||
-name .git -prune -o -name .svn -prune -o \( -type d -o -type f -o -type l \) \
|
-name .git -prune -o -name .svn -prune -o \( -type d -o -type f -o -type l \) \
|
||||||
-a -not -path "$1" -print 2> /dev/null | sed 's@^\./@@'
|
-a -not -path "$1" -print 2> /dev/null | sed 's@^\./@@'
|
||||||
}
|
}
|
||||||
@@ -22,7 +22,7 @@ fi
|
|||||||
|
|
||||||
if ! declare -f _fzf_compgen_dir > /dev/null; then
|
if ! declare -f _fzf_compgen_dir > /dev/null; then
|
||||||
_fzf_compgen_dir() {
|
_fzf_compgen_dir() {
|
||||||
\find -L "$1" \
|
command find -L "$1" \
|
||||||
-name .git -prune -o -name .svn -prune -o -type d \
|
-name .git -prune -o -name .svn -prune -o -type d \
|
||||||
-a -not -path "$1" -print 2> /dev/null | sed 's@^\./@@'
|
-a -not -path "$1" -print 2> /dev/null | sed 's@^\./@@'
|
||||||
}
|
}
|
||||||
@@ -32,7 +32,7 @@ fi
|
|||||||
|
|
||||||
_fzf_orig_completion_filter() {
|
_fzf_orig_completion_filter() {
|
||||||
sed 's/^\(.*-F\) *\([^ ]*\).* \([^ ]*\)$/export _fzf_orig_completion_\3="\1 %s \3 #\2";/' |
|
sed 's/^\(.*-F\) *\([^ ]*\).* \([^ ]*\)$/export _fzf_orig_completion_\3="\1 %s \3 #\2";/' |
|
||||||
awk -F= '{gsub(/[^a-z0-9_= ;]/, "_", $1); print $1"="$2}'
|
awk -F= '{gsub(/[^A-Za-z0-9_= ;]/, "_", $1); print $1"="$2}'
|
||||||
}
|
}
|
||||||
|
|
||||||
_fzf_opts_completion() {
|
_fzf_opts_completion() {
|
||||||
@@ -108,7 +108,7 @@ _fzf_handle_dynamic_completion() {
|
|||||||
elif [ -n "$_fzf_completion_loader" ]; then
|
elif [ -n "$_fzf_completion_loader" ]; then
|
||||||
_completion_loader "$@"
|
_completion_loader "$@"
|
||||||
ret=$?
|
ret=$?
|
||||||
eval "$(complete | \grep "\-F.* $orig_cmd$" | _fzf_orig_completion_filter)"
|
eval "$(complete | command grep "\-F.* $orig_cmd$" | _fzf_orig_completion_filter)"
|
||||||
source "${BASH_SOURCE[0]}"
|
source "${BASH_SOURCE[0]}"
|
||||||
return $ret
|
return $ret
|
||||||
fi
|
fi
|
||||||
@@ -117,7 +117,7 @@ _fzf_handle_dynamic_completion() {
|
|||||||
__fzf_generic_path_completion() {
|
__fzf_generic_path_completion() {
|
||||||
local cur base dir leftover matches trigger cmd fzf
|
local cur base dir leftover matches trigger cmd fzf
|
||||||
[ "${FZF_TMUX:-1}" != 0 ] && fzf="fzf-tmux -d ${FZF_TMUX_HEIGHT:-40%}" || fzf="fzf"
|
[ "${FZF_TMUX:-1}" != 0 ] && fzf="fzf-tmux -d ${FZF_TMUX_HEIGHT:-40%}" || fzf="fzf"
|
||||||
cmd=$(echo "${COMP_WORDS[0]}" | sed 's/[^a-z0-9_=]/_/g')
|
cmd="${COMP_WORDS[0]//[^A-Za-z0-9_=]/_}"
|
||||||
COMPREPLY=()
|
COMPREPLY=()
|
||||||
trigger=${FZF_COMPLETION_TRIGGER-'**'}
|
trigger=${FZF_COMPLETION_TRIGGER-'**'}
|
||||||
cur="${COMP_WORDS[COMP_CWORD]}"
|
cur="${COMP_WORDS[COMP_CWORD]}"
|
||||||
@@ -162,7 +162,7 @@ _fzf_complete() {
|
|||||||
type -t "$post" > /dev/null 2>&1 || post=cat
|
type -t "$post" > /dev/null 2>&1 || post=cat
|
||||||
[ "${FZF_TMUX:-1}" != 0 ] && fzf="fzf-tmux -d ${FZF_TMUX_HEIGHT:-40%}" || fzf="fzf"
|
[ "${FZF_TMUX:-1}" != 0 ] && fzf="fzf-tmux -d ${FZF_TMUX_HEIGHT:-40%}" || fzf="fzf"
|
||||||
|
|
||||||
cmd=$(echo "${COMP_WORDS[0]}" | sed 's/[^a-z0-9_=]/_/g')
|
cmd="${COMP_WORDS[0]//[^A-Za-z0-9_=]/_}"
|
||||||
trigger=${FZF_COMPLETION_TRIGGER-'**'}
|
trigger=${FZF_COMPLETION_TRIGGER-'**'}
|
||||||
cur="${COMP_WORDS[COMP_CWORD]}"
|
cur="${COMP_WORDS[COMP_CWORD]}"
|
||||||
if [[ "$cur" == *"$trigger" ]]; then
|
if [[ "$cur" == *"$trigger" ]]; then
|
||||||
@@ -213,15 +213,16 @@ _fzf_complete_kill() {
|
|||||||
|
|
||||||
_fzf_complete_telnet() {
|
_fzf_complete_telnet() {
|
||||||
_fzf_complete '+m' "$@" < <(
|
_fzf_complete '+m' "$@" < <(
|
||||||
\grep -v '^\s*\(#\|$\)' /etc/hosts | \grep -Fv '0.0.0.0' |
|
command grep -v '^\s*\(#\|$\)' /etc/hosts | command grep -Fv '0.0.0.0' |
|
||||||
awk '{if (length($2) > 0) {print $2}}' | sort -u
|
awk '{if (length($2) > 0) {print $2}}' | sort -u
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
_fzf_complete_ssh() {
|
_fzf_complete_ssh() {
|
||||||
_fzf_complete '+m' "$@" < <(
|
_fzf_complete '+m' "$@" < <(
|
||||||
cat <(cat ~/.ssh/config /etc/ssh/ssh_config 2> /dev/null | \grep -i '^host' | \grep -v '*') \
|
cat <(cat ~/.ssh/config /etc/ssh/ssh_config 2> /dev/null | command grep -i '^host' | command grep -v '*') \
|
||||||
<(\grep -v '^\s*\(#\|$\)' /etc/hosts | \grep -Fv '0.0.0.0') |
|
<(command grep -oE '^[^ ]+' ~/.ssh/known_hosts | tr ',' '\n' | awk '{ print $1 " " $1 }') \
|
||||||
|
<(command grep -v '^\s*\(#\|$\)' /etc/hosts | command grep -Fv '0.0.0.0') |
|
||||||
awk '{if (length($2) > 0) {print $2}}' | sort -u
|
awk '{if (length($2) > 0) {print $2}}' | sort -u
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
@@ -262,8 +263,8 @@ x_cmds="kill ssh telnet unset unalias export"
|
|||||||
# Preserve existing completion
|
# Preserve existing completion
|
||||||
if [ "$_fzf_completion_loaded" != '0.11.3' ]; then
|
if [ "$_fzf_completion_loaded" != '0.11.3' ]; then
|
||||||
# Really wish I could use associative array but OSX comes with bash 3.2 :(
|
# Really wish I could use associative array but OSX comes with bash 3.2 :(
|
||||||
eval $(complete | \grep '\-F' | \grep -v _fzf_ |
|
eval $(complete | command grep '\-F' | command grep -v _fzf_ |
|
||||||
\grep -E " ($(echo $d_cmds $a_cmds $x_cmds | sed 's/ /|/g' | sed 's/+/\\+/g'))$" | _fzf_orig_completion_filter)
|
command grep -E " ($(echo $d_cmds $a_cmds $x_cmds | sed 's/ /|/g' | sed 's/+/\\+/g'))$" | _fzf_orig_completion_filter)
|
||||||
export _fzf_completion_loaded=0.11.3
|
export _fzf_completion_loaded=0.11.3
|
||||||
fi
|
fi
|
||||||
|
|
||||||
@@ -272,14 +273,15 @@ if type _completion_loader > /dev/null 2>&1; then
|
|||||||
fi
|
fi
|
||||||
|
|
||||||
_fzf_defc() {
|
_fzf_defc() {
|
||||||
local cmd func opts orig_var orig
|
local cmd func opts orig_var orig def
|
||||||
cmd="$1"
|
cmd="$1"
|
||||||
func="$2"
|
func="$2"
|
||||||
opts="$3"
|
opts="$3"
|
||||||
orig_var="_fzf_orig_completion_$cmd"
|
orig_var="_fzf_orig_completion_${cmd//[^A-Za-z0-9_]/_}"
|
||||||
orig="${!orig_var}"
|
orig="${!orig_var}"
|
||||||
if [ -n "$orig" ]; then
|
if [ -n "$orig" ]; then
|
||||||
eval "$(printf "$orig" "$func")"
|
printf -v def "$orig" "$func"
|
||||||
|
eval "$def"
|
||||||
else
|
else
|
||||||
complete -F "$func" $opts "$cmd"
|
complete -F "$func" $opts "$cmd"
|
||||||
fi
|
fi
|
||||||
|
@@ -14,7 +14,7 @@
|
|||||||
if ! declare -f _fzf_compgen_path > /dev/null; then
|
if ! declare -f _fzf_compgen_path > /dev/null; then
|
||||||
_fzf_compgen_path() {
|
_fzf_compgen_path() {
|
||||||
echo "$1"
|
echo "$1"
|
||||||
\find -L "$1" \
|
command find -L "$1" \
|
||||||
-name .git -prune -o -name .svn -prune -o \( -type d -o -type f -o -type l \) \
|
-name .git -prune -o -name .svn -prune -o \( -type d -o -type f -o -type l \) \
|
||||||
-a -not -path "$1" -print 2> /dev/null | sed 's@^\./@@'
|
-a -not -path "$1" -print 2> /dev/null | sed 's@^\./@@'
|
||||||
}
|
}
|
||||||
@@ -22,7 +22,7 @@ fi
|
|||||||
|
|
||||||
if ! declare -f _fzf_compgen_dir > /dev/null; then
|
if ! declare -f _fzf_compgen_dir > /dev/null; then
|
||||||
_fzf_compgen_dir() {
|
_fzf_compgen_dir() {
|
||||||
\find -L "$1" \
|
command find -L "$1" \
|
||||||
-name .git -prune -o -name .svn -prune -o -type d \
|
-name .git -prune -o -name .svn -prune -o -type d \
|
||||||
-a -not -path "$1" -print 2> /dev/null | sed 's@^\./@@'
|
-a -not -path "$1" -print 2> /dev/null | sed 's@^\./@@'
|
||||||
}
|
}
|
||||||
@@ -31,7 +31,7 @@ fi
|
|||||||
###########################################################
|
###########################################################
|
||||||
|
|
||||||
__fzf_generic_path_completion() {
|
__fzf_generic_path_completion() {
|
||||||
local base lbuf compgen fzf_opts suffix tail fzf dir leftover matches nnm
|
local base lbuf compgen fzf_opts suffix tail fzf dir leftover matches
|
||||||
# (Q) flag removes a quoting level: "foo\ bar" => "foo bar"
|
# (Q) flag removes a quoting level: "foo\ bar" => "foo bar"
|
||||||
base=${(Q)1}
|
base=${(Q)1}
|
||||||
lbuf=$2
|
lbuf=$2
|
||||||
@@ -41,10 +41,7 @@ __fzf_generic_path_completion() {
|
|||||||
tail=$6
|
tail=$6
|
||||||
[ ${FZF_TMUX:-1} -eq 1 ] && fzf="fzf-tmux -d ${FZF_TMUX_HEIGHT:-40%}" || fzf="fzf"
|
[ ${FZF_TMUX:-1} -eq 1 ] && fzf="fzf-tmux -d ${FZF_TMUX_HEIGHT:-40%}" || fzf="fzf"
|
||||||
|
|
||||||
if ! setopt | \grep nonomatch > /dev/null; then
|
setopt localoptions nonomatch
|
||||||
nnm=1
|
|
||||||
setopt nonomatch
|
|
||||||
fi
|
|
||||||
dir="$base"
|
dir="$base"
|
||||||
while [ 1 ]; do
|
while [ 1 ]; do
|
||||||
if [ -z "$dir" -o -d ${~dir} ]; then
|
if [ -z "$dir" -o -d ${~dir} ]; then
|
||||||
@@ -54,19 +51,19 @@ __fzf_generic_path_completion() {
|
|||||||
[ "$dir" != "/" ] && dir="${dir/%\//}"
|
[ "$dir" != "/" ] && dir="${dir/%\//}"
|
||||||
dir=${~dir}
|
dir=${~dir}
|
||||||
matches=$(eval "$compgen $(printf %q "$dir")" | ${=fzf} ${=FZF_COMPLETION_OPTS} ${=fzf_opts} -q "$leftover" | while read item; do
|
matches=$(eval "$compgen $(printf %q "$dir")" | ${=fzf} ${=FZF_COMPLETION_OPTS} ${=fzf_opts} -q "$leftover" | while read item; do
|
||||||
printf "%q$suffix " "$item"
|
echo -n "${(q)item}$suffix "
|
||||||
done)
|
done)
|
||||||
matches=${matches% }
|
matches=${matches% }
|
||||||
if [ -n "$matches" ]; then
|
if [ -n "$matches" ]; then
|
||||||
LBUFFER="$lbuf$matches$tail"
|
LBUFFER="$lbuf$matches$tail"
|
||||||
fi
|
fi
|
||||||
zle redisplay
|
zle redisplay
|
||||||
|
typeset -f zle-line-init >/dev/null && zle zle-line-init
|
||||||
break
|
break
|
||||||
fi
|
fi
|
||||||
dir=$(dirname "$dir")
|
dir=$(dirname "$dir")
|
||||||
dir=${dir%/}/
|
dir=${dir%/}/
|
||||||
done
|
done
|
||||||
[ -n "$nnm" ] && unsetopt nonomatch
|
|
||||||
}
|
}
|
||||||
|
|
||||||
_fzf_path_completion() {
|
_fzf_path_completion() {
|
||||||
@@ -80,7 +77,7 @@ _fzf_dir_completion() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
_fzf_feed_fifo() (
|
_fzf_feed_fifo() (
|
||||||
rm -f "$1"
|
command rm -f "$1"
|
||||||
mkfifo "$1"
|
mkfifo "$1"
|
||||||
cat <&0 > "$1" &
|
cat <&0 > "$1" &
|
||||||
)
|
)
|
||||||
@@ -101,20 +98,22 @@ _fzf_complete() {
|
|||||||
LBUFFER="$lbuf$matches"
|
LBUFFER="$lbuf$matches"
|
||||||
fi
|
fi
|
||||||
zle redisplay
|
zle redisplay
|
||||||
rm -f "$fifo"
|
typeset -f zle-line-init >/dev/null && zle zle-line-init
|
||||||
|
command rm -f "$fifo"
|
||||||
}
|
}
|
||||||
|
|
||||||
_fzf_complete_telnet() {
|
_fzf_complete_telnet() {
|
||||||
_fzf_complete '+m' "$@" < <(
|
_fzf_complete '+m' "$@" < <(
|
||||||
\grep -v '^\s*\(#\|$\)' /etc/hosts | \grep -Fv '0.0.0.0' |
|
command grep -v '^\s*\(#\|$\)' /etc/hosts | command grep -Fv '0.0.0.0' |
|
||||||
awk '{if (length($2) > 0) {print $2}}' | sort -u
|
awk '{if (length($2) > 0) {print $2}}' | sort -u
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
_fzf_complete_ssh() {
|
_fzf_complete_ssh() {
|
||||||
_fzf_complete '+m' "$@" < <(
|
_fzf_complete '+m' "$@" < <(
|
||||||
cat <(cat ~/.ssh/config /etc/ssh/ssh_config 2> /dev/null | \grep -i '^host' | \grep -v '*') \
|
cat <(cat ~/.ssh/config /etc/ssh/ssh_config 2> /dev/null | command grep -i '^host' | command grep -v '*') \
|
||||||
<(\grep -v '^\s*\(#\|$\)' /etc/hosts | \grep -Fv '0.0.0.0') |
|
<(command grep -oE '^[^ ]+' ~/.ssh/known_hosts | tr ',' '\n' | awk '{ print $1 " " $1 }') \
|
||||||
|
<(command grep -v '^\s*\(#\|$\)' /etc/hosts | command grep -Fv '0.0.0.0') |
|
||||||
awk '{if (length($2) > 0) {print $2}}' | sort -u
|
awk '{if (length($2) > 0) {print $2}}' | sort -u
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
@@ -139,13 +138,13 @@ _fzf_complete_unalias() {
|
|||||||
|
|
||||||
fzf-completion() {
|
fzf-completion() {
|
||||||
local tokens cmd prefix trigger tail fzf matches lbuf d_cmds
|
local tokens cmd prefix trigger tail fzf matches lbuf d_cmds
|
||||||
setopt localoptions noshwordsplit
|
setopt localoptions noshwordsplit noksh_arrays
|
||||||
|
|
||||||
# http://zsh.sourceforge.net/FAQ/zshfaq03.html
|
# http://zsh.sourceforge.net/FAQ/zshfaq03.html
|
||||||
# http://zsh.sourceforge.net/Doc/Release/Expansion.html#Parameter-Expansion-Flags
|
# http://zsh.sourceforge.net/Doc/Release/Expansion.html#Parameter-Expansion-Flags
|
||||||
tokens=(${(z)LBUFFER})
|
tokens=(${(z)LBUFFER})
|
||||||
if [ ${#tokens} -lt 1 ]; then
|
if [ ${#tokens} -lt 1 ]; then
|
||||||
eval "zle ${fzf_default_completion:-expand-or-complete}"
|
zle ${fzf_default_completion:-expand-or-complete}
|
||||||
return
|
return
|
||||||
fi
|
fi
|
||||||
|
|
||||||
@@ -164,6 +163,7 @@ fzf-completion() {
|
|||||||
LBUFFER="$LBUFFER$matches"
|
LBUFFER="$LBUFFER$matches"
|
||||||
fi
|
fi
|
||||||
zle redisplay
|
zle redisplay
|
||||||
|
typeset -f zle-line-init >/dev/null && zle zle-line-init
|
||||||
# Trigger sequence given
|
# Trigger sequence given
|
||||||
elif [ ${#tokens} -gt 1 -a "$tail" = "$trigger" ]; then
|
elif [ ${#tokens} -gt 1 -a "$tail" = "$trigger" ]; then
|
||||||
d_cmds=(${=FZF_COMPLETION_DIR_COMMANDS:-cd pushd rmdir})
|
d_cmds=(${=FZF_COMPLETION_DIR_COMMANDS:-cd pushd rmdir})
|
||||||
@@ -180,12 +180,15 @@ fzf-completion() {
|
|||||||
fi
|
fi
|
||||||
# Fall back to default completion
|
# Fall back to default completion
|
||||||
else
|
else
|
||||||
eval "zle ${fzf_default_completion:-expand-or-complete}"
|
zle ${fzf_default_completion:-expand-or-complete}
|
||||||
fi
|
fi
|
||||||
}
|
}
|
||||||
|
|
||||||
[ -z "$fzf_default_completion" ] &&
|
[ -z "$fzf_default_completion" ] && {
|
||||||
fzf_default_completion=$(bindkey '^I' | \grep -v undefined-key | awk '{print $2}')
|
binding=$(bindkey '^I')
|
||||||
|
[[ $binding =~ 'undefined-key' ]] || fzf_default_completion=$binding[(s: :w)2]
|
||||||
|
unset binding
|
||||||
|
}
|
||||||
|
|
||||||
zle -N fzf-completion
|
zle -N fzf-completion
|
||||||
bindkey '^I' fzf-completion
|
bindkey '^I' fzf-completion
|
||||||
|
@@ -5,7 +5,7 @@ __fzf_select__() {
|
|||||||
-o -type f -print \
|
-o -type f -print \
|
||||||
-o -type d -print \
|
-o -type d -print \
|
||||||
-o -type l -print 2> /dev/null | sed 1d | cut -b3-"}"
|
-o -type l -print 2> /dev/null | sed 1d | cut -b3-"}"
|
||||||
eval "$cmd" | fzf -m | while read -r item; do
|
eval "$cmd | fzf -m $FZF_CTRL_T_OPTS" | while read -r item; do
|
||||||
printf '%q ' "$item"
|
printf '%q ' "$item"
|
||||||
done
|
done
|
||||||
echo
|
echo
|
||||||
@@ -26,14 +26,16 @@ __fzf_select_tmux__() {
|
|||||||
height="-l $height"
|
height="-l $height"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
tmux split-window $height "cd $(printf %q "$PWD"); FZF_DEFAULT_OPTS=$(printf %q "$FZF_DEFAULT_OPTS") PATH=$(printf %q "$PATH") FZF_CTRL_T_COMMAND=$(printf %q "$FZF_CTRL_T_COMMAND") bash -c 'source \"${BASH_SOURCE[0]}\"; tmux send-keys -t $TMUX_PANE \"\$(__fzf_select__)\"'"
|
tmux split-window $height "cd $(printf %q "$PWD"); FZF_DEFAULT_OPTS=$(printf %q "$FZF_DEFAULT_OPTS") PATH=$(printf %q "$PATH") FZF_CTRL_T_COMMAND=$(printf %q "$FZF_CTRL_T_COMMAND") FZF_CTRL_T_OPTS=$(printf %q "$FZF_CTRL_T_OPTS") bash -c 'source \"${BASH_SOURCE[0]}\"; RESULT=\"\$(__fzf_select__)\"; tmux setb -b fzf \"\$RESULT\" \\; pasteb -b fzf -t $TMUX_PANE \\; deleteb -b fzf || tmux send-keys -t $TMUX_PANE \"\$RESULT\"'"
|
||||||
}
|
}
|
||||||
|
|
||||||
__fzf_select_tmux_auto__() {
|
fzf-file-widget() {
|
||||||
if [ "${FZF_TMUX:-1}" != 0 ] && [ ${LINES:-40} -gt 15 ]; then
|
if __fzf_use_tmux__; then
|
||||||
__fzf_select_tmux__
|
__fzf_select_tmux__
|
||||||
else
|
else
|
||||||
tmux send-keys -t "$TMUX_PANE" "$(__fzf_select__)"
|
local selected="$(__fzf_select__)"
|
||||||
|
READLINE_LINE="${READLINE_LINE:0:$READLINE_POINT}$selected${READLINE_LINE:$READLINE_POINT}"
|
||||||
|
READLINE_POINT=$(( READLINE_POINT + ${#selected} ))
|
||||||
fi
|
fi
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -41,7 +43,7 @@ __fzf_cd__() {
|
|||||||
local cmd dir
|
local cmd dir
|
||||||
cmd="${FZF_ALT_C_COMMAND:-"command find -L . \\( -path '*/\\.*' -o -fstype 'dev' -o -fstype 'proc' \\) -prune \
|
cmd="${FZF_ALT_C_COMMAND:-"command find -L . \\( -path '*/\\.*' -o -fstype 'dev' -o -fstype 'proc' \\) -prune \
|
||||||
-o -type d -print 2> /dev/null | sed 1d | cut -b3-"}"
|
-o -type d -print 2> /dev/null | sed 1d | cut -b3-"}"
|
||||||
dir=$(eval "$cmd" | $(__fzfcmd) +m) && printf 'cd %q' "$dir"
|
dir=$(eval "$cmd | $(__fzfcmd) +m $FZF_ALT_C_OPTS") && printf 'cd %q' "$dir"
|
||||||
}
|
}
|
||||||
|
|
||||||
__fzf_history__() (
|
__fzf_history__() (
|
||||||
@@ -49,8 +51,8 @@ __fzf_history__() (
|
|||||||
shopt -u nocaseglob nocasematch
|
shopt -u nocaseglob nocasematch
|
||||||
line=$(
|
line=$(
|
||||||
HISTTIMEFORMAT= history |
|
HISTTIMEFORMAT= history |
|
||||||
$(__fzfcmd) +s --tac +m -n2..,.. --tiebreak=index --toggle-sort=ctrl-r |
|
eval "$(__fzfcmd) +s --tac +m -n2..,.. --tiebreak=index --toggle-sort=ctrl-r $FZF_CTRL_R_OPTS" |
|
||||||
\grep '^ *[0-9]') &&
|
command grep '^ *[0-9]') &&
|
||||||
if [[ $- =~ H ]]; then
|
if [[ $- =~ H ]]; then
|
||||||
sed 's/^ *\([0-9]*\)\** .*/!\1/' <<< "$line"
|
sed 's/^ *\([0-9]*\)\** .*/!\1/' <<< "$line"
|
||||||
else
|
else
|
||||||
@@ -58,21 +60,21 @@ __fzf_history__() (
|
|||||||
fi
|
fi
|
||||||
)
|
)
|
||||||
|
|
||||||
__use_tmux=0
|
__fzf_use_tmux__() {
|
||||||
__use_tmux_auto=0
|
[ -n "$TMUX_PANE" ] && [ "${FZF_TMUX:-1}" != 0 ] && [ ${LINES:-40} -gt 15 ]
|
||||||
if [ -n "$TMUX_PANE" ]; then
|
}
|
||||||
[ "${FZF_TMUX:-1}" != 0 ] && [ ${LINES:-40} -gt 15 ] && __use_tmux=1
|
|
||||||
[ $BASH_VERSINFO -gt 3 ] && __use_tmux_auto=1
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [ -z "$(set -o | \grep '^vi.*on')" ]; then
|
[ $BASH_VERSINFO -gt 3 ] && __use_bind_x=1 || __use_bind_x=0
|
||||||
|
__fzf_use_tmux__ && __use_tmux=1 || __use_tmux=0
|
||||||
|
|
||||||
|
if [[ ! -o vi ]]; then
|
||||||
# Required to refresh the prompt after fzf
|
# Required to refresh the prompt after fzf
|
||||||
bind '"\er": redraw-current-line'
|
bind '"\er": redraw-current-line'
|
||||||
bind '"\e^": history-expand-line'
|
bind '"\e^": history-expand-line'
|
||||||
|
|
||||||
# CTRL-T - Paste the selected file path into the command line
|
# CTRL-T - Paste the selected file path into the command line
|
||||||
if [ $__use_tmux_auto -eq 1 ]; then
|
if [ $__use_bind_x -eq 1 ]; then
|
||||||
bind -x '"\C-t": "__fzf_select_tmux_auto__"'
|
bind -x '"\C-t": "fzf-file-widget"'
|
||||||
elif [ $__use_tmux -eq 1 ]; then
|
elif [ $__use_tmux -eq 1 ]; then
|
||||||
bind '"\C-t": " \C-u \C-a\C-k$(__fzf_select_tmux__)\e\C-e\C-y\C-a\C-d\C-y\ey\C-h"'
|
bind '"\C-t": " \C-u \C-a\C-k$(__fzf_select_tmux__)\e\C-e\C-y\C-a\C-d\C-y\ey\C-h"'
|
||||||
else
|
else
|
||||||
@@ -80,35 +82,44 @@ if [ -z "$(set -o | \grep '^vi.*on')" ]; then
|
|||||||
fi
|
fi
|
||||||
|
|
||||||
# CTRL-R - Paste the selected command from history into the command line
|
# CTRL-R - Paste the selected command from history into the command line
|
||||||
bind '"\C-r": " \C-e\C-u$(__fzf_history__)\e\C-e\e^\er"'
|
bind '"\C-r": " \C-e\C-u`__fzf_history__`\e\C-e\e^\er"'
|
||||||
|
|
||||||
# ALT-C - cd into the selected directory
|
# ALT-C - cd into the selected directory
|
||||||
bind '"\ec": " \C-e\C-u$(__fzf_cd__)\e\C-e\er\C-m"'
|
bind '"\ec": " \C-e\C-u`__fzf_cd__`\e\C-e\er\C-m"'
|
||||||
else
|
else
|
||||||
|
# We'd usually use "\e" to enter vi-movement-mode so we can do our magic,
|
||||||
|
# but this incurs a very noticeable delay of a half second or so,
|
||||||
|
# because many other commands start with "\e".
|
||||||
|
# Instead, we bind an unused key, "\C-x\C-a",
|
||||||
|
# to also enter vi-movement-mode,
|
||||||
|
# and then use that thereafter.
|
||||||
|
# (We imagine that "\C-x\C-a" is relatively unlikely to be in use.)
|
||||||
|
bind '"\C-x\C-a": vi-movement-mode'
|
||||||
|
|
||||||
bind '"\C-x\C-e": shell-expand-line'
|
bind '"\C-x\C-e": shell-expand-line'
|
||||||
bind '"\C-x\C-r": redraw-current-line'
|
bind '"\C-x\C-r": redraw-current-line'
|
||||||
bind '"\C-x^": history-expand-line'
|
bind '"\C-x^": history-expand-line'
|
||||||
|
|
||||||
# CTRL-T - Paste the selected file path into the command line
|
# CTRL-T - Paste the selected file path into the command line
|
||||||
# - FIXME: Selected items are attached to the end regardless of cursor position
|
# - FIXME: Selected items are attached to the end regardless of cursor position
|
||||||
if [ $__use_tmux_auto -eq 1 ]; then
|
if [ $__use_bind_x -eq 1 ]; then
|
||||||
bind -x '"\C-t": "__fzf_select_tmux_auto__"'
|
bind -x '"\C-t": "fzf-file-widget"'
|
||||||
elif [ $__use_tmux -eq 1 ]; then
|
elif [ $__use_tmux -eq 1 ]; then
|
||||||
bind '"\C-t": "\e$a \eddi$(__fzf_select_tmux__)\C-x\C-e\e0P$xa"'
|
bind '"\C-t": "\C-x\C-a$a \C-x\C-addi$(__fzf_select_tmux__)\C-x\C-e\C-x\C-a0P$xa"'
|
||||||
else
|
else
|
||||||
bind '"\C-t": "\e$a \eddi$(__fzf_select__)\C-x\C-e\e0Px$a \C-x\C-r\exa "'
|
bind '"\C-t": "\C-x\C-a$a \C-x\C-addi$(__fzf_select__)\C-x\C-e\C-x\C-a0Px$a \C-x\C-r\C-x\C-axa "'
|
||||||
fi
|
fi
|
||||||
bind -m vi-command '"\C-t": "i\C-t"'
|
bind -m vi-command '"\C-t": "i\C-t"'
|
||||||
|
|
||||||
# CTRL-R - Paste the selected command from history into the command line
|
# CTRL-R - Paste the selected command from history into the command line
|
||||||
bind '"\C-r": "\eddi$(__fzf_history__)\C-x\C-e\C-x^\e$a\C-x\C-r"'
|
bind '"\C-r": "\C-x\C-addi$(__fzf_history__)\C-x\C-e\C-x^\C-x\C-a$a\C-x\C-r"'
|
||||||
bind -m vi-command '"\C-r": "i\C-r"'
|
bind -m vi-command '"\C-r": "i\C-r"'
|
||||||
|
|
||||||
# ALT-C - cd into the selected directory
|
# ALT-C - cd into the selected directory
|
||||||
bind '"\ec": "\eddi$(__fzf_cd__)\C-x\C-e\C-x\C-r\C-m"'
|
bind '"\ec": "\C-x\C-addi$(__fzf_cd__)\C-x\C-e\C-x\C-r\C-m"'
|
||||||
bind -m vi-command '"\ec": "i\ec"'
|
bind -m vi-command '"\ec": "ddi$(__fzf_cd__)\C-x\C-e\C-x\C-r\C-m"'
|
||||||
fi
|
fi
|
||||||
|
|
||||||
unset -v __use_tmux __use_tmux_auto
|
unset -v __use_tmux __use_bind_x
|
||||||
|
|
||||||
fi
|
fi
|
||||||
|
@@ -13,32 +13,31 @@ function fzf_key_bindings
     end
   end

-  function __fzf_ctrl_t
+  function fzf-file-widget
    set -q FZF_CTRL_T_COMMAND; or set -l FZF_CTRL_T_COMMAND "
    command find -L . \\( -path '*/\\.*' -o -fstype 'dev' -o -fstype 'proc' \\) -prune \
    -o -type f -print \
    -o -type d -print \
    -o -type l -print 2> /dev/null | sed 1d | cut -b3-"
-    eval "$FZF_CTRL_T_COMMAND | "(__fzfcmd)" -m > $TMPDIR/fzf.result"
+    eval "$FZF_CTRL_T_COMMAND | "(__fzfcmd)" -m $FZF_CTRL_T_OPTS > $TMPDIR/fzf.result"
-    and sleep 0
+    and for i in (seq 20); commandline -i (cat $TMPDIR/fzf.result | __fzf_escape) 2> /dev/null; and break; sleep 0.1; end
-    and commandline -i (cat $TMPDIR/fzf.result | __fzf_escape)
     commandline -f repaint
     rm -f $TMPDIR/fzf.result
   end

-  function __fzf_ctrl_r
+  function fzf-history-widget
-    history | eval (__fzfcmd) +s +m --tiebreak=index --toggle-sort=ctrl-r > $TMPDIR/fzf.result
+    history | eval (__fzfcmd) +s +m --tiebreak=index --toggle-sort=ctrl-r $FZF_CTRL_R_OPTS > $TMPDIR/fzf.result
     and commandline (cat $TMPDIR/fzf.result)
     commandline -f repaint
     rm -f $TMPDIR/fzf.result
   end

-  function __fzf_alt_c
+  function fzf-cd-widget
    set -q FZF_ALT_C_COMMAND; or set -l FZF_ALT_C_COMMAND "
    command find -L . \\( -path '*/\\.*' -o -fstype 'dev' -o -fstype 'proc' \\) -prune \
    -o -type d -print 2> /dev/null | sed 1d | cut -b3-"
     # Fish hangs if the command before pipe redirects (2> /dev/null)
-    eval "$FZF_ALT_C_COMMAND | "(__fzfcmd)" +m > $TMPDIR/fzf.result"
+    eval "$FZF_ALT_C_COMMAND | "(__fzfcmd)" +m $FZF_ALT_C_OPTS > $TMPDIR/fzf.result"
     [ (cat $TMPDIR/fzf.result | wc -l) -gt 0 ]
     and cd (cat $TMPDIR/fzf.result)
     commandline -f repaint
@@ -59,14 +58,14 @@ function fzf_key_bindings
     end
   end

-  bind \ct '__fzf_ctrl_t'
+  bind \ct fzf-file-widget
-  bind \cr '__fzf_ctrl_r'
+  bind \cr fzf-history-widget
-  bind \ec '__fzf_alt_c'
+  bind \ec fzf-cd-widget

   if bind -M insert > /dev/null 2>&1
-    bind -M insert \ct '__fzf_ctrl_t'
+    bind -M insert \ct fzf-file-widget
-    bind -M insert \cr '__fzf_ctrl_r'
+    bind -M insert \cr fzf-history-widget
-    bind -M insert \ec '__fzf_alt_c'
+    bind -M insert \ec fzf-cd-widget
   end
 end

@@ -8,10 +8,13 @@ __fsel() {
    -o -type f -print \
    -o -type d -print \
    -o -type l -print 2> /dev/null | sed 1d | cut -b3-"}"
-  eval "$cmd" | $(__fzfcmd) -m | while read item; do
-    printf '%q ' "$item"
+  setopt localoptions pipefail 2> /dev/null
+  eval "$cmd | $(__fzfcmd) -m $FZF_CTRL_T_OPTS" | while read item; do
+    echo -n "${(q)item} "
   done
+  local ret=$?
   echo
+  return $ret
 }

 __fzfcmd() {
@@ -20,7 +23,10 @@ __fzfcmd() {

 fzf-file-widget() {
   LBUFFER="${LBUFFER}$(__fsel)"
+  local ret=$?
   zle redisplay
+  typeset -f zle-line-init >/dev/null && zle zle-line-init
+  return $ret
 }
 zle -N fzf-file-widget
 bindkey '^T' fzf-file-widget
@@ -29,8 +35,12 @@ bindkey '^T' fzf-file-widget
 fzf-cd-widget() {
   local cmd="${FZF_ALT_C_COMMAND:-"command find -L . \\( -path '*/\\.*' -o -fstype 'dev' -o -fstype 'proc' \\) -prune \
    -o -type d -print 2> /dev/null | sed 1d | cut -b3-"}"
-  cd "${$(eval "$cmd" | $(__fzfcmd) +m):-.}"
+  setopt localoptions pipefail 2> /dev/null
+  cd "${$(eval "$cmd | $(__fzfcmd) +m $FZF_ALT_C_OPTS"):-.}"
+  local ret=$?
   zle reset-prompt
+  typeset -f zle-line-init >/dev/null && zle zle-line-init
+  return $ret
 }
 zle -N fzf-cd-widget
 bindkey '\ec' fzf-cd-widget
@@ -38,7 +48,9 @@ bindkey '\ec' fzf-cd-widget
 # CTRL-R - Paste the selected command from history into the command line
 fzf-history-widget() {
   local selected num
-  selected=( $(fc -l 1 | $(__fzfcmd) +s --tac +m -n2..,.. --tiebreak=index --toggle-sort=ctrl-r -q "${LBUFFER//$/\\$}") )
+  setopt localoptions noglobsubst pipefail 2> /dev/null
+  selected=( $(fc -l 1 | eval "$(__fzfcmd) +s --tac +m -n2..,.. --tiebreak=index --toggle-sort=ctrl-r $FZF_CTRL_R_OPTS -q ${(q)LBUFFER}") )
+  local ret=$?
   if [ -n "$selected" ]; then
     num=$selected[1]
     if [ -n "$num" ]; then
@@ -46,6 +58,8 @@ fzf-history-widget() {
     fi
   fi
   zle redisplay
+  typeset -f zle-line-init >/dev/null && zle zle-line-init
+  return $ret
 }
 zle -N fzf-history-widget
 bindkey '^R' fzf-history-widget

@@ -11,14 +11,18 @@ RUN cd / && curl \
     https://storage.googleapis.com/golang/go1.4.2.linux-amd64.tar.gz | \
     tar -xz && mv go go1.4

-# Install Go 1.5
+# Install Go 1.7
 RUN cd / && curl \
-    https://storage.googleapis.com/golang/go1.5.3.linux-amd64.tar.gz | \
+    https://storage.googleapis.com/golang/go1.7.linux-amd64.tar.gz | \
-    tar -xz && mv go go1.5
+    tar -xz && mv go go1.7

+# Install RPMs for building static 32-bit binary
+RUN curl ftp://ftp.pbone.net/mirror/ftp.centos.org/6.8/os/i386/Packages/ncurses-static-5.7-4.20090207.el6.i686.rpm -o rpm && rpm -i rpm && \
+    curl ftp://ftp.pbone.net/mirror/ftp.centos.org/6.8/os/i386/Packages/gpm-static-1.20.6-12.el6.i686.rpm -o rpm && rpm -i rpm

 ENV GOROOT_BOOTSTRAP /go1.4
-ENV GOROOT /go1.5
+ENV GOROOT /go1.7
-ENV PATH /go1.5/bin:$PATH
+ENV PATH /go1.7/bin:$PATH

 # For i386 build
 RUN cd $GOROOT/src && GOARCH=386 ./make.bash

src/Makefile (37 changes)
@@ -7,10 +7,6 @@ else ifeq ($(UNAME_S),Linux)
 endif
 endif

-ifneq ($(shell uname -m),x86_64)
-$(error "Build on $(UNAME_M) is not supported, yet.")
-endif

 SOURCES := $(wildcard *.go */*.go)
 ROOTDIR := $(shell dirname $(realpath $(lastword $(MAKEFILE_LIST))))
 BINDIR := $(shell dirname $(ROOTDIR))/bin
@@ -26,15 +22,22 @@ RELEASE64 := fzf-$(VERSION)-$(GOOS)_amd64
 RELEASEARM7 := fzf-$(VERSION)-$(GOOS)_arm7
 export GOPATH

-all: release
+UNAME_M := $(shell uname -m)
+ifeq ($(UNAME_M),x86_64)
+BINARY := $(BINARY64)
+else ifeq ($(UNAME_M),i686)
+BINARY := $(BINARY32)
+else
+$(error "Build on $(UNAME_M) is not supported, yet.")
+endif

-release: test build
+all: fzf/$(BINARY)

+release: test fzf/$(BINARY32) fzf/$(BINARY64)
     -cd fzf && cp $(BINARY32) $(RELEASE32) && tar -czf $(RELEASE32).tgz $(RELEASE32)
     cd fzf && cp $(BINARY64) $(RELEASE64) && tar -czf $(RELEASE64).tgz $(RELEASE64) && \
     rm -f $(RELEASE32) $(RELEASE64)

-build: fzf/$(BINARY32) fzf/$(BINARY64)

 $(SRCDIR):
     mkdir -p $(shell dirname $(SRCDIR))
     ln -s $(ROOTDIR) $(SRCDIR)
@@ -44,7 +47,7 @@ deps: $(SRCDIR) $(SOURCES)

 android-build: $(SRCDIR)
     cd $(SRCDIR) && GOARCH=arm GOARM=7 CGO_ENABLED=1 go get
-    cd $(SRCDIR)/fzf && GOARCH=arm GOARM=7 CGO_ENABLED=1 go build -a -ldflags="-extldflags=-pie" -o $(BINARYARM7)
+    cd $(SRCDIR)/fzf && GOARCH=arm GOARM=7 CGO_ENABLED=1 go build -a -ldflags="-w -extldflags=-pie" -o $(BINARYARM7)
     cd $(SRCDIR)/fzf && cp $(BINARYARM7) $(RELEASEARM7) && tar -czf $(RELEASEARM7).tgz $(RELEASEARM7) && \
     rm -f $(RELEASEARM7)

@@ -54,20 +57,20 @@ test: deps
 install: $(BINDIR)/fzf

 uninstall:
-    rm -f $(BINDIR)/fzf $(BINDIR)/$(BINARY64)
+    rm -f $(BINDIR)/fzf $(BINDIR)/$(BINARY)

 clean:
     cd fzf && rm -f fzf-*

 fzf/$(BINARY32): deps
-    cd fzf && GOARCH=386 CGO_ENABLED=1 go build -a -o $(BINARY32)
+    cd fzf && GOARCH=386 CGO_ENABLED=1 go build -a -ldflags -w -tags "$(TAGS)" -o $(BINARY32)

 fzf/$(BINARY64): deps
-    cd fzf && go build -a -tags "$(TAGS)" -o $(BINARY64)
+    cd fzf && go build -a -ldflags -w -tags "$(TAGS)" -o $(BINARY64)

-$(BINDIR)/fzf: fzf/$(BINARY64) | $(BINDIR)
+$(BINDIR)/fzf: fzf/$(BINARY) | $(BINDIR)
-    cp -f fzf/$(BINARY64) $(BINDIR)
+    cp -f fzf/$(BINARY) $(BINDIR)
-    cd $(BINDIR) && ln -sf $(BINARY64) fzf
+    cd $(BINDIR) && ln -sf $(BINARY) fzf

 $(BINDIR):
     mkdir -p $@
@@ -98,7 +101,7 @@ centos: docker-centos

 linux: docker-centos
     docker run $(DOCKEROPTS) junegunn/centos-sandbox \
-        /bin/bash -ci 'cd /fzf/src; make TAGS=static'
+        /bin/bash -ci 'cd /fzf/src; make TAGS=static release'

 ubuntu-android: docker-android
     docker run $(DOCKEROPTS) junegunn/android-sandbox \
@@ -108,6 +111,6 @@ android: docker-android
     docker run $(DOCKEROPTS) junegunn/android-sandbox \
     /bin/bash -ci 'cd /fzf/src; GOOS=android make android-build'

-.PHONY: all build deps release test install uninstall clean \
+.PHONY: all deps release test install uninstall clean \
     linux arch ubuntu centos docker-arch docker-ubuntu docker-centos \
     android-build docker-android ubuntu-android android

@@ -47,39 +47,12 @@ proportional to the number of CPU cores. On my MacBook Pro (Mid 2012), the new
 version was shown to be an order of magnitude faster on certain cases. It also
 starts much faster though the difference may not be noticeable.

-Differences with Ruby version
------------------------------
-
-The Go version is designed to be perfectly compatible with the previous Ruby
-version. The only behavioral difference is that the new version ignores the
-numeric argument to `--sort=N` option and always sorts the result regardless
-of the number of matches. The value was introduced to limit the response time
-of the query, but the Go version is blazingly fast (almost instant response
-even for 1M+ items) so I decided that it's no longer required.
-
-System requirements
--------------------
-
-Currently, prebuilt binaries are provided only for OS X and Linux. The install
-script will fall back to the legacy Ruby version on the other systems, but if
-you have Go 1.4 installed, you can try building it yourself.
-
-However, as pointed out in [golang.org/doc/install][req], the Go version may
-not run on CentOS/RHEL 5.x, and if that's the case, the install script will
-choose the Ruby version instead.
-
-The Go version depends on [ncurses][ncurses] and some Unix system calls, so it
-shouldn't run natively on Windows at the moment. But it won't be impossible to
-support Windows by falling back to a cross-platform alternative such as
-[termbox][termbox] only on Windows. If you're interested in making fzf work on
-Windows, please let me know.
-
 Build
 -----

 ```sh
 # Build fzf executables and tarballs
-make
+make release

 # Install the executable to ../bin directory
 make install
@@ -88,16 +61,22 @@ make install
 make linux
 ```

-Contribution
+Test
-------------
+----

-For the time being, I will not add or accept any new features until we can be
-sure that the implementation is stable and we have a sufficient number of test
-cases. However, fixes for obvious bugs and new test cases are welcome.
+Unit tests can be run with `make test`. Integration tests are written in Ruby
+script that should be run on tmux.

-I also care much about the performance of the implementation, so please make
-sure that your change does not result in performance regression. And please be
-noted that we don't have a quantitative measure of the performance yet.
+```sh
+# Unit tests
+make test

+# Install the executable to ../bin directory
+make install

+# Integration tests
+ruby ../test/test_go.rb
+```

 Third-party libraries used
 --------------------------

src/algo/algo.go (631 changes)
@@ -1,51 +1,507 @@
 package algo

+/*
+
+Algorithm
+---------
+
+FuzzyMatchV1 finds the first "fuzzy" occurrence of the pattern within the given
+text in O(n) time where n is the length of the text. Once the position of the
+last character is located, it traverses backwards to see if there's a shorter
+substring that matches the pattern.
+
+    a_____b___abc__  To find "abc"
+    *-----*-----*>   1. Forward scan
+             <***    2. Backward scan
+
+The algorithm is simple and fast, but as it only sees the first occurrence,
+it is not guaranteed to find the occurrence with the highest score.
+
+    a_____b__c__abc
+    *-----*--*  ***
+
+FuzzyMatchV2 implements a modified version of Smith-Waterman algorithm to find
+the optimal solution (highest score) according to the scoring criteria. Unlike
+the original algorithm, omission or mismatch of a character in the pattern is
+not allowed.
+
+Performance
+-----------
+
+The new V2 algorithm is slower than V1 as it examines all occurrences of the
+pattern instead of stopping immediately after finding the first one. The time
+complexity of the algorithm is O(nm) if a match is found and O(n) otherwise
+where n is the length of the item and m is the length of the pattern. Thus, the
+performance overhead may not be noticeable for a query with high selectivity.
+However, if the performance is more important than the quality of the result,
+you can still choose v1 algorithm with --algo=v1.
+
+Scoring criteria
+----------------
+
+- We prefer matches at special positions, such as the start of a word, or
+  uppercase character in camelCase words.
+
+- That is, we prefer an occurrence of the pattern with more characters
+  matching at special positions, even if the total match length is longer.
+    e.g. "fuzzyfinder" vs. "fuzzy-finder" on "ff"
+                            ````````````
+- Also, if the first character in the pattern appears at one of the special
+  positions, the bonus point for the position is multiplied by a constant
+  as it is extremely likely that the first character in the typed pattern
+  has more significance than the rest.
+    e.g. "fo-bar" vs. "foob-r" on "br"
+          ``````
+- But since fzf is still a fuzzy finder, not an acronym finder, we should also
+  consider the total length of the matched substring. This is why we have the
+  gap penalty. The gap penalty increases as the length of the gap (distance
+  between the matching characters) increases, so the effect of the bonus is
+  eventually cancelled at some point.
+    e.g. "fuzzyfinder" vs. "fuzzy-blurry-finder" on "ff"
+          ```````````
+- Consequently, it is crucial to find the right balance between the bonus
+  and the gap penalty. The parameters were chosen that the bonus is cancelled
+  when the gap size increases beyond 8 characters.
+
+- The bonus mechanism can have the undesirable side effect where consecutive
+  matches are ranked lower than the ones with gaps.
+    e.g. "foobar" vs. "foo-bar" on "foob"
+                       ```````
+- To correct this anomaly, we also give extra bonus point to each character
+  in a consecutive matching chunk.
+    e.g. "foobar" vs. "foo-bar" on "foob"
+          ``````
+- The amount of consecutive bonus is primarily determined by the bonus of the
+  first character in the chunk.
+    e.g. "foobar" vs. "out-of-bound" on "oob"
+                       ````````````
+*/

 import (
+    "fmt"
     "strings"
     "unicode"

     "github.com/junegunn/fzf/src/util"
 )

-/*
+var DEBUG bool
- * String matching algorithms here do not use strings.ToLower to avoid
- * performance penalty. And they assume pattern runes are given in lowercase
- * letters when caseSensitive is false.
- *
- * In short: They try to do as little work as possible.
- */

-func runeAt(runes []rune, index int, max int, forward bool) rune {
+func indexAt(index int, max int, forward bool) int {
     if forward {
-        return runes[index]
+        return index
     }
-    return runes[max-index-1]
+    return max - index - 1
 }

-// FuzzyMatch performs fuzzy-match
+// Result contains the results of running a match function.
-func FuzzyMatch(caseSensitive bool, forward bool, runes []rune, pattern []rune) (int, int) {
+type Result struct {
-    if len(pattern) == 0 {
+    // TODO int32 should suffice
-        return 0, 0
+    Start int
+    End   int
+    Score int
+}

+const (
+    scoreMatch        = 16
+    scoreGapStart     = -3
+    scoreGapExtention = -1

+    // We prefer matches at the beginning of a word, but the bonus should not be
+    // too great to prevent the longer acronym matches from always winning over
+    // shorter fuzzy matches. The bonus point here was specifically chosen that
+    // the bonus is cancelled when the gap between the acronyms grows over
+    // 8 characters, which is approximately the average length of the words found
+    // in web2 dictionary and my file system.
+    bonusBoundary = scoreMatch / 2

+    // Although bonus point for non-word characters is non-contextual, we need it
+    // for computing bonus points for consecutive chunks starting with a non-word
+    // character.
+    bonusNonWord = scoreMatch / 2

+    // Edge-triggered bonus for matches in camelCase words.
+    // Compared to word-boundary case, they don't accompany single-character gaps
+    // (e.g. FooBar vs. foo-bar), so we deduct bonus point accordingly.
+    bonusCamel123 = bonusBoundary + scoreGapExtention

+    // Minimum bonus point given to characters in consecutive chunks.
+    // Note that bonus points for consecutive matches shouldn't have needed if we
+    // used fixed match score as in the original algorithm.
+    bonusConsecutive = -(scoreGapStart + scoreGapExtention)

+    // The first character in the typed pattern usually has more significance
+    // than the rest so it's important that it appears at special positions where
+    // bonus points are given. e.g. "to-go" vs. "ongoing" on "og" or on "ogo".
+    // The amount of the extra bonus should be limited so that the gap penalty is
+    // still respected.
+    bonusFirstCharMultiplier = 2
+)
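
To make the trade-off between bonus and gap penalty concrete, the following standalone Go snippet (not part of the diff) evaluates two of the score formulas that the accompanying algo_test.go asserts, plugging in the constant values defined above. Only the constants and the formulas come from the source; the variable names `spaced` and `packed` are ours.

```go
package main

import "fmt"

func main() {
	// Values copied from the const block above; scoreGapExtention keeps the
	// original spelling used in the source.
	const (
		scoreMatch               = 16
		scoreGapStart            = -3
		scoreGapExtention        = -1
		bonusBoundary            = scoreMatch / 2
		bonusFirstCharMultiplier = 2
	)
	// "fbb" against "foo bar baz": all three pattern characters land on word
	// boundaries, so each earns the boundary bonus (doubled for the first one),
	// while the two 3-character gaps are penalized.
	spaced := scoreMatch*3 + bonusBoundary*bonusFirstCharMultiplier +
		bonusBoundary*2 + 2*scoreGapStart + 4*scoreGapExtention
	// "fbb" against "foo barbaz": the last 'b' sits in the middle of a word,
	// earns no bonus, and the match therefore scores lower.
	packed := scoreMatch*3 + bonusBoundary*bonusFirstCharMultiplier +
		bonusBoundary + scoreGapStart*2 + scoreGapExtention*3
	fmt.Println(spaced, packed) // 70 63
}
```

The two totals (70 vs. 63) show the word-boundary bonus outweighing an equally sized gap penalty, which is exactly the preference described in the scoring criteria above.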
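The two-pass scan that the header comment above illustrates for FuzzyMatchV1 can also be sketched as a tiny standalone Go program. This is only an illustration of the idea under simplified assumptions, not the fzf implementation: it ignores scoring, the util.Chars abstraction, and the forward flag, and the function name `greedyMatch` is ours.

```go
package main

import (
	"fmt"
	"unicode"
)

// greedyMatch mirrors the two-pass idea sketched in the header comment: a
// forward scan finds the first fuzzy occurrence of pattern, then a backward
// scan from its end looks for a shorter substring that still matches.
func greedyMatch(text, pattern []rune) (int, int) {
	if len(pattern) == 0 {
		return 0, 0
	}
	// Forward scan: locate the end of the first occurrence.
	pidx, start, end := 0, -1, -1
	for i, r := range text {
		if unicode.ToLower(r) == unicode.ToLower(pattern[pidx]) {
			if start < 0 {
				start = i
			}
			pidx++
			if pidx == len(pattern) {
				end = i + 1
				break
			}
		}
	}
	if end < 0 {
		return -1, -1 // no match
	}
	// Backward scan: walk back from the end and tighten the start position.
	pidx = len(pattern) - 1
	for i := end - 1; i >= start; i-- {
		if unicode.ToLower(text[i]) == unicode.ToLower(pattern[pidx]) {
			pidx--
			if pidx < 0 {
				start = i
				break
			}
		}
	}
	return start, end
}

func main() {
	s, e := greedyMatch([]rune("a_____b___abc__"), []rune("abc"))
	fmt.Println(s, e) // prints 10 13
}
```

Running it on the example from the diagram prints `10 13`: the backward pass trims the initial span down to the final "abc", just as the comment describes.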
|
|
||||||
|
type charClass int
|
||||||
|
|
||||||
|
const (
|
||||||
|
charNonWord charClass = iota
|
||||||
|
charLower
|
||||||
|
charUpper
|
||||||
|
charLetter
|
||||||
|
charNumber
|
||||||
|
)
|
||||||
|
|
||||||
|
func posArray(withPos bool, len int) *[]int {
|
||||||
|
if withPos {
|
||||||
|
pos := make([]int, 0, len)
|
||||||
|
return &pos
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func alloc16(offset int, slab *util.Slab, size int, clear bool) (int, []int16) {
|
||||||
|
if slab != nil && cap(slab.I16) > offset+size {
|
||||||
|
slice := slab.I16[offset : offset+size]
|
||||||
|
if clear {
|
||||||
|
for idx := range slice {
|
||||||
|
slice[idx] = 0
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return offset + size, slice
|
||||||
|
}
|
||||||
|
return offset, make([]int16, size)
|
||||||
|
}
|
||||||
|
|
||||||
|
func alloc32(offset int, slab *util.Slab, size int, clear bool) (int, []int32) {
|
||||||
|
if slab != nil && cap(slab.I32) > offset+size {
|
||||||
|
slice := slab.I32[offset : offset+size]
|
||||||
|
if clear {
|
||||||
|
for idx := range slice {
|
||||||
|
slice[idx] = 0
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return offset + size, slice
|
||||||
|
}
|
||||||
|
return offset, make([]int32, size)
|
||||||
|
}
|
||||||
|
|
||||||
|
func charClassOfAscii(char rune) charClass {
|
||||||
|
if char >= 'a' && char <= 'z' {
|
||||||
|
return charLower
|
||||||
|
} else if char >= 'A' && char <= 'Z' {
|
||||||
|
return charUpper
|
||||||
|
} else if char >= '0' && char <= '9' {
|
||||||
|
return charNumber
|
||||||
|
}
|
||||||
|
return charNonWord
|
||||||
|
}
|
||||||
|
|
||||||
|
func charClassOfNonAscii(char rune) charClass {
|
||||||
|
if unicode.IsLower(char) {
|
||||||
|
return charLower
|
||||||
|
} else if unicode.IsUpper(char) {
|
||||||
|
return charUpper
|
||||||
|
} else if unicode.IsNumber(char) {
|
||||||
|
return charNumber
|
||||||
|
} else if unicode.IsLetter(char) {
|
||||||
|
return charLetter
|
||||||
|
}
|
||||||
|
return charNonWord
|
||||||
|
}
|
||||||
|
|
||||||
|
func charClassOf(char rune) charClass {
|
||||||
|
if char <= unicode.MaxASCII {
|
||||||
|
return charClassOfAscii(char)
|
||||||
|
}
|
||||||
|
return charClassOfNonAscii(char)
|
||||||
|
}
|
||||||
|
|
||||||
|
func bonusFor(prevClass charClass, class charClass) int16 {
|
||||||
|
if prevClass == charNonWord && class != charNonWord {
|
||||||
|
// Word boundary
|
||||||
|
return bonusBoundary
|
||||||
|
} else if prevClass == charLower && class == charUpper ||
|
||||||
|
prevClass != charNumber && class == charNumber {
|
||||||
|
// camelCase letter123
|
||||||
|
return bonusCamel123
|
||||||
|
} else if class == charNonWord {
|
||||||
|
return bonusNonWord
|
||||||
|
}
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
func bonusAt(input util.Chars, idx int) int16 {
|
||||||
|
if idx == 0 {
|
||||||
|
return bonusBoundary
|
||||||
|
}
|
||||||
|
return bonusFor(charClassOf(input.Get(idx-1)), charClassOf(input.Get(idx)))
|
||||||
|
}
|
||||||
|
|
||||||
|
type Algo func(caseSensitive bool, forward bool, input util.Chars, pattern []rune, withPos bool, slab *util.Slab) (Result, *[]int)
|
||||||
|
|
||||||
|
func FuzzyMatchV2(caseSensitive bool, forward bool, input util.Chars, pattern []rune, withPos bool, slab *util.Slab) (Result, *[]int) {
|
||||||
|
// Assume that pattern is given in lowercase if case-insensitive.
|
||||||
|
// First check if there's a match and calculate bonus for each position.
|
||||||
|
// If the input string is too long, consider finding the matching chars in
|
||||||
|
// this phase as well (non-optimal alignment).
|
||||||
|
N := input.Length()
|
||||||
|
M := len(pattern)
|
||||||
|
switch M {
|
||||||
|
case 0:
|
||||||
|
return Result{0, 0, 0}, posArray(withPos, M)
|
||||||
|
case 1:
|
||||||
|
return ExactMatchNaive(caseSensitive, forward, input, pattern[0:1], withPos, slab)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Since O(nm) algorithm can be prohibitively expensive for large input,
|
||||||
|
// we fall back to the greedy algorithm.
|
||||||
|
if slab != nil && N*M > cap(slab.I16) {
|
||||||
|
return FuzzyMatchV1(caseSensitive, forward, input, pattern, withPos, slab)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Reuse pre-allocated integer slice to avoid unnecessary sweeping of garbages
|
||||||
|
offset16 := 0
|
||||||
|
offset32 := 0
|
||||||
|
// Bonus point for each position
|
||||||
|
offset16, B := alloc16(offset16, slab, N, false)
|
||||||
|
// The first occurrence of each character in the pattern
|
||||||
|
offset32, F := alloc32(offset32, slab, M, false)
|
||||||
|
// Rune array
|
||||||
|
offset32, T := alloc32(offset32, slab, N, false)
|
||||||
|
|
||||||
|
// Phase 1. Check if there's a match and calculate bonus for each point
|
||||||
|
pidx, lastIdx, prevClass := 0, 0, charNonWord
|
||||||
|
for idx := 0; idx < N; idx++ {
|
||||||
|
char := input.Get(idx)
|
||||||
|
var class charClass
|
||||||
|
if char <= unicode.MaxASCII {
|
||||||
|
class = charClassOfAscii(char)
|
||||||
|
} else {
|
||||||
|
class = charClassOfNonAscii(char)
|
||||||
|
}
|
||||||
|
|
||||||
|
if !caseSensitive && class == charUpper {
|
||||||
|
if char <= unicode.MaxASCII {
|
||||||
|
char += 32
|
||||||
|
} else {
|
||||||
|
char = unicode.To(unicode.LowerCase, char)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
T[idx] = char
|
||||||
|
B[idx] = bonusFor(prevClass, class)
|
||||||
|
prevClass = class
|
||||||
|
|
||||||
|
if pidx < M {
|
||||||
|
if char == pattern[pidx] {
|
||||||
|
lastIdx = idx
|
||||||
|
F[pidx] = int32(idx)
|
||||||
|
pidx++
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
if char == pattern[M-1] {
|
||||||
|
lastIdx = idx
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if pidx != M {
|
||||||
|
return Result{-1, -1, 0}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Phase 2. Fill in score matrix (H)
|
||||||
|
// Unlike the original algorithm, we do not allow omission.
|
||||||
|
width := lastIdx - int(F[0]) + 1
|
||||||
|
offset16, H := alloc16(offset16, slab, width*M, false)
|
||||||
|
|
||||||
|
// Possible length of consecutive chunk at each position.
|
||||||
|
offset16, C := alloc16(offset16, slab, width*M, false)
|
||||||
|
|
||||||
|
maxScore, maxScorePos := int16(0), 0
|
||||||
|
for i := 0; i < M; i++ {
|
||||||
|
I := i * width
|
||||||
|
inGap := false
|
||||||
|
for j := int(F[i]); j <= lastIdx; j++ {
|
||||||
|
j0 := j - int(F[0])
|
||||||
|
var s1, s2, consecutive int16
|
||||||
|
|
||||||
|
if j > int(F[i]) {
|
||||||
|
if inGap {
|
||||||
|
s2 = H[I+j0-1] + scoreGapExtention
|
||||||
|
} else {
|
||||||
|
s2 = H[I+j0-1] + scoreGapStart
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if pattern[i] == T[j] {
|
||||||
|
var diag int16
|
||||||
|
if i > 0 && j0 > 0 {
|
||||||
|
diag = H[I-width+j0-1]
|
||||||
|
}
|
||||||
|
s1 = diag + scoreMatch
|
||||||
|
b := B[j]
|
||||||
|
if i > 0 {
|
||||||
|
// j > 0 if i > 0
|
||||||
|
consecutive = C[I-width+j0-1] + 1
|
||||||
|
// Break consecutive chunk
|
||||||
|
if b == bonusBoundary {
|
||||||
|
consecutive = 1
|
||||||
|
} else if consecutive > 1 {
|
||||||
|
b = util.Max16(b, util.Max16(bonusConsecutive, B[j-int(consecutive)+1]))
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
consecutive = 1
|
||||||
|
b *= bonusFirstCharMultiplier
|
||||||
|
}
|
||||||
|
if s1+b < s2 {
|
||||||
|
s1 += B[j]
|
||||||
|
consecutive = 0
|
||||||
|
} else {
|
||||||
|
s1 += b
|
||||||
|
}
|
||||||
|
}
|
||||||
|
C[I+j0] = consecutive
|
||||||
|
|
||||||
|
inGap = s1 < s2
|
||||||
|
score := util.Max16(util.Max16(s1, s2), 0)
|
||||||
|
if i == M-1 && (forward && score > maxScore || !forward && score >= maxScore) {
|
||||||
|
maxScore, maxScorePos = score, j
|
||||||
|
}
|
||||||
|
H[I+j0] = score
|
||||||
|
}
|
||||||
|
|
||||||
|
if DEBUG {
|
||||||
|
if i == 0 {
|
||||||
|
fmt.Print(" ")
|
||||||
|
for j := int(F[i]); j <= lastIdx; j++ {
|
||||||
|
fmt.Printf(" " + string(input.Get(j)) + " ")
|
||||||
|
}
|
||||||
|
fmt.Println()
|
||||||
|
}
|
||||||
|
fmt.Print(string(pattern[i]) + " ")
|
||||||
|
for idx := int(F[0]); idx < int(F[i]); idx++ {
|
||||||
|
fmt.Print(" 0 ")
|
||||||
|
}
|
||||||
|
for idx := int(F[i]); idx <= lastIdx; idx++ {
|
||||||
|
fmt.Printf("%2d ", H[i*width+idx-int(F[0])])
|
||||||
|
}
|
||||||
|
fmt.Println()
|
||||||
|
|
||||||
|
fmt.Print(" ")
|
||||||
|
for idx, p := range C[I : I+width] {
|
||||||
|
if idx+int(F[0]) < int(F[i]) {
|
||||||
|
p = 0
|
||||||
|
}
|
||||||
|
fmt.Printf("%2d ", p)
|
||||||
|
}
|
||||||
|
fmt.Println()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Phase 3. (Optional) Backtrace to find character positions
|
||||||
|
pos := posArray(withPos, M)
|
||||||
|
j := int(F[0])
|
||||||
|
if withPos {
|
||||||
|
i := M - 1
|
||||||
|
j = maxScorePos
|
||||||
|
preferMatch := true
|
||||||
|
for {
|
||||||
|
I := i * width
|
||||||
|
j0 := j - int(F[0])
|
||||||
|
s := H[I+j0]
|
||||||
|
|
||||||
|
var s1, s2 int16
|
||||||
|
if i > 0 && j >= int(F[i]) {
|
||||||
|
s1 = H[I-width+j0-1]
|
||||||
|
}
|
||||||
|
if j > int(F[i]) {
|
||||||
|
s2 = H[I+j0-1]
|
||||||
|
}
|
||||||
|
|
||||||
|
if s > s1 && (s > s2 || s == s2 && preferMatch) {
|
||||||
|
*pos = append(*pos, j)
|
||||||
|
if i == 0 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
i--
|
||||||
|
}
|
||||||
|
preferMatch = C[I+j0] > 1 || I+width+j0+1 < len(C) && C[I+width+j0+1] > 0
|
||||||
|
j--
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Start offset we return here is only relevant when begin tiebreak is used.
|
||||||
|
// However finding the accurate offset requires backtracking, and we don't
|
||||||
|
// want to pay extra cost for the option that has lost its importance.
|
||||||
|
return Result{j, maxScorePos + 1, int(maxScore)}, pos
|
||||||
|
}
|
||||||
|
|
||||||
|
// Implement the same sorting criteria as V2
|
||||||
|
func calculateScore(caseSensitive bool, text util.Chars, pattern []rune, sidx int, eidx int, withPos bool) (int, *[]int) {
|
||||||
|
pidx, score, inGap, consecutive, firstBonus := 0, 0, false, 0, int16(0)
|
||||||
|
pos := posArray(withPos, len(pattern))
|
||||||
|
prevClass := charNonWord
|
||||||
|
if sidx > 0 {
|
||||||
|
prevClass = charClassOf(text.Get(sidx - 1))
|
||||||
|
}
|
||||||
|
for idx := sidx; idx < eidx; idx++ {
|
||||||
|
char := text.Get(idx)
|
||||||
|
class := charClassOf(char)
|
||||||
|
if !caseSensitive {
|
||||||
|
if char >= 'A' && char <= 'Z' {
|
||||||
|
char += 32
|
||||||
|
} else if char > unicode.MaxASCII {
|
||||||
|
char = unicode.To(unicode.LowerCase, char)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if char == pattern[pidx] {
|
||||||
|
if withPos {
|
||||||
|
*pos = append(*pos, idx)
|
||||||
|
}
|
||||||
|
score += scoreMatch
|
||||||
|
bonus := bonusFor(prevClass, class)
|
||||||
|
if consecutive == 0 {
|
||||||
|
firstBonus = bonus
|
||||||
|
} else {
|
||||||
|
// Break consecutive chunk
|
||||||
|
if bonus == bonusBoundary {
|
||||||
|
firstBonus = bonus
|
||||||
|
}
|
||||||
|
bonus = util.Max16(util.Max16(bonus, firstBonus), bonusConsecutive)
|
||||||
|
}
|
||||||
|
if pidx == 0 {
|
||||||
|
score += int(bonus * bonusFirstCharMultiplier)
|
||||||
|
} else {
|
||||||
|
score += int(bonus)
|
||||||
|
}
|
||||||
|
inGap = false
|
||||||
|
consecutive++
|
||||||
|
pidx++
|
||||||
|
} else {
|
||||||
|
if inGap {
|
||||||
|
score += scoreGapExtention
|
||||||
|
} else {
|
||||||
|
score += scoreGapStart
|
||||||
|
}
|
||||||
|
inGap = true
|
||||||
|
consecutive = 0
|
||||||
|
firstBonus = 0
|
||||||
|
}
|
||||||
|
prevClass = class
|
||||||
|
}
|
||||||
|
return score, pos
|
||||||
|
}
|
||||||
|
|
||||||
|
// FuzzyMatchV1 performs fuzzy-match
|
||||||
|
func FuzzyMatchV1(caseSensitive bool, forward bool, text util.Chars, pattern []rune, withPos bool, slab *util.Slab) (Result, *[]int) {
|
||||||
|
if len(pattern) == 0 {
|
||||||
|
return Result{0, 0, 0}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// 0. (FIXME) How to find the shortest match?
|
|
||||||
// a_____b__c__abc
|
|
||||||
// ^^^^^^^^^^ ^^^
|
|
||||||
// 1. forward scan (abc)
|
|
||||||
// *-----*-----*>
|
|
||||||
// a_____b___abc__
|
|
||||||
// 2. reverse scan (cba)
|
|
||||||
// a_____b___abc__
|
|
||||||
// <***
|
|
||||||
pidx := 0
|
pidx := 0
|
||||||
sidx := -1
|
sidx := -1
|
||||||
eidx := -1
|
eidx := -1
|
||||||
|
|
||||||
lenRunes := len(runes)
|
lenRunes := text.Length()
|
||||||
lenPattern := len(pattern)
|
lenPattern := len(pattern)
|
||||||
|
|
||||||
for index := range runes {
|
for index := 0; index < lenRunes; index++ {
|
||||||
char := runeAt(runes, index, lenRunes, forward)
|
char := text.Get(indexAt(index, lenRunes, forward))
|
||||||
// This is considerably faster than blindly applying strings.ToLower to the
|
// This is considerably faster than blindly applying strings.ToLower to the
|
||||||
// whole string
|
// whole string
|
||||||
if !caseSensitive {
|
if !caseSensitive {
|
||||||
@@ -58,7 +514,7 @@ func FuzzyMatch(caseSensitive bool, forward bool, runes []rune, pattern []rune)
|
|||||||
char = unicode.To(unicode.LowerCase, char)
|
char = unicode.To(unicode.LowerCase, char)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
pchar := runeAt(pattern, pidx, lenPattern, forward)
|
pchar := pattern[indexAt(pidx, lenPattern, forward)]
|
||||||
if char == pchar {
|
if char == pchar {
|
||||||
if sidx < 0 {
|
if sidx < 0 {
|
||||||
sidx = index
|
sidx = index
|
||||||
@@ -73,7 +529,8 @@ func FuzzyMatch(caseSensitive bool, forward bool, runes []rune, pattern []rune)
|
|||||||
if sidx >= 0 && eidx >= 0 {
|
if sidx >= 0 && eidx >= 0 {
|
||||||
pidx--
|
pidx--
|
||||||
for index := eidx - 1; index >= sidx; index-- {
|
for index := eidx - 1; index >= sidx; index-- {
|
||||||
char := runeAt(runes, index, lenRunes, forward)
|
tidx := indexAt(index, lenRunes, forward)
|
||||||
|
char := text.Get(tidx)
|
||||||
if !caseSensitive {
|
if !caseSensitive {
|
||||||
if char >= 'A' && char <= 'Z' {
|
if char >= 'A' && char <= 'Z' {
|
||||||
char += 32
|
char += 32
|
||||||
@@ -82,7 +539,8 @@ func FuzzyMatch(caseSensitive bool, forward bool, runes []rune, pattern []rune)
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pchar := runeAt(pattern, pidx, lenPattern, forward)
|
pidx_ := indexAt(pidx, lenPattern, forward)
|
||||||
|
pchar := pattern[pidx_]
|
||||||
if char == pchar {
|
if char == pchar {
|
||||||
if pidx--; pidx < 0 {
|
if pidx--; pidx < 0 {
|
||||||
sidx = index
|
sidx = index
|
||||||
@@ -90,12 +548,15 @@ func FuzzyMatch(caseSensitive bool, forward bool, runes []rune, pattern []rune)
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if forward {
|
|
||||||
return sidx, eidx
|
if !forward {
|
||||||
|
sidx, eidx = lenRunes-eidx, lenRunes-sidx
|
||||||
}
|
}
|
||||||
return lenRunes - eidx, lenRunes - sidx
|
|
||||||
|
score, pos := calculateScore(caseSensitive, text, pattern, sidx, eidx, withPos)
|
||||||
|
return Result{sidx, eidx, score}, pos
|
||||||
}
|
}
|
||||||
return -1, -1
|
return Result{-1, -1, 0}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// ExactMatchNaive is a basic string searching algorithm that handles case
|
// ExactMatchNaive is a basic string searching algorithm that handles case
|
||||||
@@ -103,23 +564,28 @@ func FuzzyMatch(caseSensitive bool, forward bool, runes []rune, pattern []rune)
|
|||||||
// of strings.ToLower + strings.Index for typical fzf use cases where input
|
// of strings.ToLower + strings.Index for typical fzf use cases where input
|
||||||
// strings and patterns are not very long.
|
// strings and patterns are not very long.
|
||||||
//
|
//
|
||||||
// We might try to implement better algorithms in the future:
|
// Since 0.15.0, this function searches for the match with the highest
|
||||||
// http://en.wikipedia.org/wiki/String_searching_algorithm
|
// bonus point, instead of stopping immediately after finding the first match.
|
||||||
func ExactMatchNaive(caseSensitive bool, forward bool, runes []rune, pattern []rune) (int, int) {
|
// The solution is much cheaper since there is only one possible alignment of
|
||||||
|
// the pattern.
|
||||||
|
func ExactMatchNaive(caseSensitive bool, forward bool, text util.Chars, pattern []rune, withPos bool, slab *util.Slab) (Result, *[]int) {
|
||||||
if len(pattern) == 0 {
|
if len(pattern) == 0 {
|
||||||
return 0, 0
|
return Result{0, 0, 0}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
lenRunes := len(runes)
|
lenRunes := text.Length()
|
||||||
lenPattern := len(pattern)
|
lenPattern := len(pattern)
|
||||||
|
|
||||||
if lenRunes < lenPattern {
|
if lenRunes < lenPattern {
|
||||||
return -1, -1
|
return Result{-1, -1, 0}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// For simplicity, only look at the bonus at the first character position
|
||||||
pidx := 0
|
pidx := 0
|
||||||
|
bestPos, bonus, bestBonus := -1, int16(0), int16(-1)
|
||||||
for index := 0; index < lenRunes; index++ {
|
for index := 0; index < lenRunes; index++ {
|
||||||
char := runeAt(runes, index, lenRunes, forward)
|
index_ := indexAt(index, lenRunes, forward)
|
||||||
|
char := text.Get(index_)
|
||||||
if !caseSensitive {
|
if !caseSensitive {
|
||||||
if char >= 'A' && char <= 'Z' {
|
if char >= 'A' && char <= 'Z' {
|
||||||
char += 32
|
char += 32
|
||||||
@@ -127,73 +593,108 @@ func ExactMatchNaive(caseSensitive bool, forward bool, runes []rune, pattern []r
|
|||||||
char = unicode.To(unicode.LowerCase, char)
|
char = unicode.To(unicode.LowerCase, char)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
pchar := runeAt(pattern, pidx, lenPattern, forward)
|
pidx_ := indexAt(pidx, lenPattern, forward)
|
||||||
|
pchar := pattern[pidx_]
|
||||||
if pchar == char {
|
if pchar == char {
|
||||||
|
if pidx_ == 0 {
|
||||||
|
bonus = bonusAt(text, index_)
|
||||||
|
}
|
||||||
pidx++
|
pidx++
|
||||||
if pidx == lenPattern {
|
if pidx == lenPattern {
|
||||||
if forward {
|
if bonus > bestBonus {
|
||||||
return index - lenPattern + 1, index + 1
|
bestPos, bestBonus = index, bonus
|
||||||
}
|
}
|
||||||
return lenRunes - (index + 1), lenRunes - (index - lenPattern + 1)
|
if bonus == bonusBoundary {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
index -= pidx - 1
|
||||||
|
pidx, bonus = 0, 0
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
index -= pidx
|
index -= pidx
|
||||||
pidx = 0
|
pidx, bonus = 0, 0
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return -1, -1
|
if bestPos >= 0 {
|
||||||
|
var sidx, eidx int
|
||||||
|
if forward {
|
||||||
|
sidx = bestPos - lenPattern + 1
|
||||||
|
eidx = bestPos + 1
|
||||||
|
} else {
|
||||||
|
sidx = lenRunes - (bestPos + 1)
|
||||||
|
eidx = lenRunes - (bestPos - lenPattern + 1)
|
||||||
|
}
|
||||||
|
score, _ := calculateScore(caseSensitive, text, pattern, sidx, eidx, false)
|
||||||
|
return Result{sidx, eidx, score}, nil
|
||||||
|
}
|
||||||
|
return Result{-1, -1, 0}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// PrefixMatch performs prefix-match
|
// PrefixMatch performs prefix-match
|
||||||
func PrefixMatch(caseSensitive bool, forward bool, runes []rune, pattern []rune) (int, int) {
|
func PrefixMatch(caseSensitive bool, forward bool, text util.Chars, pattern []rune, withPos bool, slab *util.Slab) (Result, *[]int) {
|
||||||
if len(runes) < len(pattern) {
|
if len(pattern) == 0 {
|
||||||
return -1, -1
|
return Result{0, 0, 0}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if text.Length() < len(pattern) {
|
||||||
|
return Result{-1, -1, 0}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
for index, r := range pattern {
|
for index, r := range pattern {
|
||||||
char := runes[index]
|
char := text.Get(index)
|
||||||
if !caseSensitive {
|
if !caseSensitive {
|
||||||
char = unicode.ToLower(char)
|
char = unicode.ToLower(char)
|
||||||
}
|
}
|
||||||
if char != r {
|
if char != r {
|
||||||
return -1, -1
|
return Result{-1, -1, 0}, nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return 0, len(pattern)
|
lenPattern := len(pattern)
|
||||||
|
score, _ := calculateScore(caseSensitive, text, pattern, 0, lenPattern, false)
|
||||||
|
return Result{0, lenPattern, score}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// SuffixMatch performs suffix-match
|
// SuffixMatch performs suffix-match
|
||||||
func SuffixMatch(caseSensitive bool, forward bool, input []rune, pattern []rune) (int, int) {
|
func SuffixMatch(caseSensitive bool, forward bool, text util.Chars, pattern []rune, withPos bool, slab *util.Slab) (Result, *[]int) {
|
||||||
runes := util.TrimRight(input)
|
lenRunes := text.Length()
|
||||||
trimmedLen := len(runes)
|
trimmedLen := lenRunes - text.TrailingWhitespaces()
|
||||||
|
if len(pattern) == 0 {
|
||||||
|
return Result{trimmedLen, trimmedLen, 0}, nil
|
||||||
|
}
|
||||||
diff := trimmedLen - len(pattern)
|
diff := trimmedLen - len(pattern)
|
||||||
if diff < 0 {
|
if diff < 0 {
|
||||||
return -1, -1
|
return Result{-1, -1, 0}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
for index, r := range pattern {
|
for index, r := range pattern {
|
||||||
char := runes[index+diff]
|
char := text.Get(index + diff)
|
||||||
if !caseSensitive {
|
if !caseSensitive {
|
||||||
char = unicode.ToLower(char)
|
char = unicode.ToLower(char)
|
||||||
}
|
}
|
||||||
if char != r {
|
if char != r {
|
||||||
return -1, -1
|
return Result{-1, -1, 0}, nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return trimmedLen - len(pattern), trimmedLen
|
lenPattern := len(pattern)
|
||||||
|
sidx := trimmedLen - lenPattern
|
||||||
|
eidx := trimmedLen
|
||||||
|
score, _ := calculateScore(caseSensitive, text, pattern, sidx, eidx, false)
|
||||||
|
return Result{sidx, eidx, score}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// EqualMatch performs equal-match
|
// EqualMatch performs equal-match
|
||||||
func EqualMatch(caseSensitive bool, forward bool, runes []rune, pattern []rune) (int, int) {
|
func EqualMatch(caseSensitive bool, forward bool, text util.Chars, pattern []rune, withPos bool, slab *util.Slab) (Result, *[]int) {
|
||||||
if len(runes) != len(pattern) {
|
lenPattern := len(pattern)
|
||||||
return -1, -1
|
if text.Length() != lenPattern {
|
||||||
|
return Result{-1, -1, 0}, nil
|
||||||
}
|
}
|
||||||
runesStr := string(runes)
|
runesStr := text.ToString()
|
||||||
if !caseSensitive {
|
if !caseSensitive {
|
||||||
runesStr = strings.ToLower(runesStr)
|
runesStr = strings.ToLower(runesStr)
|
||||||
}
|
}
|
||||||
if runesStr == string(pattern) {
|
if runesStr == string(pattern) {
|
||||||
return 0, len(pattern)
|
return Result{0, lenPattern, (scoreMatch+bonusBoundary)*lenPattern +
|
||||||
|
(bonusFirstCharMultiplier-1)*bonusBoundary}, nil
|
||||||
}
|
}
|
||||||
return -1, -1
|
return Result{-1, -1, 0}, nil
|
||||||
}
|
}
|
||||||
|
@@ -1,69 +1,166 @@
|
|||||||
package algo
|
package algo
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"math"
|
||||||
|
"sort"
|
||||||
"strings"
|
"strings"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
|
"github.com/junegunn/fzf/src/util"
|
||||||
)
|
)
|
||||||
|
|
||||||
func assertMatch(t *testing.T, fun func(bool, bool, []rune, []rune) (int, int), caseSensitive bool, forward bool, input string, pattern string, sidx int, eidx int) {
|
func assertMatch(t *testing.T, fun Algo, caseSensitive, forward bool, input, pattern string, sidx int, eidx int, score int) {
|
||||||
if !caseSensitive {
|
if !caseSensitive {
|
||||||
pattern = strings.ToLower(pattern)
|
pattern = strings.ToLower(pattern)
|
||||||
}
|
}
|
||||||
s, e := fun(caseSensitive, forward, []rune(input), []rune(pattern))
|
res, pos := fun(caseSensitive, forward, util.RunesToChars([]rune(input)), []rune(pattern), true, nil)
|
||||||
if s != sidx {
|
var start, end int
|
||||||
t.Errorf("Invalid start index: %d (expected: %d, %s / %s)", s, sidx, input, pattern)
|
if pos == nil || len(*pos) == 0 {
|
||||||
|
start = res.Start
|
||||||
|
end = res.End
|
||||||
|
} else {
|
||||||
|
sort.Ints(*pos)
|
||||||
|
start = (*pos)[0]
|
||||||
|
end = (*pos)[len(*pos)-1] + 1
|
||||||
}
|
}
|
||||||
if e != eidx {
|
if start != sidx {
|
||||||
t.Errorf("Invalid end index: %d (expected: %d, %s / %s)", e, eidx, input, pattern)
|
t.Errorf("Invalid start index: %d (expected: %d, %s / %s)", start, sidx, input, pattern)
|
||||||
|
}
|
||||||
|
if end != eidx {
|
||||||
|
t.Errorf("Invalid end index: %d (expected: %d, %s / %s)", end, eidx, input, pattern)
|
||||||
|
}
|
||||||
|
if res.Score != score {
|
||||||
|
t.Errorf("Invalid score: %d (expected: %d, %s / %s)", res.Score, score, input, pattern)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestFuzzyMatch(t *testing.T) {
|
func TestFuzzyMatch(t *testing.T) {
|
||||||
assertMatch(t, FuzzyMatch, false, true, "fooBarbaz", "oBZ", 2, 9)
|
for _, fn := range []Algo{FuzzyMatchV1, FuzzyMatchV2} {
|
||||||
assertMatch(t, FuzzyMatch, true, true, "fooBarbaz", "oBZ", -1, -1)
|
for _, forward := range []bool{true, false} {
|
||||||
assertMatch(t, FuzzyMatch, true, true, "fooBarbaz", "oBz", 2, 9)
|
assertMatch(t, fn, false, forward, "fooBarbaz1", "oBZ", 2, 9,
|
||||||
assertMatch(t, FuzzyMatch, true, true, "fooBarbaz", "fooBarbazz", -1, -1)
|
scoreMatch*3+bonusCamel123+scoreGapStart+scoreGapExtention*3)
|
||||||
|
assertMatch(t, fn, false, forward, "foo bar baz", "fbb", 0, 9,
|
||||||
|
scoreMatch*3+bonusBoundary*bonusFirstCharMultiplier+
|
||||||
|
bonusBoundary*2+2*scoreGapStart+4*scoreGapExtention)
|
||||||
|
assertMatch(t, fn, false, forward, "/AutomatorDocument.icns", "rdoc", 9, 13,
|
||||||
|
scoreMatch*4+bonusCamel123+bonusConsecutive*2)
|
||||||
|
assertMatch(t, fn, false, forward, "/man1/zshcompctl.1", "zshc", 6, 10,
|
||||||
|
scoreMatch*4+bonusBoundary*bonusFirstCharMultiplier+bonusBoundary*3)
|
||||||
|
assertMatch(t, fn, false, forward, "/.oh-my-zsh/cache", "zshc", 8, 13,
|
||||||
|
scoreMatch*4+bonusBoundary*bonusFirstCharMultiplier+bonusBoundary*3+scoreGapStart)
|
||||||
|
assertMatch(t, fn, false, forward, "ab0123 456", "12356", 3, 10,
|
||||||
|
scoreMatch*5+bonusConsecutive*3+scoreGapStart+scoreGapExtention)
|
||||||
|
assertMatch(t, fn, false, forward, "abc123 456", "12356", 3, 10,
|
||||||
|
scoreMatch*5+bonusCamel123*bonusFirstCharMultiplier+bonusCamel123*2+bonusConsecutive+scoreGapStart+scoreGapExtention)
|
||||||
|
assertMatch(t, fn, false, forward, "foo/bar/baz", "fbb", 0, 9,
|
||||||
|
scoreMatch*3+bonusBoundary*bonusFirstCharMultiplier+
|
||||||
|
bonusBoundary*2+2*scoreGapStart+4*scoreGapExtention)
|
||||||
|
assertMatch(t, fn, false, forward, "fooBarBaz", "fbb", 0, 7,
|
||||||
|
scoreMatch*3+bonusBoundary*bonusFirstCharMultiplier+
|
||||||
|
bonusCamel123*2+2*scoreGapStart+2*scoreGapExtention)
|
||||||
|
assertMatch(t, fn, false, forward, "foo barbaz", "fbb", 0, 8,
|
||||||
|
scoreMatch*3+bonusBoundary*bonusFirstCharMultiplier+bonusBoundary+
|
||||||
|
scoreGapStart*2+scoreGapExtention*3)
|
||||||
|
assertMatch(t, fn, false, forward, "fooBar Baz", "foob", 0, 4,
|
||||||
|
scoreMatch*4+bonusBoundary*bonusFirstCharMultiplier+bonusBoundary*3)
|
||||||
|
assertMatch(t, fn, false, forward, "xFoo-Bar Baz", "foo-b", 1, 6,
|
||||||
|
scoreMatch*5+bonusCamel123*bonusFirstCharMultiplier+bonusCamel123*2+
|
||||||
|
bonusNonWord+bonusBoundary)
|
||||||
|
|
||||||
|
assertMatch(t, fn, true, forward, "fooBarbaz", "oBz", 2, 9,
|
||||||
|
scoreMatch*3+bonusCamel123+scoreGapStart+scoreGapExtention*3)
|
||||||
|
assertMatch(t, fn, true, forward, "Foo/Bar/Baz", "FBB", 0, 9,
|
||||||
|
scoreMatch*3+bonusBoundary*(bonusFirstCharMultiplier+2)+
|
||||||
|
scoreGapStart*2+scoreGapExtention*4)
|
||||||
|
assertMatch(t, fn, true, forward, "FooBarBaz", "FBB", 0, 7,
|
||||||
|
scoreMatch*3+bonusBoundary*bonusFirstCharMultiplier+bonusCamel123*2+
|
||||||
|
scoreGapStart*2+scoreGapExtention*2)
|
||||||
|
assertMatch(t, fn, true, forward, "FooBar Baz", "FooB", 0, 4,
|
||||||
|
scoreMatch*4+bonusBoundary*bonusFirstCharMultiplier+bonusBoundary*2+
|
||||||
|
util.Max(bonusCamel123, bonusBoundary))
|
||||||
|
|
||||||
|
// Consecutive bonus updated
|
||||||
|
assertMatch(t, fn, true, forward, "foo-bar", "o-ba", 2, 6,
|
||||||
|
scoreMatch*4+bonusBoundary*3)
|
||||||
|
|
||||||
|
// Non-match
|
||||||
|
assertMatch(t, fn, true, forward, "fooBarbaz", "oBZ", -1, -1, 0)
|
||||||
|
assertMatch(t, fn, true, forward, "Foo Bar Baz", "fbb", -1, -1, 0)
|
||||||
|
assertMatch(t, fn, true, forward, "fooBarbaz", "fooBarbazz", -1, -1, 0)
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestFuzzyMatchBackward(t *testing.T) {
|
func TestFuzzyMatchBackward(t *testing.T) {
|
||||||
assertMatch(t, FuzzyMatch, false, true, "foobar fb", "fb", 0, 4)
|
assertMatch(t, FuzzyMatchV1, false, true, "foobar fb", "fb", 0, 4,
|
||||||
assertMatch(t, FuzzyMatch, false, false, "foobar fb", "fb", 7, 9)
|
scoreMatch*2+bonusBoundary*bonusFirstCharMultiplier+
|
||||||
|
scoreGapStart+scoreGapExtention)
|
||||||
|
assertMatch(t, FuzzyMatchV1, false, false, "foobar fb", "fb", 7, 9,
|
||||||
|
scoreMatch*2+bonusBoundary*bonusFirstCharMultiplier+bonusBoundary)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestExactMatchNaive(t *testing.T) {
|
func TestExactMatchNaive(t *testing.T) {
|
||||||
for _, dir := range []bool{true, false} {
|
for _, dir := range []bool{true, false} {
|
||||||
assertMatch(t, ExactMatchNaive, false, dir, "fooBarbaz", "oBA", 2, 5)
|
assertMatch(t, ExactMatchNaive, true, dir, "fooBarbaz", "oBA", -1, -1, 0)
|
||||||
assertMatch(t, ExactMatchNaive, true, dir, "fooBarbaz", "oBA", -1, -1)
|
assertMatch(t, ExactMatchNaive, true, dir, "fooBarbaz", "fooBarbazz", -1, -1, 0)
|
||||||
assertMatch(t, ExactMatchNaive, true, dir, "fooBarbaz", "fooBarbazz", -1, -1)
|
|
||||||
|
assertMatch(t, ExactMatchNaive, false, dir, "fooBarbaz", "oBA", 2, 5,
|
||||||
|
scoreMatch*3+bonusCamel123+bonusConsecutive)
|
||||||
|
assertMatch(t, ExactMatchNaive, false, dir, "/AutomatorDocument.icns", "rdoc", 9, 13,
|
||||||
|
scoreMatch*4+bonusCamel123+bonusConsecutive*2)
|
||||||
|
assertMatch(t, ExactMatchNaive, false, dir, "/man1/zshcompctl.1", "zshc", 6, 10,
|
||||||
|
scoreMatch*4+bonusBoundary*(bonusFirstCharMultiplier+3))
|
||||||
|
assertMatch(t, ExactMatchNaive, false, dir, "/.oh-my-zsh/cache", "zsh/c", 8, 13,
|
||||||
|
scoreMatch*5+bonusBoundary*(bonusFirstCharMultiplier+4))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestExactMatchNaiveBackward(t *testing.T) {
|
func TestExactMatchNaiveBackward(t *testing.T) {
|
||||||
assertMatch(t, FuzzyMatch, false, true, "foobar foob", "oo", 1, 3)
|
assertMatch(t, ExactMatchNaive, false, true, "foobar foob", "oo", 1, 3,
|
||||||
assertMatch(t, FuzzyMatch, false, false, "foobar foob", "oo", 8, 10)
|
scoreMatch*2+bonusConsecutive)
|
||||||
|
assertMatch(t, ExactMatchNaive, false, false, "foobar foob", "oo", 8, 10,
|
||||||
|
scoreMatch*2+bonusConsecutive)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestPrefixMatch(t *testing.T) {
|
func TestPrefixMatch(t *testing.T) {
|
||||||
|
score := (scoreMatch+bonusBoundary)*3 + bonusBoundary*(bonusFirstCharMultiplier-1)
|
||||||
|
|
||||||
for _, dir := range []bool{true, false} {
|
for _, dir := range []bool{true, false} {
|
||||||
assertMatch(t, PrefixMatch, false, dir, "fooBarbaz", "Foo", 0, 3)
|
assertMatch(t, PrefixMatch, true, dir, "fooBarbaz", "Foo", -1, -1, 0)
|
||||||
assertMatch(t, PrefixMatch, true, dir, "fooBarbaz", "Foo", -1, -1)
|
assertMatch(t, PrefixMatch, false, dir, "fooBarBaz", "baz", -1, -1, 0)
|
||||||
assertMatch(t, PrefixMatch, false, dir, "fooBarbaz", "baz", -1, -1)
|
assertMatch(t, PrefixMatch, false, dir, "fooBarbaz", "Foo", 0, 3, score)
|
||||||
|
assertMatch(t, PrefixMatch, false, dir, "foOBarBaZ", "foo", 0, 3, score)
|
||||||
|
assertMatch(t, PrefixMatch, false, dir, "f-oBarbaz", "f-o", 0, 3, score)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestSuffixMatch(t *testing.T) {
|
func TestSuffixMatch(t *testing.T) {
|
||||||
for _, dir := range []bool{true, false} {
|
for _, dir := range []bool{true, false} {
|
||||||
assertMatch(t, SuffixMatch, false, dir, "fooBarbaz", "Foo", -1, -1)
|
assertMatch(t, SuffixMatch, true, dir, "fooBarbaz", "Baz", -1, -1, 0)
|
||||||
assertMatch(t, SuffixMatch, false, dir, "fooBarbaz", "baz", 6, 9)
|
assertMatch(t, SuffixMatch, false, dir, "fooBarbaz", "Foo", -1, -1, 0)
|
||||||
assertMatch(t, SuffixMatch, true, dir, "fooBarbaz", "Baz", -1, -1)
|
|
||||||
|
assertMatch(t, SuffixMatch, false, dir, "fooBarbaz", "baz", 6, 9,
|
||||||
|
scoreMatch*3+bonusConsecutive*2)
|
||||||
|
assertMatch(t, SuffixMatch, false, dir, "fooBarBaZ", "baz", 6, 9,
|
||||||
|
(scoreMatch+bonusCamel123)*3+bonusCamel123*(bonusFirstCharMultiplier-1))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestEmptyPattern(t *testing.T) {
|
func TestEmptyPattern(t *testing.T) {
|
||||||
for _, dir := range []bool{true, false} {
|
for _, dir := range []bool{true, false} {
|
||||||
assertMatch(t, FuzzyMatch, true, dir, "foobar", "", 0, 0)
|
assertMatch(t, FuzzyMatchV1, true, dir, "foobar", "", 0, 0, 0)
|
||||||
assertMatch(t, ExactMatchNaive, true, dir, "foobar", "", 0, 0)
|
assertMatch(t, FuzzyMatchV2, true, dir, "foobar", "", 0, 0, 0)
|
||||||
assertMatch(t, PrefixMatch, true, dir, "foobar", "", 0, 0)
|
assertMatch(t, ExactMatchNaive, true, dir, "foobar", "", 0, 0, 0)
|
||||||
assertMatch(t, SuffixMatch, true, dir, "foobar", "", 6, 6)
|
assertMatch(t, PrefixMatch, true, dir, "foobar", "", 0, 0, 0)
|
||||||
|
assertMatch(t, SuffixMatch, true, dir, "foobar", "", 6, 6, 0)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestLongString(t *testing.T) {
|
||||||
|
bytes := make([]byte, math.MaxUint16*2)
|
||||||
|
for i := range bytes {
|
||||||
|
bytes[i] = 'x'
|
||||||
|
}
|
||||||
|
bytes[math.MaxUint16] = 'z'
|
||||||
|
assertMatch(t, FuzzyMatchV2, true, true, string(bytes), "zx", math.MaxUint16, math.MaxUint16+2, scoreMatch*2+bonusConsecutive)
|
||||||
|
}
|
||||||
|
src/ansi.go (48 changed lines)

@@ -6,6 +6,8 @@ import (
 	"strconv"
 	"strings"
 	"unicode/utf8"
+
+	"github.com/junegunn/fzf/src/curses"
 )

 type ansiOffset struct {
@@ -16,27 +18,27 @@ type ansiOffset struct {
 type ansiState struct {
 	fg   int
 	bg   int
-	bold bool
+	attr curses.Attr
 }

 func (s *ansiState) colored() bool {
-	return s.fg != -1 || s.bg != -1 || s.bold
+	return s.fg != -1 || s.bg != -1 || s.attr > 0
 }

 func (s *ansiState) equals(t *ansiState) bool {
 	if t == nil {
 		return !s.colored()
 	}
-	return s.fg == t.fg && s.bg == t.bg && s.bold == t.bold
+	return s.fg == t.fg && s.bg == t.bg && s.attr == t.attr
 }

 var ansiRegex *regexp.Regexp

 func init() {
-	ansiRegex = regexp.MustCompile("\x1b\\[[0-9;]*[mK]")
+	ansiRegex = regexp.MustCompile("\x1b.[0-9;]*.")
 }

-func extractColor(str string, state *ansiState) (string, []ansiOffset, *ansiState) {
+func extractColor(str string, state *ansiState, proc func(string, *ansiState) bool) (string, *[]ansiOffset, *ansiState) {
 	var offsets []ansiOffset
 	var output bytes.Buffer

@@ -46,7 +48,11 @@ func extractColor(str string, state *ansiState, proc func(string, *ansiState) bool)
 	idx := 0
 	for _, offset := range ansiRegex.FindAllStringIndex(str, -1) {
-		output.WriteString(str[idx:offset[0]])
+		prev := str[idx:offset[0]]
+		output.WriteString(prev)
+		if proc != nil && !proc(prev, state) {
+			return "", nil, nil
+		}
 		newState := interpretCode(str[offset[0]:offset[1]], state)

 		if !newState.equals(state) {
@@ -77,18 +83,24 @@ func extractColor(str string, state *ansiState, proc func(string, *ansiState) bool)
 			(&offsets[len(offsets)-1]).offset[1] = int32(utf8.RuneCount(output.Bytes()))
 		}
 	}
-	return output.String(), offsets, state
+	if proc != nil {
+		proc(rest, state)
+	}
+	if len(offsets) == 0 {
+		return output.String(), nil, state
+	}
+	return output.String(), &offsets, state
 }

 func interpretCode(ansiCode string, prevState *ansiState) *ansiState {
 	// State
 	var state *ansiState
 	if prevState == nil {
-		state = &ansiState{-1, -1, false}
+		state = &ansiState{-1, -1, 0}
 	} else {
-		state = &ansiState{prevState.fg, prevState.bg, prevState.bold}
+		state = &ansiState{prevState.fg, prevState.bg, prevState.attr}
 	}
-	if ansiCode[len(ansiCode)-1] == 'K' {
+	if ansiCode[1] != '[' || ansiCode[len(ansiCode)-1] != 'm' {
 		return state
 	}

@@ -98,7 +110,7 @@ func interpretCode(ansiCode string, prevState *ansiState) *ansiState {
 	init := func() {
 		state.fg = -1
 		state.bg = -1
-		state.bold = false
+		state.attr = 0
 		state256 = 0
 	}

@@ -122,7 +134,15 @@ func interpretCode(ansiCode string, prevState *ansiState) *ansiState {
 			case 49:
 				state.bg = -1
 			case 1:
-				state.bold = true
+				state.attr = curses.Bold
+			case 2:
+				state.attr = curses.Dim
+			case 4:
+				state.attr = curses.Underline
+			case 5:
+				state.attr = curses.Blink
+			case 7:
+				state.attr = curses.Reverse
 			case 0:
 				init()
 			default:
@@ -130,6 +150,10 @@ func interpretCode(ansiCode string, prevState *ansiState) *ansiState {
 					state.fg = num - 30
 				} else if num >= 40 && num <= 47 {
 					state.bg = num - 40
+				} else if num >= 90 && num <= 97 {
+					state.fg = num - 90 + 8
+				} else if num >= 100 && num <= 107 {
+					state.bg = num - 100 + 8
 				}
 			}
 		case 1:
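The src/ansi.go changes above broaden the ANSI escape regex and thread an optional per-segment callback through extractColor. As a rough, standalone illustration of the same stripping idea (this is not fzf's internal API; the stripANSI helper and sample input are made up for this sketch):

package main

import (
	"fmt"
	"regexp"
)

// Mirrors the broadened pattern from the diff above: ESC, one byte, a run of
// digits or semicolons, one final byte - not just SGR ("m") and erase-line ("K").
var ansiRegex = regexp.MustCompile("\x1b.[0-9;]*.")

// stripANSI is a hypothetical helper that walks the matches and keeps only the
// plain text between escape sequences, the way extractColor accumulates output.
func stripANSI(s string) string {
	var out []byte
	idx := 0
	for _, m := range ansiRegex.FindAllStringIndex(s, -1) {
		out = append(out, s[idx:m[0]]...) // text before the escape sequence
		idx = m[1]                        // skip the sequence itself
	}
	return string(append(out, s[idx:]...))
}

func main() {
	fmt.Println(stripANSI("hello \x1b[34;45;1mworld\x1b[0m")) // hello world
}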
src/ansi_test.go (102 changed lines)

@@ -3,21 +3,27 @@ package fzf
 import (
 	"fmt"
 	"testing"
+
+	"github.com/junegunn/fzf/src/curses"
 )

 func TestExtractColor(t *testing.T) {
 	assert := func(offset ansiOffset, b int32, e int32, fg int, bg int, bold bool) {
+		var attr curses.Attr
+		if bold {
+			attr = curses.Bold
+		}
 		if offset.offset[0] != b || offset.offset[1] != e ||
-			offset.color.fg != fg || offset.color.bg != bg || offset.color.bold != bold {
-			t.Error(offset, b, e, fg, bg, bold)
+			offset.color.fg != fg || offset.color.bg != bg || offset.color.attr != attr {
+			t.Error(offset, b, e, fg, bg, attr)
 		}
 	}

 	src := "hello world"
 	var state *ansiState
 	clean := "\x1b[0m"
-	check := func(assertion func(ansiOffsets []ansiOffset, state *ansiState)) {
-		output, ansiOffsets, newState := extractColor(src, state)
+	check := func(assertion func(ansiOffsets *[]ansiOffset, state *ansiState)) {
+		output, ansiOffsets, newState := extractColor(src, state, nil)
 		state = newState
 		if output != "hello world" {
 			t.Errorf("Invalid output: {}", output)
@@ -26,127 +32,127 @@ func TestExtractColor(t *testing.T) {
 		assertion(ansiOffsets, state)
 	}

-	check(func(offsets []ansiOffset, state *ansiState) {
-		if len(offsets) > 0 {
+	check(func(offsets *[]ansiOffset, state *ansiState) {
+		if offsets != nil {
 			t.Fail()
 		}
 	})

 	state = nil
 	src = "\x1b[0mhello world"
-	check(func(offsets []ansiOffset, state *ansiState) {
-		if len(offsets) > 0 {
+	check(func(offsets *[]ansiOffset, state *ansiState) {
+		if offsets != nil {
 			t.Fail()
 		}
 	})

 	state = nil
 	src = "\x1b[1mhello world"
-	check(func(offsets []ansiOffset, state *ansiState) {
-		if len(offsets) != 1 {
+	check(func(offsets *[]ansiOffset, state *ansiState) {
+		if len(*offsets) != 1 {
 			t.Fail()
 		}
-		assert(offsets[0], 0, 11, -1, -1, true)
+		assert((*offsets)[0], 0, 11, -1, -1, true)
 	})

 	state = nil
 	src = "\x1b[1mhello \x1b[mworld"
-	check(func(offsets []ansiOffset, state *ansiState) {
-		if len(offsets) != 1 {
+	check(func(offsets *[]ansiOffset, state *ansiState) {
+		if len(*offsets) != 1 {
 			t.Fail()
 		}
-		assert(offsets[0], 0, 6, -1, -1, true)
+		assert((*offsets)[0], 0, 6, -1, -1, true)
 	})

 	state = nil
 	src = "\x1b[1mhello \x1b[Kworld"
-	check(func(offsets []ansiOffset, state *ansiState) {
-		if len(offsets) != 1 {
+	check(func(offsets *[]ansiOffset, state *ansiState) {
+		if len(*offsets) != 1 {
 			t.Fail()
 		}
-		assert(offsets[0], 0, 11, -1, -1, true)
+		assert((*offsets)[0], 0, 11, -1, -1, true)
 	})

 	state = nil
 	src = "hello \x1b[34;45;1mworld"
-	check(func(offsets []ansiOffset, state *ansiState) {
-		if len(offsets) != 1 {
+	check(func(offsets *[]ansiOffset, state *ansiState) {
+		if len(*offsets) != 1 {
 			t.Fail()
 		}
-		assert(offsets[0], 6, 11, 4, 5, true)
+		assert((*offsets)[0], 6, 11, 4, 5, true)
 	})

 	state = nil
 	src = "hello \x1b[34;45;1mwor\x1b[34;45;1mld"
-	check(func(offsets []ansiOffset, state *ansiState) {
-		if len(offsets) != 1 {
+	check(func(offsets *[]ansiOffset, state *ansiState) {
+		if len(*offsets) != 1 {
 			t.Fail()
 		}
-		assert(offsets[0], 6, 11, 4, 5, true)
+		assert((*offsets)[0], 6, 11, 4, 5, true)
 	})

 	state = nil
 	src = "hello \x1b[34;45;1mwor\x1b[0mld"
-	check(func(offsets []ansiOffset, state *ansiState) {
-		if len(offsets) != 1 {
+	check(func(offsets *[]ansiOffset, state *ansiState) {
+		if len(*offsets) != 1 {
 			t.Fail()
 		}
-		assert(offsets[0], 6, 9, 4, 5, true)
+		assert((*offsets)[0], 6, 9, 4, 5, true)
 	})

 	state = nil
 	src = "hello \x1b[34;48;5;233;1mwo\x1b[38;5;161mr\x1b[0ml\x1b[38;5;161md"
-	check(func(offsets []ansiOffset, state *ansiState) {
-		if len(offsets) != 3 {
+	check(func(offsets *[]ansiOffset, state *ansiState) {
+		if len(*offsets) != 3 {
 			t.Fail()
 		}
-		assert(offsets[0], 6, 8, 4, 233, true)
-		assert(offsets[1], 8, 9, 161, 233, true)
-		assert(offsets[2], 10, 11, 161, -1, false)
+		assert((*offsets)[0], 6, 8, 4, 233, true)
+		assert((*offsets)[1], 8, 9, 161, 233, true)
+		assert((*offsets)[2], 10, 11, 161, -1, false)
 	})

 	// {38,48};5;{38,48}
 	state = nil
 	src = "hello \x1b[38;5;38;48;5;48;1mwor\x1b[38;5;48;48;5;38ml\x1b[0md"
-	check(func(offsets []ansiOffset, state *ansiState) {
-		if len(offsets) != 2 {
+	check(func(offsets *[]ansiOffset, state *ansiState) {
+		if len(*offsets) != 2 {
 			t.Fail()
 		}
-		assert(offsets[0], 6, 9, 38, 48, true)
-		assert(offsets[1], 9, 10, 48, 38, true)
+		assert((*offsets)[0], 6, 9, 38, 48, true)
+		assert((*offsets)[1], 9, 10, 48, 38, true)
 	})

 	src = "hello \x1b[32;1mworld"
-	check(func(offsets []ansiOffset, state *ansiState) {
-		if len(offsets) != 1 {
+	check(func(offsets *[]ansiOffset, state *ansiState) {
+		if len(*offsets) != 1 {
 			t.Fail()
 		}
-		if state.fg != 2 || state.bg != -1 || !state.bold {
+		if state.fg != 2 || state.bg != -1 || state.attr == 0 {
 			t.Fail()
 		}
-		assert(offsets[0], 6, 11, 2, -1, true)
+		assert((*offsets)[0], 6, 11, 2, -1, true)
 	})

 	src = "hello world"
-	check(func(offsets []ansiOffset, state *ansiState) {
-		if len(offsets) != 1 {
+	check(func(offsets *[]ansiOffset, state *ansiState) {
+		if len(*offsets) != 1 {
 			t.Fail()
 		}
-		if state.fg != 2 || state.bg != -1 || !state.bold {
+		if state.fg != 2 || state.bg != -1 || state.attr == 0 {
 			t.Fail()
 		}
-		assert(offsets[0], 0, 11, 2, -1, true)
+		assert((*offsets)[0], 0, 11, 2, -1, true)
 	})

 	src = "hello \x1b[0;38;5;200;48;5;100mworld"
-	check(func(offsets []ansiOffset, state *ansiState) {
-		if len(offsets) != 2 {
+	check(func(offsets *[]ansiOffset, state *ansiState) {
+		if len(*offsets) != 2 {
 			t.Fail()
 		}
-		if state.fg != 200 || state.bg != 100 || state.bold {
+		if state.fg != 200 || state.bg != 100 || state.attr > 0 {
 			t.Fail()
 		}
-		assert(offsets[0], 0, 6, 2, -1, true)
-		assert(offsets[1], 6, 11, 200, 100, false)
+		assert((*offsets)[0], 0, 6, 2, -1, true)
+		assert((*offsets)[1], 6, 11, 200, 100, false)
 	})
 }
src/cache.go

@@ -3,7 +3,7 @@ package fzf
 import "sync"

 // queryCache associates strings to lists of items
-type queryCache map[string][]*Item
+type queryCache map[string][]*Result

 // ChunkCache associates Chunk and query string to lists of items
 type ChunkCache struct {
@@ -17,7 +17,7 @@ func NewChunkCache() ChunkCache {
 }

 // Add adds the list to the cache
-func (cc *ChunkCache) Add(chunk *Chunk, key string, list []*Item) {
+func (cc *ChunkCache) Add(chunk *Chunk, key string, list []*Result) {
 	if len(key) == 0 || !chunk.IsFull() || len(list) > queryCacheMax {
 		return
 	}
@@ -34,7 +34,7 @@ func (cc *ChunkCache) Add(chunk *Chunk, key string, list []*Result) {
 }

 // Find is called to lookup ChunkCache
-func (cc *ChunkCache) Find(chunk *Chunk, key string) ([]*Item, bool) {
+func (cc *ChunkCache) Find(chunk *Chunk, key string) ([]*Result, bool) {
 	if len(key) == 0 || !chunk.IsFull() {
 		return nil, false
 	}
src/cache_test.go

@@ -7,8 +7,8 @@ func TestChunkCache(t *testing.T) {
 	chunk2 := make(Chunk, chunkSize)
 	chunk1p := &Chunk{}
 	chunk2p := &chunk2
-	items1 := []*Item{&Item{}}
-	items2 := []*Item{&Item{}, &Item{}}
+	items1 := []*Result{&Result{}}
+	items2 := []*Result{&Result{}, &Result{}}
 	cache.Add(chunk1p, "foo", items1)
 	cache.Add(chunk2p, "foo", items1)
 	cache.Add(chunk2p, "bar", items2)
src/chunklist_test.go

@@ -3,14 +3,16 @@ package fzf
 import (
 	"fmt"
 	"testing"
+
+	"github.com/junegunn/fzf/src/util"
 )

 func TestChunkList(t *testing.T) {
 	// FIXME global
-	sortCriteria = []criterion{byMatchLen, byLength}
+	sortCriteria = []criterion{byScore, byLength}

 	cl := NewChunkList(func(s []byte, i int) *Item {
-		return &Item{text: []rune(string(s)), rank: buildEmptyRank(int32(i * 2))}
+		return &Item{text: util.ToChars(s), index: int32(i * 2)}
 	})

 	// Snapshot
@@ -39,11 +41,8 @@ func TestChunkList(t *testing.T) {
 	if len(*chunk1) != 2 {
 		t.Error("Snapshot should contain only two items")
 	}
-	last := func(arr [5]int32) int32 {
-		return arr[len(arr)-1]
-	}
-	if string((*chunk1)[0].text) != "hello" || last((*chunk1)[0].rank) != 0 ||
-		string((*chunk1)[1].text) != "world" || last((*chunk1)[1].rank) != 2 {
+	if (*chunk1)[0].text.ToString() != "hello" || (*chunk1)[0].index != 0 ||
+		(*chunk1)[1].text.ToString() != "world" || (*chunk1)[1].index != 2 {
 		t.Error("Invalid data")
 	}
 	if chunk1.IsFull() {
src/constants.go

@@ -8,26 +8,34 @@ import (
 const (
 	// Current version
-	version = "0.11.4"
+	version = "0.15.4"

 	// Core
 	coordinatorDelayMax  time.Duration = 100 * time.Millisecond
 	coordinatorDelayStep time.Duration = 10 * time.Millisecond

 	// Reader
 	defaultCommand = `find . -path '*/\.*' -prune -o -type f -print -o -type l -print 2> /dev/null | sed s/^..//`
+	readerBufferSize = 64 * 1024

 	// Terminal
 	initialDelay    = 20 * time.Millisecond
 	initialDelayTac = 100 * time.Millisecond
 	spinnerDuration = 200 * time.Millisecond
+	maxPatternLength = 100

 	// Matcher
-	progressMinDuration = 200 * time.Millisecond
+	numPartitionsMultiplier = 8
+	maxPartitions           = 32
+	progressMinDuration     = 200 * time.Millisecond

 	// Capacity of each chunk
 	chunkSize int = 100

+	// Pre-allocated memory slices to minimize GC
+	slab16Size int = 100 * 1024 // 200KB * 32 = 12.8MB
+	slab32Size int = 2048       // 8KB * 32 = 256KB
+
 	// Do not cache results of low selectivity queries
 	queryCacheMax int = chunkSize / 5

@@ -36,6 +44,9 @@ const (

 	// History
 	defaultHistoryMax int = 1000
+
+	// Jump labels
+	defaultJumpLabels string = "asdfghjklqwertyuiopzxcvbnm1234567890ASDFGHJKLQWERTYUIOPZXCVBNM`~;:,<.>/?'\"!@#$%^&*()[{]}-_=+"
 )

 // fzf events
src/core.go (73 changed lines)

@@ -28,16 +28,11 @@ package fzf
 import (
 	"fmt"
 	"os"
-	"runtime"
 	"time"

 	"github.com/junegunn/fzf/src/util"
 )

-func initProcs() {
-	runtime.GOMAXPROCS(runtime.NumCPU())
-}
-
 /*
 Reader   -> EvtReadFin
 Reader   -> EvtReadNew        -> Matcher  (restart)
@@ -49,8 +44,6 @@ Matcher  -> EvtHeader         -> Terminal (update header)

 // Run starts fzf
 func Run(opts *Options) {
-	initProcs()
-
 	sort := opts.Sort > 0
 	sortCriteria = opts.Criteria

@@ -63,29 +56,29 @@ func Run(opts *Options) {
 	eventBox := util.NewEventBox()

 	// ANSI code processor
-	ansiProcessor := func(data []byte) ([]rune, []ansiOffset) {
-		return util.BytesToRunes(data), nil
+	ansiProcessor := func(data []byte) (util.Chars, *[]ansiOffset) {
+		return util.ToChars(data), nil
 	}
-	ansiProcessorRunes := func(data []rune) ([]rune, []ansiOffset) {
-		return data, nil
+	ansiProcessorRunes := func(data []rune) (util.Chars, *[]ansiOffset) {
+		return util.RunesToChars(data), nil
 	}
 	if opts.Ansi {
 		if opts.Theme != nil {
 			var state *ansiState
-			ansiProcessor = func(data []byte) ([]rune, []ansiOffset) {
-				trimmed, offsets, newState := extractColor(string(data), state)
+			ansiProcessor = func(data []byte) (util.Chars, *[]ansiOffset) {
+				trimmed, offsets, newState := extractColor(string(data), state, nil)
 				state = newState
-				return []rune(trimmed), offsets
+				return util.RunesToChars([]rune(trimmed)), offsets
 			}
 		} else {
 			// When color is disabled but ansi option is given,
 			// we simply strip out ANSI codes from the input
-			ansiProcessor = func(data []byte) ([]rune, []ansiOffset) {
-				trimmed, _, _ := extractColor(string(data), nil)
-				return []rune(trimmed), nil
+			ansiProcessor = func(data []byte) (util.Chars, *[]ansiOffset) {
+				trimmed, _, _ := extractColor(string(data), nil, nil)
+				return util.RunesToChars([]rune(trimmed)), nil
 			}
 		}
-		ansiProcessorRunes = func(data []rune) ([]rune, []ansiOffset) {
+		ansiProcessorRunes = func(data []rune) (util.Chars, *[]ansiOffset) {
 			return ansiProcessor([]byte(string(data)))
 		}
 	}
@@ -100,29 +93,28 @@ func Run(opts *Options) {
 				eventBox.Set(EvtHeader, header)
 				return nil
 			}
-			runes, colors := ansiProcessor(data)
+			chars, colors := ansiProcessor(data)
 			return &Item{
-				text:   runes,
-				colors: colors,
-				rank:   buildEmptyRank(int32(index))}
+				index:  int32(index),
+				text:   chars,
+				colors: colors}
 		})
 	} else {
 		chunkList = NewChunkList(func(data []byte, index int) *Item {
-			runes := util.BytesToRunes(data)
-			tokens := Tokenize(runes, opts.Delimiter)
+			tokens := Tokenize(util.ToChars(data), opts.Delimiter)
 			trans := Transform(tokens, opts.WithNth)
 			if len(header) < opts.HeaderLines {
 				header = append(header, string(joinTokens(trans)))
 				eventBox.Set(EvtHeader, header)
 				return nil
 			}
+			textRunes := joinTokens(trans)
 			item := Item{
-				text:     joinTokens(trans),
-				origText: &runes,
-				colors:   nil,
-				rank:     buildEmptyRank(int32(index))}
+				index:    int32(index),
+				origText: &data,
+				colors:   nil}

-			trimmed, colors := ansiProcessorRunes(item.text)
+			trimmed, colors := ansiProcessorRunes(textRunes)
 			item.text = trimmed
 			item.colors = colors
 			return &item
@@ -151,27 +143,30 @@ func Run(opts *Options) {
 	}
 	patternBuilder := func(runes []rune) *Pattern {
 		return BuildPattern(
-			opts.Fuzzy, opts.Extended, opts.Case, forward,
-			opts.Nth, opts.Delimiter, runes)
+			opts.Fuzzy, opts.FuzzyAlgo, opts.Extended, opts.Case, forward,
+			opts.Filter == nil, opts.Nth, opts.Delimiter, runes)
 	}
 	matcher := NewMatcher(patternBuilder, sort, opts.Tac, eventBox)

 	// Filtering mode
 	if opts.Filter != nil {
 		if opts.PrintQuery {
-			fmt.Println(*opts.Filter)
+			opts.Printer(*opts.Filter)
 		}

 		pattern := patternBuilder([]rune(*opts.Filter))

 		found := false
 		if streamingFilter {
+			slab := util.MakeSlab(slab16Size, slab32Size)
 			reader := Reader{
 				func(runes []byte) bool {
 					item := chunkList.trans(runes, 0)
-					if item != nil && pattern.MatchItem(item) {
-						fmt.Println(string(item.text))
-						found = true
+					if item != nil {
+						if result, _, _ := pattern.MatchItem(item, false, slab); result != nil {
+							opts.Printer(item.text.ToString())
+							found = true
+						}
 					}
 					return false
 				}, eventBox, opts.ReadZero}
@@ -185,7 +180,7 @@ func Run(opts *Options) {
 				chunks:  snapshot,
 				pattern: pattern})
 			for i := 0; i < merger.Length(); i++ {
-				fmt.Println(merger.Get(i).AsString(opts.Ansi))
+				opts.Printer(merger.Get(i).item.AsString(opts.Ansi))
 				found = true
 			}
 		}
@@ -259,13 +254,13 @@ func Run(opts *Options) {
 			} else if val.final {
 				if opts.Exit0 && count == 0 || opts.Select1 && count == 1 {
 					if opts.PrintQuery {
-						fmt.Println(opts.Query)
+						opts.Printer(opts.Query)
 					}
 					if len(opts.Expect) > 0 {
-						fmt.Println()
+						opts.Printer("")
 					}
 					for i := 0; i < count; i++ {
-						fmt.Println(val.Get(i).AsString(opts.Ansi))
+						opts.Printer(val.Get(i).item.AsString(opts.Ansi))
 					}
 					if count > 0 {
 						os.Exit(exitOk)
src/curses/curses.go

@@ -23,6 +23,16 @@ import (
 	"unicode/utf8"
 )

+const (
+	Bold      = C.A_BOLD
+	Dim       = C.A_DIM
+	Blink     = C.A_BLINK
+	Reverse   = C.A_REVERSE
+	Underline = C.A_UNDERLINE
+)
+
+type Attr C.int
+
 // Types of user action
 const (
 	Rune = iota
@@ -80,7 +90,16 @@ const (
 	F2
 	F3
 	F4
+	F5
+	F6
+	F7
+	F8
+	F9
+	F10

+	AltEnter
+	AltSpace
+	AltSlash
 	AltBS
 	AltA
 	AltB
@@ -104,11 +123,14 @@ const (
 	ColCursor
 	ColSelected
 	ColHeader
-	ColUser
+	ColBorder
+	ColUser // Should be the last entry
 )

 const (
 	doubleClickDuration = 500 * time.Millisecond
+	colDefault          = -1
+	colUndefined        = -2
 )

 type ColorTheme struct {
@@ -125,6 +147,7 @@ type ColorTheme struct {
 	Cursor       int16
 	Selected     int16
 	Header       int16
+	Border       int16
 }

 type Event struct {
@@ -145,7 +168,7 @@ type MouseEvent struct {
 var (
 	_buf          []byte
 	_in           *os.File
-	_color        func(int, bool) C.int
+	_color        func(int, Attr) C.int
 	_colorMap     map[int]int
 	_prevDownTime time.Time
 	_clickY       []int
@@ -159,6 +182,49 @@ var (
 	DarkBG int
 )

+type Window struct {
+	win    *C.WINDOW
+	Top    int
+	Left   int
+	Width  int
+	Height int
+}
+
+func NewWindow(top int, left int, width int, height int, border bool) *Window {
+	win := C.newwin(C.int(height), C.int(width), C.int(top), C.int(left))
+	if border {
+		attr := _color(ColBorder, 0)
+		C.wattron(win, attr)
+		C.box(win, 0, 0)
+		C.wattroff(win, attr)
+	}
+	return &Window{
+		win:    win,
+		Top:    top,
+		Left:   left,
+		Width:  width,
+		Height: height,
+	}
+}
+
+func EmptyTheme() *ColorTheme {
+	return &ColorTheme{
+		UseDefault:   true,
+		Fg:           colUndefined,
+		Bg:           colUndefined,
+		DarkBg:       colUndefined,
+		Prompt:       colUndefined,
+		Match:        colUndefined,
+		Current:      colUndefined,
+		CurrentMatch: colUndefined,
+		Spinner:      colUndefined,
+		Info:         colUndefined,
+		Cursor:       colUndefined,
+		Selected:     colUndefined,
+		Header:       colUndefined,
+		Border:       colUndefined}
+}
+
 func init() {
 	_prevDownTime = time.Unix(0, 0)
 	_clickY = []int{}
@@ -176,7 +242,8 @@ func init() {
 		Info:         C.COLOR_WHITE,
 		Cursor:       C.COLOR_RED,
 		Selected:     C.COLOR_MAGENTA,
-		Header:       C.COLOR_CYAN}
+		Header:       C.COLOR_CYAN,
+		Border:       C.COLOR_BLACK}
 	Dark256 = &ColorTheme{
 		UseDefault:   true,
 		Fg:           15,
@@ -190,7 +257,8 @@ func init() {
 		Info:         144,
 		Cursor:       161,
 		Selected:     168,
-		Header:       109}
+		Header:       109,
+		Border:       59}
 	Light256 = &ColorTheme{
 		UseDefault:   true,
 		Fg:           15,
@@ -204,25 +272,23 @@ func init() {
 		Info:         101,
 		Cursor:       161,
 		Selected:     168,
-		Header:       31}
+		Header:       31,
+		Border:       145}
 }

-func attrColored(pair int, bold bool) C.int {
+func attrColored(pair int, a Attr) C.int {
 	var attr C.int
 	if pair > ColNormal {
 		attr = C.COLOR_PAIR(C.int(pair))
 	}
-	if bold {
-		attr = attr | C.A_BOLD
-	}
-	return attr
+	return attr | C.int(a)
 }

-func attrMono(pair int, bold bool) C.int {
+func attrMono(pair int, a Attr) C.int {
 	var attr C.int
 	switch pair {
 	case ColCurrent:
-		if bold {
+		if a&C.A_BOLD == C.A_BOLD {
 			attr = C.A_REVERSE
 		}
 	case ColMatch:
@@ -230,7 +296,7 @@ func attrMono(pair int, bold bool) C.int {
 	case ColCurrentMatch:
 		attr = C.A_UNDERLINE | C.A_REVERSE
 	}
-	if bold {
+	if a&C.A_BOLD == C.A_BOLD {
 		attr = attr | C.A_BOLD
 	}
 	return attr
@@ -280,44 +346,59 @@ func Init(theme *ColorTheme, black bool, mouse bool) {

 	if theme != nil {
 		C.start_color()
-		initPairs(theme, black)
+		var baseTheme *ColorTheme
+		if C.tigetnum(C.CString("colors")) >= 256 {
+			baseTheme = Dark256
+		} else {
+			baseTheme = Default16
+		}
+		initPairs(baseTheme, theme, black)
 		_color = attrColored
 	} else {
 		_color = attrMono
 	}
 }

-func initPairs(theme *ColorTheme, black bool) {
-	fg := C.short(theme.Fg)
-	bg := C.short(theme.Bg)
+func override(a int16, b int16) C.short {
+	if b == colUndefined {
+		return C.short(a)
+	}
+	return C.short(b)
+}
+
+func initPairs(baseTheme *ColorTheme, theme *ColorTheme, black bool) {
+	fg := override(baseTheme.Fg, theme.Fg)
+	bg := override(baseTheme.Bg, theme.Bg)
 	if black {
 		bg = C.COLOR_BLACK
 	} else if theme.UseDefault {
-		fg = -1
-		bg = -1
+		fg = colDefault
+		bg = colDefault
 		C.use_default_colors()
 	}
 	if theme.UseDefault {
-		FG = -1
-		BG = -1
+		FG = colDefault
+		BG = colDefault
 	} else {
 		FG = int(fg)
 		BG = int(bg)
-		C.assume_default_colors(C.int(theme.Fg), C.int(bg))
+		C.assume_default_colors(C.int(override(baseTheme.Fg, theme.Fg)), C.int(bg))
 	}

-	CurrentFG = int(theme.Current)
-	DarkBG = int(theme.DarkBg)
-	darkBG := C.short(DarkBG)
-	C.init_pair(ColPrompt, C.short(theme.Prompt), bg)
-	C.init_pair(ColMatch, C.short(theme.Match), bg)
-	C.init_pair(ColCurrent, C.short(theme.Current), darkBG)
-	C.init_pair(ColCurrentMatch, C.short(theme.CurrentMatch), darkBG)
-	C.init_pair(ColSpinner, C.short(theme.Spinner), bg)
-	C.init_pair(ColInfo, C.short(theme.Info), bg)
-	C.init_pair(ColCursor, C.short(theme.Cursor), darkBG)
-	C.init_pair(ColSelected, C.short(theme.Selected), darkBG)
-	C.init_pair(ColHeader, C.short(theme.Header), bg)
+	currentFG := override(baseTheme.Current, theme.Current)
+	darkBG := override(baseTheme.DarkBg, theme.DarkBg)
+	CurrentFG = int(currentFG)
+	DarkBG = int(darkBG)
+	C.init_pair(ColPrompt, override(baseTheme.Prompt, theme.Prompt), bg)
+	C.init_pair(ColMatch, override(baseTheme.Match, theme.Match), bg)
+	C.init_pair(ColCurrent, currentFG, darkBG)
+	C.init_pair(ColCurrentMatch, override(baseTheme.CurrentMatch, theme.CurrentMatch), darkBG)
+	C.init_pair(ColSpinner, override(baseTheme.Spinner, theme.Spinner), bg)
+	C.init_pair(ColInfo, override(baseTheme.Info, theme.Info), bg)
+	C.init_pair(ColCursor, override(baseTheme.Cursor, theme.Cursor), darkBG)
+	C.init_pair(ColSelected, override(baseTheme.Selected, theme.Selected), darkBG)
+	C.init_pair(ColHeader, override(baseTheme.Header, theme.Header), bg)
+	C.init_pair(ColBorder, override(baseTheme.Border, theme.Border), bg)
 }

 func Close() {
@@ -373,7 +454,9 @@ func mouseSequence(sz *int) Event {
 		97, 101, 105, 113: // scroll-down / shift / cmd / ctrl
 		mod := _buf[3] >= 100
 		s := 1 - int(_buf[3]%2)*2
-		return Event{Mouse, 0, &MouseEvent{0, 0, s, false, false, mod}}
+		x := int(_buf[4] - 33)
+		y := int(_buf[5] - 33)
+		return Event{Mouse, 0, &MouseEvent{y, x, s, false, false, mod}}
 	}
 	return Event{Invalid, 0, nil}
 }
@@ -384,6 +467,12 @@ func escSequence(sz *int) Event {
 	}
 	*sz = 2
 	switch _buf[1] {
+	case 13:
+		return Event{AltEnter, 0, nil}
+	case 32:
+		return Event{AltSpace, 0, nil}
+	case 47:
+		return Event{AltSlash, 0, nil}
 	case 98:
 		return Event{AltB, 0, nil}
 	case 100:
@@ -429,6 +518,20 @@ func escSequence(sz *int) Event {
 			*sz = 4
 			switch _buf[2] {
 			case 50:
+				if len(_buf) == 5 && _buf[4] == 126 {
+					*sz = 5
+					switch _buf[3] {
+					case 48:
+						return Event{F9, 0, nil}
+					case 49:
+						return Event{F10, 0, nil}
+					}
+				}
+				// Bracketed paste mode \e[200~ / \e[201
+				if _buf[3] == 48 && (_buf[4] == 48 || _buf[4] == 49) && _buf[5] == 126 {
+					*sz = 6
+					return Event{Invalid, 0, nil}
+				}
 				return Event{Invalid, 0, nil} // INS
 			case 51:
 				return Event{Del, 0, nil}
@@ -442,6 +545,21 @@ func escSequence(sz *int) Event {
 				switch _buf[3] {
 				case 126:
 					return Event{Home, 0, nil}
+				case 53, 55, 56, 57:
+					if len(_buf) == 5 && _buf[4] == 126 {
+						*sz = 5
+						switch _buf[3] {
+						case 53:
+							return Event{F5, 0, nil}
+						case 55:
+							return Event{F6, 0, nil}
+						case 56:
+							return Event{F7, 0, nil}
+						case 57:
+							return Event{F8, 0, nil}
+						}
+					}
+					return Event{Invalid, 0, nil}
 				case 59:
 					if len(_buf) != 6 {
 						return Event{Invalid, 0, nil}
@@ -511,17 +629,25 @@ func GetChar() Event {
 	return Event{Rune, r, nil}
 }

-func Move(y int, x int) {
-	C.move(C.int(y), C.int(x))
+func (w *Window) Close() {
+	C.delwin(w.win)
 }

-func MoveAndClear(y int, x int) {
-	Move(y, x)
-	C.clrtoeol()
+func (w *Window) Enclose(y int, x int) bool {
+	return bool(C.wenclose(w.win, C.int(y), C.int(x)))
 }

-func Print(text string) {
-	C.addstr(C.CString(strings.Map(func(r rune) rune {
+func (w *Window) Move(y int, x int) {
+	C.wmove(w.win, C.int(y), C.int(x))
+}
+
+func (w *Window) MoveAndClear(y int, x int) {
+	w.Move(y, x)
+	C.wclrtoeol(w.win)
+}
+
+func (w *Window) Print(text string) {
+	C.waddstr(w.win, C.CString(strings.Map(func(r rune) rune {
 		if r < 32 {
 			return -1
 		}
@@ -529,11 +655,11 @@ func Print(text string) {
 	}, text)))
 }

-func CPrint(pair int, bold bool, text string) {
-	attr := _color(pair, bold)
-	C.attron(attr)
-	Print(text)
-	C.attroff(attr)
+func (w *Window) CPrint(pair int, a Attr, text string) {
+	attr := _color(pair, a)
+	C.wattron(w.win, attr)
+	w.Print(text)
+	C.wattroff(w.win, attr)
 }

 func Clear() {
@@ -548,6 +674,30 @@ func Refresh() {
 	C.refresh()
 }

+func (w *Window) Erase() {
+	C.werase(w.win)
+}
+
+func (w *Window) Fill(str string) bool {
+	return C.waddstr(w.win, C.CString(str)) == C.OK
+}
+
+func (w *Window) CFill(str string, fg int, bg int, a Attr) bool {
+	attr := _color(PairFor(fg, bg), a)
+	C.wattron(w.win, attr)
+	ret := w.Fill(str)
+	C.wattroff(w.win, attr)
+	return ret
+}
+
+func (w *Window) Refresh() {
+	C.wnoutrefresh(w.win)
+}
+
+func DoUpdate() {
+	C.doupdate()
+}
+
 func PairFor(fg int, bg int) int {
 	key := (fg << 8) + bg
 	if found, prs := _colorMap[key]; prs {
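The curses changes above replace the global drawing functions with per-Window methods and split screen refresh into wnoutrefresh/doupdate. A rough usage sketch against the package as it appears in this diff; the geometry, theme choice, and the surrounding init/close calls are illustrative, not taken from fzf's terminal code:

package main

import "github.com/junegunn/fzf/src/curses"

func main() {
	// Initialize curses with the 256-color dark theme; black background and
	// mouse support disabled.
	curses.Init(curses.Dark256, false, false)
	defer curses.Close()

	// A bordered 40x10 window at the top-left corner of the screen.
	w := curses.NewWindow(0, 0, 40, 10, true)
	defer w.Close()

	w.Move(1, 2)
	w.CPrint(curses.ColHeader, curses.Bold, "fzf")
	w.MoveAndClear(2, 2)
	w.Print("hello world")

	// Per-window output is queued with wnoutrefresh and flushed in one update.
	w.Refresh()
	curses.DoUpdate()
	curses.GetChar()
}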
src/item.go (281 changed lines)

@@ -1,288 +1,39 @@
 package fzf

 import (
-	"math"
-
-	"github.com/junegunn/fzf/src/curses"
+	"github.com/junegunn/fzf/src/util"
 )

-// Offset holds three 32-bit integers denoting the offsets of a matched substring
-type Offset [3]int32
-
-type colorOffset struct {
-	offset [2]int32
-	color  int
-	bold   bool
-}
-
 // Item represents each input line
 type Item struct {
-	text        []rune
-	origText    *[]rune
+	index       int32
+	text        util.Chars
+	origText    *[]byte
+	colors      *[]ansiOffset
 	transformed []Token
-	offsets     []Offset
-	colors      []ansiOffset
-	rank        [5]int32
-}
-
-// Sort criteria to use. Never changes once fzf is started.
-var sortCriteria []criterion
-
-func isRankValid(rank [5]int32) bool {
-	// Exclude ordinal index
-	for _, r := range rank[:4] {
-		if r > 0 {
-			return true
-		}
-	}
-	return false
-}
-
-func buildEmptyRank(index int32) [5]int32 {
-	return [5]int32{0, 0, 0, 0, index}
 }

+// Index returns ordinal index of the Item
 func (item *Item) Index() int32 {
-	return item.rank[4]
+	return item.index
 }

-// Rank calculates rank of the Item
-func (item *Item) Rank(cache bool) [5]int32 {
-	if cache && isRankValid(item.rank) {
-		return item.rank
+// Colors returns ansiOffsets of the Item
+func (item *Item) Colors() []ansiOffset {
+	if item.colors == nil {
+		return []ansiOffset{}
 	}
-	matchlen := 0
-	prevEnd := 0
-	lenSum := 0
-	minBegin := math.MaxInt32
-	for _, offset := range item.offsets {
-		begin := int(offset[0])
-		end := int(offset[1])
-		trimLen := int(offset[2])
-		lenSum += trimLen
-		if prevEnd > begin {
-			begin = prevEnd
-		}
-		if end > prevEnd {
-			prevEnd = end
-		}
-		if end > begin {
-			if begin < minBegin {
-				minBegin = begin
-			}
-			matchlen += end - begin
-		}
-	}
-	if matchlen == 0 {
-		matchlen = math.MaxInt32
-	}
-	rank := buildEmptyRank(item.Index())
-	for idx, criterion := range sortCriteria {
-		var val int32
-		switch criterion {
-		case byMatchLen:
-			val = int32(matchlen)
-		case byLength:
-			// It is guaranteed that .transformed in not null in normal execution
-			if item.transformed != nil {
-				// If offsets is empty, lenSum will be 0, but we don't care
-				val = int32(lenSum)
-			} else {
-				val = int32(len(item.text))
-			}
-		case byBegin:
-			// We can't just look at item.offsets[0][0] because it can be an inverse term
-			whitePrefixLen := 0
-			for idx, r := range item.text {
-				whitePrefixLen = idx
-				if idx == minBegin || r != ' ' && r != '\t' {
-					break
-				}
-			}
-			val = int32(minBegin - whitePrefixLen)
-		case byEnd:
-			if prevEnd > 0 {
-				val = int32(1 + len(item.text) - prevEnd)
-			} else {
-				// Empty offsets due to inverse terms.
-				val = 1
-			}
-		}
-		rank[idx] = val
-	}
-	if cache {
-		item.rank = rank
-	}
-	return rank
+	return *item.colors
 }

 // AsString returns the original string
 func (item *Item) AsString(stripAnsi bool) string {
-	return *item.StringPtr(stripAnsi)
-}
-
-// StringPtr returns the pointer to the original string
-func (item *Item) StringPtr(stripAnsi bool) *string {
 	if item.origText != nil {
 		if stripAnsi {
-			trimmed, _, _ := extractColor(string(*item.origText), nil)
-			return &trimmed
+			trimmed, _, _ := extractColor(string(*item.origText), nil, nil)
+			return trimmed
 		}
-		orig := string(*item.origText)
-		return &orig
+		return string(*item.origText)
 	}
-	str := string(item.text)
-	return &str
-}
-
-func (item *Item) colorOffsets(color int, bold bool, current bool) []colorOffset {
-	if len(item.colors) == 0 {
-		var offsets []colorOffset
-		for _, off := range item.offsets {
-			offsets = append(offsets, colorOffset{offset: [2]int32{off[0], off[1]}, color: color, bold: bold})
-		}
-		return offsets
-	}
-
-	// Find max column
-	var maxCol int32
-	for _, off := range item.offsets {
-		if off[1] > maxCol {
-			maxCol = off[1]
-		}
-	}
-	for _, ansi := range item.colors {
-		if ansi.offset[1] > maxCol {
-			maxCol = ansi.offset[1]
-		}
-	}
-	cols := make([]int, maxCol)
-
-	for colorIndex, ansi := range item.colors {
-		for i := ansi.offset[0]; i < ansi.offset[1]; i++ {
-			cols[i] = colorIndex + 1 // XXX
-		}
-	}
-
-	for _, off := range item.offsets {
-		for i := off[0]; i < off[1]; i++ {
-			cols[i] = -1
-		}
-	}
-
-	// sort.Sort(ByOrder(offsets))
-
-	// Merge offsets
-	// ------------ ---- -- ----
-	//   ++++++++ ++++++++++
-	// --++++++++-- --++++++++++---
-	curr := 0
-	start := 0
-	var offsets []colorOffset
-	add := func(idx int) {
-		if curr != 0 && idx > start {
-			if curr == -1 {
-				offsets = append(offsets, colorOffset{
-					offset: [2]int32{int32(start), int32(idx)}, color: color, bold: bold})
-			} else {
-				ansi := item.colors[curr-1]
-				fg := ansi.color.fg
-				if fg == -1 {
-					if current {
-						fg = curses.CurrentFG
-					} else {
-						fg = curses.FG
-					}
-				}
-				bg := ansi.color.bg
-				if bg == -1 {
-					if current {
-						bg = curses.DarkBG
-					} else {
-						bg = curses.BG
-					}
-				}
-				offsets = append(offsets, colorOffset{
-					offset: [2]int32{int32(start), int32(idx)},
-					color:  curses.PairFor(fg, bg),
-					bold:   ansi.color.bold || bold})
-			}
-		}
-	}
-	for idx, col := range cols {
-		if col != curr {
-			add(idx)
-			start = idx
-			curr = col
-		}
-	}
-	add(int(maxCol))
-	return offsets
-}
-
-// ByOrder is for sorting substring offsets
-type ByOrder []Offset
-
-func (a ByOrder) Len() int {
-	return len(a)
-}
-
-func (a ByOrder) Swap(i, j int) {
-	a[i], a[j] = a[j], a[i]
-}
-
-func (a ByOrder) Less(i, j int) bool {
-	ioff := a[i]
-	joff := a[j]
-	return (ioff[0] < joff[0]) || (ioff[0] == joff[0]) && (ioff[1] <= joff[1])
-}
-
-// ByRelevance is for sorting Items
-type ByRelevance []*Item
-
-func (a ByRelevance) Len() int {
-	return len(a)
-}
-
-func (a ByRelevance) Swap(i, j int) {
-	a[i], a[j] = a[j], a[i]
-}
-
-func (a ByRelevance) Less(i, j int) bool {
-	irank := a[i].Rank(true)
-	jrank := a[j].Rank(true)
-
-	return compareRanks(irank, jrank, false)
-}
-
-// ByRelevanceTac is for sorting Items
-type ByRelevanceTac []*Item
-
-func (a ByRelevanceTac) Len() int {
-	return len(a)
-}
-
-func (a ByRelevanceTac) Swap(i, j int) {
-	a[i], a[j] = a[j], a[i]
-}
-
-func (a ByRelevanceTac) Less(i, j int) bool {
-	irank := a[i].Rank(true)
-	jrank := a[j].Rank(true)
-
-	return compareRanks(irank, jrank, true)
-}
-
-func compareRanks(irank [5]int32, jrank [5]int32, tac bool) bool {
-	for idx := 0; idx < 4; idx++ {
-		left := irank[idx]
-		right := jrank[idx]
-		if left < right {
-			return true
-		} else if left > right {
-			return false
-		}
-	}
-	return (irank[4] <= jrank[4]) != tac
+	return item.text.ToString()
 }
src/item_test.go (113 changed lines)

@@ -1,108 +1,23 @@
 package fzf

 import (
-	"math"
-	"sort"
 	"testing"

-	"github.com/junegunn/fzf/src/curses"
+	"github.com/junegunn/fzf/src/util"
 )

-func TestOffsetSort(t *testing.T) {
-	offsets := []Offset{
-		Offset{3, 5}, Offset{2, 7},
-		Offset{1, 3}, Offset{2, 9}}
-	sort.Sort(ByOrder(offsets))
-
-	if offsets[0][0] != 1 || offsets[0][1] != 3 ||
-		offsets[1][0] != 2 || offsets[1][1] != 7 ||
-		offsets[2][0] != 2 || offsets[2][1] != 9 ||
-		offsets[3][0] != 3 || offsets[3][1] != 5 {
-		t.Error("Invalid order:", offsets)
+func TestStringPtr(t *testing.T) {
+	orig := []byte("\x1b[34mfoo")
+	text := []byte("\x1b[34mbar")
+	item := Item{origText: &orig, text: util.ToChars(text)}
+	if item.AsString(true) != "foo" || item.AsString(false) != string(orig) {
+		t.Fail()
+	}
+	if item.AsString(true) != "foo" {
+		t.Fail()
+	}
+	item.origText = nil
+	if item.AsString(true) != string(text) || item.AsString(false) != string(text) {
+		t.Fail()
 	}
 }
-
-func TestRankComparison(t *testing.T) {
-	if compareRanks([5]int32{3, 0, 0, 0, 5}, [5]int32{2, 0, 0, 0, 7}, false) ||
-		!compareRanks([5]int32{3, 0, 0, 0, 5}, [5]int32{3, 0, 0, 0, 6}, false) ||
-		!compareRanks([5]int32{1, 2, 0, 0, 3}, [5]int32{1, 3, 0, 0, 2}, false) ||
-		!compareRanks([5]int32{0, 0, 0, 0, 0}, [5]int32{0, 0, 0, 0, 0}, false) {
-		t.Error("Invalid order")
-	}
-
-	if compareRanks([5]int32{3, 0, 0, 0, 5}, [5]int32{2, 0, 0, 0, 7}, true) ||
-		!compareRanks([5]int32{3, 0, 0, 0, 5}, [5]int32{3, 0, 0, 0, 6}, false) ||
-		!compareRanks([5]int32{1, 2, 0, 0, 3}, [5]int32{1, 3, 0, 0, 2}, true) ||
-		!compareRanks([5]int32{0, 0, 0, 0, 0}, [5]int32{0, 0, 0, 0, 0}, false) {
-		t.Error("Invalid order (tac)")
-	}
-}
-
-// Match length, string length, index
-func TestItemRank(t *testing.T) {
-	// FIXME global
-	sortCriteria = []criterion{byMatchLen, byLength}
-
-	strs := [][]rune{[]rune("foo"), []rune("foobar"), []rune("bar"), []rune("baz")}
-	item1 := Item{text: strs[0], offsets: []Offset{}, rank: [5]int32{0, 0, 0, 0, 1}}
-	rank1 := item1.Rank(true)
-	if rank1[0] != math.MaxInt32 || rank1[1] != 3 || rank1[4] != 1 {
-		t.Error(item1.Rank(true))
-	}
-	// Only differ in index
-	item2 := Item{text: strs[0], offsets: []Offset{}}
-
-	items := []*Item{&item1, &item2}
-	sort.Sort(ByRelevance(items))
-	if items[0] != &item2 || items[1] != &item1 {
-		t.Error(items)
-	}
-
-	items = []*Item{&item2, &item1, &item1, &item2}
-	sort.Sort(ByRelevance(items))
-	if items[0] != &item2 || items[1] != &item2 ||
-		items[2] != &item1 || items[3] != &item1 {
-		t.Error(items)
-	}
-
-	// Sort by relevance
-	item3 := Item{text: strs[1], rank: [5]int32{0, 0, 0, 0, 2}, offsets: []Offset{Offset{1, 3}, Offset{5, 7}}}
-	item4 := Item{text: strs[1], rank: [5]int32{0, 0, 0, 0, 2}, offsets: []Offset{Offset{1, 2}, Offset{6, 7}}}
-	item5 := Item{text: strs[2], rank: [5]int32{0, 0, 0, 0, 2}, offsets: []Offset{Offset{1, 3}, Offset{5, 7}}}
-	item6 := Item{text: strs[2], rank: [5]int32{0, 0, 0, 0, 2}, offsets: []Offset{Offset{1, 2}, Offset{6, 7}}}
-	items = []*Item{&item1, &item2, &item3, &item4, &item5, &item6}
-	sort.Sort(ByRelevance(items))
-	if items[0] != &item6 || items[1] != &item4 ||
-		items[2] != &item5 || items[3] != &item3 ||
-		items[4] != &item2 || items[5] != &item1 {
-		t.Error(items)
-	}
-}
-
-func TestColorOffset(t *testing.T) {
-	// ------------ 20 ---- -- ----
-	//   ++++++++ ++++++++++
-	// --++++++++-- --++++++++++---
-	item := Item{
-		offsets: []Offset{Offset{5, 15}, Offset{25, 35}},
-		colors: []ansiOffset{
-			ansiOffset{[2]int32{0, 20}, ansiState{1, 5, false}},
-			ansiOffset{[2]int32{22, 27}, ansiState{2, 6, true}},
-			ansiOffset{[2]int32{30, 32}, ansiState{3, 7, false}},
-			ansiOffset{[2]int32{33, 40}, ansiState{4, 8, true}}}}
-	// [{[0 5] 9 false} {[5 15] 99 false} {[15 20] 9 false} {[22 25] 10 true} {[25 35] 99 false} {[35 40] 11 true}]
-
-	offsets := item.colorOffsets(99, false, true)
-	assert := func(idx int, b int32, e int32, c int, bold bool) {
-		o := offsets[idx]
-		if o.offset[0] != b || o.offset[1] != e || o.color != c || o.bold != bold {
-			t.Error(o)
-		}
-	}
-	assert(0, 0, 5, curses.ColUser, false)
-	assert(1, 5, 15, 99, false)
-	assert(2, 15, 20, curses.ColUser, false)
-	assert(3, 22, 25, curses.ColUser+1, true)
-	assert(4, 25, 35, 99, false)
-	assert(5, 35, 40, curses.ColUser+2, true)
-}
src/matcher.go

@@ -26,6 +26,7 @@ type Matcher struct {
 	eventBox    *util.EventBox
 	reqBox      *util.EventBox
 	partitions  int
+	slab        []*util.Slab
 	mergerCache map[string]*Merger
 }

@@ -37,13 +38,15 @@ const (
 // NewMatcher returns a new Matcher
 func NewMatcher(patternBuilder func([]rune) *Pattern,
 	sort bool, tac bool, eventBox *util.EventBox) *Matcher {
+	partitions := util.Min(numPartitionsMultiplier*runtime.NumCPU(), maxPartitions)
 	return &Matcher{
 		patternBuilder: patternBuilder,
 		sort:           sort,
 		tac:            tac,
 		eventBox:       eventBox,
 		reqBox:         util.NewEventBox(),
-		partitions:     runtime.NumCPU(),
+		partitions:     partitions,
+		slab:           make([]*util.Slab, partitions),
 		mergerCache:    make(map[string]*Merger)}
 }

@@ -106,18 +109,19 @@ func (m *Matcher) Loop() {
 }

 func (m *Matcher) sliceChunks(chunks []*Chunk) [][]*Chunk {
-	perSlice := len(chunks) / m.partitions
+	partitions := m.partitions
+	perSlice := len(chunks) / partitions

-	// No need to parallelize
 	if perSlice == 0 {
-		return [][]*Chunk{chunks}
+		partitions = len(chunks)
+		perSlice = 1
 	}

-	slices := make([][]*Chunk, m.partitions)
-	for i := 0; i < m.partitions; i++ {
+	slices := make([][]*Chunk, partitions)
+	for i := 0; i < partitions; i++ {
 		start := i * perSlice
 		end := start + perSlice
-		if i == m.partitions-1 {
+		if i == partitions-1 {
 			end = len(chunks)
 		}
 		slices[i] = chunks[start:end]
@@ -127,7 +131,7 @@ func (m *Matcher) sliceChunks(chunks []*Chunk) [][]*Chunk {

 type partialResult struct {
 	index   int
-	matches []*Item
+	matches []*Result
 }

 func (m *Matcher) scan(request MatchRequest) (*Merger, bool) {
@@ -152,17 +156,26 @@ func (m *Matcher) scan(request MatchRequest) (*Merger, bool) {

 	for idx, chunks := range slices {
 		waitGroup.Add(1)
-		go func(idx int, chunks []*Chunk) {
+		if m.slab[idx] == nil {
+			m.slab[idx] = util.MakeSlab(slab16Size, slab32Size)
+		}
+		go func(idx int, slab *util.Slab, chunks []*Chunk) {
 			defer func() { waitGroup.Done() }()
-			sliceMatches := []*Item{}
-			for _, chunk := range chunks {
-				matches := request.pattern.Match(chunk)
-				sliceMatches = append(sliceMatches, matches...)
+			count := 0
+			allMatches := make([][]*Result, len(chunks))
+			for idx, chunk := range chunks {
+				matches := request.pattern.Match(chunk, slab)
+				allMatches[idx] = matches
+				count += len(matches)
 				if cancelled.Get() {
 					return
 				}
 				countChan <- len(matches)
 			}
+			sliceMatches := make([]*Result, 0, count)
+			for _, matches := range allMatches {
+				sliceMatches = append(sliceMatches, matches...)
+			}
 			if m.sort {
 				if m.tac {
 					sort.Sort(ByRelevanceTac(sliceMatches))
@@ -171,7 +184,7 @@ func (m *Matcher) scan(request MatchRequest) (*Merger, bool) {
 				}
 			}
 			resultChan <- partialResult{idx, sliceMatches}
-		}(idx, chunks)
+		}(idx, m.slab[idx], chunks)
 	}

 	wait := func() bool {
@@ -199,12 +212,12 @@ func (m *Matcher) scan(request MatchRequest) (*Merger, bool) {
 		}
 	}

-	partialResults := make([][]*Item, numSlices)
-	for _, _ = range slices {
+	partialResults := make([][]*Result, numSlices)
+	for _ = range slices {
 		partialResult := <-resultChan
 		partialResults[partialResult.index] = partialResult.matches
 	}
-	return NewMerger(partialResults, m.sort, m.tac), false
+	return NewMerger(pattern, partialResults, m.sort, m.tac), false
 }

 // Reset is called to interrupt/signal the ongoing search
@@ -2,14 +2,15 @@ package fzf

 import "fmt"

-// Merger with no data
+// EmptyMerger is a Merger with no data
-var EmptyMerger = NewMerger([][]*Item{}, false, false)
+var EmptyMerger = NewMerger(nil, [][]*Result{}, false, false)

 // Merger holds a set of locally sorted lists of items and provides the view of
 // a single, globally-sorted list
 type Merger struct {
-lists [][]*Item
+pattern *Pattern
-merged []*Item
+lists [][]*Result
+merged []*Result
 chunks *[]*Chunk
 cursors []int
 sorted bool

@@ -22,9 +23,10 @@ type Merger struct {
 // original order
 func PassMerger(chunks *[]*Chunk, tac bool) *Merger {
 mg := Merger{
-chunks: chunks,
+pattern: nil,
-tac: tac,
+chunks: chunks,
-count: 0}
+tac: tac,
+count: 0}

 for _, chunk := range *mg.chunks {
 mg.count += len(*chunk)

@@ -33,10 +35,11 @@ func PassMerger(chunks *[]*Chunk, tac bool) *Merger {
 }

 // NewMerger returns a new Merger
-func NewMerger(lists [][]*Item, sorted bool, tac bool) *Merger {
+func NewMerger(pattern *Pattern, lists [][]*Result, sorted bool, tac bool) *Merger {
 mg := Merger{
+pattern: pattern,
 lists: lists,
-merged: []*Item{},
+merged: []*Result{},
 chunks: nil,
 cursors: make([]int, len(lists)),
 sorted: sorted,

@@ -55,14 +58,14 @@ func (mg *Merger) Length() int {
 return mg.count
 }

-// Get returns the pointer to the Item object indexed by the given integer
+// Get returns the pointer to the Result object indexed by the given integer
-func (mg *Merger) Get(idx int) *Item {
+func (mg *Merger) Get(idx int) *Result {
 if mg.chunks != nil {
 if mg.tac {
 idx = mg.count - idx - 1
 }
 chunk := (*mg.chunks)[idx/chunkSize]
-return (*chunk)[idx%chunkSize]
+return &Result{item: (*chunk)[idx%chunkSize]}
 }

 if mg.sorted {

@@ -86,9 +89,9 @@ func (mg *Merger) cacheable() bool {
 return mg.count < mergerCacheMax
 }

-func (mg *Merger) mergedGet(idx int) *Item {
+func (mg *Merger) mergedGet(idx int) *Result {
 for i := len(mg.merged); i <= idx; i++ {
-minRank := buildEmptyRank(0)
+minRank := minRank()
 minIdx := -1
 for listIdx, list := range mg.lists {
 cursor := mg.cursors[listIdx]

@@ -97,7 +100,7 @@ func (mg *Merger) mergedGet(idx int) *Result {
 continue
 }
 if cursor >= 0 {
-rank := list[cursor].Rank(false)
+rank := list[cursor].rank
 if minIdx < 0 || compareRanks(rank, minRank, mg.tac) {
 minRank = rank
 minIdx = listIdx
@@ -5,6 +5,8 @@ import (
 "math/rand"
 "sort"
 "testing"

+"github.com/junegunn/fzf/src/util"
 )

 func assert(t *testing.T, cond bool, msg ...string) {

@@ -13,18 +15,11 @@ func assert(t *testing.T, cond bool, msg ...string) {
 }
 }

-func randItem() *Item {
+func randResult() *Result {
 str := fmt.Sprintf("%d", rand.Uint32())
-offsets := make([]Offset, rand.Int()%3)
+return &Result{
-for idx := range offsets {
+item: &Item{text: util.RunesToChars([]rune(str))},
-sidx := int32(rand.Uint32() % 20)
+rank: rank{index: rand.Int31()}}
-eidx := sidx + int32(rand.Uint32()%20)
-offsets[idx] = Offset{sidx, eidx}
-}
-return &Item{
-text: []rune(str),
-rank: buildEmptyRank(rand.Int31()),
-offsets: offsets}
 }

 func TestEmptyMerger(t *testing.T) {

@@ -34,23 +29,23 @@ func TestEmptyMerger(t *testing.T) {
 assert(t, len(EmptyMerger.merged) == 0, "Invalid merged list")
 }

-func buildLists(partiallySorted bool) ([][]*Item, []*Item) {
+func buildLists(partiallySorted bool) ([][]*Result, []*Result) {
 numLists := 4
-lists := make([][]*Item, numLists)
+lists := make([][]*Result, numLists)
 cnt := 0
 for i := 0; i < numLists; i++ {
-numItems := rand.Int() % 20
+numResults := rand.Int() % 20
-cnt += numItems
+cnt += numResults
-lists[i] = make([]*Item, numItems)
+lists[i] = make([]*Result, numResults)
-for j := 0; j < numItems; j++ {
+for j := 0; j < numResults; j++ {
-item := randItem()
+item := randResult()
 lists[i][j] = item
 }
 if partiallySorted {
 sort.Sort(ByRelevance(lists[i]))
 }
 }
-items := []*Item{}
+items := []*Result{}
 for _, list := range lists {
 items = append(items, list...)
 }

@@ -62,7 +57,7 @@ func TestMergerUnsorted(t *testing.T) {
 cnt := len(items)

 // Not sorted: same order
-mg := NewMerger(lists, false, false)
+mg := NewMerger(nil, lists, false, false)
 assert(t, cnt == mg.Length(), "Invalid Length")
 for i := 0; i < cnt; i++ {
 assert(t, items[i] == mg.Get(i), "Invalid Get")

@@ -74,7 +69,7 @@ func TestMergerSorted(t *testing.T) {
 cnt := len(items)

 // Sorted sorted order
-mg := NewMerger(lists, true, false)
+mg := NewMerger(nil, lists, true, false)
 assert(t, cnt == mg.Length(), "Invalid Length")
 sort.Sort(ByRelevance(items))
 for i := 0; i < cnt; i++ {

@@ -84,7 +79,7 @@ func TestMergerSorted(t *testing.T) {
 }

 // Inverse order
-mg2 := NewMerger(lists, true, false)
+mg2 := NewMerger(nil, lists, true, false)
 for i := cnt - 1; i >= 0; i-- {
 if items[i] != mg2.Get(i) {
 t.Error("Not sorted", items[i], mg2.Get(i))
261  src/options.go

@@ -1,12 +1,14 @@
 package fzf

 import (
+"fmt"
 "os"
 "regexp"
 "strconv"
 "strings"
 "unicode/utf8"

+"github.com/junegunn/fzf/src/algo"
 "github.com/junegunn/fzf/src/curses"

 "github.com/junegunn/go-shellwords"

@@ -18,40 +20,53 @@ const usage = `usage: fzf [options]
 -x, --extended Extended-search mode
 (enabled by default; +x or --no-extended to disable)
 -e, --exact Enable Exact-match
+--algo=TYPE Fuzzy matching algorithm: [v1|v2] (default: v2)
 -i Case-insensitive match (default: smart-case match)
 +i Case-sensitive match
 -n, --nth=N[,..] Comma-separated list of field index expressions
 for limiting search scope. Each can be a non-zero
 integer or a range expression ([BEGIN]..[END]).
---with-nth=N[,..] Transform item using index expressions within finder
+--with-nth=N[,..] Transform the presentation of each line using
--d, --delimiter=STR Field delimiter regex for --nth (default: AWK-style)
+field index expressions
+-d, --delimiter=STR Field delimiter regex (default: AWK-style)
 +s, --no-sort Do not sort the result
 --tac Reverse the order of the input
 --tiebreak=CRI[,..] Comma-separated list of sort criteria to apply
-when the scores are tied;
+when the scores are tied [length|begin|end|index]
-[length|begin|end|index] (default: length)
+(default: length)

 Interface
 -m, --multi Enable multi-select with tab/shift-tab
---ansi Enable processing of ANSI color codes
 --no-mouse Disable mouse
---color=COLSPEC Base scheme (dark|light|16|bw) and/or custom colors
+--bind=KEYBINDS Custom key bindings. Refer to the man page.
---black Use black background
---reverse Reverse orientation
---margin=MARGIN Screen margin (TRBL / TB,RL / T,RL,B / T,R,B,L)
---tabstop=SPACES Number of spaces for a tab character (default: 8)
 --cycle Enable cyclic scroll
 --no-hscroll Disable horizontal scroll
 --hscroll-off=COL Number of screen columns to keep to the right of the
 highlighted substring (default: 10)
+--jump-labels=CHARS Label characters for jump and jump-accept

+Layout
+--reverse Reverse orientation
+--margin=MARGIN Screen margin (TRBL / TB,RL / T,RL,B / T,R,B,L)
 --inline-info Display finder info inline with the query
 --prompt=STR Input prompt (default: '> ')
---bind=KEYBINDS Custom key bindings. Refer to the man page.
---history=FILE History file
---history-size=N Maximum number of history entries (default: 1000)
 --header=STR String to print as header
 --header-lines=N The first N lines of the input are treated as header

+Display
+--ansi Enable processing of ANSI color codes
+--tabstop=SPACES Number of spaces for a tab character (default: 8)
+--color=COLSPEC Base scheme (dark|light|16|bw) and/or custom colors

+History
+--history=FILE History file
+--history-size=N Maximum number of history entries (default: 1000)

+Preview
+--preview=COMMAND Command to preview highlighted line ({})
+--preview-window=OPT Preview window layout (default: right:50%)
+[up|down|left|right][:SIZE[%]][:hidden]

 Scripting
 -q, --query=STR Start the finder with the given query
 -1, --select-1 Automatically select the only match

@@ -81,19 +96,41 @@ const (
 type criterion int

 const (
-byMatchLen criterion = iota
+byScore criterion = iota
 byLength
 byBegin
 byEnd
 )

-func defaultMargin() [4]string {
+type sizeSpec struct {
-return [4]string{"0", "0", "0", "0"}
+size float64
+percent bool
+}

+func defaultMargin() [4]sizeSpec {
+return [4]sizeSpec{}
+}

+type windowPosition int

+const (
+posUp windowPosition = iota
+posDown
+posLeft
+posRight
+)

+type previewOpts struct {
+command string
+position windowPosition
+size sizeSpec
+hidden bool
 }

 // Options stores the values of command-line options
 type Options struct {
 Fuzzy bool
+FuzzyAlgo algo.Algo
 Extended bool
 Case Case
 Nth []Range

@@ -112,6 +149,7 @@ type Options struct {
 Hscroll bool
 HscrollOff int
 InlineInfo bool
+JumpLabels string
 Prompt string
 Query string
 Select1 bool

@@ -121,27 +159,23 @@ type Options struct {
 Expect map[int]string
 Keymap map[int]actionType
 Execmap map[int]string
+Preview previewOpts
 PrintQuery bool
 ReadZero bool
+Printer func(string)
 Sync bool
 History *History
 Header []string
 HeaderLines int
-Margin [4]string
+Margin [4]sizeSpec
 Tabstop int
 Version bool
 }

-func defaultTheme() *curses.ColorTheme {
-if strings.Contains(os.Getenv("TERM"), "256") {
-return curses.Dark256
-}
-return curses.Default16
-}

 func defaultOptions() *Options {
 return &Options{
 Fuzzy: true,
+FuzzyAlgo: algo.FuzzyMatchV2,
 Extended: true,
 Case: CaseSmart,
 Nth: make([]Range, 0),

@@ -149,17 +183,18 @@ func defaultOptions() *Options {
 Delimiter: Delimiter{},
 Sort: 1000,
 Tac: false,
-Criteria: []criterion{byMatchLen, byLength},
+Criteria: []criterion{byScore, byLength},
 Multi: false,
 Ansi: false,
 Mouse: true,
-Theme: defaultTheme(),
+Theme: curses.EmptyTheme(),
 Black: false,
 Reverse: false,
 Cycle: false,
 Hscroll: true,
 HscrollOff: 10,
 InlineInfo: false,
+JumpLabels: defaultJumpLabels,
 Prompt: "> ",
 Query: "",
 Select1: false,

@@ -169,8 +204,10 @@ func defaultOptions() *Options {
 Expect: make(map[int]string),
 Keymap: make(map[int]actionType),
 Execmap: make(map[int]string),
+Preview: previewOpts{"", posRight, sizeSpec{50, true}, false},
 PrintQuery: false,
 ReadZero: false,
+Printer: func(str string) { fmt.Println(str) },
 Sync: false,
 History: nil,
 Header: make([]string, 0),

@@ -290,6 +327,18 @@ func isAlphabet(char uint8) bool {
 return char >= 'a' && char <= 'z'
 }

+func parseAlgo(str string) algo.Algo {
+switch str {
+case "v1":
+return algo.FuzzyMatchV1
+case "v2":
+return algo.FuzzyMatchV2
+default:
+errorExit("invalid algorithm (expected: v1 or v2)")
+}
+return algo.FuzzyMatchV2
+}

 func parseKeyChords(str string, message string) map[int]string {
 if len(str) == 0 {
 errorExit(message)

@@ -322,6 +371,12 @@ func parseKeyChords(str string, message string) map[int]string {
 chord = curses.AltZ + int(' ')
 case "bspace", "bs":
 chord = curses.BSpace
+case "alt-enter", "alt-return":
+chord = curses.AltEnter
+case "alt-space":
+chord = curses.AltSpace
+case "alt-/":
+chord = curses.AltSlash
 case "alt-bs", "alt-bspace":
 chord = curses.AltBS
 case "tab":

@@ -346,12 +401,14 @@ func parseKeyChords(str string, message string) map[int]string {
 chord = curses.SRight
 case "double-click":
 chord = curses.DoubleClick
+case "f10":
+chord = curses.F10
 default:
 if len(key) == 6 && strings.HasPrefix(lkey, "ctrl-") && isAlphabet(lkey[5]) {
 chord = curses.CtrlA + int(lkey[5]) - 'a'
 } else if len(key) == 5 && strings.HasPrefix(lkey, "alt-") && isAlphabet(lkey[4]) {
 chord = curses.AltA + int(lkey[4]) - 'a'
-} else if len(key) == 2 && strings.HasPrefix(lkey, "f") && key[1] >= '1' && key[1] <= '4' {
+} else if len(key) == 2 && strings.HasPrefix(lkey, "f") && key[1] >= '1' && key[1] <= '9' {
 chord = curses.F1 + int(key[1]) - '1'
 } else if utf8.RuneCountInString(key) == 1 {
 chord = curses.AltZ + int([]rune(key)[0])

@@ -367,7 +424,7 @@ func parseKeyChords(str string, message string) map[int]string {
 }

 func parseTiebreak(str string) []criterion {
-criteria := []criterion{byMatchLen}
+criteria := []criterion{byScore}
 hasIndex := false
 hasLength := false
 hasBegin := false

@@ -454,6 +511,8 @@ func parseTheme(defaultTheme *curses.ColorTheme, str string) *curses.ColorTheme
 theme.Match = ansi
 case "hl+":
 theme.CurrentMatch = ansi
+case "border":
+theme.Border = ansi
 case "prompt":
 theme.Prompt = ansi
 case "spinner":

@@ -534,6 +593,8 @@ func parseKeymap(keymap map[int]actionType, execmap map[int]string, str string)
 keymap[key] = actAbort
 case "accept":
 keymap[key] = actAccept
+case "print-query":
+keymap[key] = actPrintQuery
 case "backward-char":
 keymap[key] = actBackwardChar
 case "backward-delete-char":

@@ -554,6 +615,10 @@ func parseKeymap(keymap map[int]actionType, execmap map[int]string, str string)
 keymap[key] = actForwardChar
 case "forward-word":
 keymap[key] = actForwardWord
+case "jump":
+keymap[key] = actJump
+case "jump-accept":
+keymap[key] = actJumpAccept
 case "kill-line":
 keymap[key] = actKillLine
 case "kill-word":

@@ -594,8 +659,18 @@ func parseKeymap(keymap map[int]actionType, execmap map[int]string, str string)
 keymap[key] = actPreviousHistory
 case "next-history":
 keymap[key] = actNextHistory
+case "toggle-preview":
+keymap[key] = actTogglePreview
 case "toggle-sort":
 keymap[key] = actToggleSort
+case "preview-up":
+keymap[key] = actPreviewUp
+case "preview-down":
+keymap[key] = actPreviewDown
+case "preview-page-up":
+keymap[key] = actPreviewPageUp
+case "preview-page-down":
+keymap[key] = actPreviewPageDown
 default:
 if isExecuteAction(actLower) {
 var offset int

@@ -649,40 +724,87 @@ func strLines(str string) []string {
 return strings.Split(strings.TrimSuffix(str, "\n"), "\n")
 }

-func parseMargin(margin string) [4]string {
+func parseSize(str string, maxPercent float64, label string) sizeSpec {
-margins := strings.Split(margin, ",")
+var val float64
-checked := func(str string) string {
+percent := strings.HasSuffix(str, "%")
-if strings.HasSuffix(str, "%") {
+if percent {
-val := atof(str[:len(str)-1])
+val = atof(str[:len(str)-1])
 if val < 0 {
-errorExit("margin must be non-negative")
+errorExit(label + " must be non-negative")
-}
-if val > 100 {
-errorExit("margin too large")
-}
-} else {
-val := atoi(str)
-if val < 0 {
-errorExit("margin must be non-negative")
-}
 }
-return str
+if val > maxPercent {
+errorExit(fmt.Sprintf("%s too large (max: %d%%)", label, int(maxPercent)))
+}
+} else {
+if strings.Contains(str, ".") {
+errorExit(label + " (without %) must be a non-negative integer")
+}

+val = float64(atoi(str))
+if val < 0 {
+errorExit(label + " must be non-negative")
+}
+}
+return sizeSpec{val, percent}
+}

+func parsePreviewWindow(opts *previewOpts, input string) {
+layout := input
+opts.hidden = false
+if strings.HasSuffix(layout, ":hidden") {
+opts.hidden = true
+layout = strings.TrimSuffix(layout, ":hidden")
+}

+tokens := strings.Split(layout, ":")
+if len(tokens) == 0 || len(tokens) > 2 {
+errorExit("invalid window layout: " + input)
+}

+if len(tokens) > 1 {
+opts.size = parseSize(tokens[1], 99, "window size")
+} else {
+opts.size = sizeSpec{50, true}
+}
+if !opts.size.percent && opts.size.size > 0 {
+// Adjust size for border
+opts.size.size += 2
+}

+switch tokens[0] {
+case "up":
+opts.position = posUp
+case "down":
+opts.position = posDown
+case "left":
+opts.position = posLeft
+case "right":
+opts.position = posRight
+default:
+errorExit("invalid window position: " + input)
+}
+}

+func parseMargin(margin string) [4]sizeSpec {
+margins := strings.Split(margin, ",")
+checked := func(str string) sizeSpec {
+return parseSize(str, 49, "margin")
 }
 switch len(margins) {
 case 1:
 m := checked(margins[0])
-return [4]string{m, m, m, m}
+return [4]sizeSpec{m, m, m, m}
 case 2:
 tb := checked(margins[0])
 rl := checked(margins[1])
-return [4]string{tb, rl, tb, rl}
+return [4]sizeSpec{tb, rl, tb, rl}
 case 3:
 t := checked(margins[0])
 rl := checked(margins[1])
 b := checked(margins[2])
-return [4]string{t, rl, b, rl}
+return [4]sizeSpec{t, rl, b, rl}
 case 4:
-return [4]string{
+return [4]sizeSpec{
 checked(margins[0]), checked(margins[1]),
 checked(margins[2]), checked(margins[3])}
 default:

@@ -714,6 +836,7 @@ func parseOptions(opts *Options, allArgs []string) {
 opts.History.maxSize = historyMax
 }
 }
+validateJumpLabels := false
 for i := 0; i < len(allArgs); i++ {
 arg := allArgs[i]
 switch arg {

@@ -736,6 +859,8 @@ func parseOptions(opts *Options, allArgs []string) {
 case "-f", "--filter":
 filter := nextString(allArgs, &i, "query string required")
 opts.Filter = &filter
+case "--algo":
+opts.FuzzyAlgo = parseAlgo(nextString(allArgs, &i, "algorithm required (v1|v2)"))
 case "--expect":
 opts.Expect = parseKeyChords(nextString(allArgs, &i, "key names required"), "key names required")
 case "--tiebreak":

@@ -745,7 +870,7 @@ func parseOptions(opts *Options, allArgs []string) {
 case "--color":
 spec := optionalNextString(allArgs, &i)
 if len(spec) == 0 {
-opts.Theme = defaultTheme()
+opts.Theme = curses.EmptyTheme()
 } else {
 opts.Theme = parseTheme(opts.Theme, spec)
 }

@@ -805,6 +930,9 @@ func parseOptions(opts *Options, allArgs []string) {
 opts.InlineInfo = true
 case "--no-inline-info":
 opts.InlineInfo = false
+case "--jump-labels":
+opts.JumpLabels = nextString(allArgs, &i, "label characters required")
+validateJumpLabels = true
 case "-1", "--select-1":
 opts.Select1 = true
 case "+1", "--no-select-1":

@@ -817,6 +945,10 @@ func parseOptions(opts *Options, allArgs []string) {
 opts.ReadZero = true
 case "--no-read0":
 opts.ReadZero = false
+case "--print0":
+opts.Printer = func(str string) { fmt.Print(str, "\x00") }
+case "--no-print0":
+opts.Printer = func(str string) { fmt.Println(str) }
 case "--print-query":
 opts.PrintQuery = true
 case "--no-print-query":

@@ -844,6 +976,13 @@ func parseOptions(opts *Options, allArgs []string) {
 case "--header-lines":
 opts.HeaderLines = atoi(
 nextString(allArgs, &i, "number of header lines required"))
+case "--preview":
+opts.Preview.command = nextString(allArgs, &i, "preview command required")
+case "--no-preview":
+opts.Preview.command = ""
+case "--preview-window":
+parsePreviewWindow(&opts.Preview,
+nextString(allArgs, &i, "preview window layout required: [up|down|left|right][:SIZE[%]]"))
 case "--no-margin":
 opts.Margin = defaultMargin()
 case "--margin":

@@ -854,7 +993,9 @@ func parseOptions(opts *Options, allArgs []string) {
 case "--version":
 opts.Version = true
 default:
-if match, value := optString(arg, "-q", "--query="); match {
+if match, value := optString(arg, "--algo="); match {
+opts.FuzzyAlgo = parseAlgo(value)
+} else if match, value := optString(arg, "-q", "--query="); match {
 opts.Query = value
 } else if match, value := optString(arg, "-f", "--filter="); match {
 opts.Filter = &value

@@ -886,12 +1027,18 @@ func parseOptions(opts *Options, allArgs []string) {
 opts.Header = strLines(value)
 } else if match, value := optString(arg, "--header-lines="); match {
 opts.HeaderLines = atoi(value)
+} else if match, value := optString(arg, "--preview="); match {
+opts.Preview.command = value
+} else if match, value := optString(arg, "--preview-window="); match {
+parsePreviewWindow(&opts.Preview, value)
 } else if match, value := optString(arg, "--margin="); match {
 opts.Margin = parseMargin(value)
 } else if match, value := optString(arg, "--tabstop="); match {
 opts.Tabstop = atoi(value)
 } else if match, value := optString(arg, "--hscroll-off="); match {
 opts.HscrollOff = atoi(value)
+} else if match, value := optString(arg, "--jump-labels="); match {
+opts.JumpLabels = value
 } else {
 errorExit("unknown option: " + arg)
 }

@@ -909,6 +1056,18 @@ func parseOptions(opts *Options, allArgs []string) {
 if opts.Tabstop < 1 {
 errorExit("tab stop must be a positive integer")
 }

+if len(opts.JumpLabels) == 0 {
+errorExit("empty jump labels")
+}

+if validateJumpLabels {
+for _, r := range opts.JumpLabels {
+if r < 32 || r > 126 {
+errorExit("non-ascii jump labels are not allowed")
+}
+}
+}
 }

 func postProcessOptions(opts *Options) {
@@ -5,6 +5,7 @@ import (
 "testing"

 "github.com/junegunn/fzf/src/curses"
+"github.com/junegunn/fzf/src/util"
 )

 func TestDelimiterRegex(t *testing.T) {

@@ -42,24 +43,24 @@ func TestDelimiterRegex(t *testing.T) {
 func TestDelimiterRegexString(t *testing.T) {
 delim := delimiterRegexp("*")
-tokens := Tokenize([]rune("-*--*---**---"), delim)
+tokens := Tokenize(util.RunesToChars([]rune("-*--*---**---")), delim)
 if delim.regex != nil ||
-string(tokens[0].text) != "-*" ||
+tokens[0].text.ToString() != "-*" ||
-string(tokens[1].text) != "--*" ||
+tokens[1].text.ToString() != "--*" ||
-string(tokens[2].text) != "---*" ||
+tokens[2].text.ToString() != "---*" ||
-string(tokens[3].text) != "*" ||
+tokens[3].text.ToString() != "*" ||
-string(tokens[4].text) != "---" {
+tokens[4].text.ToString() != "---" {
 t.Errorf("%s %s %d", delim, tokens, len(tokens))
 }
 }

 func TestDelimiterRegexRegex(t *testing.T) {
 delim := delimiterRegexp("--\\*")
-tokens := Tokenize([]rune("-*--*---**---"), delim)
+tokens := Tokenize(util.RunesToChars([]rune("-*--*---**---")), delim)
 if delim.str != nil ||
-string(tokens[0].text) != "-*--*" ||
+tokens[0].text.ToString() != "-*--*" ||
-string(tokens[1].text) != "---*" ||
+tokens[1].text.ToString() != "---*" ||
-string(tokens[2].text) != "*---" {
+tokens[2].text.ToString() != "*---" {
 t.Errorf("%s %d", tokens, len(tokens))
 }
 }

@@ -123,14 +124,14 @@ func TestIrrelevantNth(t *testing.T) {
 }

 func TestParseKeys(t *testing.T) {
-pairs := parseKeyChords("ctrl-z,alt-z,f2,@,Alt-a,!,ctrl-G,J,g", "")
+pairs := parseKeyChords("ctrl-z,alt-z,f2,@,Alt-a,!,ctrl-G,J,g,ALT-enter,alt-SPACE", "")
 check := func(i int, s string) {
 if pairs[i] != s {
 t.Errorf("%s != %s", pairs[i], s)
 }
 }
-if len(pairs) != 9 {
+if len(pairs) != 11 {
-t.Error(9)
+t.Error(11)
 }
 check(curses.CtrlZ, "ctrl-z")
 check(curses.AltZ, "alt-z")

@@ -141,6 +142,8 @@ func TestParseKeys(t *testing.T) {
 check(curses.CtrlA+'g'-'a', "ctrl-G")
 check(curses.AltZ+'J', "J")
 check(curses.AltZ+'g', "g")
+check(curses.AltEnter, "ALT-enter")
+check(curses.AltSpace, "alt-SPACE")

 // Synonyms
 pairs = parseKeyChords("enter,Return,space,tab,btab,esc,up,down,left,right", "")

@@ -339,7 +342,7 @@ func TestDefaultCtrlNP(t *testing.T) {
 check([]string{"--bind=ctrl-n:accept"}, curses.CtrlN, actAccept)
 check([]string{"--bind=ctrl-p:accept"}, curses.CtrlP, actAccept)

-hist := "--history=/tmp/foo"
+hist := "--history=/tmp/fzf-history"
 check([]string{hist}, curses.CtrlN, actNextHistory)
 check([]string{hist}, curses.CtrlP, actPreviousHistory)

@@ -350,14 +353,14 @@ func TestDefaultCtrlNP(t *testing.T) {
 check([]string{hist, "--bind=ctrl-p:accept"}, curses.CtrlP, actAccept)
 }

-func TestToggle(t *testing.T) {
+func optsFor(words ...string) *Options {
-optsFor := func(words ...string) *Options {
+opts := defaultOptions()
-opts := defaultOptions()
+parseOptions(opts, words)
-parseOptions(opts, words)
+postProcessOptions(opts)
-postProcessOptions(opts)
+return opts
-return opts
+}
-}

+func TestToggle(t *testing.T) {
 opts := optsFor()
 if opts.ToggleSort {
 t.Error()

@@ -373,3 +376,31 @@ func TestToggle(t *testing.T) {
 t.Error()
 }
 }

+func TestPreviewOpts(t *testing.T) {
+opts := optsFor()
+if !(opts.Preview.command == "" &&
+opts.Preview.hidden == false &&
+opts.Preview.position == posRight &&
+opts.Preview.size.percent == true &&
+opts.Preview.size.size == 50) {
+t.Error()
+}
+opts = optsFor("--preview", "cat {}", "--preview-window=left:15:hidden")
+if !(opts.Preview.command == "cat {}" &&
+opts.Preview.hidden == true &&
+opts.Preview.position == posLeft &&
+opts.Preview.size.percent == false &&
+opts.Preview.size.size == 15+2) {
+t.Error(opts.Preview)
+}

+opts = optsFor("--preview-window=left:15:hidden", "--preview-window=down")
+if !(opts.Preview.command == "" &&
+opts.Preview.hidden == false &&
+opts.Preview.position == posDown &&
+opts.Preview.size.percent == true &&
+opts.Preview.size.size == 50) {
+t.Error(opts.Preview)
+}
+}
149  src/pattern.go

@@ -2,7 +2,6 @@ package fzf

 import (
 "regexp"
-"sort"
 "strings"

 "github.com/junegunn/fzf/src/algo"

@@ -41,6 +40,7 @@ type termSet []term
 // Pattern represents search pattern
 type Pattern struct {
 fuzzy bool
+fuzzyAlgo algo.Algo
 extended bool
 caseSensitive bool
 forward bool

@@ -49,7 +49,7 @@ type Pattern struct {
 cacheable bool
 delimiter Delimiter
 nth []Range
-procFun map[termType]func(bool, bool, []rune, []rune) (int, int)
+procFun map[termType]algo.Algo
 }

 var (

@@ -75,8 +75,8 @@ func clearChunkCache() {
 }

 // BuildPattern builds Pattern object from the given arguments
-func BuildPattern(fuzzy bool, extended bool, caseMode Case, forward bool,
+func BuildPattern(fuzzy bool, fuzzyAlgo algo.Algo, extended bool, caseMode Case, forward bool,
-nth []Range, delimiter Delimiter, runes []rune) *Pattern {
+cacheable bool, nth []Range, delimiter Delimiter, runes []rune) *Pattern {

 var asString string
 if extended {

@@ -90,7 +90,7 @@ func BuildPattern(fuzzy bool, extended bool, caseMode Case, forward bool,
 return cached
 }

-caseSensitive, cacheable := true, true
+caseSensitive := true
 termSets := []termSet{}

 if extended {

@@ -100,7 +100,7 @@ func BuildPattern(fuzzy bool, extended bool, caseMode Case, forward bool,
 for idx, term := range termSet {
 // If the query contains inverse search terms or OR operators,
 // we cannot cache the search scope
-if idx > 0 || term.inv {
+if !cacheable || idx > 0 || term.inv {
 cacheable = false
 break Loop
 }

@@ -117,6 +117,7 @@ func BuildPattern(fuzzy bool, extended bool, caseMode Case, forward bool,
 ptr := &Pattern{
 fuzzy: fuzzy,
+fuzzyAlgo: fuzzyAlgo,
 extended: extended,
 caseSensitive: caseSensitive,
 forward: forward,

@@ -125,9 +126,9 @@ func BuildPattern(fuzzy bool, extended bool, caseMode Case, forward bool,
 cacheable: cacheable,
 nth: nth,
 delimiter: delimiter,
-procFun: make(map[termType]func(bool, bool, []rune, []rune) (int, int))}
+procFun: make(map[termType]algo.Algo)}

-ptr.procFun[termFuzzy] = algo.FuzzyMatch
+ptr.procFun[termFuzzy] = fuzzyAlgo
 ptr.procFun[termEqual] = algo.EqualMatch
 ptr.procFun[termExact] = algo.ExactMatchNaive
 ptr.procFun[termPrefix] = algo.PrefixMatch

@@ -162,12 +163,13 @@ func parseTerms(fuzzy bool, caseMode Case, str string) []termSet {
 if strings.HasPrefix(text, "!") {
 inv = true
+typ = termExact
 text = text[1:]
 }

 if strings.HasPrefix(text, "'") {
 // Flip exactness
-if fuzzy {
+if fuzzy && !inv {
 typ = termExact
 text = text[1:]
 } else {

@@ -227,7 +229,7 @@ func (p *Pattern) CacheKey() string {
 }
 cacheableTerms := []string{}
 for _, termSet := range p.termSets {
-if len(termSet) == 1 && !termSet[0].inv {
+if len(termSet) == 1 && !termSet[0].inv && (p.fuzzy || termSet[0].typ == termExact) {
 cacheableTerms = append(cacheableTerms, string(termSet[0].origText))
 }
 }

@@ -235,9 +237,7 @@ func (p *Pattern) CacheKey() string {
 }

 // Match returns the list of matches Items in the given Chunk
-func (p *Pattern) Match(chunk *Chunk) []*Item {
+func (p *Pattern) Match(chunk *Chunk, slab *util.Slab) []*Result {
-space := chunk

 // ChunkCache: Exact match
 cacheKey := p.CacheKey()
 if p.cacheable {

@@ -246,7 +246,8 @@ func (p *Pattern) Match(chunk *Chunk, slab *util.Slab) []*Result {
 }
 }

-// ChunkCache: Prefix/suffix match
+// Prefix/suffix cache
+var space []*Result
 Loop:
 for idx := 1; idx < len(cacheKey); idx++ {
 // [---------| ] | [ |---------]

@@ -256,14 +257,13 @@ Loop:
 suffix := cacheKey[idx:]
 for _, substr := range [2]*string{&prefix, &suffix} {
 if cached, found := _cache.Find(chunk, *substr); found {
-cachedChunk := Chunk(cached)
+space = cached
-space = &cachedChunk
 break Loop
 }
 }
 }

-matches := p.matchChunk(space)
+matches := p.matchChunk(chunk, space, slab)

 if p.cacheable {
 _cache.Add(chunk, cacheKey, matches)

@@ -271,19 +271,19 @@ Loop:
 return matches
 }

-func (p *Pattern) matchChunk(chunk *Chunk) []*Item {
+func (p *Pattern) matchChunk(chunk *Chunk, space []*Result, slab *util.Slab) []*Result {
-matches := []*Item{}
+matches := []*Result{}
-if !p.extended {
+if space == nil {
 for _, item := range *chunk {
-if sidx, eidx, tlen := p.basicMatch(item); sidx >= 0 {
+if match, _, _ := p.MatchItem(item, false, slab); match != nil {
-matches = append(matches,
+matches = append(matches, match)
-dupItem(item, []Offset{Offset{int32(sidx), int32(eidx), int32(tlen)}}))
 }
 }
 } else {
-for _, item := range *chunk {
+for _, result := range space {
-if offsets := p.extendedMatch(item); len(offsets) == len(p.termSets) {
+if match, _, _ := p.MatchItem(result.item, false, slab); match != nil {
-matches = append(matches, dupItem(item, offsets))
+matches = append(matches, match)
 }
 }
 }

@@ -291,57 +291,75 @@ func (p *Pattern) matchChunk(chunk *Chunk, space []*Result, slab *util.Slab) []*Result {
 }

 // MatchItem returns true if the Item is a match
-func (p *Pattern) MatchItem(item *Item) bool {
+func (p *Pattern) MatchItem(item *Item, withPos bool, slab *util.Slab) (*Result, []Offset, *[]int) {
-if !p.extended {
+if p.extended {
-sidx, _, _ := p.basicMatch(item)
+if offsets, bonus, trimLen, pos := p.extendedMatch(item, withPos, slab); len(offsets) == len(p.termSets) {
-return sidx >= 0
+return buildResult(item, offsets, bonus, trimLen), offsets, pos
+}
+return nil, nil, nil
 }
-offsets := p.extendedMatch(item)
+offset, bonus, trimLen, pos := p.basicMatch(item, withPos, slab)
-return len(offsets) == len(p.termSets)
+if sidx := offset[0]; sidx >= 0 {
+offsets := []Offset{offset}
+return buildResult(item, offsets, bonus, trimLen), offsets, pos
+}
+return nil, nil, nil
 }

-func dupItem(item *Item, offsets []Offset) *Item {
+func (p *Pattern) basicMatch(item *Item, withPos bool, slab *util.Slab) (Offset, int, int, *[]int) {
-sort.Sort(ByOrder(offsets))
-return &Item{
-text: item.text,
-origText: item.origText,
-transformed: item.transformed,
-offsets: offsets,
-colors: item.colors,
-rank: buildEmptyRank(item.Index())}
-}

-func (p *Pattern) basicMatch(item *Item) (int, int, int) {
 input := p.prepareInput(item)
 if p.fuzzy {
-return p.iter(algo.FuzzyMatch, input, p.caseSensitive, p.forward, p.text)
+return p.iter(p.fuzzyAlgo, input, p.caseSensitive, p.forward, p.text, withPos, slab)
 }
-return p.iter(algo.ExactMatchNaive, input, p.caseSensitive, p.forward, p.text)
+return p.iter(algo.ExactMatchNaive, input, p.caseSensitive, p.forward, p.text, withPos, slab)
 }

-func (p *Pattern) extendedMatch(item *Item) []Offset {
+func (p *Pattern) extendedMatch(item *Item, withPos bool, slab *util.Slab) ([]Offset, int, int, *[]int) {
 input := p.prepareInput(item)
 offsets := []Offset{}
+var totalScore int
+var totalTrimLen int
+var allPos *[]int
+if withPos {
+allPos = &[]int{}
+}
 for _, termSet := range p.termSets {
-var offset *Offset
+var offset Offset
+var currentScore int
+var trimLen int
+matched := false
 for _, term := range termSet {
 pfun := p.procFun[term.typ]
-if sidx, eidx, tlen := p.iter(pfun, input, term.caseSensitive, p.forward, term.text); sidx >= 0 {
+off, score, tLen, pos := p.iter(pfun, input, term.caseSensitive, p.forward, term.text, withPos, slab)
+if sidx := off[0]; sidx >= 0 {
 if term.inv {
 continue
 }
-offset = &Offset{int32(sidx), int32(eidx), int32(tlen)}
+offset, currentScore, trimLen = off, score, tLen
+matched = true
+if withPos {
+if pos != nil {
+*allPos = append(*allPos, *pos...)
+} else {
+for idx := off[0]; idx < off[1]; idx++ {
+*allPos = append(*allPos, int(idx))
+}
+}
+}
 break
 } else if term.inv {
-offset = &Offset{0, 0, 0}
+offset, currentScore, trimLen = Offset{0, 0}, 0, 0
+matched = true
 continue
 }
 }
-if offset != nil {
+if matched {
-offsets = append(offsets, *offset)
+offsets = append(offsets, offset)
+totalScore += currentScore
+totalTrimLen += trimLen
 }
 }
-return offsets
+return offsets, totalScore, totalTrimLen, allPos
 }

 func (p *Pattern) prepareInput(item *Item) []Token {

@@ -350,23 +368,28 @@ func (p *Pattern) prepareInput(item *Item) []Token {
 }

 var ret []Token
-if len(p.nth) > 0 {
+if len(p.nth) == 0 {
+ret = []Token{Token{text: &item.text, prefixLength: 0, trimLength: int32(item.text.TrimLength())}}
+} else {
 tokens := Tokenize(item.text, p.delimiter)
 ret = Transform(tokens, p.nth)
-} else {
-ret = []Token{Token{text: item.text, prefixLength: 0, trimLength: util.TrimLen(item.text)}}
 }
 item.transformed = ret
 return ret
 }

-func (p *Pattern) iter(pfun func(bool, bool, []rune, []rune) (int, int),
+func (p *Pattern) iter(pfun algo.Algo, tokens []Token, caseSensitive bool, forward bool, pattern []rune, withPos bool, slab *util.Slab) (Offset, int, int, *[]int) {
-tokens []Token, caseSensitive bool, forward bool, pattern []rune) (int, int, int) {
 for _, part := range tokens {
-prefixLength := part.prefixLength
+if res, pos := pfun(caseSensitive, forward, *part.text, pattern, withPos, slab); res.Start >= 0 {
-if sidx, eidx := pfun(caseSensitive, forward, part.text, pattern); sidx >= 0 {
+sidx := int32(res.Start) + part.prefixLength
-return sidx + prefixLength, eidx + prefixLength, part.trimLength
+eidx := int32(res.End) + part.prefixLength
+if pos != nil {
+for idx := range *pos {
+(*pos)[idx] += int(part.prefixLength)
+}
+}
+return Offset{sidx, eidx}, res.Score, int(part.trimLength), pos
 }
 }
-return -1, -1, -1 // math.MaxUint16
+return Offset{-1, -1}, 0, -1, nil
 }
@@ -5,8 +5,15 @@ import (
 "testing"

 "github.com/junegunn/fzf/src/algo"
+"github.com/junegunn/fzf/src/util"
 )

+var slab *util.Slab

+func init() {
+slab = util.MakeSlab(slab16Size, slab32Size)
+}

 func TestParseTermsExtended(t *testing.T) {
 terms := parseTerms(true, CaseSmart,
 "| aaa 'bbb ^ccc ddd$ !eee !'fff !^ggg !hhh$ | ^iii$ ^xxx | 'yyy | | zzz$ | !ZZZ |")

@@ -15,15 +22,15 @@ func TestParseTermsExtended(t *testing.T) {
 terms[1][0].typ != termExact || terms[1][0].inv ||
 terms[2][0].typ != termPrefix || terms[2][0].inv ||
 terms[3][0].typ != termSuffix || terms[3][0].inv ||
-terms[4][0].typ != termFuzzy || !terms[4][0].inv ||
+terms[4][0].typ != termExact || !terms[4][0].inv ||
-terms[5][0].typ != termExact || !terms[5][0].inv ||
+terms[5][0].typ != termFuzzy || !terms[5][0].inv ||
 terms[6][0].typ != termPrefix || !terms[6][0].inv ||
 terms[7][0].typ != termSuffix || !terms[7][0].inv ||
 terms[7][1].typ != termEqual || terms[7][1].inv ||
 terms[8][0].typ != termPrefix || terms[8][0].inv ||
 terms[8][1].typ != termExact || terms[8][1].inv ||
 terms[8][2].typ != termSuffix || terms[8][2].inv ||
-terms[8][3].typ != termFuzzy || !terms[8][3].inv {
+terms[8][3].typ != termExact || !terms[8][3].inv {
 t.Errorf("%s", terms)
 }
 for idx, termSet := range terms[:8] {

@@ -68,25 +75,31 @@ func TestParseTermsEmpty(t *testing.T) {
 func TestExact(t *testing.T) {
 defer clearPatternCache()
 clearPatternCache()
-pattern := BuildPattern(true, true, CaseSmart, true,
+pattern := BuildPattern(true, algo.FuzzyMatchV2, true, CaseSmart, true, true,
 []Range{}, Delimiter{}, []rune("'abc"))
-sidx, eidx := algo.ExactMatchNaive(
+res, pos := algo.ExactMatchNaive(
-pattern.caseSensitive, pattern.forward, []rune("aabbcc abc"), pattern.termSets[0][0].text)
+pattern.caseSensitive, pattern.forward, util.RunesToChars([]rune("aabbcc abc")), pattern.termSets[0][0].text, true, nil)
-if sidx != 7 || eidx != 10 {
+if res.Start != 7 || res.End != 10 {
-t.Errorf("%s / %d / %d", pattern.termSets, sidx, eidx)
+t.Errorf("%s / %d / %d", pattern.termSets, res.Start, res.End)
+}
+if pos != nil {
+t.Errorf("pos is expected to be nil")
 }
 }

 func TestEqual(t *testing.T) {
 defer clearPatternCache()
 clearPatternCache()
-pattern := BuildPattern(true, true, CaseSmart, true, []Range{}, Delimiter{}, []rune("^AbC$"))
+pattern := BuildPattern(true, algo.FuzzyMatchV2, true, CaseSmart, true, true, []Range{}, Delimiter{}, []rune("^AbC$"))

 match := func(str string, sidxExpected int, eidxExpected int) {
|
||||||
sidx, eidx := algo.EqualMatch(
|
res, pos := algo.EqualMatch(
|
||||||
pattern.caseSensitive, pattern.forward, []rune(str), pattern.termSets[0][0].text)
|
pattern.caseSensitive, pattern.forward, util.RunesToChars([]rune(str)), pattern.termSets[0][0].text, true, nil)
|
||||||
if sidx != sidxExpected || eidx != eidxExpected {
|
if res.Start != sidxExpected || res.End != eidxExpected {
|
||||||
t.Errorf("%s / %d / %d", pattern.termSets, sidx, eidx)
|
t.Errorf("%s / %d / %d", pattern.termSets, res.Start, res.End)
|
||||||
|
}
|
||||||
|
if pos != nil {
|
||||||
|
t.Errorf("pos is expected to be nil")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
match("ABC", -1, -1)
|
match("ABC", -1, -1)
|
||||||
@@ -96,17 +109,17 @@ func TestEqual(t *testing.T) {
|
|||||||
func TestCaseSensitivity(t *testing.T) {
|
func TestCaseSensitivity(t *testing.T) {
|
||||||
defer clearPatternCache()
|
defer clearPatternCache()
|
||||||
clearPatternCache()
|
clearPatternCache()
|
||||||
pat1 := BuildPattern(true, false, CaseSmart, true, []Range{}, Delimiter{}, []rune("abc"))
|
pat1 := BuildPattern(true, algo.FuzzyMatchV2, false, CaseSmart, true, true, []Range{}, Delimiter{}, []rune("abc"))
|
||||||
clearPatternCache()
|
clearPatternCache()
|
||||||
pat2 := BuildPattern(true, false, CaseSmart, true, []Range{}, Delimiter{}, []rune("Abc"))
|
pat2 := BuildPattern(true, algo.FuzzyMatchV2, false, CaseSmart, true, true, []Range{}, Delimiter{}, []rune("Abc"))
|
||||||
clearPatternCache()
|
clearPatternCache()
|
||||||
pat3 := BuildPattern(true, false, CaseIgnore, true, []Range{}, Delimiter{}, []rune("abc"))
|
pat3 := BuildPattern(true, algo.FuzzyMatchV2, false, CaseIgnore, true, true, []Range{}, Delimiter{}, []rune("abc"))
|
||||||
clearPatternCache()
|
clearPatternCache()
|
||||||
pat4 := BuildPattern(true, false, CaseIgnore, true, []Range{}, Delimiter{}, []rune("Abc"))
|
pat4 := BuildPattern(true, algo.FuzzyMatchV2, false, CaseIgnore, true, true, []Range{}, Delimiter{}, []rune("Abc"))
|
||||||
clearPatternCache()
|
clearPatternCache()
|
||||||
pat5 := BuildPattern(true, false, CaseRespect, true, []Range{}, Delimiter{}, []rune("abc"))
|
pat5 := BuildPattern(true, algo.FuzzyMatchV2, false, CaseRespect, true, true, []Range{}, Delimiter{}, []rune("abc"))
|
||||||
clearPatternCache()
|
clearPatternCache()
|
||||||
pat6 := BuildPattern(true, false, CaseRespect, true, []Range{}, Delimiter{}, []rune("Abc"))
|
pat6 := BuildPattern(true, algo.FuzzyMatchV2, false, CaseRespect, true, true, []Range{}, Delimiter{}, []rune("Abc"))
|
||||||
|
|
||||||
if string(pat1.text) != "abc" || pat1.caseSensitive != false ||
|
if string(pat1.text) != "abc" || pat1.caseSensitive != false ||
|
||||||
string(pat2.text) != "Abc" || pat2.caseSensitive != true ||
|
string(pat2.text) != "Abc" || pat2.caseSensitive != true ||
|
||||||
@@ -119,31 +132,42 @@ func TestCaseSensitivity(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestOrigTextAndTransformed(t *testing.T) {
|
func TestOrigTextAndTransformed(t *testing.T) {
|
||||||
pattern := BuildPattern(true, true, CaseSmart, true, []Range{}, Delimiter{}, []rune("jg"))
|
pattern := BuildPattern(true, algo.FuzzyMatchV2, true, CaseSmart, true, true, []Range{}, Delimiter{}, []rune("jg"))
|
||||||
tokens := Tokenize([]rune("junegunn"), Delimiter{})
|
tokens := Tokenize(util.RunesToChars([]rune("junegunn")), Delimiter{})
|
||||||
trans := Transform(tokens, []Range{Range{1, 1}})
|
trans := Transform(tokens, []Range{Range{1, 1}})
|
||||||
|
|
||||||
origRunes := []rune("junegunn.choi")
|
origBytes := []byte("junegunn.choi")
|
||||||
for _, extended := range []bool{false, true} {
|
for _, extended := range []bool{false, true} {
|
||||||
chunk := Chunk{
|
chunk := Chunk{
|
||||||
&Item{
|
&Item{
|
||||||
text: []rune("junegunn"),
|
text: util.RunesToChars([]rune("junegunn")),
|
||||||
origText: &origRunes,
|
origText: &origBytes,
|
||||||
transformed: trans},
|
transformed: trans},
|
||||||
}
|
}
|
||||||
pattern.extended = extended
|
pattern.extended = extended
|
||||||
matches := pattern.matchChunk(&chunk)
|
matches := pattern.matchChunk(&chunk, nil, slab) // No cache
|
||||||
if string(matches[0].text) != "junegunn" || string(*matches[0].origText) != "junegunn.choi" ||
|
if !(matches[0].item.text.ToString() == "junegunn" &&
|
||||||
matches[0].offsets[0][0] != 0 || matches[0].offsets[0][1] != 5 ||
|
string(*matches[0].item.origText) == "junegunn.choi" &&
|
||||||
!reflect.DeepEqual(matches[0].transformed, trans) {
|
reflect.DeepEqual(matches[0].item.transformed, trans)) {
|
||||||
t.Error("Invalid match result", matches)
|
t.Error("Invalid match result", matches)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
match, offsets, pos := pattern.MatchItem(chunk[0], true, slab)
|
||||||
|
if !(match.item.text.ToString() == "junegunn" &&
|
||||||
|
string(*match.item.origText) == "junegunn.choi" &&
|
||||||
|
offsets[0][0] == 0 && offsets[0][1] == 5 &&
|
||||||
|
reflect.DeepEqual(match.item.transformed, trans)) {
|
||||||
|
t.Error("Invalid match result", match, offsets, extended)
|
||||||
|
}
|
||||||
|
if !((*pos)[0] == 4 && (*pos)[1] == 0) {
|
||||||
|
t.Error("Invalid pos array", *pos)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestCacheKey(t *testing.T) {
|
func TestCacheKey(t *testing.T) {
|
||||||
test := func(extended bool, patStr string, expected string, cacheable bool) {
|
test := func(extended bool, patStr string, expected string, cacheable bool) {
|
||||||
pat := BuildPattern(true, extended, CaseSmart, true, []Range{}, Delimiter{}, []rune(patStr))
|
pat := BuildPattern(true, algo.FuzzyMatchV2, extended, CaseSmart, true, true, []Range{}, Delimiter{}, []rune(patStr))
|
||||||
if pat.CacheKey() != expected {
|
if pat.CacheKey() != expected {
|
||||||
t.Errorf("Expected: %s, actual: %s", expected, pat.CacheKey())
|
t.Errorf("Expected: %s, actual: %s", expected, pat.CacheKey())
|
||||||
}
|
}
|
||||||
|
@@ -34,7 +34,7 @@ func (r *Reader) feed(src io.Reader) {
|
|||||||
if r.delimNil {
|
if r.delimNil {
|
||||||
delim = '\000'
|
delim = '\000'
|
||||||
}
|
}
|
||||||
reader := bufio.NewReader(src)
|
reader := bufio.NewReaderSize(src, readerBufferSize)
|
||||||
for {
|
for {
|
||||||
// ReadBytes returns err != nil if and only if the returned data does not
|
// ReadBytes returns err != nil if and only if the returned data does not
|
||||||
// end in delim.
|
// end in delim.
|
||||||
|
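An illustrative aside (not part of the diff above): the reader change swaps bufio.NewReader for bufio.NewReaderSize so the input buffer can be larger than the default. A minimal sketch of the same call; readerBufferSize is fzf's constant and the value used here is only an assumption:

package main

import (
	"bufio"
	"fmt"
	"strings"
)

const readerBufferSize = 64 * 1024 // assumed value, for illustration only

func main() {
	src := strings.NewReader("foo\nbar\n")
	reader := bufio.NewReaderSize(src, readerBufferSize)
	for {
		// ReadBytes returns err != nil iff the returned data does not end in the delimiter.
		line, err := reader.ReadBytes('\n')
		if len(line) > 0 {
			fmt.Print(string(line))
		}
		if err != nil {
			break
		}
	}
}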
241 src/result.go Normal file
@@ -0,0 +1,241 @@
|
|||||||
|
package fzf
|
||||||
|
|
||||||
|
import (
|
||||||
|
"math"
|
||||||
|
"sort"
|
||||||
|
"unicode"
|
||||||
|
|
||||||
|
"github.com/junegunn/fzf/src/curses"
|
||||||
|
"github.com/junegunn/fzf/src/util"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Offset holds two 32-bit integers denoting the offsets of a matched substring
|
||||||
|
type Offset [2]int32
|
||||||
|
|
||||||
|
type colorOffset struct {
|
||||||
|
offset [2]int32
|
||||||
|
color int
|
||||||
|
attr curses.Attr
|
||||||
|
index int32
|
||||||
|
}
|
||||||
|
|
||||||
|
type rank struct {
|
||||||
|
points [4]uint16
|
||||||
|
index int32
|
||||||
|
}
|
||||||
|
|
||||||
|
type Result struct {
|
||||||
|
item *Item
|
||||||
|
rank rank
|
||||||
|
}
|
||||||
|
|
||||||
|
func buildResult(item *Item, offsets []Offset, score int, trimLen int) *Result {
|
||||||
|
if len(offsets) > 1 {
|
||||||
|
sort.Sort(ByOrder(offsets))
|
||||||
|
}
|
||||||
|
|
||||||
|
result := Result{item: item, rank: rank{index: item.index}}
|
||||||
|
numChars := item.text.Length()
|
||||||
|
minBegin := math.MaxUint16
|
||||||
|
maxEnd := 0
|
||||||
|
validOffsetFound := false
|
||||||
|
for _, offset := range offsets {
|
||||||
|
b, e := int(offset[0]), int(offset[1])
|
||||||
|
if b < e {
|
||||||
|
minBegin = util.Min(b, minBegin)
|
||||||
|
maxEnd = util.Max(e, maxEnd)
|
||||||
|
validOffsetFound = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for idx, criterion := range sortCriteria {
|
||||||
|
val := uint16(math.MaxUint16)
|
||||||
|
switch criterion {
|
||||||
|
case byScore:
|
||||||
|
// Higher is better
|
||||||
|
val = math.MaxUint16 - util.AsUint16(score)
|
||||||
|
case byLength:
|
||||||
|
// If offsets is empty, trimLen will be 0, but we don't care
|
||||||
|
val = util.AsUint16(trimLen)
|
||||||
|
case byBegin:
|
||||||
|
if validOffsetFound {
|
||||||
|
whitePrefixLen := 0
|
||||||
|
for idx := 0; idx < numChars; idx++ {
|
||||||
|
r := item.text.Get(idx)
|
||||||
|
whitePrefixLen = idx
|
||||||
|
if idx == minBegin || !unicode.IsSpace(r) {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
val = util.AsUint16(minBegin - whitePrefixLen)
|
||||||
|
}
|
||||||
|
case byEnd:
|
||||||
|
if validOffsetFound {
|
||||||
|
val = util.AsUint16(1 + numChars - maxEnd)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
result.rank.points[idx] = val
|
||||||
|
}
|
||||||
|
|
||||||
|
return &result
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sort criteria to use. Never changes once fzf is started.
|
||||||
|
var sortCriteria []criterion
|
||||||
|
|
||||||
|
// Index returns ordinal index of the Item
|
||||||
|
func (result *Result) Index() int32 {
|
||||||
|
return result.item.index
|
||||||
|
}
|
||||||
|
|
||||||
|
func minRank() rank {
|
||||||
|
return rank{index: 0, points: [4]uint16{math.MaxUint16, 0, 0, 0}}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (result *Result) colorOffsets(matchOffsets []Offset, color int, attr curses.Attr, current bool) []colorOffset {
|
||||||
|
itemColors := result.item.Colors()
|
||||||
|
|
||||||
|
if len(itemColors) == 0 {
|
||||||
|
var offsets []colorOffset
|
||||||
|
for _, off := range matchOffsets {
|
||||||
|
|
||||||
|
offsets = append(offsets, colorOffset{offset: [2]int32{off[0], off[1]}, color: color, attr: attr})
|
||||||
|
}
|
||||||
|
return offsets
|
||||||
|
}
|
||||||
|
|
||||||
|
// Find max column
|
||||||
|
var maxCol int32
|
||||||
|
for _, off := range matchOffsets {
|
||||||
|
if off[1] > maxCol {
|
||||||
|
maxCol = off[1]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for _, ansi := range itemColors {
|
||||||
|
if ansi.offset[1] > maxCol {
|
||||||
|
maxCol = ansi.offset[1]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
cols := make([]int, maxCol)
|
||||||
|
|
||||||
|
for colorIndex, ansi := range itemColors {
|
||||||
|
for i := ansi.offset[0]; i < ansi.offset[1]; i++ {
|
||||||
|
cols[i] = colorIndex + 1 // XXX
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, off := range matchOffsets {
|
||||||
|
for i := off[0]; i < off[1]; i++ {
|
||||||
|
cols[i] = -1
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// sort.Sort(ByOrder(offsets))
|
||||||
|
|
||||||
|
// Merge offsets
|
||||||
|
// ------------ ---- -- ----
|
||||||
|
// ++++++++ ++++++++++
|
||||||
|
// --++++++++-- --++++++++++---
|
||||||
|
curr := 0
|
||||||
|
start := 0
|
||||||
|
var colors []colorOffset
|
||||||
|
add := func(idx int) {
|
||||||
|
if curr != 0 && idx > start {
|
||||||
|
if curr == -1 {
|
||||||
|
colors = append(colors, colorOffset{
|
||||||
|
offset: [2]int32{int32(start), int32(idx)}, color: color, attr: attr})
|
||||||
|
} else {
|
||||||
|
ansi := itemColors[curr-1]
|
||||||
|
fg := ansi.color.fg
|
||||||
|
if fg == -1 {
|
||||||
|
if current {
|
||||||
|
fg = curses.CurrentFG
|
||||||
|
} else {
|
||||||
|
fg = curses.FG
|
||||||
|
}
|
||||||
|
}
|
||||||
|
bg := ansi.color.bg
|
||||||
|
if bg == -1 {
|
||||||
|
if current {
|
||||||
|
bg = curses.DarkBG
|
||||||
|
} else {
|
||||||
|
bg = curses.BG
|
||||||
|
}
|
||||||
|
}
|
||||||
|
colors = append(colors, colorOffset{
|
||||||
|
offset: [2]int32{int32(start), int32(idx)},
|
||||||
|
color: curses.PairFor(fg, bg),
|
||||||
|
attr: ansi.color.attr | attr})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for idx, col := range cols {
|
||||||
|
if col != curr {
|
||||||
|
add(idx)
|
||||||
|
start = idx
|
||||||
|
curr = col
|
||||||
|
}
|
||||||
|
}
|
||||||
|
add(int(maxCol))
|
||||||
|
return colors
|
||||||
|
}
|
||||||
|
|
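An illustrative aside (not part of result.go above): colorOffsets paints ANSI color spans and match spans onto a per-column array (match spans win with the sentinel -1) and then walks the array emitting maximal runs. A simplified standalone sketch of that flatten-then-run-length idea; color and attribute resolution is omitted:

package main

import "fmt"

type span struct{ start, end, id int }

// runs paints each span's id onto a column array (later spans win) and
// returns the merged, non-overlapping runs of identical ids.
func runs(width int, spans []span) []span {
	cols := make([]int, width)
	for _, s := range spans {
		for i := s.start; i < s.end; i++ {
			cols[i] = s.id
		}
	}
	var out []span
	curr, start := 0, 0
	flush := func(end int) {
		if curr != 0 && end > start {
			out = append(out, span{start, end, curr})
		}
	}
	for i, c := range cols {
		if c != curr {
			flush(i)
			start, curr = i, c
		}
	}
	flush(width)
	return out
}

func main() {
	// A color span [0,20) overlapped by a match span [5,15).
	fmt.Println(runs(20, []span{{0, 20, 1}, {5, 15, -1}}))
	// [{0 5 1} {5 15 -1} {15 20 1}]
}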
||||||
|
// ByOrder is for sorting substring offsets
|
||||||
|
type ByOrder []Offset
|
||||||
|
|
||||||
|
func (a ByOrder) Len() int {
|
||||||
|
return len(a)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a ByOrder) Swap(i, j int) {
|
||||||
|
a[i], a[j] = a[j], a[i]
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a ByOrder) Less(i, j int) bool {
|
||||||
|
ioff := a[i]
|
||||||
|
joff := a[j]
|
||||||
|
return (ioff[0] < joff[0]) || (ioff[0] == joff[0]) && (ioff[1] <= joff[1])
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByRelevance is for sorting Items
|
||||||
|
type ByRelevance []*Result
|
||||||
|
|
||||||
|
func (a ByRelevance) Len() int {
|
||||||
|
return len(a)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a ByRelevance) Swap(i, j int) {
|
||||||
|
a[i], a[j] = a[j], a[i]
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a ByRelevance) Less(i, j int) bool {
|
||||||
|
return compareRanks((*a[i]).rank, (*a[j]).rank, false)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByRelevanceTac is for sorting Items
|
||||||
|
type ByRelevanceTac []*Result
|
||||||
|
|
||||||
|
func (a ByRelevanceTac) Len() int {
|
||||||
|
return len(a)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a ByRelevanceTac) Swap(i, j int) {
|
||||||
|
a[i], a[j] = a[j], a[i]
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a ByRelevanceTac) Less(i, j int) bool {
|
||||||
|
return compareRanks((*a[i]).rank, (*a[j]).rank, true)
|
||||||
|
}
|
||||||
|
|
||||||
|
func compareRanks(irank rank, jrank rank, tac bool) bool {
|
||||||
|
for idx := 0; idx < 4; idx++ {
|
||||||
|
left := irank.points[idx]
|
||||||
|
right := jrank.points[idx]
|
||||||
|
if left < right {
|
||||||
|
return true
|
||||||
|
} else if left > right {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return (irank.index <= jrank.index) != tac
|
||||||
|
}
|
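An illustrative aside (not part of the diff above): compareRanks orders results lexicographically over the four rank points (lower wins) and falls back to the item index, with tac flipping only the index tie-break. A tiny standalone re-implementation, purely to illustrate the semantics:

package main

import "fmt"

type rank struct {
	points [4]uint16
	index  int32
}

// less mirrors the comparison shown above: points first, then index, with
// tac inverting the index tie-break.
func less(a, b rank, tac bool) bool {
	for i := 0; i < 4; i++ {
		if a.points[i] != b.points[i] {
			return a.points[i] < b.points[i]
		}
	}
	return (a.index <= b.index) != tac
}

func main() {
	a := rank{points: [4]uint16{3, 0, 0, 0}, index: 5}
	b := rank{points: [4]uint16{3, 0, 0, 0}, index: 6}
	fmt.Println(less(a, b, false)) // true: equal points, smaller index wins
	fmt.Println(less(a, b, true))  // false: --tac reverses the index tie-break
}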
123 src/result_test.go Normal file
@@ -0,0 +1,123 @@
|
|||||||
|
package fzf
|
||||||
|
|
||||||
|
import (
|
||||||
|
"math"
|
||||||
|
"sort"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/junegunn/fzf/src/curses"
|
||||||
|
"github.com/junegunn/fzf/src/util"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestOffsetSort(t *testing.T) {
|
||||||
|
offsets := []Offset{
|
||||||
|
Offset{3, 5}, Offset{2, 7},
|
||||||
|
Offset{1, 3}, Offset{2, 9}}
|
||||||
|
sort.Sort(ByOrder(offsets))
|
||||||
|
|
||||||
|
if offsets[0][0] != 1 || offsets[0][1] != 3 ||
|
||||||
|
offsets[1][0] != 2 || offsets[1][1] != 7 ||
|
||||||
|
offsets[2][0] != 2 || offsets[2][1] != 9 ||
|
||||||
|
offsets[3][0] != 3 || offsets[3][1] != 5 {
|
||||||
|
t.Error("Invalid order:", offsets)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestRankComparison(t *testing.T) {
|
||||||
|
rank := func(vals ...uint16) rank {
|
||||||
|
return rank{
|
||||||
|
points: [4]uint16{vals[0], vals[1], vals[2], vals[3]},
|
||||||
|
index: int32(vals[4])}
|
||||||
|
}
|
||||||
|
if compareRanks(rank(3, 0, 0, 0, 5), rank(2, 0, 0, 0, 7), false) ||
|
||||||
|
!compareRanks(rank(3, 0, 0, 0, 5), rank(3, 0, 0, 0, 6), false) ||
|
||||||
|
!compareRanks(rank(1, 2, 0, 0, 3), rank(1, 3, 0, 0, 2), false) ||
|
||||||
|
!compareRanks(rank(0, 0, 0, 0, 0), rank(0, 0, 0, 0, 0), false) {
|
||||||
|
t.Error("Invalid order")
|
||||||
|
}
|
||||||
|
|
||||||
|
if compareRanks(rank(3, 0, 0, 0, 5), rank(2, 0, 0, 0, 7), true) ||
|
||||||
|
!compareRanks(rank(3, 0, 0, 0, 5), rank(3, 0, 0, 0, 6), false) ||
|
||||||
|
!compareRanks(rank(1, 2, 0, 0, 3), rank(1, 3, 0, 0, 2), true) ||
|
||||||
|
!compareRanks(rank(0, 0, 0, 0, 0), rank(0, 0, 0, 0, 0), false) {
|
||||||
|
t.Error("Invalid order (tac)")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Match length, string length, index
|
||||||
|
func TestResultRank(t *testing.T) {
|
||||||
|
// FIXME global
|
||||||
|
sortCriteria = []criterion{byScore, byLength}
|
||||||
|
|
||||||
|
strs := [][]rune{[]rune("foo"), []rune("foobar"), []rune("bar"), []rune("baz")}
|
||||||
|
item1 := buildResult(&Item{text: util.RunesToChars(strs[0]), index: 1}, []Offset{}, 2, 3)
|
||||||
|
if item1.rank.points[0] != math.MaxUint16-2 || // Bonus
|
||||||
|
item1.rank.points[1] != 3 || // Length
|
||||||
|
item1.rank.points[2] != 0 || // Unused
|
||||||
|
item1.rank.points[3] != 0 || // Unused
|
||||||
|
item1.item.index != 1 {
|
||||||
|
t.Error(item1.rank)
|
||||||
|
}
|
||||||
|
// Only differ in index
|
||||||
|
item2 := buildResult(&Item{text: util.RunesToChars(strs[0])}, []Offset{}, 2, 3)
|
||||||
|
|
||||||
|
items := []*Result{item1, item2}
|
||||||
|
sort.Sort(ByRelevance(items))
|
||||||
|
if items[0] != item2 || items[1] != item1 {
|
||||||
|
t.Error(items)
|
||||||
|
}
|
||||||
|
|
||||||
|
items = []*Result{item2, item1, item1, item2}
|
||||||
|
sort.Sort(ByRelevance(items))
|
||||||
|
if items[0] != item2 || items[1] != item2 ||
|
||||||
|
items[2] != item1 || items[3] != item1 {
|
||||||
|
t.Error(items, item1, item1.item.index, item2, item2.item.index)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sort by relevance
|
||||||
|
item3 := buildResult(&Item{index: 2}, []Offset{Offset{1, 3}, Offset{5, 7}}, 3, 0)
|
||||||
|
item4 := buildResult(&Item{index: 2}, []Offset{Offset{1, 2}, Offset{6, 7}}, 4, 0)
|
||||||
|
item5 := buildResult(&Item{index: 2}, []Offset{Offset{1, 3}, Offset{5, 7}}, 5, 0)
|
||||||
|
item6 := buildResult(&Item{index: 2}, []Offset{Offset{1, 2}, Offset{6, 7}}, 6, 0)
|
||||||
|
items = []*Result{item1, item2, item3, item4, item5, item6}
|
||||||
|
sort.Sort(ByRelevance(items))
|
||||||
|
if !(items[0] == item6 && items[1] == item5 &&
|
||||||
|
items[2] == item4 && items[3] == item3 &&
|
||||||
|
items[4] == item2 && items[5] == item1) {
|
||||||
|
t.Error(items, item1, item2, item3, item4, item5, item6)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestColorOffset(t *testing.T) {
|
||||||
|
// ------------ 20 ---- -- ----
|
||||||
|
// ++++++++ ++++++++++
|
||||||
|
// --++++++++-- --++++++++++---
|
||||||
|
|
||||||
|
offsets := []Offset{Offset{5, 15}, Offset{25, 35}}
|
||||||
|
item := Result{
|
||||||
|
item: &Item{
|
||||||
|
colors: &[]ansiOffset{
|
||||||
|
ansiOffset{[2]int32{0, 20}, ansiState{1, 5, 0}},
|
||||||
|
ansiOffset{[2]int32{22, 27}, ansiState{2, 6, curses.Bold}},
|
||||||
|
ansiOffset{[2]int32{30, 32}, ansiState{3, 7, 0}},
|
||||||
|
ansiOffset{[2]int32{33, 40}, ansiState{4, 8, curses.Bold}}}}}
|
||||||
|
// [{[0 5] 9 false} {[5 15] 99 false} {[15 20] 9 false} {[22 25] 10 true} {[25 35] 99 false} {[35 40] 11 true}]
|
||||||
|
|
||||||
|
colors := item.colorOffsets(offsets, 99, 0, true)
|
||||||
|
assert := func(idx int, b int32, e int32, c int, bold bool) {
|
||||||
|
var attr curses.Attr
|
||||||
|
if bold {
|
||||||
|
attr = curses.Bold
|
||||||
|
}
|
||||||
|
o := colors[idx]
|
||||||
|
if o.offset[0] != b || o.offset[1] != e || o.color != c || o.attr != attr {
|
||||||
|
t.Error(o)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
assert(0, 0, 5, curses.ColUser, false)
|
||||||
|
assert(1, 5, 15, 99, false)
|
||||||
|
assert(2, 15, 20, curses.ColUser, false)
|
||||||
|
assert(3, 22, 25, curses.ColUser+1, true)
|
||||||
|
assert(4, 25, 35, 99, false)
|
||||||
|
assert(5, 35, 40, curses.ColUser+2, true)
|
||||||
|
}
|
611 src/terminal.go (diff suppressed because it is too large)
73 src/terminal_test.go Normal file
@@ -0,0 +1,73 @@
|
|||||||
|
package fzf
|
||||||
|
|
||||||
|
import (
|
||||||
|
"regexp"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/junegunn/fzf/src/util"
|
||||||
|
)
|
||||||
|
|
||||||
|
func newItem(str string) *Item {
|
||||||
|
bytes := []byte(str)
|
||||||
|
trimmed, _, _ := extractColor(str, nil, nil)
|
||||||
|
return &Item{origText: &bytes, text: util.RunesToChars([]rune(trimmed))}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestReplacePlaceholder(t *testing.T) {
|
||||||
|
items1 := []*Item{newItem(" foo'bar \x1b[31mbaz\x1b[m")}
|
||||||
|
items2 := []*Item{
|
||||||
|
newItem("foo'bar \x1b[31mbaz\x1b[m"),
|
||||||
|
newItem("FOO'BAR \x1b[31mBAZ\x1b[m")}
|
||||||
|
|
||||||
|
var result string
|
||||||
|
check := func(expected string) {
|
||||||
|
if result != expected {
|
||||||
|
t.Errorf("expected: %s, actual: %s", expected, result)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// {}, preserve ansi
|
||||||
|
result = replacePlaceholder("echo {}", false, Delimiter{}, "query", items1)
|
||||||
|
check("echo ' foo'\\''bar \x1b[31mbaz\x1b[m'")
|
||||||
|
|
||||||
|
// {}, strip ansi
|
||||||
|
result = replacePlaceholder("echo {}", true, Delimiter{}, "query", items1)
|
||||||
|
check("echo ' foo'\\''bar baz'")
|
||||||
|
|
||||||
|
// {}, with multiple items
|
||||||
|
result = replacePlaceholder("echo {}", true, Delimiter{}, "query", items2)
|
||||||
|
check("echo 'foo'\\''bar baz' 'FOO'\\''BAR BAZ'")
|
||||||
|
|
||||||
|
// {..}, strip leading whitespaces, preserve ansi
|
||||||
|
result = replacePlaceholder("echo {..}", false, Delimiter{}, "query", items1)
|
||||||
|
check("echo 'foo'\\''bar \x1b[31mbaz\x1b[m'")
|
||||||
|
|
||||||
|
// {..}, strip leading whitespaces, strip ansi
|
||||||
|
result = replacePlaceholder("echo {..}", true, Delimiter{}, "query", items1)
|
||||||
|
check("echo 'foo'\\''bar baz'")
|
||||||
|
|
||||||
|
// {q}
|
||||||
|
result = replacePlaceholder("echo {} {q}", true, Delimiter{}, "query", items1)
|
||||||
|
check("echo ' foo'\\''bar baz' 'query'")
|
||||||
|
|
||||||
|
// {q}, multiple items
|
||||||
|
result = replacePlaceholder("echo {}{q}{}", true, Delimiter{}, "query 'string'", items2)
|
||||||
|
check("echo 'foo'\\''bar baz' 'FOO'\\''BAR BAZ''query '\\''string'\\''''foo'\\''bar baz' 'FOO'\\''BAR BAZ'")
|
||||||
|
|
||||||
|
result = replacePlaceholder("echo {1}/{2}/{2,1}/{-1}/{-2}/{}/{..}/{n.t}/\\{}/\\{1}/\\{q}/{3}", true, Delimiter{}, "query", items1)
|
||||||
|
check("echo 'foo'\\''bar'/'baz'/'bazfoo'\\''bar'/'baz'/'foo'\\''bar'/' foo'\\''bar baz'/'foo'\\''bar baz'/{n.t}/{}/{1}/{q}/''")
|
||||||
|
|
||||||
|
result = replacePlaceholder("echo {1}/{2}/{-1}/{-2}/{..}/{n.t}/\\{}/\\{1}/\\{q}/{3}", true, Delimiter{}, "query", items2)
|
||||||
|
check("echo 'foo'\\''bar' 'FOO'\\''BAR'/'baz' 'BAZ'/'baz' 'BAZ'/'foo'\\''bar' 'FOO'\\''BAR'/'foo'\\''bar baz' 'FOO'\\''BAR BAZ'/{n.t}/{}/{1}/{q}/'' ''")
|
||||||
|
|
||||||
|
// String delimiter
|
||||||
|
delim := "'"
|
||||||
|
result = replacePlaceholder("echo {}/{1}/{2}", true, Delimiter{str: &delim}, "query", items1)
|
||||||
|
check("echo ' foo'\\''bar baz'/'foo'/'bar baz'")
|
||||||
|
|
||||||
|
// Regex delimiter
|
||||||
|
regex := regexp.MustCompile("[oa]+")
|
||||||
|
// foo'bar baz
|
||||||
|
result = replacePlaceholder("echo {}/{1}/{3}/{2..3}", true, Delimiter{regex: regex}, "query", items1)
|
||||||
|
check("echo ' foo'\\''bar baz'/'f'/'r b'/''\\''bar b'")
|
||||||
|
}
|
102 src/tokenizer.go
@@ -18,9 +18,9 @@ type Range struct {
|
|||||||
|
|
||||||
// Token contains the tokenized part of the strings and its prefix length
|
// Token contains the tokenized part of the strings and its prefix length
|
||||||
type Token struct {
|
type Token struct {
|
||||||
text []rune
|
text *util.Chars
|
||||||
prefixLength int
|
prefixLength int32
|
||||||
trimLength int
|
trimLength int32
|
||||||
}
|
}
|
||||||
|
|
||||||
// Delimiter for tokenizing the input
|
// Delimiter for tokenizing the input
|
||||||
@@ -75,15 +75,14 @@ func ParseRange(str *string) (Range, bool) {
|
|||||||
return newRange(n, n), true
|
return newRange(n, n), true
|
||||||
}
|
}
|
||||||
|
|
||||||
func withPrefixLengths(tokens [][]rune, begin int) []Token {
|
func withPrefixLengths(tokens []util.Chars, begin int) []Token {
|
||||||
ret := make([]Token, len(tokens))
|
ret := make([]Token, len(tokens))
|
||||||
|
|
||||||
prefixLength := begin
|
prefixLength := begin
|
||||||
for idx, token := range tokens {
|
for idx, token := range tokens {
|
||||||
// Need to define a new local variable instead of the reused token to take
|
// NOTE: &tokens[idx] instead of &tokens
|
||||||
// the pointer to it
|
ret[idx] = Token{&tokens[idx], int32(prefixLength), int32(token.TrimLength())}
|
||||||
ret[idx] = Token{token, prefixLength, util.TrimLen(token)}
|
prefixLength += token.Length()
|
||||||
prefixLength += len(token)
|
|
||||||
}
|
}
|
||||||
return ret
|
return ret
|
||||||
}
|
}
|
||||||
@@ -94,59 +93,60 @@ const (
|
|||||||
awkWhite
|
awkWhite
|
||||||
)
|
)
|
||||||
|
|
||||||
func awkTokenizer(input []rune) ([][]rune, int) {
|
func awkTokenizer(input util.Chars) ([]util.Chars, int) {
|
||||||
// 9, 32
|
// 9, 32
|
||||||
ret := [][]rune{}
|
ret := []util.Chars{}
|
||||||
str := []rune{}
|
|
||||||
prefixLength := 0
|
prefixLength := 0
|
||||||
state := awkNil
|
state := awkNil
|
||||||
for _, r := range input {
|
numChars := input.Length()
|
||||||
|
begin := 0
|
||||||
|
end := 0
|
||||||
|
for idx := 0; idx < numChars; idx++ {
|
||||||
|
r := input.Get(idx)
|
||||||
white := r == 9 || r == 32
|
white := r == 9 || r == 32
|
||||||
switch state {
|
switch state {
|
||||||
case awkNil:
|
case awkNil:
|
||||||
if white {
|
if white {
|
||||||
prefixLength++
|
prefixLength++
|
||||||
} else {
|
} else {
|
||||||
state = awkBlack
|
state, begin, end = awkBlack, idx, idx+1
|
||||||
str = append(str, r)
|
|
||||||
}
|
}
|
||||||
case awkBlack:
|
case awkBlack:
|
||||||
str = append(str, r)
|
end = idx + 1
|
||||||
if white {
|
if white {
|
||||||
state = awkWhite
|
state = awkWhite
|
||||||
}
|
}
|
||||||
case awkWhite:
|
case awkWhite:
|
||||||
if white {
|
if white {
|
||||||
str = append(str, r)
|
end = idx + 1
|
||||||
} else {
|
} else {
|
||||||
ret = append(ret, str)
|
ret = append(ret, input.Slice(begin, end))
|
||||||
state = awkBlack
|
state, begin, end = awkBlack, idx, idx+1
|
||||||
str = []rune{r}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if len(str) > 0 {
|
if begin < end {
|
||||||
ret = append(ret, str)
|
ret = append(ret, input.Slice(begin, end))
|
||||||
}
|
}
|
||||||
return ret, prefixLength
|
return ret, prefixLength
|
||||||
}
|
}
|
||||||
|
|
||||||
// Tokenize tokenizes the given string with the delimiter
|
// Tokenize tokenizes the given string with the delimiter
|
||||||
func Tokenize(runes []rune, delimiter Delimiter) []Token {
|
func Tokenize(text util.Chars, delimiter Delimiter) []Token {
|
||||||
if delimiter.str == nil && delimiter.regex == nil {
|
if delimiter.str == nil && delimiter.regex == nil {
|
||||||
// AWK-style (\S+\s*)
|
// AWK-style (\S+\s*)
|
||||||
tokens, prefixLength := awkTokenizer(runes)
|
tokens, prefixLength := awkTokenizer(text)
|
||||||
return withPrefixLengths(tokens, prefixLength)
|
return withPrefixLengths(tokens, prefixLength)
|
||||||
}
|
}
|
||||||
|
|
||||||
var tokens []string
|
|
||||||
if delimiter.str != nil {
|
if delimiter.str != nil {
|
||||||
tokens = strings.Split(string(runes), *delimiter.str)
|
return withPrefixLengths(text.Split(*delimiter.str), 0)
|
||||||
for i := 0; i < len(tokens)-1; i++ {
|
}
|
||||||
tokens[i] = tokens[i] + *delimiter.str
|
|
||||||
}
|
// FIXME performance
|
||||||
} else if delimiter.regex != nil {
|
var tokens []string
|
||||||
str := string(runes)
|
if delimiter.regex != nil {
|
||||||
|
str := text.ToString()
|
||||||
for len(str) > 0 {
|
for len(str) > 0 {
|
||||||
loc := delimiter.regex.FindStringIndex(str)
|
loc := delimiter.regex.FindStringIndex(str)
|
||||||
if loc == nil {
|
if loc == nil {
|
||||||
@@ -157,9 +157,9 @@ func Tokenize(runes []rune, delimiter Delimiter) []Token {
|
|||||||
str = str[last:]
|
str = str[last:]
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
asRunes := make([][]rune, len(tokens))
|
asRunes := make([]util.Chars, len(tokens))
|
||||||
for i, token := range tokens {
|
for i, token := range tokens {
|
||||||
asRunes[i] = []rune(token)
|
asRunes[i] = util.RunesToChars([]rune(token))
|
||||||
}
|
}
|
||||||
return withPrefixLengths(asRunes, 0)
|
return withPrefixLengths(asRunes, 0)
|
||||||
}
|
}
|
||||||
@@ -167,15 +167,7 @@ func Tokenize(runes []rune, delimiter Delimiter) []Token {
|
|||||||
func joinTokens(tokens []Token) []rune {
|
func joinTokens(tokens []Token) []rune {
|
||||||
ret := []rune{}
|
ret := []rune{}
|
||||||
for _, token := range tokens {
|
for _, token := range tokens {
|
||||||
ret = append(ret, token.text...)
|
ret = append(ret, token.text.ToRunes()...)
|
||||||
}
|
|
||||||
return ret
|
|
||||||
}
|
|
||||||
|
|
||||||
func joinTokensAsRunes(tokens []Token) []rune {
|
|
||||||
ret := []rune{}
|
|
||||||
for _, token := range tokens {
|
|
||||||
ret = append(ret, token.text...)
|
|
||||||
}
|
}
|
||||||
return ret
|
return ret
|
||||||
}
|
}
|
||||||
@@ -185,19 +177,20 @@ func Transform(tokens []Token, withNth []Range) []Token {
|
|||||||
transTokens := make([]Token, len(withNth))
|
transTokens := make([]Token, len(withNth))
|
||||||
numTokens := len(tokens)
|
numTokens := len(tokens)
|
||||||
for idx, r := range withNth {
|
for idx, r := range withNth {
|
||||||
part := []rune{}
|
parts := []*util.Chars{}
|
||||||
minIdx := 0
|
minIdx := 0
|
||||||
if r.begin == r.end {
|
if r.begin == r.end {
|
||||||
idx := r.begin
|
idx := r.begin
|
||||||
if idx == rangeEllipsis {
|
if idx == rangeEllipsis {
|
||||||
part = append(part, joinTokensAsRunes(tokens)...)
|
chars := util.RunesToChars(joinTokens(tokens))
|
||||||
|
parts = append(parts, &chars)
|
||||||
} else {
|
} else {
|
||||||
if idx < 0 {
|
if idx < 0 {
|
||||||
idx += numTokens + 1
|
idx += numTokens + 1
|
||||||
}
|
}
|
||||||
if idx >= 1 && idx <= numTokens {
|
if idx >= 1 && idx <= numTokens {
|
||||||
minIdx = idx - 1
|
minIdx = idx - 1
|
||||||
part = append(part, tokens[idx-1].text...)
|
parts = append(parts, tokens[idx-1].text)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
@@ -224,17 +217,32 @@ func Transform(tokens []Token, withNth []Range) []Token {
|
|||||||
minIdx = util.Max(0, begin-1)
|
minIdx = util.Max(0, begin-1)
|
||||||
for idx := begin; idx <= end; idx++ {
|
for idx := begin; idx <= end; idx++ {
|
||||||
if idx >= 1 && idx <= numTokens {
|
if idx >= 1 && idx <= numTokens {
|
||||||
part = append(part, tokens[idx-1].text...)
|
parts = append(parts, tokens[idx-1].text)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
var prefixLength int
|
// Merge multiple parts
|
||||||
|
var merged util.Chars
|
||||||
|
switch len(parts) {
|
||||||
|
case 0:
|
||||||
|
merged = util.RunesToChars([]rune{})
|
||||||
|
case 1:
|
||||||
|
merged = *parts[0]
|
||||||
|
default:
|
||||||
|
runes := []rune{}
|
||||||
|
for _, part := range parts {
|
||||||
|
runes = append(runes, part.ToRunes()...)
|
||||||
|
}
|
||||||
|
merged = util.RunesToChars(runes)
|
||||||
|
}
|
||||||
|
|
||||||
|
var prefixLength int32
|
||||||
if minIdx < numTokens {
|
if minIdx < numTokens {
|
||||||
prefixLength = tokens[minIdx].prefixLength
|
prefixLength = tokens[minIdx].prefixLength
|
||||||
} else {
|
} else {
|
||||||
prefixLength = 0
|
prefixLength = 0
|
||||||
}
|
}
|
||||||
transTokens[idx] = Token{part, prefixLength, util.TrimLen(part)}
|
transTokens[idx] = Token{&merged, prefixLength, int32(merged.TrimLength())}
|
||||||
}
|
}
|
||||||
return transTokens
|
return transTokens
|
||||||
}
|
}
|
||||||
|
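An illustrative aside (not part of the diff above): the rewritten awkTokenizer stops copying runes into a scratch slice; it tracks begin/end indices and slices the input, so each token shares the original storage. A standalone sketch of the same state machine over a plain string:

package main

import "fmt"

// awkSplit splits the input the way awk does: each token keeps the
// whitespace that follows it, and leading whitespace is reported separately.
func awkSplit(input string) (tokens []string, prefixLength int) {
	begin, end := 0, 0
	const nilState, black, white = 0, 1, 2
	state := nilState
	for idx := 0; idx < len(input); idx++ {
		isWhite := input[idx] == ' ' || input[idx] == '\t'
		switch state {
		case nilState:
			if isWhite {
				prefixLength++
			} else {
				state, begin, end = black, idx, idx+1
			}
		case black:
			end = idx + 1
			if isWhite {
				state = white
			}
		case white:
			if isWhite {
				end = idx + 1
			} else {
				tokens = append(tokens, input[begin:end])
				state, begin, end = black, idx, idx+1
			}
		}
	}
	if begin < end {
		tokens = append(tokens, input[begin:end])
	}
	return tokens, prefixLength
}

func main() {
	tokens, prefix := awkSplit("  abc: def: ghi ")
	fmt.Printf("%q prefix=%d\n", tokens, prefix)
	// ["abc: " "def: " "ghi "] prefix=2
}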
@@ -1,6 +1,10 @@
|
|||||||
package fzf
|
package fzf
|
||||||
|
|
||||||
import "testing"
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/junegunn/fzf/src/util"
|
||||||
|
)
|
||||||
|
|
||||||
func TestParseRange(t *testing.T) {
|
func TestParseRange(t *testing.T) {
|
||||||
{
|
{
|
||||||
@@ -43,23 +47,23 @@ func TestParseRange(t *testing.T) {
|
|||||||
func TestTokenize(t *testing.T) {
|
func TestTokenize(t *testing.T) {
|
||||||
// AWK-style
|
// AWK-style
|
||||||
input := " abc: def: ghi "
|
input := " abc: def: ghi "
|
||||||
tokens := Tokenize([]rune(input), Delimiter{})
|
tokens := Tokenize(util.RunesToChars([]rune(input)), Delimiter{})
|
||||||
if string(tokens[0].text) != "abc: " || tokens[0].prefixLength != 2 || tokens[0].trimLength != 4 {
|
if tokens[0].text.ToString() != "abc: " || tokens[0].prefixLength != 2 || tokens[0].trimLength != 4 {
|
||||||
t.Errorf("%s", tokens)
|
t.Errorf("%s", tokens)
|
||||||
}
|
}
|
||||||
|
|
||||||
// With delimiter
|
// With delimiter
|
||||||
tokens = Tokenize([]rune(input), delimiterRegexp(":"))
|
tokens = Tokenize(util.RunesToChars([]rune(input)), delimiterRegexp(":"))
|
||||||
if string(tokens[0].text) != " abc:" || tokens[0].prefixLength != 0 || tokens[0].trimLength != 4 {
|
if tokens[0].text.ToString() != " abc:" || tokens[0].prefixLength != 0 || tokens[0].trimLength != 4 {
|
||||||
t.Errorf("%s", tokens)
|
t.Errorf("%s", tokens)
|
||||||
}
|
}
|
||||||
|
|
||||||
// With delimiter regex
|
// With delimiter regex
|
||||||
tokens = Tokenize([]rune(input), delimiterRegexp("\\s+"))
|
tokens = Tokenize(util.RunesToChars([]rune(input)), delimiterRegexp("\\s+"))
|
||||||
if string(tokens[0].text) != " " || tokens[0].prefixLength != 0 || tokens[0].trimLength != 0 ||
|
if tokens[0].text.ToString() != " " || tokens[0].prefixLength != 0 || tokens[0].trimLength != 0 ||
|
||||||
string(tokens[1].text) != "abc: " || tokens[1].prefixLength != 2 || tokens[1].trimLength != 4 ||
|
tokens[1].text.ToString() != "abc: " || tokens[1].prefixLength != 2 || tokens[1].trimLength != 4 ||
|
||||||
string(tokens[2].text) != "def: " || tokens[2].prefixLength != 8 || tokens[2].trimLength != 4 ||
|
tokens[2].text.ToString() != "def: " || tokens[2].prefixLength != 8 || tokens[2].trimLength != 4 ||
|
||||||
string(tokens[3].text) != "ghi " || tokens[3].prefixLength != 14 || tokens[3].trimLength != 3 {
|
tokens[3].text.ToString() != "ghi " || tokens[3].prefixLength != 14 || tokens[3].trimLength != 3 {
|
||||||
t.Errorf("%s", tokens)
|
t.Errorf("%s", tokens)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -67,7 +71,7 @@ func TestTokenize(t *testing.T) {
|
|||||||
func TestTransform(t *testing.T) {
|
func TestTransform(t *testing.T) {
|
||||||
input := " abc: def: ghi: jkl"
|
input := " abc: def: ghi: jkl"
|
||||||
{
|
{
|
||||||
tokens := Tokenize([]rune(input), Delimiter{})
|
tokens := Tokenize(util.RunesToChars([]rune(input)), Delimiter{})
|
||||||
{
|
{
|
||||||
ranges := splitNth("1,2,3")
|
ranges := splitNth("1,2,3")
|
||||||
tx := Transform(tokens, ranges)
|
tx := Transform(tokens, ranges)
|
||||||
@@ -80,25 +84,25 @@ func TestTransform(t *testing.T) {
|
|||||||
tx := Transform(tokens, ranges)
|
tx := Transform(tokens, ranges)
|
||||||
if string(joinTokens(tx)) != "abc: def: ghi: def: ghi: jklabc: " ||
|
if string(joinTokens(tx)) != "abc: def: ghi: def: ghi: jklabc: " ||
|
||||||
len(tx) != 4 ||
|
len(tx) != 4 ||
|
||||||
string(tx[0].text) != "abc: def: " || tx[0].prefixLength != 2 ||
|
tx[0].text.ToString() != "abc: def: " || tx[0].prefixLength != 2 ||
|
||||||
string(tx[1].text) != "ghi: " || tx[1].prefixLength != 14 ||
|
tx[1].text.ToString() != "ghi: " || tx[1].prefixLength != 14 ||
|
||||||
string(tx[2].text) != "def: ghi: jkl" || tx[2].prefixLength != 8 ||
|
tx[2].text.ToString() != "def: ghi: jkl" || tx[2].prefixLength != 8 ||
|
||||||
string(tx[3].text) != "abc: " || tx[3].prefixLength != 2 {
|
tx[3].text.ToString() != "abc: " || tx[3].prefixLength != 2 {
|
||||||
t.Errorf("%s", tx)
|
t.Errorf("%s", tx)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
{
|
{
|
||||||
tokens := Tokenize([]rune(input), delimiterRegexp(":"))
|
tokens := Tokenize(util.RunesToChars([]rune(input)), delimiterRegexp(":"))
|
||||||
{
|
{
|
||||||
ranges := splitNth("1..2,3,2..,1")
|
ranges := splitNth("1..2,3,2..,1")
|
||||||
tx := Transform(tokens, ranges)
|
tx := Transform(tokens, ranges)
|
||||||
if string(joinTokens(tx)) != " abc: def: ghi: def: ghi: jkl abc:" ||
|
if string(joinTokens(tx)) != " abc: def: ghi: def: ghi: jkl abc:" ||
|
||||||
len(tx) != 4 ||
|
len(tx) != 4 ||
|
||||||
string(tx[0].text) != " abc: def:" || tx[0].prefixLength != 0 ||
|
tx[0].text.ToString() != " abc: def:" || tx[0].prefixLength != 0 ||
|
||||||
string(tx[1].text) != " ghi:" || tx[1].prefixLength != 12 ||
|
tx[1].text.ToString() != " ghi:" || tx[1].prefixLength != 12 ||
|
||||||
string(tx[2].text) != " def: ghi: jkl" || tx[2].prefixLength != 6 ||
|
tx[2].text.ToString() != " def: ghi: jkl" || tx[2].prefixLength != 6 ||
|
||||||
string(tx[3].text) != " abc:" || tx[3].prefixLength != 0 {
|
tx[3].text.ToString() != " abc:" || tx[3].prefixLength != 0 {
|
||||||
t.Errorf("%s", tx)
|
t.Errorf("%s", tx)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@@ -2,6 +2,7 @@
|
|||||||
|
|
||||||
# http://www.rubydoc.info/github/rest-client/rest-client/RestClient
|
# http://www.rubydoc.info/github/rest-client/rest-client/RestClient
|
||||||
require 'rest_client'
|
require 'rest_client'
|
||||||
|
require 'json'
|
||||||
|
|
||||||
if ARGV.length < 3
|
if ARGV.length < 3
|
||||||
puts "usage: #$0 <token> <version> <files...>"
|
puts "usage: #$0 <token> <version> <files...>"
|
||||||
|
157 src/util/chars.go Normal file
@@ -0,0 +1,157 @@
|
|||||||
|
package util
|
||||||
|
|
||||||
|
import (
|
||||||
|
"unicode"
|
||||||
|
"unicode/utf8"
|
||||||
|
)
|
||||||
|
|
||||||
|
type Chars struct {
|
||||||
|
runes []rune
|
||||||
|
bytes []byte
|
||||||
|
}
|
||||||
|
|
||||||
|
// ToChars converts byte array into rune array
|
||||||
|
func ToChars(bytea []byte) Chars {
|
||||||
|
var runes []rune
|
||||||
|
ascii := true
|
||||||
|
numBytes := len(bytea)
|
||||||
|
for i := 0; i < numBytes; {
|
||||||
|
if bytea[i] < utf8.RuneSelf {
|
||||||
|
if !ascii {
|
||||||
|
runes = append(runes, rune(bytea[i]))
|
||||||
|
}
|
||||||
|
i++
|
||||||
|
} else {
|
||||||
|
if ascii {
|
||||||
|
ascii = false
|
||||||
|
runes = make([]rune, i, numBytes)
|
||||||
|
for j := 0; j < i; j++ {
|
||||||
|
runes[j] = rune(bytea[j])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
r, sz := utf8.DecodeRune(bytea[i:])
|
||||||
|
i += sz
|
||||||
|
runes = append(runes, r)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if ascii {
|
||||||
|
return Chars{bytes: bytea}
|
||||||
|
}
|
||||||
|
return Chars{runes: runes}
|
||||||
|
}
|
||||||
|
|
||||||
|
func RunesToChars(runes []rune) Chars {
|
||||||
|
return Chars{runes: runes}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (chars *Chars) Get(i int) rune {
|
||||||
|
if chars.runes != nil {
|
||||||
|
return chars.runes[i]
|
||||||
|
}
|
||||||
|
return rune(chars.bytes[i])
|
||||||
|
}
|
||||||
|
|
||||||
|
func (chars *Chars) Length() int {
|
||||||
|
if chars.runes != nil {
|
||||||
|
return len(chars.runes)
|
||||||
|
}
|
||||||
|
return len(chars.bytes)
|
||||||
|
}
|
||||||
|
|
||||||
|
// TrimLength returns the length after trimming leading and trailing whitespaces
|
||||||
|
func (chars *Chars) TrimLength() int {
|
||||||
|
var i int
|
||||||
|
len := chars.Length()
|
||||||
|
for i = len - 1; i >= 0; i-- {
|
||||||
|
char := chars.Get(i)
|
||||||
|
if !unicode.IsSpace(char) {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Completely empty
|
||||||
|
if i < 0 {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
var j int
|
||||||
|
for j = 0; j < len; j++ {
|
||||||
|
char := chars.Get(j)
|
||||||
|
if !unicode.IsSpace(char) {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return i - j + 1
|
||||||
|
}
|
||||||
|
|
||||||
|
func (chars *Chars) TrailingWhitespaces() int {
|
||||||
|
whitespaces := 0
|
||||||
|
for i := chars.Length() - 1; i >= 0; i-- {
|
||||||
|
char := chars.Get(i)
|
||||||
|
if !unicode.IsSpace(char) {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
whitespaces++
|
||||||
|
}
|
||||||
|
return whitespaces
|
||||||
|
}
|
||||||
|
|
||||||
|
func (chars *Chars) ToString() string {
|
||||||
|
if chars.runes != nil {
|
||||||
|
return string(chars.runes)
|
||||||
|
}
|
||||||
|
return string(chars.bytes)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (chars *Chars) ToRunes() []rune {
|
||||||
|
if chars.runes != nil {
|
||||||
|
return chars.runes
|
||||||
|
}
|
||||||
|
runes := make([]rune, len(chars.bytes))
|
||||||
|
for idx, b := range chars.bytes {
|
||||||
|
runes[idx] = rune(b)
|
||||||
|
}
|
||||||
|
return runes
|
||||||
|
}
|
||||||
|
|
||||||
|
func (chars *Chars) Slice(b int, e int) Chars {
|
||||||
|
if chars.runes != nil {
|
||||||
|
return Chars{runes: chars.runes[b:e]}
|
||||||
|
}
|
||||||
|
return Chars{bytes: chars.bytes[b:e]}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (chars *Chars) Split(delimiter string) []Chars {
|
||||||
|
delim := []rune(delimiter)
|
||||||
|
numChars := chars.Length()
|
||||||
|
numDelim := len(delim)
|
||||||
|
begin := 0
|
||||||
|
ret := make([]Chars, 0, 1)
|
||||||
|
|
||||||
|
for index := 0; index < numChars; {
|
||||||
|
if index+numDelim <= numChars {
|
||||||
|
match := true
|
||||||
|
for off, d := range delim {
|
||||||
|
if chars.Get(index+off) != d {
|
||||||
|
match = false
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Found the delimiter
|
||||||
|
if match {
|
||||||
|
incr := Max(numDelim, 1)
|
||||||
|
ret = append(ret, chars.Slice(begin, index+incr))
|
||||||
|
index += incr
|
||||||
|
begin = index
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// Impossible to find the delimiter in the remaining substring
|
||||||
|
break
|
||||||
|
}
|
||||||
|
index++
|
||||||
|
}
|
||||||
|
if begin < numChars || len(ret) == 0 {
|
||||||
|
ret = append(ret, chars.Slice(begin, numChars))
|
||||||
|
}
|
||||||
|
return ret
|
||||||
|
}
|
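An illustrative aside (not part of the diff above): Chars keeps ASCII-only input as the original byte slice and only builds a rune slice when a multi-byte character appears, so the common case avoids an extra allocation. A short usage sketch against the API added above (import path assumed from this repository):

package main

import (
	"fmt"

	"github.com/junegunn/fzf/src/util"
)

func main() {
	ascii := util.ToChars([]byte("  hello  ")) // stays byte-backed
	wide := util.ToChars([]byte(" 한글 abc ")) // promoted to a rune slice

	fmt.Println(ascii.Length(), ascii.TrimLength()) // 9 5
	fmt.Println(wide.Length(), wide.TrimLength())   // 8 6

	// Split keeps the delimiter attached to each token, as in the tests below.
	for _, tok := range util.ToChars([]byte("abc:def::")).Split(":") {
		fmt.Printf("%q ", tok.ToString())
	}
	fmt.Println() // "abc:" "def:" ":"
}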
82 src/util/chars_test.go Normal file
@@ -0,0 +1,82 @@
|
|||||||
|
package util
|
||||||
|
|
||||||
|
import "testing"
|
||||||
|
|
||||||
|
func TestToCharsNil(t *testing.T) {
|
||||||
|
bs := Chars{bytes: []byte{}}
|
||||||
|
if bs.bytes == nil || bs.runes != nil {
|
||||||
|
t.Error()
|
||||||
|
}
|
||||||
|
rs := RunesToChars([]rune{})
|
||||||
|
if rs.bytes != nil || rs.runes == nil {
|
||||||
|
t.Error()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestToCharsAscii(t *testing.T) {
|
||||||
|
chars := ToChars([]byte("foobar"))
|
||||||
|
if chars.ToString() != "foobar" || chars.runes != nil {
|
||||||
|
t.Error()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestCharsLength(t *testing.T) {
|
||||||
|
chars := ToChars([]byte("\tabc한글 "))
|
||||||
|
if chars.Length() != 8 || chars.TrimLength() != 5 {
|
||||||
|
t.Error()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestCharsToString(t *testing.T) {
|
||||||
|
text := "\tabc한글 "
|
||||||
|
chars := ToChars([]byte(text))
|
||||||
|
if chars.ToString() != text {
|
||||||
|
t.Error()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestTrimLength(t *testing.T) {
|
||||||
|
check := func(str string, exp int) {
|
||||||
|
chars := ToChars([]byte(str))
|
||||||
|
trimmed := chars.TrimLength()
|
||||||
|
if trimmed != exp {
|
||||||
|
t.Errorf("Invalid TrimLength result for '%s': %d (expected %d)",
|
||||||
|
str, trimmed, exp)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
check("hello", 5)
|
||||||
|
check("hello ", 5)
|
||||||
|
check("hello ", 5)
|
||||||
|
check(" hello", 5)
|
||||||
|
check(" hello", 5)
|
||||||
|
check(" hello ", 5)
|
||||||
|
check(" hello ", 5)
|
||||||
|
check("h o", 5)
|
||||||
|
check(" h o ", 5)
|
||||||
|
check(" ", 0)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSplit(t *testing.T) {
|
||||||
|
check := func(str string, delim string, tokens ...string) {
|
||||||
|
input := ToChars([]byte(str))
|
||||||
|
result := input.Split(delim)
|
||||||
|
if len(result) != len(tokens) {
|
||||||
|
t.Errorf("Invalid Split result for '%s': %d tokens found (expected %d): %s",
|
||||||
|
str, len(result), len(tokens), result)
|
||||||
|
}
|
||||||
|
for idx, token := range tokens {
|
||||||
|
if result[idx].ToString() != token {
|
||||||
|
t.Errorf("Invalid Split result for '%s': %s (expected %s)",
|
||||||
|
str, result[idx].ToString(), token)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
check("abc:def::", ":", "abc:", "def:", ":")
|
||||||
|
check("abc:def::", "-", "abc:def::")
|
||||||
|
check("abc", "", "a", "b", "c")
|
||||||
|
check("abc", "a", "a", "bc")
|
||||||
|
check("abc", "ab", "ab", "c")
|
||||||
|
check("abc", "abc", "abc")
|
||||||
|
check("abc", "abcd", "abc")
|
||||||
|
check("", "abcd", "")
|
||||||
|
}
|
12 src/util/slab.go Normal file
@@ -0,0 +1,12 @@
|
|||||||
|
package util
|
||||||
|
|
||||||
|
type Slab struct {
|
||||||
|
I16 []int16
|
||||||
|
I32 []int32
|
||||||
|
}
|
||||||
|
|
||||||
|
func MakeSlab(size16 int, size32 int) *Slab {
|
||||||
|
return &Slab{
|
||||||
|
I16: make([]int16, size16),
|
||||||
|
I32: make([]int32, size32)}
|
||||||
|
}
|
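An illustrative aside (not part of the diff above): a Slab is a pair of preallocated scratch arrays that can be handed to the scoring functions call after call instead of allocating new backing arrays each time. A minimal sketch of the reuse pattern; the sizes and the borrowing helper are illustrative assumptions, not fzf's API:

package main

import (
	"fmt"

	"github.com/junegunn/fzf/src/util"
)

// scoreWithSlab stands in for a matcher step that needs an int16 scratch
// buffer of length n; it borrows from the slab when the slab is big enough.
func scoreWithSlab(slab *util.Slab, n int) []int16 {
	if slab != nil && n <= len(slab.I16) {
		return slab.I16[:n] // reuse preallocated memory
	}
	return make([]int16, n) // fall back to a fresh allocation
}

func main() {
	slab := util.MakeSlab(100*1024, 2048) // assumed sizes, for illustration
	buf := scoreWithSlab(slab, 16)
	fmt.Println(len(buf), cap(buf) >= 16) // 16 true
}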
@@ -4,21 +4,34 @@ package util
|
|||||||
import "C"
|
import "C"
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"math"
|
||||||
"os"
|
"os"
|
||||||
"os/exec"
|
"os/exec"
|
||||||
"time"
|
"time"
|
||||||
"unicode/utf8"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// Max returns the largest integer
|
// Max returns the largest integer
|
||||||
func Max(first int, items ...int) int {
|
func Max(first int, second int) int {
|
||||||
max := first
|
if first >= second {
|
||||||
for _, item := range items {
|
return first
|
||||||
if item > max {
|
|
||||||
max = item
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
return max
|
return second
|
||||||
|
}
|
||||||
|
|
||||||
|
// Max16 returns the largest integer
|
||||||
|
func Max16(first int16, second int16) int16 {
|
||||||
|
if first >= second {
|
||||||
|
return first
|
||||||
|
}
|
||||||
|
return second
|
||||||
|
}
|
||||||
|
|
||||||
|
// Max32 returns the largest 32-bit integer
|
||||||
|
func Max32(first int32, second int32) int32 {
|
||||||
|
if first > second {
|
||||||
|
return first
|
||||||
|
}
|
||||||
|
return second
|
||||||
}
|
}
|
||||||
|
|
||||||
// Min returns the smallest integer
|
// Min returns the smallest integer
|
||||||
@@ -37,14 +50,6 @@ func Min32(first int32, second int32) int32 {
|
|||||||
return second
|
return second
|
||||||
}
|
}
|
||||||
|
|
||||||
// Max32 returns the largest 32-bit integer
|
|
||||||
func Max32(first int32, second int32) int32 {
|
|
||||||
if first > second {
|
|
||||||
return first
|
|
||||||
}
|
|
||||||
return second
|
|
||||||
}
|
|
||||||
|
|
||||||
// Constrain32 limits the given 32-bit integer with the upper and lower bounds
|
// Constrain32 limits the given 32-bit integer with the upper and lower bounds
|
||||||
func Constrain32(val int32, min int32, max int32) int32 {
|
func Constrain32(val int32, min int32, max int32) int32 {
|
||||||
if val < min {
|
if val < min {
|
||||||
@@ -67,6 +72,15 @@ func Constrain(val int, min int, max int) int {
|
|||||||
return val
|
return val
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func AsUint16(val int) uint16 {
|
||||||
|
if val > math.MaxUint16 {
|
||||||
|
return math.MaxUint16
|
||||||
|
} else if val < 0 {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
return uint16(val)
|
||||||
|
}
|
||||||
|
|
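An illustrative aside (not part of the diff above): AsUint16 saturates an int into the uint16 range, which is what keeps rank points from overflowing. A small illustration with arbitrarily chosen inputs:

package main

import (
	"fmt"

	"github.com/junegunn/fzf/src/util"
)

func main() {
	fmt.Println(util.AsUint16(123))     // 123
	fmt.Println(util.AsUint16(-7))      // 0      (clamped at the bottom)
	fmt.Println(util.AsUint16(1 << 20)) // 65535  (clamped at math.MaxUint16)
}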
||||||
// DurWithin limits the given time.Duration with the upper and lower bounds
|
// DurWithin limits the given time.Duration with the upper and lower bounds
|
||||||
func DurWithin(
|
func DurWithin(
|
||||||
val time.Duration, min time.Duration, max time.Duration) time.Duration {
|
val time.Duration, min time.Duration, max time.Duration) time.Duration {
|
||||||
@@ -84,58 +98,6 @@ func IsTty() bool {
|
|||||||
return int(C.isatty(C.int(os.Stdin.Fd()))) != 0
|
return int(C.isatty(C.int(os.Stdin.Fd()))) != 0
|
||||||
}
|
}
|
||||||
|
|
||||||
// TrimRight returns rune array with trailing white spaces cut off
|
|
||||||
func TrimRight(runes []rune) []rune {
|
|
||||||
var i int
|
|
||||||
for i = len(runes) - 1; i >= 0; i-- {
|
|
||||||
char := runes[i]
|
|
||||||
if char != ' ' && char != '\t' {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return runes[0 : i+1]
|
|
||||||
}
|
|
||||||
|
|
||||||
// BytesToRunes converts byte array into rune array
|
|
||||||
func BytesToRunes(bytea []byte) []rune {
|
|
||||||
runes := make([]rune, 0, len(bytea))
|
|
||||||
for i := 0; i < len(bytea); {
|
|
||||||
if bytea[i] < utf8.RuneSelf {
|
|
||||||
runes = append(runes, rune(bytea[i]))
|
|
||||||
i++
|
|
||||||
} else {
|
|
||||||
r, sz := utf8.DecodeRune(bytea[i:])
|
|
||||||
i += sz
|
|
||||||
runes = append(runes, r)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return runes
|
|
||||||
}
|
|
||||||
|
|
||||||
// TrimLen returns the length of trimmed rune array
|
|
||||||
func TrimLen(runes []rune) int {
|
|
||||||
var i int
|
|
||||||
for i = len(runes) - 1; i >= 0; i-- {
|
|
||||||
char := runes[i]
|
|
||||||
if char != ' ' && char != '\t' {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// Completely empty
|
|
||||||
if i < 0 {
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
var j int
|
|
||||||
for j = 0; j < len(runes); j++ {
|
|
||||||
char := runes[j]
|
|
||||||
if char != ' ' && char != '\t' {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return i - j + 1
|
|
||||||
}
|
|
||||||
|
|
||||||
// ExecCommand executes the given command with $SHELL
|
// ExecCommand executes the given command with $SHELL
|
||||||
func ExecCommand(command string) *exec.Cmd {
|
func ExecCommand(command string) *exec.Cmd {
|
||||||
shell := os.Getenv("SHELL")
|
shell := os.Getenv("SHELL")
|
||||||
|
@@ -3,7 +3,7 @@ package util
|
|||||||
import "testing"
|
import "testing"
|
||||||
|
|
||||||
func TestMax(t *testing.T) {
|
func TestMax(t *testing.T) {
|
||||||
if Max(-2, 5, 1, 4, 3) != 5 {
|
if Max(-2, 5) != 5 {
|
||||||
t.Error("Invalid result")
|
t.Error("Invalid result")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -20,23 +20,3 @@ func TestContrain(t *testing.T) {
|
|||||||
t.Error("Expected", 3)
|
t.Error("Expected", 3)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestTrimLen(t *testing.T) {
|
|
||||||
check := func(str string, exp int) {
|
|
||||||
trimmed := TrimLen([]rune(str))
|
|
||||||
if trimmed != exp {
|
|
||||||
t.Errorf("Invalid TrimLen result for '%s': %d (expected %d)",
|
|
||||||
str, trimmed, exp)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
check("hello", 5)
|
|
||||||
check("hello ", 5)
|
|
||||||
check("hello ", 5)
|
|
||||||
check(" hello", 5)
|
|
||||||
check(" hello", 5)
|
|
||||||
check(" hello ", 5)
|
|
||||||
check(" hello ", 5)
|
|
||||||
check("h o", 5)
|
|
||||||
check(" h o ", 5)
|
|
||||||
check(" ", 0)
|
|
||||||
}
|
|
||||||
test/fzf.vader
@@ -1,5 +1,6 @@
 Execute (Setup):
   let g:dir = fnamemodify(g:vader_file, ':p:h')
+  unlet! g:fzf_layout g:fzf_action g:fzf_history_dir
   Log 'Test directory: ' . g:dir
   Save &acd
 
@@ -43,6 +44,11 @@ Execute (fzf#run with dir option and noautochdir):
   " No change in working directory
   AssertEqual cwd, getcwd()
 
+  call fzf#run({'source': ['/foobar'], 'sink': 'tabe', 'dir': '/tmp', 'options': '-1'})
+  AssertEqual cwd, getcwd()
+  tabclose
+  AssertEqual cwd, getcwd()
+
 Execute (Incomplete fzf#run with dir option and autochdir):
   set acd
   let cwd = getcwd()
@@ -64,6 +70,79 @@ Execute (fzf#run with dir option and autochdir when final cwd is same as dir):
   " Working directory changed due to &acd
   AssertEqual '/', getcwd()
 
+Execute (fzf#wrap):
+  AssertThrows fzf#wrap({'foo': 'bar'})
+
+  let opts = fzf#wrap('foobar')
+  Log opts
+  AssertEqual '~40%', opts.down
+  Assert opts.options =~ '--expect='
+  Assert !has_key(opts, 'sink')
+  Assert has_key(opts, 'sink*')
+
+  let opts = fzf#wrap('foobar', {}, 0)
+  Log opts
+  AssertEqual '~40%', opts.down
+
+  let opts = fzf#wrap('foobar', {}, 1)
+  Log opts
+  Assert !has_key(opts, 'down')
+
+  let opts = fzf#wrap('foobar', {'down': '50%'})
+  Log opts
+  AssertEqual '50%', opts.down
+
+  let opts = fzf#wrap('foobar', {'down': '50%'}, 1)
+  Log opts
+  Assert !has_key(opts, 'down')
+
+  let opts = fzf#wrap('foobar', {'sink': 'e'})
+  Log opts
+  AssertEqual 'e', opts.sink
+  Assert !has_key(opts, 'sink*')
+
+  let opts = fzf#wrap('foobar', {'options': '--reverse'})
+  Log opts
+  Assert opts.options =~ '--expect='
+  Assert opts.options =~ '--reverse'
+
+  let g:fzf_layout = {'window': 'enew'}
+  let opts = fzf#wrap('foobar')
+  Log opts
+  AssertEqual 'enew', opts.window
+
+  let opts = fzf#wrap('foobar', {}, 1)
+  Log opts
+  Assert !has_key(opts, 'window')
+
+  let opts = fzf#wrap('foobar', {'right': '50%'})
+  Log opts
+  Assert !has_key(opts, 'window')
+  AssertEqual '50%', opts.right
+
+  let opts = fzf#wrap('foobar', {'right': '50%'}, 1)
+  Log opts
+  Assert !has_key(opts, 'window')
+  Assert !has_key(opts, 'right')
+
+  let g:fzf_action = {'a': 'tabe'}
+  let opts = fzf#wrap('foobar')
+  Log opts
+  Assert opts.options =~ '--expect=a'
+  Assert !has_key(opts, 'sink')
+  Assert has_key(opts, 'sink*')
+
+  let opts = fzf#wrap('foobar', {'sink': 'e'})
+  Log opts
+  AssertEqual 'e', opts.sink
+  Assert !has_key(opts, 'sink*')
+
+  let g:fzf_history_dir = '/tmp'
+  let opts = fzf#wrap('foobar', {'options': '--color light'})
+  Log opts
+  Assert opts.options =~ '--history /tmp/foobar'
+  Assert opts.options =~ '--color light'
+
 Execute (Cleanup):
   unlet g:dir
   Restore
test/test_go.rb (484 changed lines)
@@ -31,11 +31,15 @@ def wait
     return if yield
     sleep 0.05
   end
-  throw 'timeout'
+  raise 'timeout'
 end
 
 class Shell
   class << self
+    def unsets
+      'unset FZF_DEFAULT_COMMAND FZF_DEFAULT_OPTS FZF_CTRL_T_COMMAND FZF_CTRL_T_OPTS FZF_ALT_C_COMMAND FZF_ALT_C_OPTS FZF_CTRL_R_OPTS;'
+    end
+
     def bash
       'PS1= PROMPT_COMMAND= bash --rcfile ~/.fzf.bash'
     end
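Note on the `throw 'timeout'` → `raise 'timeout'` change above: in Ruby, `throw`/`catch` is non-local control flow and needs an enclosing `catch`; an uncaught `throw` raises `UncaughtThrowError`, whereas `raise 'timeout'` turns the timeout into an ordinary `RuntimeError` that the test run reports. A minimal sketch of such a polling helper, with hypothetical names and timeout values (not the suite's exact code):

    # Hypothetical helper (illustrative, not the test suite's exact code):
    # poll a condition and fail loudly when it never becomes true.
    def wait_until(timeout = 10, interval = 0.05)
      deadline = Time.now + timeout
      until Time.now > deadline
        return if yield
        sleep interval
      end
      raise 'timeout'  # an uncaught `throw 'timeout'` would error out less clearly
    end

    # Usage (path is illustrative):
    # wait_until { File.exist?('/tmp/fzf-test-output') }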
@@ -45,6 +49,10 @@ class Shell
       FileUtils.cp File.expand_path('~/.fzf.zsh'), '/tmp/fzf-zsh/.zshrc'
       'PS1= PROMPT_COMMAND= HISTSIZE=100 ZDOTDIR=/tmp/fzf-zsh zsh'
     end
+
+    def fish
+      'fish'
+    end
   end
 end
 
@@ -57,11 +65,11 @@ class Tmux
     @win =
       case shell
       when :bash
-        go("new-window -d -P -F '#I' '#{Shell.bash}'").first
+        go("new-window -d -P -F '#I' '#{Shell.unsets + Shell.bash}'").first
       when :zsh
-        go("new-window -d -P -F '#I' '#{Shell.zsh}'").first
+        go("new-window -d -P -F '#I' '#{Shell.unsets + Shell.zsh}'").first
       when :fish
-        go("new-window -d -P -F '#I' 'fish'").first
+        go("new-window -d -P -F '#I' '#{Shell.unsets + Shell.fish}'").first
       else
         raise "Unknown shell: #{shell}"
       end
@@ -90,6 +98,10 @@ class Tmux
     go("send-keys -t #{target} #{args}")
   end
 
+  def paste str
+    %x[tmux setb '#{str.gsub("'", "'\\''")}' \\; pasteb -t #{win} \\; send-keys -t #{win} Enter]
+  end
+
   def capture pane = 0
     File.unlink TEMPNAME while File.exists? TEMPNAME
     wait do
@@ -149,12 +161,6 @@ class TestBase < Minitest::Test
      @temp_suffix].join '-'
   end
 
-  def setup
-    ENV.delete 'FZF_DEFAULT_OPTS'
-    ENV.delete 'FZF_CTRL_T_COMMAND'
-    ENV.delete 'FZF_DEFAULT_COMMAND'
-  end
-
   def readonce
     wait { File.exists?(tempname) }
     File.read(tempname)
@@ -207,13 +213,13 @@ class TestGoFZF < TestBase
     tmux.send_keys '99', 'C-a', '1', 'C-f', '3', 'C-b', 'C-h', 'C-u', 'C-e', 'C-y', 'C-k', 'Tab', 'BTab'
     tmux.until { |lines| lines[-2] == ' 856/100000' }
     lines = tmux.capture
-    assert_equal '> 1391', lines[-4]
+    assert_equal '> 3910', lines[-4]
     assert_equal ' 391', lines[-3]
     assert_equal ' 856/100000', lines[-2]
     assert_equal '> 391', lines[-1]
 
     tmux.send_keys :Enter
-    assert_equal '1391', readonce.chomp
+    assert_equal '3910', readonce.chomp
   end
 
   def test_fzf_default_command
@@ -357,12 +363,12 @@ class TestGoFZF < TestBase
       tmux.send_keys :BTab, :BTab, :BTab
       tmux.until { |lines| lines[-2].include?('(3)') }
       tmux.send_keys :Enter
-      assert_equal ['5', '5', '15', '25'], readonce.split($/)
+      assert_equal ['5', '5', '50', '51'], readonce.split($/)
     end
   end
 
   def test_query_unicode
-    tmux.send_keys "(echo abc; echo 가나다) | #{fzf :query, '가다'}", :Enter
+    tmux.paste "(echo abc; echo 가나다) | #{fzf :query, '가다'}"
     tmux.until { |lines| lines[-2].include? '1/2' }
     tmux.send_keys :Enter
     assert_equal ['가나다'], readonce.split($/)
@@ -378,7 +384,7 @@ class TestGoFZF < TestBase
     tmux.send_keys :Enter
     tmux.until { |lines| lines[-1] == '>' }
     tmux.send_keys 'C-K', :Enter
-    assert_equal ['1919'], readonce.split($/)
+    assert_equal ['9090'], readonce.split($/)
   end
 
   def test_tac
@@ -394,6 +400,7 @@ class TestGoFZF < TestBase
     tmux.send_keys "seq 1 1000 | #{fzf :tac, :multi}", :Enter
     tmux.until { |lines| lines[-2].include? '1000/1000' }
     tmux.send_keys '99'
+    tmux.until { |lines| lines[-2].include? '28/1000' }
     tmux.send_keys :BTab, :BTab, :BTab
     tmux.until { |lines| lines[-2].include?('(3)') }
     tmux.send_keys :Enter
@@ -413,11 +420,12 @@ class TestGoFZF < TestBase
 
   def test_expect
     test = lambda do |key, feed, expected = key|
-      tmux.send_keys "seq 1 100 | #{fzf :expect, key}", :Enter
+      tmux.send_keys "seq 1 100 | #{fzf :expect, key}; sync", :Enter
       tmux.until { |lines| lines[-2].include? '100/100' }
       tmux.send_keys '55'
      tmux.until { |lines| lines[-2].include? '1/100' }
       tmux.send_keys *feed
+      tmux.prepare
       assert_equal [expected, '55'], readonce.split($/)
     end
     test.call 'ctrl-t', 'C-T'
@@ -428,6 +436,10 @@ class TestGoFZF < TestBase
     test.call 'f3', 'f3'
     test.call 'f2,f4', 'f2', 'f2'
     test.call 'f2,f4', 'f4', 'f4'
+    test.call 'alt-/', [:Escape, :/]
+    %w[f5 f6 f7 f8 f9 f10].each do |key|
+      test.call 'f5,f6,f7,f8,f9,f10', key, key
+    end
     test.call '@', '@'
   end
 
@@ -440,6 +452,15 @@ class TestGoFZF < TestBase
     assert_equal ['55', 'alt-z', '55'], readonce.split($/)
   end
 
+  def test_expect_printable_character_print_query
+    tmux.send_keys "seq 1 100 | #{fzf '--expect=z --print-query'}", :Enter
+    tmux.until { |lines| lines[-2].include? '100/100' }
+    tmux.send_keys '55'
+    tmux.until { |lines| lines[-2].include? '1/100' }
+    tmux.send_keys 'z'
+    assert_equal ['55', 'z', '55'], readonce.split($/)
+  end
+
   def test_expect_print_query_select_1
     tmux.send_keys "seq 1 100 | #{fzf '-q55 -1 --expect=alt-z --print-query'}", :Enter
     assert_equal ['55', '', '55'], readonce.split($/)
@@ -505,162 +526,91 @@ class TestGoFZF < TestBase
     assert_equal input, `#{FZF} -f"!z" -x --tiebreak end < #{tempname}`.split($/)
   end
 
-  # Since 0.11.2
-  def test_tiebreak_list
-    input = %w[
-      f-o-o-b-a-r
-      foobar----
-      --foobar
-      ----foobar
-      foobar--
-      --foobar--
-      foobar
+  def test_tiebreak_index_begin
+    writelines tempname, [
+      'xoxxxxxoxx',
+      'xoxxxxxox',
+      'xxoxxxoxx',
+      'xxxoxoxxx',
+      'xxxxoxox',
+      ' xxoxoxxx',
     ]
-    writelines tempname, input
 
-    assert_equal %w[
-      foobar----
-      --foobar
-      ----foobar
-      foobar--
-      --foobar--
-      foobar
-      f-o-o-b-a-r
-    ], `#{FZF} -ffb --tiebreak=index < #{tempname}`.split($/)
+    assert_equal [
+      'xxxxoxox',
+      ' xxoxoxxx',
+      'xxxoxoxxx',
+      'xxoxxxoxx',
+      'xoxxxxxox',
+      'xoxxxxxoxx',
+    ], `#{FZF} -foo < #{tempname}`.split($/)
 
-    by_length = %w[
-      foobar
-      --foobar
-      foobar--
-      foobar----
-      ----foobar
-      --foobar--
-      f-o-o-b-a-r
-    ]
-    assert_equal by_length, `#{FZF} -ffb < #{tempname}`.split($/)
-    assert_equal by_length, `#{FZF} -ffb --tiebreak=length < #{tempname}`.split($/)
+    assert_equal [
+      'xxxoxoxxx',
+      'xxxxoxox',
+      ' xxoxoxxx',
+      'xxoxxxoxx',
+      'xoxxxxxoxx',
+      'xoxxxxxox',
+    ], `#{FZF} -foo --tiebreak=index < #{tempname}`.split($/)
 
-    assert_equal %w[
-      foobar
-      foobar--
-      --foobar
-      foobar----
-      --foobar--
-      ----foobar
-      f-o-o-b-a-r
-    ], `#{FZF} -ffb --tiebreak=length,begin < #{tempname}`.split($/)
+    # Note that --tiebreak=begin is now based on the first occurrence of the
+    # first character on the pattern
+    assert_equal [
+      ' xxoxoxxx',
+      'xxxoxoxxx',
+      'xxxxoxox',
+      'xxoxxxoxx',
+      'xoxxxxxoxx',
+      'xoxxxxxox',
+    ], `#{FZF} -foo --tiebreak=begin < #{tempname}`.split($/)
 
-    assert_equal %w[
-      foobar
-      --foobar
-      foobar--
-      ----foobar
-      --foobar--
-      foobar----
-      f-o-o-b-a-r
-    ], `#{FZF} -ffb --tiebreak=length,end < #{tempname}`.split($/)
+    assert_equal [
+      ' xxoxoxxx',
+      'xxxoxoxxx',
+      'xxxxoxox',
+      'xxoxxxoxx',
+      'xoxxxxxox',
+      'xoxxxxxoxx',
+    ], `#{FZF} -foo --tiebreak=begin,length < #{tempname}`.split($/)
 
-    assert_equal %w[
-      foobar----
-      foobar--
-      foobar
-      --foobar
-      --foobar--
-      ----foobar
-      f-o-o-b-a-r
-    ], `#{FZF} -ffb --tiebreak=begin < #{tempname}`.split($/)
-
-    by_begin_end = %w[
-      foobar
-      foobar--
-      foobar----
-      --foobar
-      --foobar--
-      ----foobar
-      f-o-o-b-a-r
-    ]
-    assert_equal by_begin_end, `#{FZF} -ffb --tiebreak=begin,length < #{tempname}`.split($/)
-    assert_equal by_begin_end, `#{FZF} -ffb --tiebreak=begin,end < #{tempname}`.split($/)
-
-    assert_equal %w[
-      --foobar
-      ----foobar
-      foobar
-      foobar--
-      --foobar--
-      foobar----
-      f-o-o-b-a-r
-    ], `#{FZF} -ffb --tiebreak=end < #{tempname}`.split($/)
-
-    by_begin_end = %w[
-      foobar
-      --foobar
-      ----foobar
-      foobar--
-      --foobar--
-      foobar----
-      f-o-o-b-a-r
-    ]
-    assert_equal by_begin_end, `#{FZF} -ffb --tiebreak=end,begin < #{tempname}`.split($/)
-    assert_equal by_begin_end, `#{FZF} -ffb --tiebreak=end,length < #{tempname}`.split($/)
   end
 
-  def test_tiebreak_white_prefix
+  def test_tiebreak_end
     writelines tempname, [
-      'f o o b a r',
-      ' foo bar',
-      ' foobar',
-      '----foo bar',
-      '----foobar',
-      ' foo bar',
-      ' foobar--',
-      ' foobar',
-      '--foo bar',
-      '--foobar',
-      'foobar',
+      'xoxxxxxxxx',
+      'xxoxxxxxxx',
+      'xxxoxxxxxx',
+      'xxxxoxxxx',
+      'xxxxxoxxx',
+      ' xxxxoxxx',
     ]
 
     assert_equal [
-      ' foobar',
-      ' foobar',
-      'foobar',
-      ' foobar--',
-      '--foobar',
-      '----foobar',
-      ' foo bar',
-      ' foo bar',
-      '--foo bar',
-      '----foo bar',
-      'f o o b a r',
-    ], `#{FZF} -ffb < #{tempname}`.split($/)
+      ' xxxxoxxx',
+      'xxxxoxxxx',
+      'xxxxxoxxx',
+      'xoxxxxxxxx',
+      'xxoxxxxxxx',
+      'xxxoxxxxxx',
+    ], `#{FZF} -fo < #{tempname}`.split($/)
 
     assert_equal [
-      ' foobar',
-      ' foobar--',
-      ' foobar',
-      'foobar',
-      '--foobar',
-      '----foobar',
-      ' foo bar',
-      ' foo bar',
-      '--foo bar',
-      '----foo bar',
-      'f o o b a r',
-    ], `#{FZF} -ffb --tiebreak=begin < #{tempname}`.split($/)
+      'xxxxxoxxx',
+      ' xxxxoxxx',
+      'xxxxoxxxx',
+      'xxxoxxxxxx',
+      'xxoxxxxxxx',
+      'xoxxxxxxxx',
+    ], `#{FZF} -fo --tiebreak=end < #{tempname}`.split($/)
 
     assert_equal [
-      ' foobar',
-      ' foobar',
-      'foobar',
-      ' foobar--',
-      '--foobar',
-      '----foobar',
-      ' foo bar',
-      ' foo bar',
-      '--foo bar',
-      '----foo bar',
-      'f o o b a r',
-    ], `#{FZF} -ffb --tiebreak=begin,length < #{tempname}`.split($/)
+      ' xxxxoxxx',
+      'xxxxxoxxx',
+      'xxxxoxxxx',
+      'xxxoxxxxxx',
+      'xxoxxxxxxx',
+      'xoxxxxxxxx',
+    ], `#{FZF} -fo --tiebreak=end,length,begin < #{tempname}`.split($/)
   end
 
   def test_tiebreak_length_with_nth
@@ -709,10 +659,10 @@ class TestGoFZF < TestBase
 
     # len(1 ~ 2)
     output = [
-      "apple ui bottle 2",
       "app ic bottle 4",
-      "apple juice bottle 1",
       "app ice bottle 3",
+      "apple ui bottle 2",
+      "apple juice bottle 1",
     ]
     assert_equal output, `#{FZF} -fai -n1..2 < #{tempname}`.split($/)
 
@@ -727,26 +677,15 @@ class TestGoFZF < TestBase
 
     # len(2)
     output = [
-      "apple ui bottle 2",
       "app ic bottle 4",
       "app ice bottle 3",
+      "apple ui bottle 2",
       "apple juice bottle 1",
     ]
     assert_equal output, `#{FZF} -fi -n2 < #{tempname}`.split($/)
     assert_equal output, `#{FZF} -fi -n2,1..2 < #{tempname}`.split($/)
   end
 
-  def test_tiebreak_end_backward_scan
-    input = %w[
-      foobar-fb
-      fubar
-    ]
-    writelines tempname, input
-
-    assert_equal input.reverse, `#{FZF} -f fb < #{tempname}`.split($/)
-    assert_equal input, `#{FZF} -f fb --tiebreak=end < #{tempname}`.split($/)
-  end
-
   def test_invalid_cache
     tmux.send_keys "(echo d; echo D; echo x) | #{fzf '-q d'}", :Enter
     tmux.until { |lines| lines[-2].include? '2/3' }
@@ -769,6 +708,13 @@ class TestGoFZF < TestBase
     assert_equal %w[4 5 6 9], readonce.split($/)
   end
 
+  def test_bind_print_query
+    tmux.send_keys "seq 1 1000 | #{fzf '-m --bind=ctrl-j:print-query'}", :Enter
+    tmux.until { |lines| lines[-2].end_with? '/1000' }
+    tmux.send_keys 'print-my-query', 'C-j'
+    assert_equal %w[print-my-query], readonce.split($/)
+  end
+
   def test_long_line
     data = '.' * 256 * 1024
     File.open(tempname, 'w') do |f|
@@ -856,20 +802,34 @@ class TestGoFZF < TestBase
 
   def test_execute
     output = '/tmp/fzf-test-execute'
-    opts = %[--bind \\"alt-a:execute(echo '[{}]' >> #{output}),alt-b:execute[echo '({}), ({})' >> #{output}],C:execute:echo '({}), [{}], @{}@' >> #{output}\\"]
-    tmux.send_keys "seq 100 | #{fzf opts}", :Enter
-    tmux.until { |lines| lines[-2].include? '100/100' }
-    tmux.send_keys :Escape, :a, :Escape, :a
+    opts = %[--bind \\"alt-a:execute(echo [{}] >> #{output}),alt-b:execute[echo /{}{}/ >> #{output}],C:execute:echo /{}{}{}/ >> #{output}\\"]
+    wait = lambda { |exp| tmux.until { |lines| lines[-2].include? exp } }
+    writelines tempname, %w[foo'bar foo"bar foo$bar]
+    tmux.send_keys "cat #{tempname} | #{fzf opts}; sync", :Enter
+    wait['3/3']
+    tmux.send_keys :Escape, :a
+    wait['/3']
+    tmux.send_keys :Escape, :a
+    wait['/3']
     tmux.send_keys :Up
-    tmux.send_keys :Escape, :b, :Escape, :b
+    tmux.send_keys :Escape, :b
+    wait['/3']
+    tmux.send_keys :Escape, :b
+    wait['/3']
     tmux.send_keys :Up
     tmux.send_keys :C
-    tmux.send_keys 'foobar'
-    tmux.until { |lines| lines[-2].include? '0/100' }
-    tmux.send_keys :Escape, :a, :Escape, :b, :Escape, :c
+    wait['3/3']
+    tmux.send_keys 'barfoo'
+    wait['0/3']
+    tmux.send_keys :Escape, :a
+    wait['/3']
+    tmux.send_keys :Escape, :b
+    wait['/3']
    tmux.send_keys :Enter
     readonce
-    assert_equal ['["1"]', '["1"]', '("2"), ("2")', '("2"), ("2")', '("3"), ["3"], @"3"@'],
+    assert_equal %w[[foo'bar] [foo'bar]
+                    /foo"barfoo"bar/ /foo"barfoo"bar/
+                    /foo$barfoo$barfoo$bar/],
                  File.readlines(output).map(&:chomp)
   ensure
     File.unlink output rescue nil
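The rewritten test_execute above feeds items containing quote characters (foo'bar, foo"bar, foo$bar) and expects the echoed output to contain them verbatim, i.e. `{}` in an execute(...) action is substituted as a shell-quoted string. A rough Ruby analogue of that quoting step, using Shellwords purely for illustration (an assumption; fzf's own substitution code differs):

    require 'shellwords'

    # Illustrative only: substitute the current item into an execute(...)
    # template with shell quoting so embedded quotes survive verbatim.
    def render_action(template, item)
      template.gsub('{}') { Shellwords.escape(item) }
    end

    puts render_action('echo [{}] >> /tmp/fzf-test-execute', %q[foo'bar])
    # prints: echo [foo\'bar] >> /tmp/fzf-test-execute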
@@ -877,17 +837,24 @@ class TestGoFZF < TestBase
 
   def test_execute_multi
     output = '/tmp/fzf-test-execute-multi'
-    opts = %[--multi --bind \\"alt-a:execute-multi(echo '[{}], @{}@' >> #{output})\\"]
-    tmux.send_keys "seq 100 | #{fzf opts}", :Enter
-    tmux.until { |lines| lines[-2].include? '100/100' }
+    opts = %[--multi --bind \\"alt-a:execute-multi(echo {}/{} >> #{output}; sync)\\"]
+    writelines tempname, %w[foo'bar foo"bar foo$bar foobar]
+    tmux.send_keys "cat #{tempname} | #{fzf opts}", :Enter
+    tmux.until { |lines| lines[-2].include? '4/4' }
     tmux.send_keys :Escape, :a
+    tmux.until { |lines| lines[-2].include? '/4' }
     tmux.send_keys :BTab, :BTab, :BTab
     tmux.send_keys :Escape, :a
+    tmux.until { |lines| lines[-2].include? '/4' }
     tmux.send_keys :Tab, :Tab
     tmux.send_keys :Escape, :a
+    tmux.until { |lines| lines[-2].include? '/4' }
     tmux.send_keys :Enter
+    tmux.prepare
     readonce
-    assert_equal ['["1"], @"1"@', '["1" "2" "3"], @"1" "2" "3"@', '["1" "2" "4"], @"1" "2" "4"@'],
+    assert_equal [%[foo'bar/foo'bar],
+                  %[foo'bar foo"bar foo$bar/foo'bar foo"bar foo$bar],
+                  %[foo'bar foo"bar foobar/foo'bar foo"bar foobar]],
                  File.readlines(output).map(&:chomp)
   ensure
     File.unlink output rescue nil
@@ -906,7 +873,7 @@ class TestGoFZF < TestBase
     tmux.until { |lines| lines[-2].include? '1/1' }
     tmux.send_keys 'C-c'
     tmux.prepare
-    assert_equal ['-c / "foo"bar'], File.readlines(output).map(&:chomp)
+    assert_equal ["-c / 'foo'bar"], File.readlines(output).map(&:chomp)
   ensure
     File.unlink output rescue nil
   end
@@ -937,12 +904,12 @@ class TestGoFZF < TestBase
         lines[-2].include?('/90') &&
         lines[-3] == ' 1' &&
         lines[-4] == ' 2' &&
-        lines[-13] == '> 15'
+        lines[-13] == '> 50'
       end
       tmux.send_keys :Down
     end
     tmux.send_keys :Enter
-    assert_equal '15', readonce.chomp
+    assert_equal '50', readonce.chomp
   end
 
   def test_header_lines_reverse
@@ -952,12 +919,12 @@ class TestGoFZF < TestBase
         lines[1].include?('/90') &&
         lines[2] == ' 1' &&
         lines[3] == ' 2' &&
-        lines[12] == '> 15'
+        lines[12] == '> 50'
       end
       tmux.send_keys :Up
     end
     tmux.send_keys :Enter
-    assert_equal '15', readonce.chomp
+    assert_equal '50', readonce.chomp
   end
 
   def test_header_lines_overflow
@@ -1020,7 +987,7 @@ class TestGoFZF < TestBase
     end
   end
 
-  def test_canel
+  def test_cancel
     tmux.send_keys "seq 10 | #{fzf "--bind 2:cancel"}", :Enter
     tmux.until { |lines| lines[-2].include?('10/10') }
     tmux.send_keys '123'
@@ -1111,13 +1078,9 @@ class TestGoFZF < TestBase
 
   def test_exitstatus_empty
     { '99' => '0', '999' => '1' }.each do |query, status|
-      tmux.send_keys "seq 100 | #{FZF} -q #{query}", :Enter
+      tmux.send_keys "seq 100 | #{FZF} -q #{query}; echo --\\$?--", :Enter
       tmux.until { |lines| lines[-2] =~ %r{ [10]/100} }
       tmux.send_keys :Enter
-
-      tmux.send_keys 'echo --\$?--'
-      tmux.until { |lines| lines.last.include? "echo --$?--" }
-      tmux.send_keys :Enter
       tmux.until { |lines| lines.last.include? "--#{status}--" }
     end
   end
@@ -1151,6 +1114,81 @@ class TestGoFZF < TestBase
     end
   end
 
+  def test_partial_caching
+    tmux.send_keys 'seq 1000 | fzf -e', :Enter
+    tmux.until { |lines| lines[-2] == ' 1000/1000' }
+    tmux.send_keys 11
+    tmux.until { |lines| lines[-2] == ' 19/1000' }
+    tmux.send_keys 'C-a', "'"
+    tmux.until { |lines| lines[-2] == ' 28/1000' }
+    tmux.send_keys :Enter
+  end
+
+  def test_jump
+    tmux.send_keys "seq 1000 | #{fzf "--multi --jump-labels 12345 --bind 'ctrl-j:jump'"}", :Enter
+    tmux.until { |lines| lines[-2] == ' 1000/1000' }
+    tmux.send_keys 'C-j'
+    tmux.until { |lines| lines[-7] == '5 5' }
+    tmux.until { |lines| lines[-8] == ' 6' }
+    tmux.send_keys '5'
+    tmux.until { |lines| lines[-7] == '> 5' }
+    tmux.send_keys :Tab
+    tmux.until { |lines| lines[-7] == ' >5' }
+    tmux.send_keys 'C-j'
+    tmux.until { |lines| lines[-7] == '5>5' }
+    tmux.send_keys '2'
+    tmux.until { |lines| lines[-4] == '> 2' }
+    tmux.send_keys :Tab
+    tmux.until { |lines| lines[-4] == ' >2' }
+    tmux.send_keys 'C-j'
+    tmux.until { |lines| lines[-7] == '5>5' }
+
+    # Press any key other than jump labels to cancel jump
+    tmux.send_keys '6'
+    tmux.until { |lines| lines[-3] == '> 1' }
+    tmux.send_keys :Tab
+    tmux.until { |lines| lines[-3] == '>>1' }
+    tmux.send_keys :Enter
+    assert_equal %w[5 2 1], readonce.split($/)
+  end
+
+  def test_jump_accept
+    tmux.send_keys "seq 1000 | #{fzf "--multi --jump-labels 12345 --bind 'ctrl-j:jump-accept'"}", :Enter
+    tmux.until { |lines| lines[-2] == ' 1000/1000' }
+    tmux.send_keys 'C-j'
+    tmux.until { |lines| lines[-7] == '5 5' }
+    tmux.send_keys '3'
+    assert_equal '3', readonce.chomp
+  end
+
+  def test_preview
+    tmux.send_keys %[seq 1000 | sed s/^2$// | #{FZF} --preview 'sleep 0.2; echo {{}-{}}' --bind ?:toggle-preview], :Enter
+    tmux.until { |lines| lines[1].include?(' {1-1}') }
+    tmux.send_keys :Up
+    tmux.until { |lines| lines[1].include?(' {-}') }
+    tmux.send_keys '555'
+    tmux.until { |lines| lines[1].include?(' {555-555}') }
+    tmux.send_keys '?'
+    tmux.until { |lines| !lines[1].include?(' {555-555}') }
+    tmux.send_keys '?'
+    tmux.until { |lines| lines[1].include?(' {555-555}') }
+    tmux.send_keys :BSpace
+    tmux.until { |lines| lines[-2].start_with? ' 28/1000' }
+    tmux.send_keys 'foobar'
+    tmux.until { |lines| !lines[1].include?('{') }
+  end
+
+  def test_preview_hidden
+    tmux.send_keys %[seq 1000 | #{FZF} --preview 'echo {{}-{}}' --preview-window down:1:hidden --bind ?:toggle-preview], :Enter
+    tmux.until { |lines| lines[-1] == '>' }
+    tmux.send_keys '?'
+    tmux.until { |lines| lines[-2].include?(' {1-1}') }
+    tmux.send_keys '555'
+    tmux.until { |lines| lines[-2].include?(' {555-555}') }
+    tmux.send_keys '?'
+    tmux.until { |lines| lines[-1] == '> 555' }
+  end
+
   private
   def writelines path, lines
     File.unlink path while File.exists? path
@@ -1206,6 +1244,35 @@ module TestShell
     tmux.until(0) { |lines| lines[-1].include? '1 2 3' }
   end
 
+  def test_ctrl_t_unicode
+    FileUtils.mkdir_p '/tmp/fzf-test'
+    tmux.paste 'cd /tmp/fzf-test; echo -n test1 > "fzf-unicode 테스트1"; echo -n test2 > "fzf-unicode 테스트2"'
+    tmux.prepare
+    tmux.send_keys 'cat ', 'C-t', pane: 0
+    tmux.until(1) { |lines| lines.item_count >= 1 }
+    tmux.send_keys 'fzf-unicode', pane: 1
+    tmux.until(1) { |lines| lines[-2].start_with? ' 2/' }
+
+    tmux.send_keys '1', pane: 1
+    tmux.until(1) { |lines| lines[-2].start_with? ' 1/' }
+    tmux.send_keys :BTab, pane: 1
+    tmux.until(1) { |lines| lines[-2].include? '(1)' }
+
+    tmux.send_keys :BSpace, pane: 1
+    tmux.until(1) { |lines| lines[-2].start_with? ' 2/' }
+
+    tmux.send_keys '2', pane: 1
+    tmux.until(1) { |lines| lines[-2].start_with? ' 1/' }
+    tmux.send_keys :BTab, pane: 1
+    tmux.until(1) { |lines| lines[-2].include? '(2)' }
+
+    tmux.send_keys :Enter, pane: 1
+    tmux.until { |lines| lines[-1].include?('cat') || lines[-2].include?('cat') }
+    tmux.until { |lines| lines[-1].include?('fzf-unicode') || lines[-2].include?('fzf-unicode') }
+    tmux.send_keys :Enter
+    tmux.until { |lines| lines[-1].include? 'test1test2' }
+  end
+
   def test_alt_c
     tmux.prepare
     tmux.send_keys :Escape, :c, pane: 0
@@ -1314,6 +1381,7 @@ module CompletionTest
       lines[-2].include?('100/') &&
       lines[-3].include?('/tmp/fzf-test/.hidden-')
     end
+    tmux.send_keys :Enter
   ensure
     ['/tmp/fzf-test', '/tmp/fzf test', '~/.fzf-home', 'no~such~user'].each do |f|
       FileUtils.rm_rf File.expand_path(f)
@@ -1405,6 +1473,32 @@ module CompletionTest
     tmux.send_keys :Enter
     tmux.until { |lines| lines[-1] == 'unset FOO' }
   end
+
+  def test_file_completion_unicode
+    FileUtils.mkdir_p '/tmp/fzf-test'
+    tmux.paste 'cd /tmp/fzf-test; echo -n test3 > "fzf-unicode 테스트1"; echo -n test4 > "fzf-unicode 테스트2"'
+    tmux.prepare
+    tmux.send_keys 'cat fzf-unicode**', :Tab, pane: 0
+    tmux.until(1) { |lines| lines[-2].start_with? ' 2/' }
+
+    tmux.send_keys '1', pane: 1
+    tmux.until(1) { |lines| lines[-2].start_with? ' 1/' }
+    tmux.send_keys :BTab, pane: 1
+    tmux.until(1) { |lines| lines[-2].include? '(1)' }
+
+    tmux.send_keys :BSpace, pane: 1
+    tmux.until(1) { |lines| lines[-2].start_with? ' 2/' }
+
+    tmux.send_keys '2', pane: 1
+    tmux.until(1) { |lines| lines[-2].start_with? ' 1/' }
+    tmux.send_keys :BTab, pane: 1
+    tmux.until(1) { |lines| lines[-2].include? '(2)' }
+
+    tmux.send_keys :Enter, pane: 1
+    tmux.until { |lines| lines[-1].include?('cat') || lines[-2].include?('cat') }
+    tmux.send_keys :Enter
+    tmux.until { |lines| lines[-1].include? 'test3test4' }
+  end
 end
 
 class TestBash < TestBase