Mirror of https://github.com/junegunn/fzf.git (synced 2025-08-15 12:13:52 -07:00)
Compare commits
430 Commits
Only the commit SHA column is populated, listing 430 commits from `37f43fbb35` through `98f62b191a`; the author, date, and message columns are empty in this mirror.
.github/ISSUE_TEMPLATE.md (new file, 29 lines, vendored)
@@ -0,0 +1,29 @@
<!-- Check all that apply [x] -->

- Category
    - [ ] fzf binary
    - [ ] fzf-tmux script
    - [ ] Key bindings
    - [ ] Completion
    - [ ] Vim
    - [ ] Neovim
    - [ ] Etc.
- OS
    - [ ] Linux
    - [ ] Mac OS X
    - [ ] Windows
    - [ ] Etc.
- Shell
    - [ ] bash
    - [ ] zsh
    - [ ] fish

<!--
### Before submitting

- Make sure that you have the latest version of fzf
- If you use tmux, make sure $TERM is set to screen or screen-256color
- For more Vim stuff, check out https://github.com/junegunn/fzf.vim

Describe your problem or suggestion from here ...
-->

.gitignore (3 changes, vendored)
@@ -1,5 +1,6 @@
 bin
-src/fzf/fzf_*
+src/fzf/fzf-*
+gopath
 pkg
 Gemfile.lock
 .DS_Store

@@ -4,7 +4,7 @@ rvm:

 install:
   - sudo apt-get update
-  - sudo apt-get install -y libncurses-dev lib32ncurses5-dev
+  - sudo apt-get install -y libncurses-dev lib32ncurses5-dev libgpm-dev
   - sudo add-apt-repository -y ppa:pi-rho/dev
   - sudo apt-add-repository -y ppa:fish-shell/release-2
   - sudo apt-get update

CHANGELOG.md (258 changes)
@@ -1,6 +1,264 @@
|
|||||||
CHANGELOG
|
CHANGELOG
|
||||||
=========
|
=========
|
||||||
|
|
||||||
|
0.15.0
|
||||||
|
------
|
||||||
|
- Improved fuzzy search algorithm
|
||||||
|
- Added `--algo=[v1|v2]` option so one can still choose the old algorithm
|
||||||
|
which values the search performance over the quality of the result
|
||||||
|
- Advanced scoring criteria
|
||||||
|
- `--read0` to read input delimited by ASCII NUL character
|
||||||
|
- `--print0` to print output delimited by ASCII NUL character
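
A rough sketch of how the two NUL-delimiter options above fit together; the surrounding `find`/`xargs` pipeline is illustrative and not part of this release:

```sh
# Read NUL-delimited paths (robust against newlines in file names)
# and emit the selection NUL-delimited as well.
find . -type f -print0 |
  fzf --read0 --print0 --multi |
  xargs -0 ls -l
```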
|
||||||
|
|
||||||
|
0.13.5
|
||||||
|
------
|
||||||
|
- Memory and performance optimization
|
||||||
|
- Up to 2x performance with half the amount of memory
|
||||||
|
|
||||||
|
0.13.4
|
||||||
|
------
|
||||||
|
- Performance optimization
|
||||||
|
- Memory footprint for ascii string is reduced by 60%
|
||||||
|
- 15 to 20% improvement of query performance
|
||||||
|
- Up to 45% better performance of `--nth` with non-regex delimiters
|
||||||
|
- Fixed invalid handling of `hidden` property of `--preview-window`
|
||||||
|
|
||||||
|
0.13.3
|
||||||
|
------
|
||||||
|
- Fixed duplicate rendering of the last line in preview window
|
||||||
|
|
||||||
|
0.13.2
|
||||||
|
------
|
||||||
|
- Fixed race condition where preview window is not properly cleared
|
||||||
|
|
||||||
|
0.13.1
|
||||||
|
------
|
||||||
|
- Fixed UI issue with large `--preview` output with many ANSI codes
|
||||||
|
|
||||||
|
0.13.0
|
||||||
|
------
|
||||||
|
- Added preview feature
|
||||||
|
- `--preview CMD`
|
||||||
|
- `--preview-window POS[:SIZE][:hidden]`
|
||||||
|
- `{}` in execute action is now replaced to the single-quoted (instead of
|
||||||
|
double-quoted) string of the current line
|
||||||
|
- Fixed to ignore control characters for bracketed paste mode
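
A minimal sketch of the preview options introduced here; the `head` command and the `ctrl-p` key are arbitrary choices, and the `toggle-preview` action is assumed to be available in your build:

```sh
# Preview the first 100 lines of the current item in a right-hand pane,
# start with the pane hidden, and toggle it with CTRL-P.
fzf --preview 'head -100 {}' \
    --preview-window right:50%:hidden \
    --bind 'ctrl-p:toggle-preview'
```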
|
||||||
|
|
||||||
|
0.12.2
|
||||||
|
------
|
||||||
|
|
||||||
|
- 256-color capability detection does not require `256` in `$TERM`
|
||||||
|
- Added `print-query` action
|
||||||
|
- More named keys for binding; <kbd>F1</kbd> ~ <kbd>F10</kbd>,
|
||||||
|
<kbd>ALT-/</kbd>, <kbd>ALT-space</kbd>, and <kbd>ALT-enter</kbd>
|
||||||
|
- Added `jump` and `jump-accept` actions that implement [EasyMotion][em]-like
|
||||||
|
movement
|
||||||
|
![][jump]
|
||||||
|
|
||||||
|
[em]: https://github.com/easymotion/vim-easymotion
|
||||||
|
[jump]: https://cloud.githubusercontent.com/assets/700826/15367574/b3999dc4-1d64-11e6-85da-28ceeb1a9bc2.png
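
For example, the new actions can be wired to keys like any other binding; the key choices below are arbitrary:

```sh
# ALT-J enters jump mode (pick an item by its label);
# ALT-K jumps and accepts the target in one step.
fzf --bind 'alt-j:jump,alt-k:jump-accept'
```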
|
||||||
|
|
||||||
|
0.12.1
|
||||||
|
------
|
||||||
|
|
||||||
|
- Ranking algorithm introduced in 0.12.0 is now universally applied
|
||||||
|
- Fixed invalid cache reference in exact mode
|
||||||
|
- Fixes and improvements in Vim plugin and shell extensions
|
||||||
|
|
||||||
|
0.12.0
|
||||||
|
------
|
||||||
|
|
||||||
|
- Enhanced ranking algorithm
|
||||||
|
- Minor bug fixes
|
||||||
|
|
||||||
|
0.11.4
|
||||||
|
------
|
||||||
|
|
||||||
|
- Added `--hscroll-off=COL` option (default: 10) (#513)
|
||||||
|
- Some fixes in Vim plugin and shell extensions
|
||||||
|
|
||||||
|
0.11.3
|
||||||
|
------
|
||||||
|
|
||||||
|
- Graceful exit on SIGTERM (#482)
|
||||||
|
- `$SHELL` instead of `sh` for `execute` action and `$FZF_DEFAULT_COMMAND` (#481)
|
||||||
|
- Changes in fuzzy completion API
|
||||||
|
- [`_fzf_compgen_{path,dir}`](https://github.com/junegunn/fzf/commit/9617647)
|
||||||
|
- [`_fzf_complete_COMMAND_post`](https://github.com/junegunn/fzf/commit/8206746)
|
||||||
|
for post-processing
|
||||||
|
|
||||||
|
0.11.2
|
||||||
|
------
|
||||||
|
|
||||||
|
- `--tiebreak` now accepts comma-separated list of sort criteria
|
||||||
|
- Each criterion should appear only once in the list
|
||||||
|
- `index` is only allowed at the end of the list
|
||||||
|
- `index` is implicitly appended to the list when not specified
|
||||||
|
- Default is `length` (or equivalently `length,index`)
|
||||||
|
- `begin` criterion will ignore leading whitespaces when calculating the index
|
||||||
|
- Added `toggle-in` and `toggle-out` actions
|
||||||
|
- Switch direction depending on `--reverse`-ness
|
||||||
|
- `export FZF_DEFAULT_OPTS="--bind tab:toggle-out,shift-tab:toggle-in"`
|
||||||
|
- Reduced the initial delay when `--tac` is not given
|
||||||
|
- fzf defers the initial rendering of the screen up to 100ms if the input
|
||||||
|
stream is ongoing to prevent unnecessary redraw during the initial
|
||||||
|
phase. However, 100ms delay is quite noticeable and might give the
|
||||||
|
impression that fzf is not snappy enough. This commit reduces the
|
||||||
|
maximum delay down to 20ms when `--tac` is not specified, in which case
|
||||||
|
the input list quickly fills the entire screen.
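
A small illustration of the comma-separated criteria described above; both criterion names come from this entry, and `index` is appended implicitly:

```sh
# Rank matches that begin earlier in the line first, then prefer
# shorter lines; equivalent to --tiebreak=begin,length,index.
fzf --tiebreak=begin,length
```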
|
||||||
|
|
||||||
|
0.11.1
|
||||||
|
------
|
||||||
|
|
||||||
|
- Added `--tabstop=SPACES` option
|
||||||
|
|
||||||
|
0.11.0
|
||||||
|
------
|
||||||
|
|
||||||
|
- Added OR operator for extended-search mode
|
||||||
|
- Added `--execute-multi` action
|
||||||
|
- Fixed incorrect cursor position when unicode wide characters are used in
|
||||||
|
`--prompt`
|
||||||
|
- Fixes and improvements in shell extensions
|
||||||
|
|
||||||
|
0.10.9
|
||||||
|
------
|
||||||
|
|
||||||
|
- Extended-search mode is now enabled by default
|
||||||
|
- `--extended-exact` is deprecated and instead we have `--exact` for
|
||||||
|
orthogonally controlling "exactness" of search
|
||||||
|
- Fixed not to display non-printable characters
|
||||||
|
- Added `double-click` for `--bind` option
|
||||||
|
- More robust handling of SIGWINCH
|
||||||
|
|
||||||
|
0.10.8
|
||||||
|
------
|
||||||
|
|
||||||
|
- Fixed panic when trying to set colors after colors are disabled (#370)
|
||||||
|
|
||||||
|
0.10.7
|
||||||
|
------
|
||||||
|
|
||||||
|
- Fixed unserialized interrupt handling during execute action which often
|
||||||
|
caused invalid memory access and crash
|
||||||
|
- Changed `--tiebreak=length` (default) to use trimmed length when `--nth` is
|
||||||
|
used
|
||||||
|
|
||||||
|
0.10.6
|
||||||
|
------
|
||||||
|
|
||||||
|
- Replaced `--header-file` with `--header` option
|
||||||
|
- `--header` and `--header-lines` can be used together
|
||||||
|
- Changed exit status
|
||||||
|
- 0: Okay
|
||||||
|
- 1: No match
|
||||||
|
- 2: Error
|
||||||
|
- 130: Interrupted
|
||||||
|
- 64-bit linux binary is statically-linked with ncurses to avoid
|
||||||
|
compatibility issues.
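
A sketch of a wrapper script reacting to the exit statuses listed above; the messages are only an example:

```sh
#!/usr/bin/env bash
# React to fzf's documented exit statuses.
selection=$(fzf)
case $? in
  0)   echo "picked: $selection" ;;
  1)   echo "no match" >&2 ;;
  2)   echo "fzf reported an error" >&2 ;;
  130) echo "interrupted" >&2 ;;
esac
```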
|
||||||
|
|
||||||
|
0.10.5
|
||||||
|
------
|
||||||
|
|
||||||
|
- `'`-prefix to unquote the term in `--extended-exact` mode
|
||||||
|
- Backward scan when `--tiebreak=end` is set
|
||||||
|
|
||||||
|
0.10.4
|
||||||
|
------
|
||||||
|
|
||||||
|
- Fixed to remove ANSI code from output when `--with-nth` is set
|
||||||
|
|
||||||
|
0.10.3
|
||||||
|
------
|
||||||
|
|
||||||
|
- Fixed slow performance of `--with-nth` when used with `--delimiter`
|
||||||
|
- Regular expression engine of Golang as of now is very slow, so the fixed
|
||||||
|
version will treat the given delimiter pattern as a plain string instead
|
||||||
|
of a regular expression unless it contains special characters and is
|
||||||
|
a valid regular expression.
|
||||||
|
- Simpler regular expression for delimiter for better performance
|
||||||
|
|
||||||
|
0.10.2
|
||||||
|
------
|
||||||
|
|
||||||
|
### Fixes and improvements
|
||||||
|
|
||||||
|
- Improvement in perceived response time of queries
|
||||||
|
- Eager, efficient rune array conversion
|
||||||
|
- Graceful exit when failed to initialize ncurses (invalid $TERM)
|
||||||
|
- Improved ranking algorithm when `--nth` option is set
|
||||||
|
- Changed the default command not to fail when there are files whose names
|
||||||
|
start with dash
|
||||||
|
|
||||||
|
0.10.1
|
||||||
|
------
|
||||||
|
|
||||||
|
### New features
|
||||||
|
|
||||||
|
- Added `--margin` option
|
||||||
|
- Added options for sticky header
|
||||||
|
- `--header-file`
|
||||||
|
- `--header-lines`
|
||||||
|
- Added `cancel` action which clears the input or closes the finder when the
|
||||||
|
input is already empty
|
||||||
|
- e.g. `export FZF_DEFAULT_OPTS="--bind esc:cancel"`
|
||||||
|
- Added `delete-char/eof` action to differentiate `CTRL-D` and `DEL`
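
Putting these options together in one hypothetical invocation; the `ps` input and the ESC binding are just examples:

```sh
# Pin the ps column header above the list, add a little margin,
# and let ESC clear the query (or close an already-empty finder).
ps -ef | fzf --header-lines=1 --margin 1,3 --bind 'esc:cancel'
```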
|
||||||
|
|
||||||
|
### Minor improvements/fixes
|
||||||
|
|
||||||
|
- Fixed to allow binding colon and comma keys
|
||||||
|
- Fixed ANSI processor to handle color regions spanning multiple lines
|
||||||
|
|
||||||
|
0.10.0
|
||||||
|
------
|
||||||
|
|
||||||
|
### New features
|
||||||
|
|
||||||
|
- More actions for `--bind`
|
||||||
|
- `select-all`
|
||||||
|
- `deselect-all`
|
||||||
|
- `toggle-all`
|
||||||
|
- `ignore`
|
||||||
|
- `execute(...)` action for running arbitrary command without leaving fzf
|
||||||
|
- `fzf --bind "ctrl-m:execute(less {})"`
|
||||||
|
- `fzf --bind "ctrl-t:execute(tmux new-window -d 'vim {}')"`
|
||||||
|
- If the command contains parentheses, use any of the following alternative
|
||||||
|
notations to avoid parse errors
|
||||||
|
- `execute[...]`
|
||||||
|
- `execute~...~`
|
||||||
|
- `execute!...!`
|
||||||
|
- `execute@...@`
|
||||||
|
- `execute#...#`
|
||||||
|
- `execute$...$`
|
||||||
|
- `execute%...%`
|
||||||
|
- `execute^...^`
|
||||||
|
- `execute&...&`
|
||||||
|
- `execute*...*`
|
||||||
|
- `execute;...;`
|
||||||
|
- `execute/.../`
|
||||||
|
- `execute|...|`
|
||||||
|
- `execute:...`
|
||||||
|
- This is the special form that frees you from parse errors as it
|
||||||
|
does not expect the closing character
|
||||||
|
- The catch is that it should be the last one in the
|
||||||
|
comma-separated list
|
||||||
|
- Added support for optional search history
|
||||||
|
- `--history HISTORY_FILE`
|
||||||
|
- When used, `CTRL-N` and `CTRL-P` are automatically remapped to
|
||||||
|
`next-history` and `previous-history`
|
||||||
|
- `--history-size MAX_ENTRIES` (default: 1000)
|
||||||
|
- Cyclic scrolling can be enabled with `--cycle`
|
||||||
|
- Fixed the bug where the spinner was not spinning on idle input stream
|
||||||
|
- e.g. `sleep 100 | fzf`
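
A hedged example of the history and cycle options above; the history file path is arbitrary:

```sh
# Persist queries across runs; CTRL-N / CTRL-P then walk the history,
# and cursor movement wraps around at either end of the list.
fzf --history ~/.fzf_history --history-size 500 --cycle
```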
|
||||||
|
|
||||||
|
### Minor improvements/fixes
|
||||||
|
|
||||||
|
- Added synonyms for key names that can be specified for `--bind`,
|
||||||
|
`--toggle-sort`, and `--expect`
|
||||||
|
- Fixed the color of multi-select marker on the current line
|
||||||
|
- Fixed to allow `^pattern$` in extended-search mode
|
||||||
|
|
||||||
|
|
||||||
0.9.13
|
0.9.13
|
||||||
------
|
------
|
||||||
|
|
||||||
|
LICENSE (2 changes)
@@ -1,6 +1,6 @@
 The MIT License (MIT)

-Copyright (c) 2015 Junegunn Choi
+Copyright (c) 2016 Junegunn Choi

 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal

README.md (258 changes)
@@ -1,4 +1,4 @@
|
|||||||
<img src="https://raw.githubusercontent.com/junegunn/i/master/fzf.png" height="170" alt="fzf - a command-line fuzzy finder"> [](https://travis-ci.org/junegunn/fzf) <a href="http://flattr.com/thing/3115381/junegunnfzf-on-GitHub" target="_blank"><img src="http://api.flattr.com/button/flattr-badge-large.png" alt="Flattr this" title="Flattr this" border="0" /></a>
|
<img src="https://raw.githubusercontent.com/junegunn/i/master/fzf.png" height="170" alt="fzf - a command-line fuzzy finder"> [](https://travis-ci.org/junegunn/fzf)
|
||||||
===
|
===
|
||||||
|
|
||||||
fzf is a general-purpose command-line fuzzy finder.
|
fzf is a general-purpose command-line fuzzy finder.
|
||||||
@@ -10,18 +10,15 @@ Pros
|
|||||||
|
|
||||||
- No dependencies
|
- No dependencies
|
||||||
- Blazingly fast
|
- Blazingly fast
|
||||||
- e.g. `locate / | fzf`
|
|
||||||
- Flexible layout
|
|
||||||
- Runs in fullscreen or in horizontal/vertical split using tmux
|
|
||||||
- The most comprehensive feature set
|
- The most comprehensive feature set
|
||||||
- Try `fzf --help` and be surprised
|
- Flexible layout using tmux panes
|
||||||
- Batteries included
|
- Batteries included
|
||||||
- Vim/Neovim plugin, key bindings and fuzzy auto-completion
|
- Vim/Neovim plugin, key bindings and fuzzy auto-completion
|
||||||
|
|
||||||
Installation
|
Installation
|
||||||
------------
|
------------
|
||||||
|
|
||||||
fzf project consists of the followings:
|
fzf project consists of the following components:
|
||||||
|
|
||||||
- `fzf` executable
|
- `fzf` executable
|
||||||
- `fzf-tmux` script for launching fzf in a tmux pane
|
- `fzf-tmux` script for launching fzf in a tmux pane
|
||||||
@@ -30,12 +27,12 @@ fzf project consists of the followings:
|
|||||||
- Fuzzy auto-completion (bash, zsh)
|
- Fuzzy auto-completion (bash, zsh)
|
||||||
- Vim/Neovim plugin
|
- Vim/Neovim plugin
|
||||||
|
|
||||||
You can [download fzf executable][bin] alone, but it's recommended that you
|
You can [download fzf executable][bin] alone if you don't need the extra
|
||||||
install the extra stuff using the attached install script.
|
stuff.
|
||||||
|
|
||||||
[bin]: https://github.com/junegunn/fzf-bin/releases
|
[bin]: https://github.com/junegunn/fzf-bin/releases
|
||||||
|
|
||||||
#### Using git (recommended)
|
### Using git
|
||||||
|
|
||||||
Clone this repository and run
|
Clone this repository and run
|
||||||
[install](https://github.com/junegunn/fzf/blob/master/install) script.
|
[install](https://github.com/junegunn/fzf/blob/master/install) script.
|
||||||
@@ -45,40 +42,44 @@ git clone --depth 1 https://github.com/junegunn/fzf.git ~/.fzf
|
|||||||
~/.fzf/install
|
~/.fzf/install
|
||||||
```
|
```
|
||||||
|
|
||||||
#### Using Homebrew
|
### Using Homebrew
|
||||||
|
|
||||||
On OS X, you can use [Homebrew](http://brew.sh/) to install fzf.
|
On OS X, you can use [Homebrew](http://brew.sh/) to install fzf.
|
||||||
|
|
||||||
```sh
|
```sh
|
||||||
brew reinstall --HEAD fzf
|
brew install fzf
|
||||||
|
|
||||||
# Install shell extensions
|
# Install shell extensions
|
||||||
/usr/local/Cellar/fzf/HEAD/install
|
/usr/local/opt/fzf/install
|
||||||
```
|
```
|
||||||
|
|
||||||
#### Install as Vim plugin
|
### Vim plugin
|
||||||
|
|
||||||
Once you have cloned the repository, add the following line to your .vimrc.
|
You can manually add the directory to `&runtimepath` as follows,
|
||||||
|
|
||||||
```vim
|
```vim
|
||||||
|
" If installed using git
|
||||||
set rtp+=~/.fzf
|
set rtp+=~/.fzf
|
||||||
|
|
||||||
|
" If installed using Homebrew
|
||||||
|
set rtp+=/usr/local/opt/fzf
|
||||||
```
|
```
|
||||||
|
|
||||||
Or you can have [vim-plug](https://github.com/junegunn/vim-plug) manage fzf
|
But it's recommended that you use a plugin manager like
|
||||||
(recommended):
|
[vim-plug](https://github.com/junegunn/vim-plug).
|
||||||
|
|
||||||
```vim
|
```vim
|
||||||
Plug 'junegunn/fzf', { 'dir': '~/.fzf', 'do': 'yes \| ./install' }
|
Plug 'junegunn/fzf', { 'dir': '~/.fzf', 'do': './install --all' }
|
||||||
```
|
```
|
||||||
|
|
||||||
#### Upgrading fzf
|
### Upgrading fzf
|
||||||
|
|
||||||
fzf is being actively developed and you might want to upgrade it once in a
|
fzf is being actively developed and you might want to upgrade it once in a
|
||||||
while. Please follow the instruction below depending on the installation
|
while. Please follow the instruction below depending on the installation
|
||||||
method.
|
method used.
|
||||||
|
|
||||||
- git: `cd ~/.fzf && git pull && ./install`
|
- git: `cd ~/.fzf && git pull && ./install`
|
||||||
- brew: `brew reinstall --HEAD fzf`
|
- brew: `brew update; brew reinstall fzf`
|
||||||
- vim-plug: `:PlugUpdate fzf`
|
- vim-plug: `:PlugUpdate fzf`
|
||||||
|
|
||||||
Usage
|
Usage
|
||||||
@@ -108,24 +109,41 @@ vim $(fzf)
|
|||||||
- Mouse: scroll, click, double-click; shift-click and shift-scroll on
|
- Mouse: scroll, click, double-click; shift-click and shift-scroll on
|
||||||
multi-select mode
|
multi-select mode
|
||||||
|
|
||||||
#### Extended-search mode
|
#### Search syntax
|
||||||
|
|
||||||
With `-x` or `--extended` option, fzf will start in "extended-search mode".
|
Unless otherwise specified, fzf starts in "extended-search mode" where you can
|
||||||
|
type in multiple search terms delimited by spaces. e.g. `^music .mp3$ sbtrkt
|
||||||
|
!rmx`
|
||||||
|
|
||||||
In this mode, you can specify multiple patterns delimited by spaces,
|
| Token | Match type | Description |
|
||||||
such as: `^music .mp3$ sbtrkt !rmx`
|
| -------- | -------------------- | -------------------------------- |
|
||||||
|
| `sbtrkt` | fuzzy-match | Items that match `sbtrkt` |
|
||||||
|
| `^music` | prefix-exact-match | Items that start with `music` |
|
||||||
|
| `.mp3$` | suffix-exact-match | Items that end with `.mp3` |
|
||||||
|
| `'wild` | exact-match (quoted) | Items that include `wild` |
|
||||||
|
| `!rmx` | inverse-fuzzy-match | Items that do not match `rmx` |
|
||||||
|
| `!'fire` | inverse-exact-match | Items that do not include `fire` |
|
||||||
|
|
||||||
| Token | Description | Match type |
|
If you don't prefer fuzzy matching and do not wish to "quote" every word,
|
||||||
| -------- | -------------------------------- | -------------------- |
|
start fzf with `-e` or `--exact` option. Note that when `--exact` is set,
|
||||||
| `^music` | Items that start with `music` | prefix-exact-match |
|
`'`-prefix "unquotes" the term.
|
||||||
| `.mp3$` | Items that end with `.mp3` | suffix-exact-match |
|
|
||||||
| `sbtrkt` | Items that match `sbtrkt` | fuzzy-match |
|
|
||||||
| `!rmx` | Items that do not match `rmx` | inverse-fuzzy-match |
|
|
||||||
| `'wild` | Items that include `wild` | exact-match (quoted) |
|
|
||||||
| `!'fire` | Items that do not include `fire` | inverse-exact-match |
|
|
||||||
|
|
||||||
If you don't need fuzzy matching and do not wish to "quote" every word, start
|
A single bar character term acts as an OR operator. For example, the following
|
||||||
fzf with `-e` or `--extended-exact` option.
|
query matches entries that start with `core` and end with either `go`, `rb`,
|
||||||
|
or `py`.
|
||||||
|
|
||||||
|
```
|
||||||
|
^core go$ | rb$ | py$
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Environment variables
|
||||||
|
|
||||||
|
- `FZF_DEFAULT_COMMAND`
|
||||||
|
- Default command to use when input is tty
|
||||||
|
- e.g. `export FZF_DEFAULT_COMMAND='ag -g ""'`
|
||||||
|
- `FZF_DEFAULT_OPTS`
|
||||||
|
- Default options
|
||||||
|
- e.g. `export FZF_DEFAULT_OPTS="--reverse --inline-info"`
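
A typical shell-profile snippet combining the two variables; this is only one possible choice of command and options:

```sh
# Picked up by any plain `fzf` invocation whose input is a tty.
export FZF_DEFAULT_COMMAND='ag -g ""'
export FZF_DEFAULT_OPTS='--reverse --inline-info'
```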
|
||||||
|
|
||||||
Examples
|
Examples
|
||||||
--------
|
--------
|
||||||
@@ -134,26 +152,6 @@ Many useful examples can be found on [the wiki
|
|||||||
page](https://github.com/junegunn/fzf/wiki/examples). Feel free to add your
|
page](https://github.com/junegunn/fzf/wiki/examples). Feel free to add your
|
||||||
own as well.
|
own as well.
|
||||||
|
|
||||||
Key bindings for command line
|
|
||||||
-----------------------------
|
|
||||||
|
|
||||||
The install script will setup the following key bindings for bash, zsh, and
|
|
||||||
fish.
|
|
||||||
|
|
||||||
- `CTRL-T` - Paste the selected file path(s) into the command line
|
|
||||||
- `CTRL-R` - Paste the selected command from history into the command line
|
|
||||||
- Sort is disabled by default to respect chronological ordering
|
|
||||||
- Press `CTRL-R` again to toggle sort
|
|
||||||
- `ALT-C` - cd into the selected directory
|
|
||||||
|
|
||||||
If you're on a tmux session, fzf will start in a split pane. You may disable
|
|
||||||
this tmux integration by setting `FZF_TMUX` to 0, or change the height of the
|
|
||||||
pane with `FZF_TMUX_HEIGHT` (e.g. `20`, `50%`).
|
|
||||||
|
|
||||||
If you use vi mode on bash, you need to add `set -o vi` *before* `source
|
|
||||||
~/.fzf.bash` in your .bashrc, so that it correctly sets up key bindings for vi
|
|
||||||
mode.
|
|
||||||
|
|
||||||
`fzf-tmux` script
|
`fzf-tmux` script
|
||||||
-----------------
|
-----------------
|
||||||
|
|
||||||
@@ -173,6 +171,31 @@ cat /usr/share/dict/words | fzf-tmux -l 20% --multi --reverse
|
|||||||
It will still work even when you're not on tmux, silently ignoring `-[udlr]`
|
It will still work even when you're not on tmux, silently ignoring `-[udlr]`
|
||||||
options, so you can invariably use `fzf-tmux` in your scripts.
|
options, so you can invariably use `fzf-tmux` in your scripts.
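
Another hedged example along the same lines: because unsupported layout flags are ignored outside tmux, the same command works in both environments.

```sh
# Inside tmux: a 30%-height split below the current pane.
# Outside tmux: falls back to a regular full-screen fzf.
git ls-files | fzf-tmux -d 30% --multi
```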
|
||||||
|
|
||||||
|
Key bindings for command line
|
||||||
|
-----------------------------
|
||||||
|
|
||||||
|
The install script will setup the following key bindings for bash, zsh, and
|
||||||
|
fish.
|
||||||
|
|
||||||
|
- `CTRL-T` - Paste the selected files and directories onto the command line
|
||||||
|
- Set `FZF_CTRL_T_COMMAND` to override the default command
|
||||||
|
- Set `FZF_CTRL_T_OPTS` to pass additional options
|
||||||
|
- `CTRL-R` - Paste the selected command from history onto the command line
|
||||||
|
- Sort is disabled by default to respect chronological ordering
|
||||||
|
- Press `CTRL-R` again to toggle sort
|
||||||
|
- Set `FZF_CTRL_R_OPTS` to pass additional options
|
||||||
|
- `ALT-C` - cd into the selected directory
|
||||||
|
- Set `FZF_ALT_C_COMMAND` to override the default command
|
||||||
|
- Set `FZF_ALT_C_OPTS` to pass additional options
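
A sketch of customizing the bindings through the variables listed above; the commands and option values are illustrative, not the shipped defaults:

```sh
# What CTRL-T and ALT-C list, plus extra options per binding.
export FZF_CTRL_T_COMMAND='ag -g ""'
export FZF_CTRL_T_OPTS='--reverse'
export FZF_CTRL_R_OPTS='--exact'
export FZF_ALT_C_COMMAND='find . -type d'
```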
|
||||||
|
|
||||||
|
If you're on a tmux session, fzf will start in a split pane. You may disable
|
||||||
|
this tmux integration by setting `FZF_TMUX` to 0, or change the height of the
|
||||||
|
pane with `FZF_TMUX_HEIGHT` (e.g. `20`, `50%`).
|
||||||
|
|
||||||
|
If you use vi mode on bash, you need to add `set -o vi` *before* `source
|
||||||
|
~/.fzf.bash` in your .bashrc, so that it correctly sets up key bindings for vi
|
||||||
|
mode.
|
||||||
|
|
||||||
Fuzzy completion for bash and zsh
|
Fuzzy completion for bash and zsh
|
||||||
---------------------------------
|
---------------------------------
|
||||||
|
|
||||||
@@ -241,11 +264,33 @@ export FZF_COMPLETION_TRIGGER='~~'
|
|||||||
|
|
||||||
# Options to fzf command
|
# Options to fzf command
|
||||||
export FZF_COMPLETION_OPTS='+c -x'
|
export FZF_COMPLETION_OPTS='+c -x'
|
||||||
|
|
||||||
|
# Use ag instead of the default find command for listing candidates.
|
||||||
|
# - The first argument to the function is the base path to start traversal
|
||||||
|
# - Note that ag only lists files not directories
|
||||||
|
# - See the source code (completion.{bash,zsh}) for the details.
|
||||||
|
_fzf_compgen_path() {
|
||||||
|
ag -g "" "$1"
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Supported commands
|
||||||
|
|
||||||
|
On bash, fuzzy completion is enabled only for a predefined set of commands
|
||||||
|
(`complete | grep _fzf` to see the list). But you can enable it for other
|
||||||
|
commands as well like follows.
|
||||||
|
|
||||||
|
```sh
|
||||||
|
# There are also _fzf_path_completion and _fzf_dir_completion
|
||||||
|
complete -F _fzf_file_completion -o default -o bashdefault doge
|
||||||
```
|
```
|
||||||
|
|
||||||
Usage as Vim plugin
|
Usage as Vim plugin
|
||||||
-------------------
|
-------------------
|
||||||
|
|
||||||
|
This repository only enables basic integration with Vim. If you're looking for
|
||||||
|
more, check out [fzf.vim](https://github.com/junegunn/fzf.vim) project.
|
||||||
|
|
||||||
(Note: To use fzf in GVim, an external terminal emulator is required.)
|
(Note: To use fzf in GVim, an external terminal emulator is required.)
|
||||||
|
|
||||||
#### `:FZF[!]`
|
#### `:FZF[!]`
|
||||||
@@ -276,12 +321,10 @@ customization.
|
|||||||
|
|
||||||
[fzf-config]: https://github.com/junegunn/fzf/wiki/Configuring-FZF-command-(vim)
|
[fzf-config]: https://github.com/junegunn/fzf/wiki/Configuring-FZF-command-(vim)
|
||||||
|
|
||||||
#### `fzf#run([options])`
|
#### `fzf#run`
|
||||||
|
|
||||||
For more advanced uses, you can call `fzf#run()` function which returns the list
|
For more advanced uses, you can use `fzf#run([options])` function with the
|
||||||
of the selected items.
|
following options.
|
||||||
|
|
||||||
`fzf#run()` may take an options-dictionary:
|
|
||||||
|
|
||||||
| Option name | Type | Description |
|
| Option name | Type | Description |
|
||||||
| -------------------------- | ------------- | ---------------------------------------------------------------- |
|
| -------------------------- | ------------- | ---------------------------------------------------------------- |
|
||||||
@@ -297,73 +340,26 @@ of the selected items.
|
|||||||
| `launcher` | string | External terminal emulator to start fzf with (GVim only) |
|
| `launcher` | string | External terminal emulator to start fzf with (GVim only) |
|
||||||
| `launcher` | funcref | Function for generating `launcher` string (GVim only) |
|
| `launcher` | funcref | Function for generating `launcher` string (GVim only) |
|
||||||
|
|
||||||
_However on Neovim `fzf#run` is asynchronous and does not return values so you
|
Examples can be found on [the wiki
|
||||||
should use `sink` or `sink*` to process the output from fzf._
|
|
||||||
|
|
||||||
##### Examples
|
|
||||||
|
|
||||||
If `sink` option is not given, `fzf#run` will simply return the list.
|
|
||||||
|
|
||||||
```vim
|
|
||||||
let items = fzf#run({ 'options': '-m +c', 'dir': '~', 'source': 'ls' })
|
|
||||||
```
|
|
||||||
|
|
||||||
But if `sink` is given as a string, the command will be executed for each
|
|
||||||
selected item.
|
|
||||||
|
|
||||||
```vim
|
|
||||||
" Each selected item will be opened in a new tab
|
|
||||||
let items = fzf#run({ 'sink': 'tabe', 'options': '-m +c', 'dir': '~', 'source': 'ls' })
|
|
||||||
```
|
|
||||||
|
|
||||||
We can also use a Vim list as the source as follows:
|
|
||||||
|
|
||||||
```vim
|
|
||||||
" Choose a color scheme with fzf
|
|
||||||
nnoremap <silent> <Leader>C :call fzf#run({
|
|
||||||
\ 'source':
|
|
||||||
\ map(split(globpath(&rtp, "colors/*.vim"), "\n"),
|
|
||||||
\ "substitute(fnamemodify(v:val, ':t'), '\\..\\{-}$', '', '')"),
|
|
||||||
\ 'sink': 'colo',
|
|
||||||
\ 'options': '+m',
|
|
||||||
\ 'left': 20,
|
|
||||||
\ 'launcher': 'xterm -geometry 20x30 -e bash -ic %s'
|
|
||||||
\ })<CR>
|
|
||||||
```
|
|
||||||
|
|
||||||
`sink` option can be a function reference. The following example creates a
|
|
||||||
handy mapping that selects an open buffer.
|
|
||||||
|
|
||||||
```vim
|
|
||||||
" List of buffers
|
|
||||||
function! s:buflist()
|
|
||||||
redir => ls
|
|
||||||
silent ls
|
|
||||||
redir END
|
|
||||||
return split(ls, '\n')
|
|
||||||
endfunction
|
|
||||||
|
|
||||||
function! s:bufopen(e)
|
|
||||||
execute 'buffer' matchstr(a:e, '^[ 0-9]*')
|
|
||||||
endfunction
|
|
||||||
|
|
||||||
nnoremap <silent> <Leader><Enter> :call fzf#run({
|
|
||||||
\ 'source': reverse(<sid>buflist()),
|
|
||||||
\ 'sink': function('<sid>bufopen'),
|
|
||||||
\ 'options': '+m',
|
|
||||||
\ 'down': len(<sid>buflist()) + 2
|
|
||||||
\ })<CR>
|
|
||||||
```
|
|
||||||
|
|
||||||
More examples can be found on [the wiki
|
|
||||||
page](https://github.com/junegunn/fzf/wiki/Examples-(vim)).
|
page](https://github.com/junegunn/fzf/wiki/Examples-(vim)).
|
||||||
|
|
||||||
|
#### `fzf#wrap`
|
||||||
|
|
||||||
|
`fzf#wrap([name string,] [opts dict,] [fullscreen boolean])` is a helper
|
||||||
|
function that decorates the options dictionary so that it understands
|
||||||
|
`g:fzf_layout`, `g:fzf_action`, and `g:fzf_history_dir` like `:FZF`.
|
||||||
|
|
||||||
|
```vim
|
||||||
|
command! -bang MyStuff
|
||||||
|
\ call fzf#run(fzf#wrap('my-stuff', {'dir': '~/my-stuff'}, <bang>0))
|
||||||
|
```
|
||||||
|
|
||||||
Tips
|
Tips
|
||||||
----
|
----
|
||||||
|
|
||||||
#### Rendering issues
|
#### Rendering issues
|
||||||
|
|
||||||
If you have any rendering issues, check the followings:
|
If you have any rendering issues, check the following:
|
||||||
|
|
||||||
1. Make sure `$TERM` is correctly set. fzf will use 256-color only if it
|
1. Make sure `$TERM` is correctly set. fzf will use 256-color only if it
|
||||||
contains `256` (e.g. `xterm-256color`)
|
contains `256` (e.g. `xterm-256color`)
|
||||||
@@ -383,13 +379,22 @@ filtering:
|
|||||||
|
|
||||||
```sh
|
```sh
|
||||||
# Feed the output of ag into fzf
|
# Feed the output of ag into fzf
|
||||||
ag -l -g "" | fzf
|
ag -g "" | fzf
|
||||||
|
|
||||||
# Setting ag as the default source for fzf
|
# Setting ag as the default source for fzf
|
||||||
export FZF_DEFAULT_COMMAND='ag -l -g ""'
|
export FZF_DEFAULT_COMMAND='ag -g ""'
|
||||||
|
|
||||||
# Now fzf (w/o pipe) will use ag instead of find
|
# Now fzf (w/o pipe) will use ag instead of find
|
||||||
fzf
|
fzf
|
||||||
|
|
||||||
|
# To apply the command to CTRL-T as well
|
||||||
|
export FZF_CTRL_T_COMMAND="$FZF_DEFAULT_COMMAND"
|
||||||
|
```
|
||||||
|
|
||||||
|
If you don't want to exclude hidden files, use the following command:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
export FZF_DEFAULT_COMMAND='ag --hidden --ignore .git -g ""'
|
||||||
```
|
```
|
||||||
|
|
||||||
#### `git ls-tree` for fast traversal
|
#### `git ls-tree` for fast traversal
|
||||||
@@ -400,7 +405,8 @@ speed of the traversal.
|
|||||||
```sh
|
```sh
|
||||||
export FZF_DEFAULT_COMMAND='
|
export FZF_DEFAULT_COMMAND='
|
||||||
(git ls-tree -r --name-only HEAD ||
|
(git ls-tree -r --name-only HEAD ||
|
||||||
find * -name ".*" -prune -o -type f -print -o -type l -print) 2> /dev/null'
|
find . -path "*/\.*" -prune -o -type f -print -o -type l -print |
|
||||||
|
sed s/^..//) 2> /dev/null'
|
||||||
```
|
```
|
||||||
|
|
||||||
#### Fish shell
|
#### Fish shell
|
||||||
@@ -414,14 +420,6 @@ of fzf to a temporary file.
|
|||||||
fzf > $TMPDIR/fzf.result; and vim (cat $TMPDIR/fzf.result)
|
fzf > $TMPDIR/fzf.result; and vim (cat $TMPDIR/fzf.result)
|
||||||
```
|
```
|
||||||
|
|
||||||
#### Handling UTF-8 NFD paths on OSX
|
|
||||||
|
|
||||||
Use iconv to convert NFD paths to NFC:
|
|
||||||
|
|
||||||
```sh
|
|
||||||
find . | iconv -f utf-8-mac -t utf8//ignore | fzf
|
|
||||||
```
|
|
||||||
|
|
||||||
License
|
License
|
||||||
-------
|
-------
|
||||||
|
|
||||||
|
bin/fzf-tmux (133 changes)
@@ -2,24 +2,51 @@
|
|||||||
# fzf-tmux: starts fzf in a tmux pane
|
# fzf-tmux: starts fzf in a tmux pane
|
||||||
# usage: fzf-tmux [-u|-d [HEIGHT[%]]] [-l|-r [WIDTH[%]]] [--] [FZF OPTIONS]
|
# usage: fzf-tmux [-u|-d [HEIGHT[%]]] [-l|-r [WIDTH[%]]] [--] [FZF OPTIONS]
|
||||||
|
|
||||||
|
fail() {
|
||||||
|
>&2 echo "$1"
|
||||||
|
exit 2
|
||||||
|
}
|
||||||
|
|
||||||
|
fzf="$(command -v fzf 2> /dev/null)" || fzf="$(dirname "$0")/fzf"
|
||||||
|
[[ -x "$fzf" ]] || fail 'fzf executable not found'
|
||||||
|
|
||||||
args=()
|
args=()
|
||||||
opt=""
|
opt=""
|
||||||
skip=""
|
skip=""
|
||||||
swap=""
|
swap=""
|
||||||
close=""
|
close=""
|
||||||
term=""
|
term=""
|
||||||
while [ $# -gt 0 ]; do
|
[[ -n "$LINES" ]] && lines=$LINES || lines=$(tput lines)
|
||||||
|
|
||||||
|
help() {
|
||||||
|
>&2 echo 'usage: fzf-tmux [-u|-d [HEIGHT[%]]] [-l|-r [WIDTH[%]]] [--] [FZF OPTIONS]
|
||||||
|
|
||||||
|
Layout
|
||||||
|
-u [HEIGHT[%]] Split above (up)
|
||||||
|
-d [HEIGHT[%]] Split below (down)
|
||||||
|
-l [WIDTH[%]] Split left
|
||||||
|
-r [WIDTH[%]] Split right
|
||||||
|
|
||||||
|
(default: -d 50%)
|
||||||
|
'
|
||||||
|
exit
|
||||||
|
}
|
||||||
|
|
||||||
|
while [[ $# -gt 0 ]]; do
|
||||||
arg="$1"
|
arg="$1"
|
||||||
case "$arg" in
|
shift
|
||||||
|
[[ -z "$skip" ]] && case "$arg" in
|
||||||
-)
|
-)
|
||||||
term=1
|
term=1
|
||||||
;;
|
;;
|
||||||
|
--help)
|
||||||
|
help
|
||||||
|
;;
|
||||||
|
--version)
|
||||||
|
echo "fzf-tmux (with fzf $("$fzf" --version))"
|
||||||
|
exit
|
||||||
|
;;
|
||||||
-w*|-h*|-d*|-u*|-r*|-l*)
|
-w*|-h*|-d*|-u*|-r*|-l*)
|
||||||
if [ -n "$skip" ]; then
|
|
||||||
args+=("$1")
|
|
||||||
shift
|
|
||||||
continue
|
|
||||||
fi
|
|
||||||
if [[ "$arg" =~ ^.[lrw] ]]; then
|
if [[ "$arg" =~ ^.[lrw] ]]; then
|
||||||
opt="-h"
|
opt="-h"
|
||||||
if [[ "$arg" =~ ^.l ]]; then
|
if [[ "$arg" =~ ^.l ]]; then
|
||||||
@@ -35,35 +62,33 @@ while [ $# -gt 0 ]; do
|
|||||||
close="; tmux swap-pane -D"
|
close="; tmux swap-pane -D"
|
||||||
fi
|
fi
|
||||||
fi
|
fi
|
||||||
if [ ${#arg} -gt 2 ]; then
|
if [[ ${#arg} -gt 2 ]]; then
|
||||||
size="${arg:2}"
|
size="${arg:2}"
|
||||||
else
|
else
|
||||||
shift
|
|
||||||
if [[ "$1" =~ ^[0-9]+%?$ ]]; then
|
if [[ "$1" =~ ^[0-9]+%?$ ]]; then
|
||||||
size="$1"
|
size="$1"
|
||||||
else
|
|
||||||
[ -n "$1" -a "$1" != "--" ] && args+=("$1")
|
|
||||||
shift
|
shift
|
||||||
|
else
|
||||||
continue
|
continue
|
||||||
fi
|
fi
|
||||||
fi
|
fi
|
||||||
|
|
||||||
if [[ "$size" =~ %$ ]]; then
|
if [[ "$size" =~ %$ ]]; then
|
||||||
size=${size:0:((${#size}-1))}
|
size=${size:0:((${#size}-1))}
|
||||||
if [ -n "$swap" ]; then
|
if [[ -n "$swap" ]]; then
|
||||||
opt="$opt -p $(( 100 - size ))"
|
opt="$opt -p $(( 100 - size ))"
|
||||||
else
|
else
|
||||||
opt="$opt -p $size"
|
opt="$opt -p $size"
|
||||||
fi
|
fi
|
||||||
else
|
else
|
||||||
if [ -n "$swap" ]; then
|
if [[ -n "$swap" ]]; then
|
||||||
if [[ "$arg" =~ ^.l ]]; then
|
if [[ "$arg" =~ ^.l ]]; then
|
||||||
[ -n "$COLUMNS" ] && max=$COLUMNS || max=$(tput cols)
|
[[ -n "$COLUMNS" ]] && max=$COLUMNS || max=$(tput cols)
|
||||||
else
|
else
|
||||||
[ -n "$LINES" ] && max=$LINES || max=$(tput lines)
|
max=$lines
|
||||||
fi
|
fi
|
||||||
size=$(( max - size ))
|
size=$(( max - size ))
|
||||||
[ $size -lt 0 ] && size=0
|
[[ $size -lt 0 ]] && size=0
|
||||||
opt="$opt -l $size"
|
opt="$opt -l $size"
|
||||||
else
|
else
|
||||||
opt="$opt -l $size"
|
opt="$opt -l $size"
|
||||||
@@ -74,63 +99,81 @@ while [ $# -gt 0 ]; do
|
|||||||
# "--" can be used to separate fzf-tmux options from fzf options to
|
# "--" can be used to separate fzf-tmux options from fzf options to
|
||||||
# avoid conflicts
|
# avoid conflicts
|
||||||
skip=1
|
skip=1
|
||||||
|
continue
|
||||||
;;
|
;;
|
||||||
*)
|
*)
|
||||||
args+=("$1")
|
args+=("$arg")
|
||||||
;;
|
;;
|
||||||
esac
|
esac
|
||||||
shift
|
[[ -n "$skip" ]] && args+=("$arg")
|
||||||
done
|
done
|
||||||
|
|
||||||
if [ -z "$TMUX_PANE" ]; then
|
if [[ -z "$TMUX" ]] || [[ "$lines" -le 15 ]]; then
|
||||||
fzf "${args[@]}"
|
"$fzf" "${args[@]}"
|
||||||
exit $?
|
exit $?
|
||||||
fi
|
fi
|
||||||
|
|
||||||
|
# Handle zoomed tmux pane by moving it to a temp window
|
||||||
|
if tmux list-panes -F '#F' | grep -q Z; then
|
||||||
|
zoomed=1
|
||||||
|
original_window=$(tmux display-message -p "#{window_id}")
|
||||||
|
tmp_window=$(tmux new-window -d -P -F "#{window_id}" "bash -c 'while :; do for c in \\| / - \\\\; do sleep 0.2; printf \"\\r\$c fzf-tmux is running\\r\"; done; done'")
|
||||||
|
tmux swap-pane -t $tmp_window \; select-window -t $tmp_window
|
||||||
|
fi
|
||||||
|
|
||||||
set -e
|
set -e
|
||||||
|
|
||||||
# Clean up named pipes on exit
|
# Clean up named pipes on exit
|
||||||
id=$RANDOM
|
id=$RANDOM
|
||||||
argsf=/tmp/fzf-args-$id
|
argsf="${TMPDIR:-/tmp}/fzf-args-$id"
|
||||||
fifo1=/tmp/fzf-fifo1-$id
|
fifo1="${TMPDIR:-/tmp}/fzf-fifo1-$id"
|
||||||
fifo2=/tmp/fzf-fifo2-$id
|
fifo2="${TMPDIR:-/tmp}/fzf-fifo2-$id"
|
||||||
fifo3=/tmp/fzf-fifo3-$id
|
fifo3="${TMPDIR:-/tmp}/fzf-fifo3-$id"
|
||||||
cleanup() {
|
cleanup() {
|
||||||
rm -f $argsf $fifo1 $fifo2 $fifo3
|
rm -f $argsf $fifo1 $fifo2 $fifo3
|
||||||
|
|
||||||
|
# Remove temp window if we were zoomed
|
||||||
|
if [[ -n "$zoomed" ]]; then
|
||||||
|
tmux swap-pane -t $original_window \; \
|
||||||
|
select-window -t $original_window \; \
|
||||||
|
kill-window -t $tmp_window \; \
|
||||||
|
resize-pane -Z
|
||||||
|
fi
|
||||||
}
|
}
|
||||||
trap cleanup EXIT SIGINT SIGTERM
|
trap cleanup EXIT SIGINT SIGTERM
|
||||||
|
|
||||||
fail() {
|
envs="env TERM=$TERM "
|
||||||
>&2 echo "$1"
|
[[ -n "$FZF_DEFAULT_OPTS" ]] && envs="$envs FZF_DEFAULT_OPTS=$(printf %q "$FZF_DEFAULT_OPTS")"
|
||||||
exit 1
|
[[ -n "$FZF_DEFAULT_COMMAND" ]] && envs="$envs FZF_DEFAULT_COMMAND=$(printf %q "$FZF_DEFAULT_COMMAND")"
|
||||||
}
|
|
||||||
fzf="$(which fzf 2> /dev/null)" || fzf="$(dirname "$0")/fzf"
|
|
||||||
[ -x "$fzf" ] || fail "fzf executable not found"
|
|
||||||
|
|
||||||
envs="env "
|
mkfifo -m o+w $fifo2
|
||||||
[ -n "$FZF_DEFAULT_OPTS" ] && envs="$envs FZF_DEFAULT_OPTS=$(printf %q "$FZF_DEFAULT_OPTS")"
|
mkfifo -m o+w $fifo3
|
||||||
[ -n "$FZF_DEFAULT_COMMAND" ] && envs="$envs FZF_DEFAULT_COMMAND=$(printf %q "$FZF_DEFAULT_COMMAND")"
|
|
||||||
|
|
||||||
mkfifo $fifo2
|
|
||||||
mkfifo $fifo3
|
|
||||||
|
|
||||||
# Build arguments to fzf
|
# Build arguments to fzf
|
||||||
opts=""
|
opts=""
|
||||||
for arg in "${args[@]}"; do
|
for arg in "${args[@]}"; do
|
||||||
opts="$opts \"${arg//\"/\\\"}\""
|
arg="${arg//\\/\\\\}"
|
||||||
|
arg="${arg//\"/\\\"}"
|
||||||
|
arg="${arg//\`/\\\`}"
|
||||||
|
arg="${arg//$/\\$}"
|
||||||
|
opts="$opts \"$arg\""
|
||||||
done
|
done
|
||||||
|
|
||||||
if [ -n "$term" -o -t 0 ]; then
|
if [[ -n "$term" ]] || [[ -t 0 ]]; then
|
||||||
cat <<< "$fzf $opts > $fifo2; echo \$? > $fifo3 $close" > $argsf
|
cat <<< "\"$fzf\" $opts > $fifo2; echo \$? > $fifo3 $close" > $argsf
|
||||||
tmux set-window-option -q synchronize-panes off \;\
|
TMUX=$(echo $TMUX | cut -d , -f 1,2) tmux set-window-option synchronize-panes off \;\
|
||||||
split-window $opt "cd $(printf %q "$PWD");$envs bash $argsf" $swap
|
set-window-option remain-on-exit off \;\
|
||||||
|
split-window $opt "cd $(printf %q "$PWD");$envs bash $argsf" $swap \
|
||||||
|
> /dev/null 2>&1
|
||||||
else
|
else
|
||||||
mkfifo $fifo1
|
mkfifo $fifo1
|
||||||
cat <<< "$fzf $opts < $fifo1 > $fifo2; echo \$? > $fifo3 $close" > $argsf
|
cat <<< "\"$fzf\" $opts < $fifo1 > $fifo2; echo \$? > $fifo3 $close" > $argsf
|
||||||
tmux set-window-option -q synchronize-panes off \;\
|
TMUX=$(echo $TMUX | cut -d , -f 1,2) tmux set-window-option synchronize-panes off \;\
|
||||||
split-window $opt "$envs bash $argsf" $swap
|
set-window-option remain-on-exit off \;\
|
||||||
|
split-window $opt "$envs bash $argsf" $swap \
|
||||||
|
> /dev/null 2>&1
|
||||||
cat <&0 > $fifo1 &
|
cat <&0 > $fifo1 &
|
||||||
fi
|
fi
|
||||||
cat $fifo2
|
cat $fifo2
|
||||||
[ "$(cat $fifo3)" = '0' ]
|
exit "$(cat $fifo3)"
|
||||||
|
|
||||||
|
fzf (10 changes)
@@ -206,11 +206,11 @@ class FZF
 @expect = true
 when /^--expect=(.*)$/
 @expect = true
-when '--toggle-sort', '--tiebreak', '--color', '--bind'
+when '--toggle-sort', '--tiebreak', '--color', '--bind', '--history', '--history-size'
 argv.shift
 when '--tac', '--no-tac', '--sync', '--no-sync', '--hscroll', '--no-hscroll',
-'--inline-info', '--no-inline-info', /^--bind=(.*)$/,
-/^--color=(.*)$/, /^--toggle-sort=(.*)$/, /^--tiebreak=(.*)$/
+'--inline-info', '--no-inline-info', '--read0', '--cycle', /^--bind=(.*)$/,
+/^--color=(.*)$/, /^--toggle-sort=(.*)$/, /^--tiebreak=(.*)$/, /^--history(-max)?=(.*)$/
 # XXX
 else
 usage 1, "illegal option: #{o}"

@@ -370,7 +370,7 @@ class FZF
 +i Case-sensitive match
 -n, --nth=N[,..] Comma-separated list of field index expressions
 for limiting search scope. Each can be a non-zero
-integer or a range expression ([BEGIN]..[END])
+integer or a range expression ([BEGIN]..[END]).
 --with-nth=N[,..] Transform the item using index expressions for search
 -d, --delimiter=STR Field delimiter regex for --nth (default: AWK-style)

@@ -396,7 +396,7 @@ class FZF

 Environment variables
 FZF_DEFAULT_COMMAND Default command to use when input is tty
-FZF_DEFAULT_OPTS Defaults options. (e.g. "-x -m --sort 10000")] + $/ + $/
+FZF_DEFAULT_OPTS Default options (e.g. "-x -m --sort 10000")] + $/ + $/
 exit x
 end

install (277 changes)
@@ -1,35 +1,93 @@
|
|||||||
#!/usr/bin/env bash
|
#!/usr/bin/env bash
|
||||||
|
|
||||||
version=0.9.13
|
set -u
|
||||||
|
|
||||||
cd $(dirname $BASH_SOURCE)
|
[[ "$@" =~ --pre ]] && version=0.15.0 pre=1 ||
|
||||||
fzf_base=$(pwd)
|
version=0.15.0 pre=0
|
||||||
|
|
||||||
# If stdin is a tty, we are "interactive".
|
auto_completion=
|
||||||
[ -t 0 ] && interactive=yes
|
key_bindings=
|
||||||
|
update_config=2
|
||||||
|
binary_arch=
|
||||||
|
allow_legacy=
|
||||||
|
|
||||||
|
help() {
|
||||||
|
cat << EOF
|
||||||
|
usage: $0 [OPTIONS]
|
||||||
|
|
||||||
|
--help Show this message
|
||||||
|
--bin Download fzf binary only; Do not generate ~/.fzf.{bash,zsh}
|
||||||
|
--all Download fzf binary and update configuration files
|
||||||
|
to enable key bindings and fuzzy completion
|
||||||
|
--[no-]key-bindings Enable/disable key bindings (CTRL-T, CTRL-R, ALT-C)
|
||||||
|
--[no-]completion Enable/disable fuzzy completion (bash & zsh)
|
||||||
|
--[no-]update-rc Whether or not to update shell configuration files
|
||||||
|
|
||||||
|
--32 Download 32-bit binary
|
||||||
|
--64 Download 64-bit binary
|
||||||
|
EOF
|
||||||
|
}
|
||||||
|
|
||||||
|
for opt in "$@"; do
|
||||||
|
case $opt in
|
||||||
|
--help)
|
||||||
|
help
|
||||||
|
exit 0
|
||||||
|
;;
|
||||||
|
--all)
|
||||||
|
auto_completion=1
|
||||||
|
key_bindings=1
|
||||||
|
update_config=1
|
||||||
|
allow_legacy=1
|
||||||
|
;;
|
||||||
|
--key-bindings) key_bindings=1 ;;
|
||||||
|
--no-key-bindings) key_bindings=0 ;;
|
||||||
|
--completion) auto_completion=1 ;;
|
||||||
|
--no-completion) auto_completion=0 ;;
|
||||||
|
--update-rc) update_config=1 ;;
|
||||||
|
--no-update-rc) update_config=0 ;;
|
||||||
|
--32) binary_arch=386 ;;
|
||||||
|
--64) binary_arch=amd64 ;;
|
||||||
|
--bin|--pre) ;;
|
||||||
|
*)
|
||||||
|
echo "unknown option: $opt"
|
||||||
|
help
|
||||||
|
exit 1
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
done
|
||||||
|
|
||||||
|
cd "$(dirname "${BASH_SOURCE[0]}")"
|
||||||
|
fzf_base="$(pwd)"
|
||||||
|
|
||||||
ask() {
|
ask() {
|
||||||
|
# If stdin is a tty, we are "interactive".
|
||||||
# non-interactive shell: wait for a linefeed
|
# non-interactive shell: wait for a linefeed
|
||||||
# interactive shell: continue after a single keypress
|
# interactive shell: continue after a single keypress
|
||||||
[ -n "$interactive" ] && read_n='-n 1' || read_n=
|
read_n=$([ -t 0 ] && echo "-n 1")
|
||||||
|
|
||||||
read -p "$1 ([y]/n) " $read_n -r
|
read -p "$1 ([y]/n) " $read_n -r
|
||||||
echo
|
echo
|
||||||
[[ ! $REPLY =~ ^[Nn]$ ]]
|
[[ $REPLY =~ ^[Nn]$ ]]
|
||||||
}
|
}
|
||||||
|
|
||||||
check_binary() {
|
check_binary() {
|
||||||
echo -n " - Checking fzf executable ... "
|
echo -n " - Checking fzf executable ... "
|
||||||
local output=$("$fzf_base"/bin/fzf --version 2>&1)
|
local output
|
||||||
if [ "$version" = "$output" ]; then
|
output=$("$fzf_base"/bin/fzf --version 2>&1)
|
||||||
|
if [ $? -ne 0 ]; then
|
||||||
|
echo "Error: $output"
|
||||||
|
binary_error="Invalid binary"
|
||||||
|
elif [ "$version" != "$output" ]; then
|
||||||
|
echo "$output != $version"
|
||||||
|
binary_error="Invalid version"
|
||||||
|
else
|
||||||
echo "$output"
|
echo "$output"
|
||||||
binary_error=""
|
binary_error=""
|
||||||
else
|
return 0
|
||||||
echo "$output != $version"
|
|
||||||
rm -f "$fzf_base"/bin/fzf
|
|
||||||
binary_error="Invalid binary"
|
|
||||||
return 1
|
|
||||||
fi
|
fi
|
||||||
|
rm -f "$fzf_base"/bin/fzf
|
||||||
|
return 1
|
||||||
}
|
}
|
||||||
|
|
||||||
symlink() {
|
symlink() {
|
||||||
@@ -43,14 +101,36 @@ symlink() {
|
|||||||
fi
|
fi
|
||||||
}
|
}
|
||||||
|
|
||||||
|
link_fzf_in_path() {
|
||||||
|
if which_fzf="$(command -v fzf)"; then
|
||||||
|
echo " - Found in \$PATH"
|
||||||
|
echo " - Creating symlink: $which_fzf -> bin/fzf"
|
||||||
|
(cd "$fzf_base"/bin && rm -f fzf && ln -sf "$which_fzf" fzf)
|
||||||
|
check_binary && return
|
||||||
|
fi
|
||||||
|
return 1
|
||||||
|
}
|
||||||
|
|
||||||
|
try_curl() {
|
||||||
|
command -v curl > /dev/null && curl -fL $1 | tar -xz
|
||||||
|
}
|
||||||
|
|
||||||
|
try_wget() {
|
||||||
|
command -v wget > /dev/null && wget -O - $1 | tar -xz
|
||||||
|
}
|
||||||
|
|
||||||
download() {
|
download() {
|
||||||
echo "Downloading bin/fzf ..."
|
echo "Downloading bin/fzf ..."
|
||||||
if [[ ! $1 =~ dev && -x "$fzf_base"/bin/fzf ]]; then
|
if [ $pre = 0 ]; then
|
||||||
|
if [ -x "$fzf_base"/bin/fzf ]; then
|
||||||
echo " - Already exists"
|
echo " - Already exists"
|
||||||
check_binary && return
|
check_binary && return
|
||||||
elif [ -x "$fzf_base"/bin/$1 ]; then
|
fi
|
||||||
|
if [ -x "$fzf_base"/bin/$1 ]; then
|
||||||
symlink $1 && check_binary && return
|
symlink $1 && check_binary && return
|
||||||
fi
|
fi
|
||||||
|
link_fzf_in_path && return
|
||||||
|
fi
|
||||||
mkdir -p "$fzf_base"/bin && cd "$fzf_base"/bin
|
mkdir -p "$fzf_base"/bin && cd "$fzf_base"/bin
|
||||||
if [ $? -ne 0 ]; then
|
if [ $? -ne 0 ]; then
|
||||||
binary_error="Failed to create bin directory"
|
binary_error="Failed to create bin directory"
|
||||||
@@ -58,14 +138,13 @@ download() {
|
|||||||
fi
|
fi
|
||||||
|
|
||||||
local url=https://github.com/junegunn/fzf-bin/releases/download/$version/${1}.tgz
|
local url=https://github.com/junegunn/fzf-bin/releases/download/$version/${1}.tgz
|
||||||
if which curl > /dev/null; then
|
set -o pipefail
|
||||||
curl -fL $url | tar -xz
|
if ! (try_curl $url || try_wget $url); then
|
||||||
elif which wget > /dev/null; then
|
set +o pipefail
|
||||||
wget -O - $url | tar -xz
|
binary_error="Failed to download with curl and wget"
|
||||||
else
|
|
||||||
binary_error="curl or wget not found"
|
|
||||||
return
|
return
|
||||||
fi
|
fi
|
||||||
|
set +o pipefail
|
||||||
|
|
||||||
if [ ! -f $1 ]; then
|
if [ ! -f $1 ]; then
|
||||||
binary_error="Failed to download ${1}"
|
binary_error="Failed to download ${1}"
|
||||||
@@ -80,25 +159,22 @@ archi=$(uname -sm)
 binary_available=1
 binary_error=""
 case "$archi" in
-  Darwin\ x86_64) download fzf-$version-darwin_amd64 ;;
-  Darwin\ i*86)   download fzf-$version-darwin_386   ;;
-  Linux\ x86_64)  download fzf-$version-linux_amd64  ;;
-  Linux\ i*86)    download fzf-$version-linux_386    ;;
+  Darwin\ x86_64) download fzf-$version-darwin_${binary_arch:-amd64} ;;
+  Darwin\ i*86)   download fzf-$version-darwin_${binary_arch:-386}   ;;
+  Linux\ x86_64)  download fzf-$version-linux_${binary_arch:-amd64}  ;;
+  Linux\ i*86)    download fzf-$version-linux_${binary_arch:-386}    ;;
   *)              binary_available=0 binary_error=1  ;;
 esac

-cd "$fzf_base"
-if [ -n "$binary_error" ]; then
-  if [ $binary_available -eq 0 ]; then
-    echo "No prebuilt binary for $archi ... "
-  else
-    echo " - $binary_error !!!"
+install_ruby_fzf() {
+  if [ -z "$allow_legacy" ]; then
+    ask "Do you want to install legacy Ruby version instead?" && exit 1
   fi
   echo "Installing legacy Ruby version ..."

   # ruby executable
   echo -n "Checking Ruby executable ... "
-  ruby=`which ruby`
+  ruby=$(command -v ruby)
   if [ $? -ne 0 ]; then
     echo "ruby executable not found !!!"
     exit 1
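The `${binary_arch:-amd64}` form above lets an externally supplied `binary_arch` value override the architecture suffix while keeping the previous default. A quick illustration of that parameter expansion (the `arm7` value here is made up for the example and not a suffix the hunk defines):

binary_arch=arm7
echo "fzf-0.15.0-linux_${binary_arch:-amd64}"   # -> fzf-0.15.0-linux_arm7
unset binary_arch
echo "fzf-0.15.0-linux_${binary_arch:-amd64}"   # -> fzf-0.15.0-linux_amd64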
@@ -106,7 +182,7 @@ if [ -n "$binary_error" ]; then

   # System ruby is preferred
   system_ruby=/usr/bin/ruby
-  if [ -x $system_ruby -a $system_ruby != "$ruby" ]; then
+  if [ -x $system_ruby ] && [ $system_ruby != "$ruby" ]; then
     $system_ruby --disable-gems -rcurses -e0 2> /dev/null
     [ $? -eq 0 ] && ruby=$system_ruby
   fi
@@ -159,43 +235,75 @@ if [ -n "$binary_error" ]; then
   echo "$fzf_cmd \"\$@\"" >> "$fzf_base"/bin/fzf
   chmod +x "$fzf_base"/bin/fzf
   echo "OK"
+}
+
+cd "$fzf_base"
+if [ -n "$binary_error" ]; then
+  if [ $binary_available -eq 0 ]; then
+    echo "No prebuilt binary for $archi ..."
+  else
+    echo " - $binary_error !!!"
+  fi
+  if command -v go > /dev/null; then
+    echo -n "Building binary (go get -u github.com/junegunn/fzf/src/fzf) ... "
+    if [ -z "${GOPATH-}" ]; then
+      export GOPATH="${TMPDIR:-/tmp}/fzf-gopath"
+      mkdir -p "$GOPATH"
+    fi
+    if go get -u github.com/junegunn/fzf/src/fzf; then
+      echo "OK"
+      cp "$GOPATH/bin/fzf" "$fzf_base/bin/"
+    else
+      echo "Failed to build binary ..."
+      install_ruby_fzf
+    fi
+  else
+    echo "go executable not found. Cannot build binary ..."
+    install_ruby_fzf
+  fi
 fi

 [[ "$*" =~ "--bin" ]] && exit 0

 # Auto-completion
-ask "Do you want to add auto-completion support?"
+if [ -z "$auto_completion" ]; then
+  ask "Do you want to enable fuzzy auto-completion?"
 auto_completion=$?
+fi

 # Key-bindings
-ask "Do you want to add key bindings?"
+if [ -z "$key_bindings" ]; then
+  ask "Do you want to enable key bindings?"
 key_bindings=$?
+fi

 echo
-for shell in bash zsh; do
+has_zsh=$(command -v zsh > /dev/null && echo 1 || echo 0)
+shells=$([ $has_zsh -eq 1 ] && echo "bash zsh" || echo "bash")
+for shell in $shells; do
   echo -n "Generate ~/.fzf.$shell ... "
   src=~/.fzf.${shell}

-  fzf_completion="[[ \$- =~ i ]] && source \"$fzf_base/shell/completion.${shell}\" 2> /dev/null"
-  if [ $auto_completion -ne 0 ]; then
+  fzf_completion="[[ \$- == *i* ]] && source \"$fzf_base/shell/completion.${shell}\" 2> /dev/null"
+  if [ $auto_completion -eq 0 ]; then
     fzf_completion="# $fzf_completion"
   fi

   fzf_key_bindings="source \"$fzf_base/shell/key-bindings.${shell}\""
-  if [ $key_bindings -ne 0 ]; then
+  if [ $key_bindings -eq 0 ]; then
     fzf_key_bindings="# $fzf_key_bindings"
   fi

   cat > $src << EOF
 # Setup fzf
 # ---------
-if [[ ! "\$PATH" =~ "$fzf_base/bin" ]]; then
+if [[ ! "\$PATH" == *$fzf_base/bin* ]]; then
   export PATH="\$PATH:$fzf_base/bin"
 fi

 # Man path
 # --------
-if [[ ! "\$MANPATH" =~ "$fzf_base/man" && -d "$fzf_base/man" ]]; then
+if [[ ! "\$MANPATH" == *$fzf_base/man* && -d "$fzf_base/man" ]]; then
   export MANPATH="\$MANPATH:$fzf_base/man"
 fi

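For reference, the heredoc above produces a small ~/.fzf.bash (or ~/.fzf.zsh) along these lines. This is an abbreviated sketch assuming $fzf_base is ~/.fzf and both prompts were answered yes; the exact ordering of the sourced lines in the generated file is not shown in this hunk:

# Setup fzf
# ---------
if [[ ! "$PATH" == *$HOME/.fzf/bin* ]]; then
  export PATH="$PATH:$HOME/.fzf/bin"
fi

# Man path
# --------
if [[ ! "$MANPATH" == *$HOME/.fzf/man* && -d "$HOME/.fzf/man" ]]; then
  export MANPATH="$MANPATH:$HOME/.fzf/man"
fi

[[ $- == *i* ]] && source "$HOME/.fzf/shell/completion.bash" 2> /dev/null
source "$HOME/.fzf/shell/key-bindings.bash"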
@@ -212,9 +320,8 @@ EOF
 done

 # fish
-has_fish=0
-if [ -n "$(which fish 2> /dev/null)" ]; then
-  has_fish=1
+has_fish=$(command -v fish > /dev/null && echo 1 || echo 0)
+if [ $has_fish -eq 1 ]; then
   echo -n "Update fish_user_paths ... "
   fish << EOF
   echo \$fish_user_paths | grep $fzf_base/bin > /dev/null
@@ -228,50 +335,74 @@ EOF
     rm -f ~/.config/fish/functions/fzf.fish && echo "OK" || echo "Failed"
   fi

-  if [ $key_bindings -eq 0 ]; then
-    echo -n "Symlink ~/.config/fish/functions/fzf_key_bindings.fish ... "
-    ln -sf $fzf_base/shell/key-bindings.fish \
-      ~/.config/fish/functions/fzf_key_bindings.fish && echo "OK" || echo "Failed"
+  fish_binding=~/.config/fish/functions/fzf_key_bindings.fish
+  if [ $key_bindings -ne 0 ]; then
+    echo -n "Symlink $fish_binding ... "
+    ln -sf "$fzf_base/shell/key-bindings.fish" \
+      "$fish_binding" && echo "OK" || echo "Failed"
+  else
+    echo -n "Removing $fish_binding ... "
+    rm -f "$fish_binding"
+    echo "OK"
   fi
 fi

 append_line() {
-  echo "Update $2:"
-  echo " - $1"
-  [ -f "$2" ] || touch "$2"
-  if [ $# -lt 3 ]; then
-    line=$(\grep -nF "$1" "$2" | sed 's/:.*//' | tr '\n' ' ')
+  set -e
+
+  local update line file pat lno
+  update="$1"
+  line="$2"
+  file="$3"
+  pat="${4:-}"
+
+  echo "Update $file:"
+  echo " - $line"
+  [ -f "$file" ] || touch "$file"
+  if [ $# -lt 4 ]; then
+    lno=$(\grep -nF "$line" "$file" | sed 's/:.*//' | tr '\n' ' ')
   else
-    line=$(\grep -nF "$3" "$2" | sed 's/:.*//' | tr '\n' ' ')
+    lno=$(\grep -nF "$pat" "$file" | sed 's/:.*//' | tr '\n' ' ')
   fi
-  if [ -n "$line" ]; then
-    echo " - Already exists: line #$line"
+  if [ -n "$lno" ]; then
+    echo " - Already exists: line #$lno"
   else
-    echo "$1" >> "$2"
+    if [ $update -eq 1 ]; then
+      echo >> "$file"
+      echo "$line" >> "$file"
       echo " + Added"
+    else
+      echo " ~ Skipped"
+    fi
   fi
   echo
+  set +e
 }

+if [ $update_config -eq 2 ]; then
 echo
-for shell in bash zsh; do
-  append_line "[ -f ~/.fzf.${shell} ] && source ~/.fzf.${shell}" ~/.${shell}rc "~/.fzf.${shell}"
+  ask "Do you want to update your shell configuration files?"
+  update_config=$?
+fi
+echo
+for shell in $shells; do
+  [ $shell = zsh ] && dest=${ZDOTDIR:-~}/.zshrc || dest=~/.bashrc
+  append_line $update_config "[ -f ~/.fzf.${shell} ] && source ~/.fzf.${shell}" "$dest" "~/.fzf.${shell}"
 done

-if [ $key_bindings -eq 0 -a $has_fish -eq 1 ]; then
+if [ $key_bindings -eq 1 ] && [ $has_fish -eq 1 ]; then
   bind_file=~/.config/fish/functions/fish_user_key_bindings.fish
-  append_line "fzf_key_bindings" "$bind_file"
+  append_line $update_config "fzf_key_bindings" "$bind_file"
 fi

-cat << EOF
-Finished. Restart your shell or reload config file.
-   source ~/.bashrc  # bash
-   source ~/.zshrc   # zsh
-EOF
-[ $has_fish -eq 1 ] && echo "   fzf_key_bindings  # fish"; cat << EOF
-
-Use uninstall script to remove fzf.
-
-For more information, see: https://github.com/junegunn/fzf
-EOF
+if [ $update_config -eq 1 ]; then
+  echo 'Finished. Restart your shell or reload config file.'
+  echo '   source ~/.bashrc  # bash'
+  [ $has_zsh -eq 1 ] && echo "   source ${ZDOTDIR:-~}/.zshrc   # zsh"
+  [ $has_fish -eq 1 ] && [ $key_bindings -eq 1 ] && echo '   fzf_key_bindings  # fish'
+  echo
+  echo 'Use uninstall script to remove fzf.'
+  echo
+fi
+echo 'For more information, see: https://github.com/junegunn/fzf'
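A usage note on the reworked append_line above: it now takes an update flag, the line to add, the target file, and an optional pattern to search for, so the installer can report instead of write when the user declined the change. For instance (these calls assume the function defined above has been sourced):

append_line 1 "[ -f ~/.fzf.bash ] && source ~/.fzf.bash" ~/.bashrc "~/.fzf.bash"   # appends the line if the pattern is missing
append_line 0 "[ -f ~/.fzf.bash ] && source ~/.fzf.bash" ~/.bashrc "~/.fzf.bash"   # only reports " ~ Skipped"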
man/man1/fzf-tmux.1 (new file, 54 lines)
@@ -0,0 +1,54 @@
+.ig
+The MIT License (MIT)
+
+Copyright (c) 2016 Junegunn Choi
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+..
+.TH fzf-tmux 1 "Sep 2016" "fzf 0.15.0" "fzf-tmux - open fzf in tmux split pane"
+
+.SH NAME
+fzf-tmux - open fzf in tmux split pane
+
+.SH SYNOPSIS
+.B fzf-tmux [-u|-d [HEIGHT[%]]] [-l|-r [WIDTH[%]]] [--] [FZF OPTIONS]
+
+.SH DESCRIPTION
+fzf-tmux is a wrapper script for fzf that opens fzf in a tmux split pane. It is
+designed to work just like fzf except that it does not take up the whole
+screen. You can safely use fzf-tmux instead of fzf in your scripts as the extra
+options will be silently ignored if you're not on tmux.
+
+.SH OPTIONS
+.SS Layout
+
+(default: \fB-d 50%\fR)
+
+.TP
+.B "-u [height[%]]"
+Split above (up)
+.TP
+.B "-d [height[%]]"
+Split below (down)
+.TP
+.B "-l [width[%]]"
+Split left
+.TP
+.B "-r [width[%]]"
+Split right
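Per the synopsis above, the layout flags go before `--` and everything after it is passed to fzf unchanged; outside tmux those flags are simply ignored. A typical invocation:

# 40%-wide pane on the right; --multi and --reverse are plain fzf options
find . -type f | fzf-tmux -r 40% -- --multi --reverse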
man/man1/fzf.1 (392 changed lines)
@@ -1,7 +1,7 @@
 .ig
 The MIT License (MIT)

-Copyright (c) 2015 Junegunn Choi
+Copyright (c) 2016 Junegunn Choi

 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
@@ -21,7 +21,7 @@ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 THE SOFTWARE.
 ..
-.TH fzf 1 "June 2015" "fzf 0.9.13" "fzf - a command-line fuzzy finder"
+.TH fzf 1 "Sep 2016" "fzf 0.15.0" "fzf - a command-line fuzzy finder"

 .SH NAME
 fzf - a command-line fuzzy finder
@@ -36,26 +36,37 @@ fzf is a general-purpose command-line fuzzy finder.
 .SS Search mode
 .TP
 .B "-x, --extended"
-Extended-search mode
+Extended-search mode. Since 0.10.9, this is enabled by default. You can disable
+it with \fB+x\fR or \fB--no-extended\fR.
 .TP
-.B "-e, --extended-exact"
-Extended-search mode (exact match)
+.B "-e, --exact"
+Enable exact-match
 .TP
 .B "-i"
 Case-insensitive match (default: smart-case match)
 .TP
 .B "+i"
 Case-sensitive match
+.TP
+.BI "--algo=" TYPE
+Fuzzy matching algorithm (default: v2)
+
+.br
+.BR v2 " Optimal scoring algorithm (quality)"
+.br
+.BR v1 " Faster but not guaranteed to find the optimal result (performance)"
+.br
+
 .TP
 .BI "-n, --nth=" "N[,..]"
 Comma-separated list of field index expressions for limiting search scope.
-See \fBFIELD INDEX EXPRESSION\fR for details.
+See \fBFIELD INDEX EXPRESSION\fR for the details.
 .TP
 .BI "--with-nth=" "N[,..]"
-Transform the item using the list of index expressions for search
+Transform the presentation of each line using field index expressions
 .TP
 .BI "-d, --delimiter=" "STR"
-Field delimiter regex for \fI--nth\fR and \fI--with-nth\fR (default: AWK-style)
+Field delimiter regex for \fB--nth\fR and \fB--with-nth\fR (default: AWK-style)
 .SS Search result
 .TP
 .B "+s, --no-sort"
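A couple of shell-level examples of the search-mode options documented above (the candidates file path is just a placeholder):

# Match only against the command column (fields 8 and beyond) of ps output
ps -ef | fzf --nth=8..

# Exact (non-fuzzy) matching with the faster v1 algorithm
fzf --exact --algo=v1 < /tmp/candidates.txt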
@@ -63,33 +74,113 @@ Do not sort the result
 .TP
 .B "--tac"
 Reverse the order of the input

 .RS
 e.g. \fBhistory | fzf --tac --no-sort\fR
 .RE
 .TP
-.BI "--tiebreak=" "CRI"
-Sort criterion to use when the scores are tied
+.BI "--tiebreak=" "CRI[,..]"
+Comma-separated list of sort criteria to apply when the scores are tied.
 .br
-.R ""
+
 .br
-.BR length " Prefers item with shorter length"
+.BR length " Prefers line with shorter length"
 .br
-.BR begin " Prefers item with matched substring closer to the beginning"
+.BR begin " Prefers line with matched substring closer to the beginning"
 .br
-.BR end " Prefers item with matched substring closer to the end"
+.BR end " Prefers line with matched substring closer to the end"
 .br
-.BR index " Prefers item that appeared earlier in the input stream"
+.BR index " Prefers line that appeared earlier in the input stream"
 .br

+.br
+- Each criterion should appear only once in the list
+.br
+- \fBindex\fR is only allowed at the end of the list
+.br
+- \fBindex\fR is implicitly appended to the list when not specified
+.br
+- Default is \fBlength\fR (or equivalently \fBlength\fR,index)
+.br
+- If \fBend\fR is found in the list, fzf will scan each line backwards
 .SS Interface
 .TP
 .B "-m, --multi"
 Enable multi-select with tab/shift-tab
 .TP
+.B "--no-mouse"
+Disable mouse
+.TP
+.BI "--bind=" "KEYBINDS"
+Comma-separated list of custom key bindings. See \fBKEY BINDINGS\fR for the
+details.
+.TP
+.B "--cycle"
+Enable cyclic scroll
+.TP
+.B "--no-hscroll"
+Disable horizontal scroll
+.TP
+.BI "--hscroll-off=" "COL"
+Number of screen columns to keep to the right of the highlighted substring
+(default: 10). Setting it to a large value will cause the text to be positioned
+on the center of the screen.
+.TP
+.BI "--jump-labels=" "CHARS"
+Label characters for \fBjump\fR and \fBjump-accept\fR
+.SS Layout
+.TP
+.B "--reverse"
+Reverse orientation
+.TP
+.BI "--margin=" MARGIN
+Comma-separated expression for margins around the finder.
+.br
+
+.br
+.RS
+.BR TRBL " Same margin for top, right, bottom, and left"
+.br
+.BR TB,RL " Vertical, horizontal margin"
+.br
+.BR T,RL,B " Top, horizontal, bottom margin"
+.br
+.BR T,R,B,L " Top, right, bottom, left margin"
+.br
+
+.br
+Each part can be given in absolute number or in percentage relative to the
+terminal size with \fB%\fR suffix.
+.br
+
+.br
+e.g. \fBfzf --margin 10%\fR
+\fBfzf --margin 1,5%\fR
+.RE
+.TP
+.B "--inline-info"
+Display finder info inline with the query
+.TP
+.BI "--prompt=" "STR"
+Input prompt (default: '> ')
+.TP
+.BI "--header=" "STR"
+The given string will be printed as the sticky header. The lines are displayed
+in the given order from top to bottom regardless of \fB--reverse\fR option, and
+are not affected by \fB--with-nth\fR. ANSI color codes are processed even when
+\fB--ansi\fR is not set.
+.TP
+.BI "--header-lines=" "N"
+The first N lines of the input are treated as the sticky header. When
+\fB--with-nth\fR is set, the lines are transformed just like the other
+lines that follow.
+.SS Display
+.TP
 .B "--ansi"
 Enable processing of ANSI color codes
 .TP
-.B "--no-mouse"
-Disable mouse
+.BI "--tabstop=" SPACES
+Number of spaces for a tab character (default: 8)
 .TP
 .BI "--color=" "[BASE_SCHEME][,COLOR:ANSI]"
 Color configuration. The name of the base color scheme is followed by custom
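A short illustration of the tie-break and layout options described above:

# Break score ties by match position first, then by line length
# (index is appended implicitly as the final criterion)
fzf --tiebreak=begin,length

# Reversed layout with a vertical/horizontal margin, inline finder info, and cyclic scroll
fzf --reverse --margin 1,5% --inline-info --cycle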
@@ -122,66 +213,48 @@ e.g. \fBfzf --color=bg+:24\fR
 \fBpointer \fRPointer to the current line
 \fBmarker \fRMulti-select marker
 \fBspinner \fRStreaming input indicator
+\fBheader \fRHeader
 .RE
 .TP
 .B "--black"
 Use black background
+.SS History
 .TP
-.B "--reverse"
-Reverse orientation
+.BI "--history=" "HISTORY_FILE"
+Load search history from the specified file and update the file on completion.
+When enabled, \fBCTRL-N\fR and \fBCTRL-P\fR are automatically remapped to
+\fBnext-history\fR and \fBprevious-history\fR.
 .TP
-.B "--no-hscroll"
-Disable horizontal scroll
+.BI "--history-size=" "N"
+Maximum number of entries in the history file (default: 1000). The file is
+automatically truncated when the number of the lines exceeds the value.
+.SS Preview
 .TP
-.B "--inline-info"
-Display finder info inline with the query
-.TP
-.BI "--prompt=" "STR"
-Input prompt (default: '> ')
-.TP
-.BI "--toggle-sort=" "KEY"
-Key to toggle sort (\fIctrl-[a-z]\fR, \fIalt-[a-z]\fR, \fIf[1-4]\fR,
-or any single character)
-.TP
-.BI "--bind=" "KEYBINDS"
-Comma-separated list of custom key bindings. Each key binding expression
-follows the following format: \fBKEY:ACTION\fR
+.BI "--preview=" "COMMAND"
+Execute the given command for the current line and display the result on the
+preview window. \fB{}\fR is the placeholder for the quoted string of the
+current line.
 .RS
-e.g. \fBfzf --bind=ctrl-j:accept,ctrl-k:kill-line\fR
+e.g. \fBfzf --preview="head -$LINES {}"\fR
+.RE
+.TP
+.BI "--preview-window=" "[POSITION][:SIZE[%]][:hidden]"
+Determine the layout of the preview window. If the argument ends with
+\fB:hidden\fR, the preview window will be hidden by default until
+\fBtoggle-preview\fR action is triggered.
+
+.RS
+.B POSITION: (default: right)
+\fBup
+\fBdown
+\fBleft
+\fBright
 .RE

 .RS
-.B KEY:
-\fIctrl-[a-z]\fR, \fIalt-[a-z]\fR, \fIf[1-4]\fR, or any single character
-.RE
-
-.RS
-.B ACTION:
-abort
-accept
-backward-char
-backward-delete-char
-backward-kill-word
-backward-word
-beginning-of-line
-clear-screen
-delete-char
-down
-end-of-line
-forward-char
-forward-word
-kill-line (not bound)
-kill-word
-page-down
-page-up
-toggle (not bound)
-toggle-down
-toggle-sort (not bound; equivalent to \fB--toggle-sort\fR)
-toggle-up
-unix-line-discard
-unix-word-rubout
-up
-yank
+e.g. \fBfzf --preview="head {}" --preview-window=up:30%\fR
+\fBfzf --preview="file {}" --preview-window=down:1\fR
 .RE
 .SS Scripting
 .TP
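The preview and history options above combine naturally; a hedged sketch (the history file path and the choice of `?` as the toggle key are arbitrary):

fzf --preview 'head -100 {}' \
    --preview-window right:50%:hidden \
    --bind '?:toggle-preview' \
    --history ~/.fzf_history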
@@ -202,40 +275,51 @@ fzf becomes a fuzzy-version of grep.
 Print query as the first line
 .TP
 .BI "--expect=" "KEY[,..]"
-Comma-separated list of keys (\fIctrl-[a-z]\fR, \fIalt-[a-z]\fR, \fIf[1-4]\fR,
-or any single character) that can be used to complete fzf in addition to the
-default enter key. When this option is set, fzf will print the name of the key
-pressed as the first line of its output (or as the second line if
+Comma-separated list of keys that can be used to complete fzf in addition to
+the default enter key. When this option is set, fzf will print the name of the
+key pressed as the first line of its output (or as the second line if
 \fB--print-query\fR is also used). The line will be empty if fzf is completed
 with the default enter key.

 .RS
 e.g. \fBfzf --expect=ctrl-v,ctrl-t,alt-s,f1,f2,~,@\fR
 .RE
 .TP
+.B "--read0"
+Read input delimited by ASCII NUL character instead of newline character
+.TP
+.B "--print0"
+Print output delimited by ASCII NUL character instead of newline character
+.TP
 .B "--sync"
 Synchronous search for multi-staged filtering. If specified, fzf will launch
 ncurses finder only after the input stream is complete.

 .RS
 e.g. \fBfzf --multi | fzf --sync\fR
 .RE

-.SH ENVIRONMENT
+.SH ENVIRONMENT VARIABLES
 .TP
 .B FZF_DEFAULT_COMMAND
 Default command to use when input is tty
 .TP
 .B FZF_DEFAULT_OPTS
-Default options. e.g. \fB--extended --ansi\fR
+Default options. e.g. \fBexport FZF_DEFAULT_OPTS="--extended --cycle"\fR

 .SH EXIT STATUS
 .BR 0 " Normal exit"
 .br
-.BR 1 " Interrupted with \fBCTRL-C\fR or \fBESC\fR"
+.BR 1 " No match"
+.br
+.BR 2 " Error"
+.br
+.BR 130 " Interrupted with \fBCTRL-C\fR or \fBESC\fR"

 .SH FIELD INDEX EXPRESSION

 A field index expression can be a non-zero integer or a range expression
-([BEGIN]..[END]). \fI--nth\fR and \fI--with-nth\fR take a comma-separated list
+([BEGIN]..[END]). \fB--nth\fR and \fB--with-nth\fR take a comma-separated list
 of field index expressions.

 .SS Examples
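How the --expect/--print-query output is typically consumed from a script, following the line order described above (query, then key, then selection); the input file is a placeholder:

out=$(fzf --print-query --expect=ctrl-v,ctrl-t < /tmp/candidates.txt)
query=$(sed -n 1p <<< "$out")      # first line: the query
key=$(sed -n 2p <<< "$out")        # second line: key pressed, empty for plain enter
selection=$(sed -n 3p <<< "$out")  # remaining line(s): the selection
printf 'query=%s key=%s selection=%s\n' "$query" "$key" "$selection"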
@@ -258,34 +342,164 @@ of field index expressions.

 .SH EXTENDED SEARCH MODE

-With \fI-x\fR or \fI--extended\fR option, fzf will start in "extended-search
-mode". In this mode, you can specify multiple patterns delimited by spaces,
-such as: \fB'wild ^music .mp3$ sbtrkt !rmx\fR
+Unless specified otherwise, fzf will start in "extended-search mode". In this
+mode, you can specify multiple patterns delimited by spaces, such as: \fB'wild
+^music .mp3$ sbtrkt !rmx\fR

 .SS Exact-match (quoted)
-A term that is prefixed by a single-quote character (') is interpreted as an
-"exact-match" (or "non-fuzzy") term. fzf will search for the exact occurrences
-of the string.
+A term that is prefixed by a single-quote character (\fB'\fR) is interpreted as
+an "exact-match" (or "non-fuzzy") term. fzf will search for the exact
+occurrences of the string.

 .SS Anchored-match
-A term can be prefixed by ^, or suffixed by $ to become an anchored-match term.
-Then fzf will search for the items that start with or end with the given
-string. An anchored-match term is also an exact-match term.
+A term can be prefixed by \fB^\fR, or suffixed by \fB$\fR to become an
+anchored-match term. Then fzf will search for the lines that start with or end
+with the given string. An anchored-match term is also an exact-match term.

 .SS Negation
-If a term is prefixed by !, fzf will exclude the items that satisfy the term
-from the result.
+If a term is prefixed by \fB!\fR, fzf will exclude the lines that satisfy the
+term from the result.

-.SS Extended-exact mode
-If you don't need fuzzy matching at all and do not wish to "quote" (prefixing
-with ') every word, start fzf with \fI-e\fR or \fI--extended-exact\fR option
-(instead of \fI-x\fR or \fI--extended\fR).
+.SS Exact-match by default
+If you don't prefer fuzzy matching and do not wish to "quote" (prefixing with
+\fB'\fR) every word, start fzf with \fB-e\fR or \fB--exact\fR option. Note that
+when \fB--exact\fR is set, \fB'\fR-prefix "unquotes" the term.
+
+.SS OR operator
+A single bar character term acts as an OR operator. For example, the following
+query matches entries that start with \fBcore\fR and end with either \fBgo\fR,
+\fBrb\fR, or \fBpy\fR.
+
+e.g. \fB^core go$ | rb$ | py$\fR
+
+.SH KEY BINDINGS
+You can customize key bindings of fzf with \fB--bind\fR option which takes
+a comma-separated list of key binding expressions. Each key binding expression
+follows the following format: \fBKEY:ACTION\fR
+
+e.g. \fBfzf --bind=ctrl-j:accept,ctrl-k:kill-line\fR
+
+.B AVAILABLE KEYS: (SYNONYMS)
+\fIctrl-[a-z]\fR
+\fIalt-[a-z]\fR
+\fIf[1-10]\fR
+\fIenter\fR (\fIreturn\fR \fIctrl-m\fR)
+\fIspace\fR
+\fIbspace\fR (\fIbs\fR)
+\fIalt-enter\fR
+\fIalt-space\fR
+\fIalt-bspace\fR (\fIalt-bs\fR)
+\fIalt-/\fR
+\fItab\fR
+\fIbtab\fR (\fIshift-tab\fR)
+\fIesc\fR
+\fIdel\fR
+\fIup\fR
+\fIdown\fR
+\fIleft\fR
+\fIright\fR
+\fIhome\fR
+\fIend\fR
+\fIpgup\fR (\fIpage-up\fR)
+\fIpgdn\fR (\fIpage-down\fR)
+\fIshift-left\fR
+\fIshift-right\fR
+\fIdouble-click\fR
+or any single character
+
+\fBACTION: DEFAULT BINDINGS (NOTES):
+\fBabort\fR \fIctrl-c ctrl-g ctrl-q esc\fR
+\fBaccept\fR \fIenter double-click\fR
+\fBbackward-char\fR \fIctrl-b left\fR
+\fBbackward-delete-char\fR \fIctrl-h bspace\fR
+\fBbackward-kill-word\fR \fIalt-bs\fR
+\fBbackward-word\fR \fIalt-b shift-left\fR
+\fBbeginning-of-line\fR \fIctrl-a home\fR
+\fBcancel\fR
+\fBclear-screen\fR \fIctrl-l\fR
+\fBdelete-char\fR \fIdel\fR
+\fBdelete-char/eof\fR \fIctrl-d\fR
+\fBdeselect-all\fR
+\fBdown\fR \fIctrl-j ctrl-n down\fR
+\fBend-of-line\fR \fIctrl-e end\fR
+\fBexecute(...)\fR (see below for the details)
+\fBexecute-multi(...)\fR (see below for the details)
+\fBforward-char\fR \fIctrl-f right\fR
+\fBforward-word\fR \fIalt-f shift-right\fR
+\fBignore\fR
+\fBjump\fR (EasyMotion-like 2-keystroke movement)
+\fBjump-accept\fR (jump and accept)
+\fBkill-line\fR
+\fBkill-word\fR \fIalt-d\fR
+\fBnext-history\fR (\fIctrl-n\fR on \fB--history\fR)
+\fBpage-down\fR \fIpgdn\fR
+\fBpage-up\fR \fIpgup\fR
+\fBprevious-history\fR (\fIctrl-p\fR on \fB--history\fR)
+\fBprint-query\fR (print query and exit)
+\fBselect-all\fR
+\fBtoggle\fR
+\fBtoggle-all\fR
+\fBtoggle-down\fR \fIctrl-i (tab)\fR
+\fBtoggle-in\fR (\fB--reverse\fR ? \fBtoggle-up\fR : \fBtoggle-down\fR)
+\fBtoggle-out\fR (\fB--reverse\fR ? \fBtoggle-down\fR : \fBtoggle-up\fR)
+\fBtoggle-preview\fR
+\fBtoggle-sort\fR (equivalent to \fB--toggle-sort\fR)
+\fBtoggle-up\fR \fIbtab (shift-tab)\fR
+\fBunix-line-discard\fR \fIctrl-u\fR
+\fBunix-word-rubout\fR \fIctrl-w\fR
+\fBup\fR \fIctrl-k ctrl-p up\fR
+\fByank\fR \fIctrl-y\fR
+
+With \fBexecute(...)\fR action, you can execute arbitrary commands without
+leaving fzf. For example, you can turn fzf into a simple file browser by
+binding \fBenter\fR key to \fBless\fR command like follows.
+
+\fBfzf --bind "enter:execute(less {})"\fR
+
+\fB{}\fR is the placeholder for the quoted string of the current line.
+If the command contains parentheses, you can use any of the following
+alternative notations to avoid parse errors.
+
+\fBexecute[...]\fR
+\fBexecute~...~\fR
+\fBexecute!...!\fR
+\fBexecute@...@\fR
+\fBexecute#...#\fR
+\fBexecute$...$\fR
+\fBexecute%...%\fR
+\fBexecute^...^\fR
+\fBexecute&...&\fR
+\fBexecute*...*\fR
+\fBexecute;...;\fR
+\fBexecute/.../\fR
+\fBexecute|...|\fR
+\fBexecute:...\fR
+.RS
+This is the special form that frees you from parse errors as it does not expect
+the closing character. The catch is that it should be the last one in the
+comma-separated list.
+.RE
+
+\fBexecute-multi(...)\fR is an alternative action that executes the command
+with the selected entries when multi-select is enabled (\fB--multi\fR). With
+this action, \fB{}\fR is replaced with the quoted strings of the selected
+entries separated by spaces.

 .SH AUTHOR
 Junegunn Choi (\fIjunegunn.c@gmail.com\fR)

 .SH SEE ALSO
+.B Project homepage:
+.RS
 .I https://github.com/junegunn/fzf
+.RE
+.br
+
+.br
+.B Extra Vim plugin:
+.RS
+.I https://github.com/junegunn/fzf.vim
+.RE

 .SH LICENSE
 MIT
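Building on the execute(...) description above, a small example that turns fzf into a file browser; the choice of ctrl-o for the multi-selection variant is arbitrary:

fzf --multi \
    --bind 'enter:execute(less {})' \
    --bind 'ctrl-o:execute-multi(ls -l {})'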
plugin/fzf.vim (412 changed lines)
@@ -1,4 +1,4 @@
-" Copyright (c) 2015 Junegunn Choi
+" Copyright (c) 2016 Junegunn Choi
 "
 " MIT License
 "
@@ -21,7 +21,8 @@
 " OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 " WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

-let s:default_height = '40%'
+let s:default_layout = { 'down': '~40%' }
+let s:layout_keys = ['window', 'up', 'down', 'left', 'right']
 let s:fzf_go = expand('<sfile>:h:h').'/bin/fzf'
 let s:install = expand('<sfile>:h:h').'/install'
 let s:installed = 0
@@ -40,9 +41,7 @@ function! s:fzf_exec()
       \ input('fzf executable not found. Download binary? (y/n) ') =~? '^y'
     redraw
     echo
-    echohl WarningMsg
-    echo 'Downloading fzf binary. Please wait ...'
-    echohl None
+    call s:warn('Downloading fzf binary. Please wait ...')
     let s:installed = 1
     call system(s:install.' --bin')
     return s:fzf_exec()
@@ -51,7 +50,7 @@ function! s:fzf_exec()
       throw 'fzf executable not found'
     endif
   endif
-  return s:exec
+  return s:shellesc(s:exec)
 endfunction

 function! s:tmux_enabled()
@@ -76,7 +75,7 @@ function! s:shellesc(arg)
 endfunction

 function! s:escape(path)
-  return escape(a:path, ' %#\')
+  return escape(a:path, ' $%#''"\')
 endfunction

 " Upgrade legacy options
@@ -94,14 +93,127 @@ function! s:upgrade(dict)
   return copy
 endfunction

+function! s:error(msg)
+  echohl ErrorMsg
+  echom a:msg
+  echohl None
+endfunction
+
+function! s:warn(msg)
+  echohl WarningMsg
+  echom a:msg
+  echohl None
+endfunction
+
+function! s:has_any(dict, keys)
+  for key in a:keys
+    if has_key(a:dict, key)
+      return 1
+    endif
+  endfor
+  return 0
+endfunction
+
+function! s:open(cmd, target)
+  if stridx('edit', a:cmd) == 0 && fnamemodify(a:target, ':p') ==# expand('%:p')
+    return
+  endif
+  execute a:cmd s:escape(a:target)
+endfunction
+
+function! s:common_sink(action, lines) abort
+  if len(a:lines) < 2
+    return
+  endif
+  let key = remove(a:lines, 0)
+  let cmd = get(a:action, key, 'e')
+  if len(a:lines) > 1
+    augroup fzf_swap
+      autocmd SwapExists * let v:swapchoice='o'
+            \| call s:warn('fzf: E325: swap file exists: '.expand('<afile>'))
+    augroup END
+  endif
+  try
+    let empty = empty(expand('%')) && line('$') == 1 && empty(getline(1)) && !&modified
+    let autochdir = &autochdir
+    set noautochdir
+    for item in a:lines
+      if empty
+        execute 'e' s:escape(item)
+        let empty = 0
+      else
+        call s:open(cmd, item)
+      endif
+      if exists('#BufEnter') && isdirectory(item)
+        doautocmd BufEnter
+      endif
+    endfor
+  finally
+    let &autochdir = autochdir
+    silent! autocmd! fzf_swap
+  endtry
+endfunction
+
+" [name string,] [opts dict,] [fullscreen boolean]
+function! fzf#wrap(...)
+  let args = ['', {}, 0]
+  let expects = map(copy(args), 'type(v:val)')
+  let tidx = 0
+  for arg in copy(a:000)
+    let tidx = index(expects, type(arg), tidx)
+    if tidx < 0
+      throw 'invalid arguments (expected: [name string] [opts dict] [fullscreen boolean])'
+    endif
+    let args[tidx] = arg
+    let tidx += 1
+    unlet arg
+  endfor
+  let [name, opts, bang] = args
+
+  " Layout: g:fzf_layout (and deprecated g:fzf_height)
+  if bang
+    for key in s:layout_keys
+      if has_key(opts, key)
+        call remove(opts, key)
+      endif
+    endfor
+  elseif !s:has_any(opts, s:layout_keys)
+    if !exists('g:fzf_layout') && exists('g:fzf_height')
+      let opts.down = g:fzf_height
+    else
+      let opts = extend(opts, get(g:, 'fzf_layout', s:default_layout))
+    endif
+  endif
+
+  " History: g:fzf_history_dir
+  let opts.options = get(opts, 'options', '')
+  if len(name) && len(get(g:, 'fzf_history_dir', ''))
+    let dir = expand(g:fzf_history_dir)
+    if !isdirectory(dir)
+      call mkdir(dir, 'p')
+    endif
+    let opts.options = join(['--history', s:escape(dir.'/'.name), opts.options])
+  endif
+
+  " Action: g:fzf_action
+  if !s:has_any(opts, ['sink', 'sink*'])
+    let opts._action = get(g:, 'fzf_action', s:default_action)
+    let opts.options .= ' --expect='.join(keys(opts._action), ',')
+    function! opts.sink(lines) abort
+      return s:common_sink(self._action, a:lines)
+    endfunction
+    let opts['sink*'] = remove(opts, 'sink')
+  endif
+
+  return opts
+endfunction
+
 function! fzf#run(...) abort
 try
   let oshell = &shell
   set shell=sh
-  if has('nvim') && bufexists('[FZF]')
-    echohl WarningMsg
-    echomsg 'FZF is already running!'
-    echohl None
+  if has('nvim') && len(filter(range(1, bufnr('$')), 'bufname(v:val) =~# ";#FZF"'))
+    call s:warn('FZF is already running!')
     return []
   endif
   let dict = exists('a:1') ? s:upgrade(a:1) : {}
@@ -113,6 +225,12 @@ try
     throw v:exception
   endtry

+  if !has_key(dict, 'source') && !empty($FZF_DEFAULT_COMMAND)
+    let temps.source = tempname()
+    call writefile(split($FZF_DEFAULT_COMMAND, "\n"), temps.source)
+    let dict.source = (empty($SHELL) ? 'sh' : $SHELL) . ' ' . s:shellesc(temps.source)
+  endif
+
   if has_key(dict, 'source')
     let source = dict.source
     let type = type(source)
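The hunk above makes the Vim plugin fall back to $FZF_DEFAULT_COMMAND when no 'source' is given, matching the variable documented in the man page earlier in this compare. Setting it is done in the shell startup file, for example (the find expression is only an illustration):

# In ~/.bashrc or ~/.zshrc
export FZF_DEFAULT_COMMAND='find . -name .git -prune -o -type f -print'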
@@ -123,25 +241,21 @@ try
       call writefile(source, temps.input)
       let prefix = 'cat '.s:shellesc(temps.input).'|'
     else
-      throw 'Invalid source type'
+      throw 'invalid source type'
     endif
   else
     let prefix = ''
   endif
-  let tmux = !has('nvim') && s:tmux_enabled() && s:splittable(dict)
+  let tmux = (!has('nvim') || get(g:, 'fzf_prefer_tmux', 0)) && s:tmux_enabled() && s:splittable(dict)
   let command = prefix.(tmux ? s:fzf_tmux(dict) : fzf_exec).' '.optstr.' > '.temps.result

-  try
-    if tmux
-      return s:execute_tmux(dict, command, temps)
-    elseif has('nvim')
-      return s:execute_term(dict, command, temps)
-    else
-      return s:execute(dict, command, temps)
-    endif
-  finally
-    call s:popd(dict)
-  endtry
+  if has('nvim') && !tmux
+    return s:execute_term(dict, command, temps)
+  endif
+
+  let lines = tmux ? s:execute_tmux(dict, command, temps) : s:execute(dict, command, temps)
+  call s:callback(dict, lines)
+  return lines
 finally
   let &shell = oshell
 endtry
@@ -160,12 +274,18 @@ function! s:fzf_tmux(dict)
   let size = ''
   for o in ['up', 'down', 'left', 'right']
     if s:present(a:dict, o)
-      let size = '-'.o[0].(a:dict[o] == 1 ? '' : a:dict[o])
+      let spec = a:dict[o]
+      if (o == 'up' || o == 'down') && spec[0] == '~'
+        let size = '-'.o[0].s:calc_size(&lines, spec, a:dict)
+      else
+        " Legacy boolean option
+        let size = '-'.o[0].(spec == 1 ? '' : substitute(spec, '^\~', '', ''))
+      endif
       break
     endif
   endfor
   return printf('LINES=%d COLUMNS=%d %s %s %s --',
-    \ &lines, &columns, s:fzf_tmux, size, (has_key(a:dict, 'source') ? '' : '-'))
+    \ &lines, &columns, s:shellesc(s:fzf_tmux), size, (has_key(a:dict, 'source') ? '' : '-'))
 endfunction

 function! s:splittable(dict)
@@ -179,23 +299,30 @@ function! s:pushd(dict)
       return 1
     endif
     let a:dict.prev_dir = cwd
-    execute 'chdir '.s:escape(a:dict.dir)
+    execute 'lcd' s:escape(a:dict.dir)
     let a:dict.dir = getcwd()
     return 1
   endif
   return 0
 endfunction

-function! s:popd(dict)
-  if has_key(a:dict, 'prev_dir')
-    execute 'chdir '.s:escape(remove(a:dict, 'prev_dir'))
+augroup fzf_popd
+  autocmd!
+  autocmd WinEnter * call s:dopopd()
+augroup END
+
+function! s:dopopd()
+  if !exists('w:fzf_prev_dir') || exists('*haslocaldir') && !haslocaldir()
+    return
   endif
+  execute 'lcd' s:escape(w:fzf_prev_dir)
+  unlet w:fzf_prev_dir
 endfunction

 function! s:xterm_launcher()
   let fmt = 'xterm -T "[fzf]" -bg "\%s" -fg "\%s" -geometry %dx%d+%d+%d -e bash -ic %%s'
   if has('gui_macvim')
-    let fmt .= '; osascript -e "tell application \"MacVim\" to activate"'
+    let fmt .= '&& osascript -e "tell application \"MacVim\" to activate"'
   endif
   return printf(fmt,
     \ synIDattr(hlID("Normal"), "bg"), synIDattr(hlID("Normal"), "fg"),
@@ -204,31 +331,37 @@ endfunction
 unlet! s:launcher
 let s:launcher = function('s:xterm_launcher')

-function! s:execute(dict, command, temps)
+function! s:exit_handler(code, command, ...)
+  if a:code == 130
+    return 0
+  elseif a:code > 1
+    call s:error('Error running ' . a:command)
+    if !empty(a:000)
+      sleep
+    endif
+    return 0
+  endif
+  return 1
+endfunction
+
+function! s:execute(dict, command, temps) abort
   call s:pushd(a:dict)
   silent! !clear 2> /dev/null
+  let escaped = escape(substitute(a:command, '\n', '\\n', 'g'), '%#')
   if has('gui_running')
     let Launcher = get(a:dict, 'launcher', get(g:, 'Fzf_launcher', get(g:, 'fzf_launcher', s:launcher)))
     let fmt = type(Launcher) == 2 ? call(Launcher, []) : Launcher
-    let command = printf(fmt, "'".substitute(a:command, "'", "'\"'\"'", 'g')."'")
+    let command = printf(fmt, "'".substitute(escaped, "'", "'\"'\"'", 'g')."'")
   else
-    let command = a:command
+    let command = escaped
   endif
   execute 'silent !'.command
+  let exit_status = v:shell_error
   redraw!
-  if v:shell_error
-    " Do not print error message on exit status 1
-    if v:shell_error > 1
-      echohl ErrorMsg
-      echo 'Error running ' . command
-    endif
-    return []
-  else
-    return s:callback(a:dict, a:temps)
-  endif
+  return s:exit_handler(exit_status, command) ? s:collect(a:temps) : []
 endfunction

-function! s:execute_tmux(dict, command, temps)
+function! s:execute_tmux(dict, command, temps) abort
   let command = a:command
   if s:pushd(a:dict)
     " -c '#{pane_current_path}' is only available on tmux 1.9 or above
@@ -236,16 +369,32 @@ function! s:execute_tmux(dict, command, temps)
   endif

   call system(command)
+  let exit_status = v:shell_error
   redraw!
-  return s:callback(a:dict, a:temps)
+  return s:exit_handler(exit_status, command) ? s:collect(a:temps) : []
 endfunction

-function! s:calc_size(max, val)
-  if a:val =~ '%$'
-    return a:max * str2nr(a:val[:-2]) / 100
+function! s:calc_size(max, val, dict)
+  let val = substitute(a:val, '^\~', '', '')
+  if val =~ '%$'
+    let size = a:max * str2nr(val[:-2]) / 100
   else
-    return min([a:max, a:val])
+    let size = min([a:max, str2nr(val)])
   endif
+
+  let srcsz = -1
+  if type(get(a:dict, 'source', 0)) == type([])
+    let srcsz = len(a:dict.source)
+  endif
+
+  let opts = get(a:dict, 'options', '').$FZF_DEFAULT_OPTS
+  let margin = stridx(opts, '--inline-info') > stridx(opts, '--no-inline-info') ? 1 : 2
+  let margin += stridx(opts, '--header') > stridx(opts, '--no-header')
+  return srcsz >= 0 ? min([srcsz + margin, size]) : size
+endfunction
+
+function! s:getpos()
+  return {'tab': tabpagenr(), 'win': winnr(), 'cnt': winnr('$'), 'tcnt': tabpagenr('$')}
 endfunction

 function! s:split(dict)
@@ -254,70 +403,131 @@ function! s:split(dict)
     \ 'down': ['botright', 'resize', &lines],
     \ 'left': ['vertical topleft', 'vertical resize', &columns],
     \ 'right': ['vertical botright', 'vertical resize', &columns] }
-  let s:ptab = tabpagenr()
+  let ppos = s:getpos()
   try
     for [dir, triple] in items(directions)
       let val = get(a:dict, dir, '')
       if !empty(val)
         let [cmd, resz, max] = triple
-        let sz = s:calc_size(max, val)
+        if (dir == 'up' || dir == 'down') && val[0] == '~'
+          let sz = s:calc_size(max, val, a:dict)
+        else
+          let sz = s:calc_size(max, val, {})
+        endif
         execute cmd sz.'new'
         execute resz sz
-        return
+        return [ppos, {}]
       endif
     endfor
     if s:present(a:dict, 'window')
       execute a:dict.window
     else
-      tabnew
+      execute (tabpagenr()-1).'tabnew'
     endif
+    return [ppos, { '&l:wfw': &l:wfw, '&l:wfh': &l:wfh }]
   finally
-    setlocal winfixwidth winfixheight buftype=nofile bufhidden=wipe nobuflisted
+    setlocal winfixwidth winfixheight
   endtry
 endfunction

-function! s:execute_term(dict, command, temps)
-  call s:split(a:dict)
-  call s:pushd(a:dict)
-
-  let fzf = { 'buf': bufnr('%'), 'dict': a:dict, 'temps': a:temps }
+function! s:execute_term(dict, command, temps) abort
+  let winrest = winrestcmd()
+  let [ppos, winopts] = s:split(a:dict)
+  let fzf = { 'buf': bufnr('%'), 'ppos': ppos, 'dict': a:dict, 'temps': a:temps,
+            \ 'winopts': winopts, 'winrest': winrest, 'lines': &lines,
+            \ 'columns': &columns, 'command': a:command }
+  function! fzf.switch_back(inplace)
+    if a:inplace && bufnr('') == self.buf
+      " FIXME: Can't re-enter normal mode from terminal mode
+      " execute "normal! \<c-^>"
+      b #
+      " No other listed buffer
+      if bufnr('') == self.buf
+        enew
+      endif
+    endif
+  endfunction
   function! fzf.on_exit(id, code)
-    let tab = tabpagenr()
+    if s:getpos() == self.ppos " {'window': 'enew'}
+      for [opt, val] in items(self.winopts)
+        execute 'let' opt '=' val
+      endfor
+      call self.switch_back(1)
+    else
       if bufnr('') == self.buf
         " We use close instead of bd! since Vim does not close the split when
-        " there's no other listed buffer
+        " there's no other listed buffer (nvim +'set nobuflisted')
         close
-        " FIXME This should be unnecessary due to `bufhidden=wipe` but in some
-        " cases Neovim fails to clean up the buffer and `bufexists('[FZF]')
-        " returns 1 even when it cannot be seen anywhere else. e.g. `FZF!`
-        silent! execute 'bd!' self.buf
       endif
-    if s:ptab == tab
-      wincmd p
+      execute 'tabnext' self.ppos.tab
+      execute self.ppos.win.'wincmd w'
     endif
+
+    if bufexists(self.buf)
+      execute 'bd!' self.buf
+    endif
+
+    if &lines == self.lines && &columns == self.columns && s:getpos() == self.ppos
+      execute self.winrest
+    endif
+
+    if !s:exit_handler(a:code, self.command, 1)
+      return
+    endif
+
     call s:pushd(self.dict)
-    try
-      redraw!
-      call s:callback(self.dict, self.temps)
-    finally
-      call s:popd(self.dict)
-    endtry
+    let lines = s:collect(self.temps)
+    call s:callback(self.dict, lines)
+    call self.switch_back(s:getpos() == self.ppos)
   endfunction

-  call termopen(a:command, fzf)
-  silent file [FZF]
+  try
+    if s:present(a:dict, 'dir')
+      execute 'lcd' s:escape(a:dict.dir)
+    endif
+    call termopen(a:command . ';#FZF', fzf)
+  finally
+    if s:present(a:dict, 'dir')
+      lcd -
+    endif
+  endtry
+  setlocal nospell bufhidden=wipe nobuflisted
+  setf fzf
   startinsert
   return []
 endfunction

-function! s:callback(dict, temps)
+function! s:collect(temps) abort
+  try
+    return filereadable(a:temps.result) ? readfile(a:temps.result) : []
+  finally
+    for tf in values(a:temps)
+      silent! call delete(tf)
+    endfor
+  endtry
+endfunction
+
+function! s:callback(dict, lines) abort
+  " Since anything can be done in the sink function, there is no telling that
+  " the change of the working directory was made by &autochdir setting.
+  "
+  " We use the following heuristic to determine whether to restore CWD:
+  " - Always restore the current directory when &autochdir is disabled.
+  "   FIXME This makes it impossible to change directory from inside the sink
+  "   function when &autochdir is not used.
+  " - In case of an error or an interrupt, a:lines will be empty.
+  "   And it will be an array of a single empty string when fzf was finished
+  "   without a match. In these cases, we presume that the change of the
+  "   directory is not expected and should be undone.
+  let popd = has_key(a:dict, 'prev_dir') &&
+        \ (!&autochdir || (empty(a:lines) || len(a:lines) == 1 && empty(a:lines[0])))
+  if popd
+    let w:fzf_prev_dir = a:dict.prev_dir
+  endif
+
   try
-    if !filereadable(a:temps.result)
-      let lines = []
-    else
-      let lines = readfile(a:temps.result)
       if has_key(a:dict, 'sink')
-        for line in lines
+        for line in a:lines
           if type(a:dict.sink) == 2
             call a:dict.sink(line)
           else
@@ -326,54 +536,36 @@ try
|
|||||||
endfor
|
endfor
|
||||||
endif
|
endif
|
||||||
if has_key(a:dict, 'sink*')
|
if has_key(a:dict, 'sink*')
|
||||||
call a:dict['sink*'](lines)
|
call a:dict['sink*'](a:lines)
|
||||||
endif
|
endif
|
||||||
endif
|
|
||||||
|
|
||||||
for tf in values(a:temps)
|
|
||||||
silent! call delete(tf)
|
|
||||||
endfor
|
|
||||||
|
|
||||||
return lines
|
|
||||||
catch
|
catch
|
||||||
if stridx(v:exception, ':E325:') < 0
|
if stridx(v:exception, ':E325:') < 0
|
||||||
echoerr v:exception
|
echoerr v:exception
|
||||||
endif
|
endif
|
||||||
endtry
|
endtry
|
||||||
|
|
||||||
|
" We may have opened a new window or tab
|
||||||
|
if popd
|
||||||
|
let w:fzf_prev_dir = a:dict.prev_dir
|
||||||
|
call s:dopopd()
|
||||||
|
endif
|
||||||
endfunction
|
endfunction
|
||||||
|
|
||||||
 let s:default_action = {
-  \ 'ctrl-m': 'e',
-  \ 'ctrl-t': 'tabedit',
+  \ 'ctrl-t': 'tab split',
   \ 'ctrl-x': 'split',
   \ 'ctrl-v': 'vsplit' }
|
|
||||||
-function! s:cmd_callback(lines) abort
-  if empty(a:lines)
-    return
-  endif
-  let key = remove(a:lines, 0)
-  let cmd = get(s:action, key, 'e')
-  for item in a:lines
-    execute cmd s:escape(item)
-  endfor
-endfunction

 function! s:cmd(bang, ...) abort
-  let s:action = get(g:, 'fzf_action', s:default_action)
-  let args = extend(['--expect='.join(keys(s:action), ',')], a:000)
+  let args = copy(a:000)
   let opts = {}
-  if len(args) > 0 && isdirectory(expand(args[-1]))
-    let opts.dir = remove(args, -1)
+  if len(args) && isdirectory(expand(args[-1]))
+    let opts.dir = substitute(remove(args, -1), '\\\(["'']\)', '\1', 'g')
   endif
-  if !a:bang
-    let opts.down = get(g:, 'fzf_height', get(g:, 'fzf_tmux_height', s:default_height))
-  endif
-  call fzf#run(extend({'options': join(args), 'sink*': function('<sid>cmd_callback')}, opts))
+  call fzf#run(fzf#wrap('FZF', extend({'options': join(args)}, opts), a:bang))
 endfunction

 command! -nargs=* -complete=dir -bang FZF call s:cmd(<bang>0, <f-args>)

 let &cpo = s:cpo_save
 unlet s:cpo_save
|
|
||||||
@@ -10,9 +10,29 @@
 # - $FZF_COMPLETION_TRIGGER (default: '**')
 # - $FZF_COMPLETION_OPTS (default: empty)

+# To use custom commands instead of find, override _fzf_compgen_{path,dir}
+if ! declare -f _fzf_compgen_path > /dev/null; then
+  _fzf_compgen_path() {
+    echo "$1"
+    command find -L "$1" \
+      -name .git -prune -o -name .svn -prune -o \( -type d -o -type f -o -type l \) \
+      -a -not -path "$1" -print 2> /dev/null | sed 's@^\./@@'
+  }
+fi
+
+if ! declare -f _fzf_compgen_dir > /dev/null; then
+  _fzf_compgen_dir() {
+    command find -L "$1" \
+      -name .git -prune -o -name .svn -prune -o -type d \
+      -a -not -path "$1" -print 2> /dev/null | sed 's@^\./@@'
+  }
+fi
+
+###########################################################
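These `_fzf_compgen_{path,dir}` hooks are defined only when the user has not already supplied them, so they can be overridden from the shell configuration. A minimal sketch of such an override, assuming the third-party `fd` program is installed (it is not part of fzf):

```sh
# Hypothetical override in ~/.bashrc: generate candidates with fd instead of find
_fzf_compgen_path() {
  fd --hidden --follow --exclude .git . "$1" 2> /dev/null
}

_fzf_compgen_dir() {
  fd --type d --hidden --follow --exclude .git . "$1" 2> /dev/null
}
```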

 _fzf_orig_completion_filter() {
-  sed 's/.*-F *\([^ ]*\).* \([^ ]*\)$/export _fzf_orig_completion_\2=\1;/' |
-  sed 's/[^a-z0-9_= ;]/_/g'
+  sed 's/^\(.*-F\) *\([^ ]*\).* \([^ ]*\)$/export _fzf_orig_completion_\3="\1 %s \3 #\2";/' |
+  awk -F= '{gsub(/[^a-z0-9_= ;]/, "_", $1); print $1"="$2}'
 }
|
|
||||||
_fzf_opts_completion() {
|
_fzf_opts_completion() {
|
||||||
@@ -22,7 +42,7 @@ _fzf_opts_completion() {
|
|||||||
prev="${COMP_WORDS[COMP_CWORD-1]}"
|
prev="${COMP_WORDS[COMP_CWORD-1]}"
|
||||||
opts="
|
opts="
|
||||||
-x --extended
|
-x --extended
|
||||||
-e --extended-exact
|
-e --exact
|
||||||
-i +i
|
-i +i
|
||||||
-n --nth
|
-n --nth
|
||||||
-d --delimiter
|
-d --delimiter
|
||||||
@@ -45,21 +65,31 @@ _fzf_opts_completion() {
|
|||||||
--print-query
|
--print-query
|
||||||
--expect
|
--expect
|
||||||
--toggle-sort
|
--toggle-sort
|
||||||
--sync"
|
--sync
|
||||||
|
--cycle
|
||||||
|
--history
|
||||||
|
--history-size
|
||||||
|
--header
|
||||||
|
--header-lines
|
||||||
|
--margin"
|
||||||
|
|
||||||
case "${prev}" in
|
case "${prev}" in
|
||||||
--tiebreak)
|
--tiebreak)
|
||||||
COMPREPLY=( $(compgen -W "length begin end index" -- ${cur}) )
|
COMPREPLY=( $(compgen -W "length begin end index" -- "$cur") )
|
||||||
return 0
|
return 0
|
||||||
;;
|
;;
|
||||||
--color)
|
--color)
|
||||||
COMPREPLY=( $(compgen -W "dark light 16 bw" -- ${cur}) )
|
COMPREPLY=( $(compgen -W "dark light 16 bw" -- "$cur") )
|
||||||
|
return 0
|
||||||
|
;;
|
||||||
|
--history)
|
||||||
|
COMPREPLY=()
|
||||||
return 0
|
return 0
|
||||||
;;
|
;;
|
||||||
esac
|
esac
|
||||||
|
|
||||||
if [[ ${cur} =~ ^-|\+ ]]; then
|
if [[ "$cur" =~ ^-|\+ ]]; then
|
||||||
COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
|
COMPREPLY=( $(compgen -W "${opts}" -- "$cur") )
|
||||||
return 0
|
return 0
|
||||||
fi
|
fi
|
||||||
|
|
||||||
@@ -67,41 +97,43 @@ _fzf_opts_completion() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
_fzf_handle_dynamic_completion() {
|
_fzf_handle_dynamic_completion() {
|
||||||
local cmd orig ret
|
local cmd orig_var orig ret orig_cmd
|
||||||
cmd="$1"
|
cmd="$1"
|
||||||
shift
|
shift
|
||||||
|
orig_cmd="$1"
|
||||||
orig=$(eval "echo \$_fzf_orig_completion_$cmd")
|
orig_var="_fzf_orig_completion_$cmd"
|
||||||
|
orig="${!orig_var##*#}"
|
||||||
if [ -n "$orig" ] && type "$orig" > /dev/null 2>&1; then
|
if [ -n "$orig" ] && type "$orig" > /dev/null 2>&1; then
|
||||||
$orig "$@"
|
$orig "$@"
|
||||||
elif [ -n "$_fzf_completion_loader" ]; then
|
elif [ -n "$_fzf_completion_loader" ]; then
|
||||||
_completion_loader "$@"
|
_completion_loader "$@"
|
||||||
ret=$?
|
ret=$?
|
||||||
eval $(complete | \grep "\-F.* $cmd$" | _fzf_orig_completion_filter)
|
eval "$(complete | command grep "\-F.* $orig_cmd$" | _fzf_orig_completion_filter)"
|
||||||
source $BASH_SOURCE
|
source "${BASH_SOURCE[0]}"
|
||||||
return $ret
|
return $ret
|
||||||
fi
|
fi
|
||||||
}
|
}
|
||||||
|
|
||||||
_fzf_path_completion() {
|
__fzf_generic_path_completion() {
|
||||||
local cur base dir leftover matches trigger cmd fzf
|
local cur base dir leftover matches trigger cmd fzf
|
||||||
[ ${FZF_TMUX:-1} -eq 1 ] && fzf="fzf-tmux -d ${FZF_TMUX_HEIGHT:-40%}" || fzf="fzf"
|
[ "${FZF_TMUX:-1}" != 0 ] && fzf="fzf-tmux -d ${FZF_TMUX_HEIGHT:-40%}" || fzf="fzf"
|
||||||
cmd=$(echo ${COMP_WORDS[0]} | sed 's/[^a-z0-9_=]/_/g')
|
cmd=$(echo "${COMP_WORDS[0]}" | sed 's/[^a-z0-9_=]/_/g')
|
||||||
COMPREPLY=()
|
COMPREPLY=()
|
||||||
trigger=${FZF_COMPLETION_TRIGGER-'**'}
|
trigger=${FZF_COMPLETION_TRIGGER-'**'}
|
||||||
cur="${COMP_WORDS[COMP_CWORD]}"
|
cur="${COMP_WORDS[COMP_CWORD]}"
|
||||||
if [[ ${cur} == *"$trigger" ]]; then
|
if [[ "$cur" == *"$trigger" ]]; then
|
||||||
base=${cur:0:${#cur}-${#trigger}}
|
base=${cur:0:${#cur}-${#trigger}}
|
||||||
eval base=$base
|
eval "base=$base"
|
||||||
|
|
||||||
dir="$base"
|
dir="$base"
|
||||||
while [ 1 ]; do
|
while true; do
|
||||||
if [ -z "$dir" -o -d "$dir" ]; then
|
if [ -z "$dir" ] || [ -d "$dir" ]; then
|
||||||
leftover=${base/#"$dir"}
|
leftover=${base/#"$dir"}
|
||||||
leftover=${leftover/#\/}
|
leftover=${leftover/#\/}
|
||||||
[ "$dir" = './' ] && dir=''
|
[ -z "$dir" ] && dir='.'
|
||||||
|
[ "$dir" != "/" ] && dir="${dir/%\//}"
|
||||||
tput sc
|
tput sc
|
||||||
matches=$(find -L "$dir"* $1 2> /dev/null | $fzf $FZF_COMPLETION_OPTS $2 -q "$leftover" | while read item; do
|
matches=$(eval "$1 $(printf %q "$dir")" | $fzf $FZF_COMPLETION_OPTS $2 -q "$leftover" | while read -r item; do
|
||||||
printf "%q$3 " "$item"
|
printf "%q$3 " "$item"
|
||||||
done)
|
done)
|
||||||
matches=${matches% }
|
matches=${matches% }
|
||||||
@@ -124,19 +156,21 @@ _fzf_path_completion() {
|
|||||||
fi
|
fi
|
||||||
}
|
}
|
||||||
|
|
||||||
-_fzf_list_completion() {
-  local cur selected trigger cmd src fzf
-  [ ${FZF_TMUX:-1} -eq 1 ] && fzf="fzf-tmux -d ${FZF_TMUX_HEIGHT:-40%}" || fzf="fzf"
-  read -r src
-  cmd=$(echo ${COMP_WORDS[0]} | sed 's/[^a-z0-9_=]/_/g')
+_fzf_complete() {
+  local cur selected trigger cmd fzf post
+  post="$(caller 0 | awk '{print $2}')_post"
+  type -t "$post" > /dev/null 2>&1 || post=cat
+  [ "${FZF_TMUX:-1}" != 0 ] && fzf="fzf-tmux -d ${FZF_TMUX_HEIGHT:-40%}" || fzf="fzf"

+  cmd=$(echo "${COMP_WORDS[0]}" | sed 's/[^a-z0-9_=]/_/g')
   trigger=${FZF_COMPLETION_TRIGGER-'**'}
   cur="${COMP_WORDS[COMP_CWORD]}"
-  if [[ ${cur} == *"$trigger" ]]; then
+  if [[ "$cur" == *"$trigger" ]]; then
     cur=${cur:0:${#cur}-${#trigger}}

     tput sc
-    selected=$(eval "$src | $fzf $FZF_COMPLETION_OPTS $1 -q '$cur'" | tr '\n' ' ')
-    selected=${selected% }
+    selected=$(cat | $fzf $FZF_COMPLETION_OPTS $1 -q "$cur" | $post | tr '\n' ' ')
+    selected=${selected% } # Strip trailing space not to repeat "-o nospace"
     tput rc

     if [ -n "$selected" ]; then
@@ -149,29 +183,24 @@ _fzf_list_completion() {
   fi
 }
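Because `_fzf_complete` now reads its candidates from standard input and looks for an optional `<caller>_post` filter, custom fuzzy completions can be layered on top of it. The sketch below is only an illustration; the `notes` command, its directory, and the helper names are made up:

```sh
# Hypothetical completion for a 'notes' command
_fzf_complete_notes() {
  _fzf_complete '-m' "$@" < <(
    command ls ~/notes 2> /dev/null
  )
}

# Optional post-processor: keep only the first field of each selection
_fzf_complete_notes_post() {
  awk '{print $1}'
}

complete -F _fzf_complete_notes -o default -o bashdefault notes
```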
|
|
||||||
_fzf_all_completion() {
|
_fzf_path_completion() {
|
||||||
_fzf_path_completion \
|
__fzf_generic_path_completion _fzf_compgen_path "-m" "" "$@"
|
||||||
"-name .git -prune -o -name .svn -prune -o -type d -print -o -type f -print -o -type l -print" \
|
|
||||||
"-m" "" "$@"
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
# Deprecated. No file only completion.
|
||||||
_fzf_file_completion() {
|
_fzf_file_completion() {
|
||||||
_fzf_path_completion \
|
_fzf_path_completion "$@"
|
||||||
"-name .git -prune -o -name .svn -prune -o -type f -print -o -type l -print" \
|
|
||||||
"-m" "" "$@"
|
|
||||||
}
|
}
|
||||||
|
|
||||||
_fzf_dir_completion() {
|
_fzf_dir_completion() {
|
||||||
_fzf_path_completion \
|
__fzf_generic_path_completion _fzf_compgen_dir "" "/" "$@"
|
||||||
"-name .git -prune -o -name .svn -prune -o -type d -print" \
|
|
||||||
"" "/" "$@"
|
|
||||||
}
|
}
|
||||||
|
|
||||||
_fzf_kill_completion() {
|
_fzf_complete_kill() {
|
||||||
[ -n "${COMP_WORDS[COMP_CWORD]}" ] && return 1
|
[ -n "${COMP_WORDS[COMP_CWORD]}" ] && return 1
|
||||||
|
|
||||||
local selected fzf
|
local selected fzf
|
||||||
[ ${FZF_TMUX:-1} -eq 1 ] && fzf="fzf-tmux -d ${FZF_TMUX_HEIGHT:-40%}" || fzf="fzf"
|
[ "${FZF_TMUX:-1}" != 0 ] && fzf="fzf-tmux -d ${FZF_TMUX_HEIGHT:-40%}" || fzf="fzf"
|
||||||
tput sc
|
tput sc
|
||||||
selected=$(ps -ef | sed 1d | $fzf -m $FZF_COMPLETION_OPTS | awk '{print $2}' | tr '\n' ' ')
|
selected=$(ps -ef | sed 1d | $fzf -m $FZF_COMPLETION_OPTS | awk '{print $2}' | tr '\n' ' ')
|
||||||
tput rc
|
tput rc
|
||||||
@@ -182,40 +211,49 @@ _fzf_kill_completion() {
|
|||||||
fi
|
fi
|
||||||
}
|
}
|
||||||
|
|
||||||
_fzf_telnet_completion() {
|
_fzf_complete_telnet() {
|
||||||
_fzf_list_completion '+m' "$@" << "EOF"
|
_fzf_complete '+m' "$@" < <(
|
||||||
\grep -v '^\s*\(#\|$\)' /etc/hosts | \grep -Fv '0.0.0.0' | awk '{if (length($2) > 0) {print $2}}' | sort -u
|
command grep -v '^\s*\(#\|$\)' /etc/hosts | command grep -Fv '0.0.0.0' |
|
||||||
EOF
|
awk '{if (length($2) > 0) {print $2}}' | sort -u
|
||||||
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
_fzf_ssh_completion() {
|
_fzf_complete_ssh() {
|
||||||
_fzf_list_completion '+m' "$@" << "EOF"
|
_fzf_complete '+m' "$@" < <(
|
||||||
cat <(cat ~/.ssh/config /etc/ssh/ssh_config 2> /dev/null | \grep -i '^host' | \grep -v '*') <(\grep -v '^\s*\(#\|$\)' /etc/hosts | \grep -Fv '0.0.0.0') | awk '{if (length($2) > 0) {print $2}}' | sort -u
|
cat <(cat ~/.ssh/config /etc/ssh/ssh_config 2> /dev/null | command grep -i '^host' | command grep -v '*') \
|
||||||
EOF
|
<(command grep -oE '^[^ ]+' ~/.ssh/known_hosts | tr ',' '\n' | awk '{ print $1 " " $1 }') \
|
||||||
|
<(command grep -v '^\s*\(#\|$\)' /etc/hosts | command grep -Fv '0.0.0.0') |
|
||||||
|
awk '{if (length($2) > 0) {print $2}}' | sort -u
|
||||||
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
_fzf_env_var_completion() {
|
_fzf_complete_unset() {
|
||||||
_fzf_list_completion '-m' "$@" << "EOF"
|
_fzf_complete '-m' "$@" < <(
|
||||||
declare -xp | sed 's/=.*//' | sed 's/.* //'
|
declare -xp | sed 's/=.*//' | sed 's/.* //'
|
||||||
EOF
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
_fzf_alias_completion() {
|
_fzf_complete_export() {
|
||||||
_fzf_list_completion '-m' "$@" << "EOF"
|
_fzf_complete '-m' "$@" < <(
|
||||||
|
declare -xp | sed 's/=.*//' | sed 's/.* //'
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
_fzf_complete_unalias() {
|
||||||
|
_fzf_complete '-m' "$@" < <(
|
||||||
alias | sed 's/=.*//' | sed 's/.* //'
|
alias | sed 's/=.*//' | sed 's/.* //'
|
||||||
EOF
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
# fzf options
|
# fzf options
|
||||||
complete -F _fzf_opts_completion fzf
|
complete -o default -F _fzf_opts_completion fzf
|
||||||
|
|
||||||
d_cmds="cd pushd rmdir"
|
d_cmds="${FZF_COMPLETION_DIR_COMMANDS:-cd pushd rmdir}"
|
||||||
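With this change the set of commands that receive directory-only completion can be extended through `$FZF_COMPLETION_DIR_COMMANDS`; the extra command below is only an illustration:

```sh
# Illustrative: add more directory-oriented commands
export FZF_COMPLETION_DIR_COMMANDS="cd pushd rmdir tree"
```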
f_cmds="
|
|
||||||
awk cat diff diff3
|
|
||||||
emacs ex file ftp g++ gcc gvim head hg java
|
|
||||||
javac ld less more mvim patch perl python ruby
|
|
||||||
sed sftp sort source tail tee uniq vi view vim wc"
|
|
||||||
a_cmds="
|
a_cmds="
|
||||||
|
awk cat diff diff3
|
||||||
|
emacs emacsclient ex file ftp g++ gcc gvim head hg java
|
||||||
|
javac ld less more mvim nvim patch perl python ruby
|
||||||
|
sed sftp sort source tail tee uniq vi view vim wc xdg-open
|
||||||
basename bunzip2 bzip2 chmod chown curl cp dirname du
|
basename bunzip2 bzip2 chmod chown curl cp dirname du
|
||||||
find git grep gunzip gzip hg jar
|
find git grep gunzip gzip hg jar
|
||||||
ln ls mv open rm rsync scp
|
ln ls mv open rm rsync scp
|
||||||
@@ -223,42 +261,54 @@ a_cmds="
|
|||||||
x_cmds="kill ssh telnet unset unalias export"
|
x_cmds="kill ssh telnet unset unalias export"
|
||||||
|
|
||||||
# Preserve existing completion
|
# Preserve existing completion
|
||||||
if [ "$_fzf_completion_loaded" != '0.9.12' ]; then
|
if [ "$_fzf_completion_loaded" != '0.11.3' ]; then
|
||||||
# Really wish I could use associative array but OSX comes with bash 3.2 :(
|
# Really wish I could use associative array but OSX comes with bash 3.2 :(
|
||||||
eval $(complete | \grep '\-F' | \grep -v _fzf_ |
|
eval $(complete | command grep '\-F' | command grep -v _fzf_ |
|
||||||
\grep -E " ($(echo $d_cmds $f_cmds $a_cmds $x_cmds | sed 's/ /|/g' | sed 's/+/\\+/g'))$" | _fzf_orig_completion_filter)
|
command grep -E " ($(echo $d_cmds $a_cmds $x_cmds | sed 's/ /|/g' | sed 's/+/\\+/g'))$" | _fzf_orig_completion_filter)
|
||||||
export _fzf_completion_loaded=0.9.12
|
export _fzf_completion_loaded=0.11.3
|
||||||
fi
|
fi
|
||||||
|
|
||||||
if type _completion_loader > /dev/null 2>&1; then
|
if type _completion_loader > /dev/null 2>&1; then
|
||||||
_fzf_completion_loader=1
|
_fzf_completion_loader=1
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# Directory
|
_fzf_defc() {
|
||||||
for cmd in $d_cmds; do
|
local cmd func opts orig_var orig def
|
||||||
complete -F _fzf_dir_completion -o nospace -o plusdirs $cmd
|
cmd="$1"
|
||||||
done
|
func="$2"
|
||||||
|
opts="$3"
|
||||||
# File
|
orig_var="_fzf_orig_completion_$cmd"
|
||||||
for cmd in $f_cmds; do
|
orig="${!orig_var}"
|
||||||
complete -F _fzf_file_completion -o default -o bashdefault $cmd
|
if [ -n "$orig" ]; then
|
||||||
done
|
printf -v def "$orig" "$func"
|
||||||
|
eval "$def"
|
||||||
|
else
|
||||||
|
complete -F "$func" $opts "$cmd"
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
# Anything
|
# Anything
|
||||||
for cmd in $a_cmds; do
|
for cmd in $a_cmds; do
|
||||||
complete -F _fzf_all_completion -o default -o bashdefault $cmd
|
_fzf_defc "$cmd" _fzf_path_completion "-o default -o bashdefault"
|
||||||
done
|
done
|
||||||
|
|
||||||
|
# Directory
|
||||||
|
for cmd in $d_cmds; do
|
||||||
|
_fzf_defc "$cmd" _fzf_dir_completion "-o nospace -o plusdirs"
|
||||||
|
done
|
||||||
|
|
||||||
|
unset _fzf_defc
|
||||||
|
|
||||||
# Kill completion
|
# Kill completion
|
||||||
complete -F _fzf_kill_completion -o nospace -o default -o bashdefault kill
|
complete -F _fzf_complete_kill -o nospace -o default -o bashdefault kill
|
||||||
|
|
||||||
# Host completion
|
# Host completion
|
||||||
complete -F _fzf_ssh_completion -o default -o bashdefault ssh
|
complete -F _fzf_complete_ssh -o default -o bashdefault ssh
|
||||||
complete -F _fzf_telnet_completion -o default -o bashdefault telnet
|
complete -F _fzf_complete_telnet -o default -o bashdefault telnet
|
||||||
|
|
||||||
# Environment variables / Aliases
|
# Environment variables / Aliases
|
||||||
complete -F _fzf_env_var_completion -o default -o bashdefault unset
|
complete -F _fzf_complete_unset -o default -o bashdefault unset
|
||||||
complete -F _fzf_env_var_completion -o default -o bashdefault export
|
complete -F _fzf_complete_export -o default -o bashdefault export
|
||||||
complete -F _fzf_alias_completion -o default -o bashdefault unalias
|
complete -F _fzf_complete_unalias -o default -o bashdefault unalias
|
||||||
|
|
||||||
unset cmd d_cmds f_cmds a_cmds x_cmds
|
unset cmd d_cmds a_cmds x_cmds
|
||||||
|
@@ -10,102 +10,141 @@
|
|||||||
# - $FZF_COMPLETION_TRIGGER (default: '**')
|
# - $FZF_COMPLETION_TRIGGER (default: '**')
|
||||||
# - $FZF_COMPLETION_OPTS (default: empty)
|
# - $FZF_COMPLETION_OPTS (default: empty)
|
||||||
|
|
||||||
_fzf_path_completion() {
|
# To use custom commands instead of find, override _fzf_compgen_{path,dir}
|
||||||
local base lbuf find_opts fzf_opts suffix tail fzf dir leftover matches nnm
|
if ! declare -f _fzf_compgen_path > /dev/null; then
|
||||||
|
_fzf_compgen_path() {
|
||||||
|
echo "$1"
|
||||||
|
command find -L "$1" \
|
||||||
|
-name .git -prune -o -name .svn -prune -o \( -type d -o -type f -o -type l \) \
|
||||||
|
-a -not -path "$1" -print 2> /dev/null | sed 's@^\./@@'
|
||||||
|
}
|
||||||
|
fi
|
||||||
|
|
||||||
|
if ! declare -f _fzf_compgen_dir > /dev/null; then
|
||||||
|
_fzf_compgen_dir() {
|
||||||
|
command find -L "$1" \
|
||||||
|
-name .git -prune -o -name .svn -prune -o -type d \
|
||||||
|
-a -not -path "$1" -print 2> /dev/null | sed 's@^\./@@'
|
||||||
|
}
|
||||||
|
fi
|
||||||
|
|
||||||
|
###########################################################
|
||||||
|
|
||||||
|
__fzf_generic_path_completion() {
|
||||||
|
local base lbuf compgen fzf_opts suffix tail fzf dir leftover matches
|
||||||
|
# (Q) flag removes a quoting level: "foo\ bar" => "foo bar"
|
||||||
base=${(Q)1}
|
base=${(Q)1}
|
||||||
lbuf=$2
|
lbuf=$2
|
||||||
find_opts=$3
|
compgen=$3
|
||||||
fzf_opts=$4
|
fzf_opts=$4
|
||||||
suffix=$5
|
suffix=$5
|
||||||
tail=$6
|
tail=$6
|
||||||
[ ${FZF_TMUX:-1} -eq 1 ] && fzf="fzf-tmux -d ${FZF_TMUX_HEIGHT:-40%}" || fzf="fzf"
|
[ ${FZF_TMUX:-1} -eq 1 ] && fzf="fzf-tmux -d ${FZF_TMUX_HEIGHT:-40%}" || fzf="fzf"
|
||||||
|
|
||||||
if ! setopt | grep nonomatch > /dev/null; then
|
setopt localoptions nonomatch
|
||||||
nnm=1
|
|
||||||
setopt nonomatch
|
|
||||||
fi
|
|
||||||
dir="$base"
|
dir="$base"
|
||||||
while [ 1 ]; do
|
while [ 1 ]; do
|
||||||
if [ -z "$dir" -o -d ${~dir} ]; then
|
if [ -z "$dir" -o -d ${~dir} ]; then
|
||||||
leftover=${base/#"$dir"}
|
leftover=${base/#"$dir"}
|
||||||
leftover=${leftover/#\/}
|
leftover=${leftover/#\/}
|
||||||
[ "$dir" = './' ] && dir=''
|
[ -z "$dir" ] && dir='.'
|
||||||
|
[ "$dir" != "/" ] && dir="${dir/%\//}"
|
||||||
dir=${~dir}
|
dir=${~dir}
|
||||||
matches=$(\find -L $dir* ${=find_opts} 2> /dev/null | ${=fzf} ${=FZF_COMPLETION_OPTS} ${=fzf_opts} -q "$leftover" | while read item; do
|
matches=$(eval "$compgen $(printf %q "$dir")" | ${=fzf} ${=FZF_COMPLETION_OPTS} ${=fzf_opts} -q "$leftover" | while read item; do
|
||||||
printf "%q$suffix " "$item"
|
echo -n "${(q)item}$suffix "
|
||||||
done)
|
done)
|
||||||
matches=${matches% }
|
matches=${matches% }
|
||||||
if [ -n "$matches" ]; then
|
if [ -n "$matches" ]; then
|
||||||
LBUFFER="$lbuf$matches$tail"
|
LBUFFER="$lbuf$matches$tail"
|
||||||
fi
|
fi
|
||||||
zle redisplay
|
zle redisplay
|
||||||
|
typeset -f zle-line-init >/dev/null && zle zle-line-init
|
||||||
break
|
break
|
||||||
fi
|
fi
|
||||||
dir=$(dirname "$dir")
|
dir=$(dirname "$dir")
|
||||||
dir=${dir%/}/
|
dir=${dir%/}/
|
||||||
done
|
done
|
||||||
[ -n "$nnm" ] && unsetopt nonomatch
|
|
||||||
}
|
}
|
||||||
|
|
||||||
_fzf_all_completion() {
|
_fzf_path_completion() {
|
||||||
_fzf_path_completion "$1" "$2" \
|
__fzf_generic_path_completion "$1" "$2" _fzf_compgen_path \
|
||||||
"-name .git -prune -o -name .svn -prune -o -type d -print -o -type f -print -o -type l -print" \
|
|
||||||
"-m" "" " "
|
"-m" "" " "
|
||||||
}
|
}
|
||||||
|
|
||||||
_fzf_dir_completion() {
|
_fzf_dir_completion() {
|
||||||
_fzf_path_completion "$1" "$2" \
|
__fzf_generic_path_completion "$1" "$2" _fzf_compgen_dir \
|
||||||
"-name .git -prune -o -name .svn -prune -o -type d -print" \
|
|
||||||
"" "/" ""
|
"" "/" ""
|
||||||
}
|
}
|
||||||
|
|
||||||
_fzf_list_completion() {
|
_fzf_feed_fifo() (
|
||||||
local prefix lbuf fzf_opts src fzf matches
|
command rm -f "$1"
|
||||||
prefix=$1
|
mkfifo "$1"
|
||||||
|
cat <&0 > "$1" &
|
||||||
|
)
|
||||||
|
|
||||||
|
_fzf_complete() {
|
||||||
|
local fifo fzf_opts lbuf fzf matches post
|
||||||
|
fifo="${TMPDIR:-/tmp}/fzf-complete-fifo-$$"
|
||||||
|
fzf_opts=$1
|
||||||
lbuf=$2
|
lbuf=$2
|
||||||
fzf_opts=$3
|
post="${funcstack[2]}_post"
|
||||||
read -r src
|
type $post > /dev/null 2>&1 || post=cat
|
||||||
|
|
||||||
[ ${FZF_TMUX:-1} -eq 1 ] && fzf="fzf-tmux -d ${FZF_TMUX_HEIGHT:-40%}" || fzf="fzf"
|
[ ${FZF_TMUX:-1} -eq 1 ] && fzf="fzf-tmux -d ${FZF_TMUX_HEIGHT:-40%}" || fzf="fzf"
|
||||||
|
|
||||||
matches=$(eval "$src" | ${=fzf} ${=FZF_COMPLETION_OPTS} ${=fzf_opts} -q "$prefix")
|
_fzf_feed_fifo "$fifo"
|
||||||
|
matches=$(cat "$fifo" | ${=fzf} ${=FZF_COMPLETION_OPTS} ${=fzf_opts} -q "${(Q)prefix}" | $post | tr '\n' ' ')
|
||||||
if [ -n "$matches" ]; then
|
if [ -n "$matches" ]; then
|
||||||
LBUFFER="$lbuf$matches"
|
LBUFFER="$lbuf$matches"
|
||||||
fi
|
fi
|
||||||
zle redisplay
|
zle redisplay
|
||||||
|
typeset -f zle-line-init >/dev/null && zle zle-line-init
|
||||||
|
command rm -f "$fifo"
|
||||||
}
|
}
|
||||||
|
|
||||||
_fzf_telnet_completion() {
|
_fzf_complete_telnet() {
|
||||||
_fzf_list_completion "$1" "$2" '+m' << "EOF"
|
_fzf_complete '+m' "$@" < <(
|
||||||
\grep -v '^\s*\(#\|$\)' /etc/hosts | \grep -Fv '0.0.0.0' | awk '{if (length($2) > 0) {print $2}}' | sort -u
|
command grep -v '^\s*\(#\|$\)' /etc/hosts | command grep -Fv '0.0.0.0' |
|
||||||
EOF
|
awk '{if (length($2) > 0) {print $2}}' | sort -u
|
||||||
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
_fzf_ssh_completion() {
|
_fzf_complete_ssh() {
|
||||||
_fzf_list_completion "$1" "$2" '+m' << "EOF"
|
_fzf_complete '+m' "$@" < <(
|
||||||
cat <(cat ~/.ssh/config /etc/ssh/ssh_config 2> /dev/null | \grep -i '^host' | \grep -v '*') <(\grep -v '^\s*\(#\|$\)' /etc/hosts | \grep -Fv '0.0.0.0') | awk '{if (length($2) > 0) {print $2}}' | sort -u
|
cat <(cat ~/.ssh/config /etc/ssh/ssh_config 2> /dev/null | command grep -i '^host' | command grep -v '*') \
|
||||||
EOF
|
<(command grep -oE '^[^ ]+' ~/.ssh/known_hosts | tr ',' '\n' | awk '{ print $1 " " $1 }') \
|
||||||
|
<(command grep -v '^\s*\(#\|$\)' /etc/hosts | command grep -Fv '0.0.0.0') |
|
||||||
|
awk '{if (length($2) > 0) {print $2}}' | sort -u
|
||||||
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
_fzf_env_var_completion() {
|
_fzf_complete_export() {
|
||||||
_fzf_list_completion "$1" "$2" '+m' << "EOF"
|
_fzf_complete '-m' "$@" < <(
|
||||||
declare -xp | sed 's/=.*//' | sed 's/.* //'
|
declare -xp | sed 's/=.*//' | sed 's/.* //'
|
||||||
EOF
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
_fzf_alias_completion() {
|
_fzf_complete_unset() {
|
||||||
_fzf_list_completion "$1" "$2" '+m' << "EOF"
|
_fzf_complete '-m' "$@" < <(
|
||||||
|
declare -xp | sed 's/=.*//' | sed 's/.* //'
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
_fzf_complete_unalias() {
|
||||||
|
_fzf_complete '+m' "$@" < <(
|
||||||
alias | sed 's/=.*//'
|
alias | sed 's/=.*//'
|
||||||
EOF
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
fzf-completion() {
|
fzf-completion() {
|
||||||
local tokens cmd prefix trigger tail fzf matches lbuf d_cmds
|
local tokens cmd prefix trigger tail fzf matches lbuf d_cmds
|
||||||
|
setopt localoptions noshwordsplit noksh_arrays
|
||||||
|
|
||||||
# http://zsh.sourceforge.net/FAQ/zshfaq03.html
|
# http://zsh.sourceforge.net/FAQ/zshfaq03.html
|
||||||
# http://zsh.sourceforge.net/Doc/Release/Expansion.html#Parameter-Expansion-Flags
|
# http://zsh.sourceforge.net/Doc/Release/Expansion.html#Parameter-Expansion-Flags
|
||||||
tokens=(${(z)LBUFFER})
|
tokens=(${(z)LBUFFER})
|
||||||
if [ ${#tokens} -lt 1 ]; then
|
if [ ${#tokens} -lt 1 ]; then
|
||||||
eval "zle ${fzf_default_completion:-expand-or-complete}"
|
zle ${fzf_default_completion:-expand-or-complete}
|
||||||
return
|
return
|
||||||
fi
|
fi
|
||||||
|
|
||||||
@@ -124,35 +163,32 @@ fzf-completion() {
|
|||||||
LBUFFER="$LBUFFER$matches"
|
LBUFFER="$LBUFFER$matches"
|
||||||
fi
|
fi
|
||||||
zle redisplay
|
zle redisplay
|
||||||
|
typeset -f zle-line-init >/dev/null && zle zle-line-init
|
||||||
# Trigger sequence given
|
# Trigger sequence given
|
||||||
elif [ ${#tokens} -gt 1 -a "$tail" = "$trigger" ]; then
|
elif [ ${#tokens} -gt 1 -a "$tail" = "$trigger" ]; then
|
||||||
d_cmds=(cd pushd rmdir)
|
d_cmds=(${=FZF_COMPLETION_DIR_COMMANDS:-cd pushd rmdir})
|
||||||
|
|
||||||
[ -z "$trigger" ] && prefix=${tokens[-1]} || prefix=${tokens[-1]:0:-${#trigger}}
|
[ -z "$trigger" ] && prefix=${tokens[-1]} || prefix=${tokens[-1]:0:-${#trigger}}
|
||||||
[ -z "${tokens[-1]}" ] && lbuf=$LBUFFER || lbuf=${LBUFFER:0:-${#tokens[-1]}}
|
[ -z "${tokens[-1]}" ] && lbuf=$LBUFFER || lbuf=${LBUFFER:0:-${#tokens[-1]}}
|
||||||
|
|
||||||
if [ ${d_cmds[(i)$cmd]} -le ${#d_cmds} ]; then
|
if eval "type _fzf_complete_${cmd} > /dev/null"; then
|
||||||
_fzf_dir_completion "$prefix" $lbuf
|
eval "prefix=\"$prefix\" _fzf_complete_${cmd} \"$lbuf\""
|
||||||
elif [ $cmd = telnet ]; then
|
elif [ ${d_cmds[(i)$cmd]} -le ${#d_cmds} ]; then
|
||||||
_fzf_telnet_completion "$prefix" $lbuf
|
_fzf_dir_completion "$prefix" "$lbuf"
|
||||||
elif [ $cmd = ssh ]; then
|
|
||||||
_fzf_ssh_completion "$prefix" $lbuf
|
|
||||||
elif [ $cmd = unset -o $cmd = export ]; then
|
|
||||||
_fzf_env_var_completion "$prefix" $lbuf
|
|
||||||
elif [ $cmd = unalias ]; then
|
|
||||||
_fzf_alias_completion "$prefix" $lbuf
|
|
||||||
else
|
else
|
||||||
_fzf_all_completion "$prefix" $lbuf
|
_fzf_path_completion "$prefix" "$lbuf"
|
||||||
fi
|
fi
|
||||||
# Fall back to default completion
|
# Fall back to default completion
|
||||||
else
|
else
|
||||||
eval "zle ${fzf_default_completion:-expand-or-complete}"
|
zle ${fzf_default_completion:-expand-or-complete}
|
||||||
fi
|
fi
|
||||||
}
|
}
|
||||||
|
|
||||||
[ -z "$fzf_default_completion" ] &&
|
[ -z "$fzf_default_completion" ] && {
|
||||||
fzf_default_completion=$(bindkey '^I' | grep -v undefined-key | awk '{print $2}')
|
binding=$(bindkey '^I')
|
||||||
|
[[ $binding =~ 'undefined-key' ]] || fzf_default_completion=$binding[(s: :w)2]
|
||||||
|
unset binding
|
||||||
|
}
|
||||||
|
|
||||||
zle -N fzf-completion
|
zle -N fzf-completion
|
||||||
bindkey '^I' fzf-completion
|
bindkey '^I' fzf-completion
|
||||||
|
|
||||||
@@ -1,10 +1,11 @@
 # Key bindings
 # ------------
 __fzf_select__() {
-  command find -L . \( -path '*/\.*' -o -fstype 'dev' -o -fstype 'proc' \) -prune \
+  local cmd="${FZF_CTRL_T_COMMAND:-"command find -L . \\( -path '*/\\.*' -o -fstype 'dev' -o -fstype 'proc' \\) -prune \
     -o -type f -print \
     -o -type d -print \
-    -o -type l -print 2> /dev/null | sed 1d | cut -b3- | fzf -m | while read item; do
+    -o -type l -print 2> /dev/null | sed 1d | cut -b3-"}"
+  eval "$cmd | fzf -m $FZF_CTRL_T_OPTS" | while read -r item; do
     printf '%q ' "$item"
   done
   echo
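`__fzf_select__` now honors `$FZF_CTRL_T_COMMAND` and `$FZF_CTRL_T_OPTS`, so the CTRL-T source and options can be changed without editing the script. A possible configuration; the `git ls-files` fallback chain is an assumption, not part of this change:

```sh
# Illustrative CTRL-T customization
export FZF_CTRL_T_COMMAND='git ls-files 2> /dev/null || command find . -type f'
export FZF_CTRL_T_OPTS='--reverse'
```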
@@ -13,7 +14,7 @@ __fzf_select__() {
|
|||||||
if [[ $- =~ i ]]; then
|
if [[ $- =~ i ]]; then
|
||||||
|
|
||||||
__fzfcmd() {
|
__fzfcmd() {
|
||||||
[ ${FZF_TMUX:-1} -eq 1 ] && echo "fzf-tmux -d${FZF_TMUX_HEIGHT:-40%}" || echo "fzf"
|
[ "${FZF_TMUX:-1}" != 0 ] && echo "fzf-tmux -d${FZF_TMUX_HEIGHT:-40%}" || echo "fzf"
|
||||||
}
|
}
|
||||||
|
|
||||||
__fzf_select_tmux__() {
|
__fzf_select_tmux__() {
|
||||||
@@ -24,66 +25,101 @@ __fzf_select_tmux__() {
|
|||||||
else
|
else
|
||||||
height="-l $height"
|
height="-l $height"
|
||||||
fi
|
fi
|
||||||
tmux split-window $height "cd $(printf %q "$PWD");bash -c 'source ~/.fzf.bash; tmux send-keys -t $TMUX_PANE \"\$(__fzf_select__)\"'"
|
|
||||||
|
tmux split-window $height "cd $(printf %q "$PWD"); FZF_DEFAULT_OPTS=$(printf %q "$FZF_DEFAULT_OPTS") PATH=$(printf %q "$PATH") FZF_CTRL_T_COMMAND=$(printf %q "$FZF_CTRL_T_COMMAND") FZF_CTRL_T_OPTS=$(printf %q "$FZF_CTRL_T_OPTS") bash -c 'source \"${BASH_SOURCE[0]}\"; RESULT=\"\$(__fzf_select__)\"; tmux setb -b fzf \"\$RESULT\" \\; pasteb -b fzf -t $TMUX_PANE \\; deleteb -b fzf || tmux send-keys -t $TMUX_PANE \"\$RESULT\"'"
|
||||||
|
}
|
||||||
|
|
||||||
|
fzf-file-widget() {
|
||||||
|
if __fzf_use_tmux__; then
|
||||||
|
__fzf_select_tmux__
|
||||||
|
else
|
||||||
|
local selected="$(__fzf_select__)"
|
||||||
|
READLINE_LINE="${READLINE_LINE:0:$READLINE_POINT}$selected${READLINE_LINE:$READLINE_POINT}"
|
||||||
|
READLINE_POINT=$(( READLINE_POINT + ${#selected} ))
|
||||||
|
fi
|
||||||
}
|
}
|
||||||
|
|
||||||
__fzf_cd__() {
|
__fzf_cd__() {
|
||||||
local dir
|
local cmd dir
|
||||||
dir=$(command find -L ${1:-.} \( -path '*/\.*' -o -fstype 'dev' -o -fstype 'proc' \) -prune \
|
cmd="${FZF_ALT_C_COMMAND:-"command find -L . \\( -path '*/\\.*' -o -fstype 'dev' -o -fstype 'proc' \\) -prune \
|
||||||
-o -type d -print 2> /dev/null | sed 1d | cut -b3- | $(__fzfcmd) +m) && printf 'cd %q' "$dir"
|
-o -type d -print 2> /dev/null | sed 1d | cut -b3-"}"
|
||||||
|
dir=$(eval "$cmd | $(__fzfcmd) +m $FZF_ALT_C_OPTS") && printf 'cd %q' "$dir"
|
||||||
}
|
}
|
||||||
|
|
||||||
__fzf_history__() {
|
__fzf_history__() (
|
||||||
local line
|
local line
|
||||||
|
shopt -u nocaseglob nocasematch
|
||||||
line=$(
|
line=$(
|
||||||
HISTTIMEFORMAT= history |
|
HISTTIMEFORMAT= history |
|
||||||
$(__fzfcmd) +s --tac +m -n2..,.. --tiebreak=index --toggle-sort=ctrl-r |
|
eval "$(__fzfcmd) +s --tac +m -n2..,.. --tiebreak=index --toggle-sort=ctrl-r $FZF_CTRL_R_OPTS" |
|
||||||
\grep '^ *[0-9]') && sed 's/ *\([0-9]*\)\** .*/!\1/' <<< "$line"
|
command grep '^ *[0-9]') &&
|
||||||
|
if [[ $- =~ H ]]; then
|
||||||
|
sed 's/^ *\([0-9]*\)\** .*/!\1/' <<< "$line"
|
||||||
|
else
|
||||||
|
sed 's/^ *\([0-9]*\)\** *//' <<< "$line"
|
||||||
|
fi
|
||||||
|
)
|
||||||
|
|
||||||
|
__fzf_use_tmux__() {
|
||||||
|
[ -n "$TMUX_PANE" ] && [ "${FZF_TMUX:-1}" != 0 ] && [ ${LINES:-40} -gt 15 ]
|
||||||
}
|
}
|
||||||
|
|
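Likewise, `__fzf_cd__`, `__fzf_history__`, and `__fzf_use_tmux__` read `$FZF_ALT_C_COMMAND`, `$FZF_ALT_C_OPTS`, `$FZF_CTRL_R_OPTS`, `$FZF_TMUX`, and `$FZF_TMUX_HEIGHT`. The values below are illustrative only:

```sh
# Illustrative ALT-C / CTRL-R / tmux settings
export FZF_ALT_C_COMMAND='command find . -type d -not -path "*/\.*" 2> /dev/null'
export FZF_ALT_C_OPTS='--reverse'
export FZF_CTRL_R_OPTS='--exact'
export FZF_TMUX=1
export FZF_TMUX_HEIGHT=40%
```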
||||||
__use_tmux=0
|
[ $BASH_VERSINFO -gt 3 ] && __use_bind_x=1 || __use_bind_x=0
|
||||||
[ -n "$TMUX_PANE" -a ${FZF_TMUX:-1} -ne 0 -a ${LINES:-40} -gt 15 ] && __use_tmux=1
|
__fzf_use_tmux__ && __use_tmux=1 || __use_tmux=0
|
||||||
|
|
||||||
if [ -z "$(set -o | \grep '^vi.*on')" ]; then
|
if [[ ! -o vi ]]; then
|
||||||
# Required to refresh the prompt after fzf
|
# Required to refresh the prompt after fzf
|
||||||
bind '"\er": redraw-current-line'
|
bind '"\er": redraw-current-line'
|
||||||
bind '"\e^": history-expand-line'
|
bind '"\e^": history-expand-line'
|
||||||
|
|
||||||
# CTRL-T - Paste the selected file path into the command line
|
# CTRL-T - Paste the selected file path into the command line
|
||||||
if [ $__use_tmux -eq 1 ]; then
|
if [ $__use_bind_x -eq 1 ]; then
|
||||||
|
bind -x '"\C-t": "fzf-file-widget"'
|
||||||
|
elif [ $__use_tmux -eq 1 ]; then
|
||||||
bind '"\C-t": " \C-u \C-a\C-k$(__fzf_select_tmux__)\e\C-e\C-y\C-a\C-d\C-y\ey\C-h"'
|
bind '"\C-t": " \C-u \C-a\C-k$(__fzf_select_tmux__)\e\C-e\C-y\C-a\C-d\C-y\ey\C-h"'
|
||||||
else
|
else
|
||||||
bind '"\C-t": " \C-u \C-a\C-k$(__fzf_select__)\e\C-e\C-y\C-a\C-y\ey\C-h\C-e\er \C-h"'
|
bind '"\C-t": " \C-u \C-a\C-k$(__fzf_select__)\e\C-e\C-y\C-a\C-y\ey\C-h\C-e\er \C-h"'
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# CTRL-R - Paste the selected command from history into the command line
|
# CTRL-R - Paste the selected command from history into the command line
|
||||||
bind '"\C-r": " \C-e\C-u$(__fzf_history__)\e\C-e\e^\er"'
|
bind '"\C-r": " \C-e\C-u`__fzf_history__`\e\C-e\e^\er"'
|
||||||
|
|
||||||
# ALT-C - cd into the selected directory
|
# ALT-C - cd into the selected directory
|
||||||
bind '"\ec": " \C-e\C-u$(__fzf_cd__)\e\C-e\er\C-m"'
|
bind '"\ec": " \C-e\C-u`__fzf_cd__`\e\C-e\er\C-m"'
|
||||||
else
|
else
|
||||||
|
# We'd usually use "\e" to enter vi-movement-mode so we can do our magic,
|
||||||
|
# but this incurs a very noticeable delay of a half second or so,
|
||||||
|
# because many other commands start with "\e".
|
||||||
|
# Instead, we bind an unused key, "\C-x\C-a",
|
||||||
|
# to also enter vi-movement-mode,
|
||||||
|
# and then use that thereafter.
|
||||||
|
# (We imagine that "\C-x\C-a" is relatively unlikely to be in use.)
|
||||||
|
bind '"\C-x\C-a": vi-movement-mode'
|
||||||
|
|
||||||
bind '"\C-x\C-e": shell-expand-line'
|
bind '"\C-x\C-e": shell-expand-line'
|
||||||
bind '"\C-x\C-r": redraw-current-line'
|
bind '"\C-x\C-r": redraw-current-line'
|
||||||
bind '"\C-x^": history-expand-line'
|
bind '"\C-x^": history-expand-line'
|
||||||
|
|
||||||
# CTRL-T - Paste the selected file path into the command line
|
# CTRL-T - Paste the selected file path into the command line
|
||||||
# - FIXME: Selected items are attached to the end regardless of cursor position
|
# - FIXME: Selected items are attached to the end regardless of cursor position
|
||||||
if [ $__use_tmux -eq 1 ]; then
|
if [ $__use_bind_x -eq 1 ]; then
|
||||||
bind '"\C-t": "\e$a \eddi$(__fzf_select_tmux__)\C-x\C-e\e0P$xa"'
|
bind -x '"\C-t": "fzf-file-widget"'
|
||||||
|
elif [ $__use_tmux -eq 1 ]; then
|
||||||
|
bind '"\C-t": "\C-x\C-a$a \C-x\C-addi$(__fzf_select_tmux__)\C-x\C-e\C-x\C-a0P$xa"'
|
||||||
else
|
else
|
||||||
bind '"\C-t": "\e$a \eddi$(__fzf_select__)\C-x\C-e\e0Px$a \C-x\C-r\exa "'
|
bind '"\C-t": "\C-x\C-a$a \C-x\C-addi$(__fzf_select__)\C-x\C-e\C-x\C-a0Px$a \C-x\C-r\C-x\C-axa "'
|
||||||
fi
|
fi
|
||||||
bind -m vi-command '"\C-t": "i\C-t"'
|
bind -m vi-command '"\C-t": "i\C-t"'
|
||||||
|
|
||||||
# CTRL-R - Paste the selected command from history into the command line
|
# CTRL-R - Paste the selected command from history into the command line
|
||||||
bind '"\C-r": "\eddi$(__fzf_history__)\C-x\C-e\C-x^\e$a\C-x\C-r"'
|
bind '"\C-r": "\C-x\C-addi$(__fzf_history__)\C-x\C-e\C-x^\C-x\C-a$a\C-x\C-r"'
|
||||||
bind -m vi-command '"\C-r": "i\C-r"'
|
bind -m vi-command '"\C-r": "i\C-r"'
|
||||||
|
|
||||||
# ALT-C - cd into the selected directory
|
# ALT-C - cd into the selected directory
|
||||||
bind '"\ec": "\eddi$(__fzf_cd__)\C-x\C-e\C-x\C-r\C-m"'
|
bind '"\ec": "\C-x\C-addi$(__fzf_cd__)\C-x\C-e\C-x\C-r\C-m"'
|
||||||
bind -m vi-command '"\ec": "i\ec"'
|
bind -m vi-command '"\ec": "ddi$(__fzf_cd__)\C-x\C-e\C-x\C-r\C-m"'
|
||||||
fi
|
fi
|
||||||
|
|
||||||
unset __use_tmux
|
unset -v __use_tmux __use_bind_x
|
||||||
|
|
||||||
fi
|
fi
|
||||||
|
@@ -13,27 +13,31 @@ function fzf_key_bindings
|
|||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
function __fzf_ctrl_t
|
function fzf-file-widget
|
||||||
command find -L . \( -path '*/\.*' -o -fstype 'dev' -o -fstype 'proc' \) -prune \
|
set -q FZF_CTRL_T_COMMAND; or set -l FZF_CTRL_T_COMMAND "
|
||||||
|
command find -L . \\( -path '*/\\.*' -o -fstype 'dev' -o -fstype 'proc' \\) -prune \
|
||||||
-o -type f -print \
|
-o -type f -print \
|
||||||
-o -type d -print \
|
-o -type d -print \
|
||||||
-o -type l -print 2> /dev/null | sed 1d | cut -b3- | eval (__fzfcmd) -m > $TMPDIR/fzf.result
|
-o -type l -print 2> /dev/null | sed 1d | cut -b3-"
|
||||||
and commandline -i (cat $TMPDIR/fzf.result | __fzf_escape)
|
eval "$FZF_CTRL_T_COMMAND | "(__fzfcmd)" -m $FZF_CTRL_T_OPTS > $TMPDIR/fzf.result"
|
||||||
|
and for i in (seq 20); commandline -i (cat $TMPDIR/fzf.result | __fzf_escape) 2> /dev/null; and break; sleep 0.1; end
|
||||||
commandline -f repaint
|
commandline -f repaint
|
||||||
rm -f $TMPDIR/fzf.result
|
rm -f $TMPDIR/fzf.result
|
||||||
end
|
end
|
||||||
|
|
||||||
function __fzf_ctrl_r
|
function fzf-history-widget
|
||||||
history | eval (__fzfcmd) +s +m --tiebreak=index --toggle-sort=ctrl-r > $TMPDIR/fzf.result
|
history | eval (__fzfcmd) +s +m --tiebreak=index --toggle-sort=ctrl-r $FZF_CTRL_R_OPTS > $TMPDIR/fzf.result
|
||||||
and commandline (cat $TMPDIR/fzf.result)
|
and commandline (cat $TMPDIR/fzf.result)
|
||||||
commandline -f repaint
|
commandline -f repaint
|
||||||
rm -f $TMPDIR/fzf.result
|
rm -f $TMPDIR/fzf.result
|
||||||
end
|
end
|
||||||
|
|
||||||
function __fzf_alt_c
|
function fzf-cd-widget
|
||||||
|
set -q FZF_ALT_C_COMMAND; or set -l FZF_ALT_C_COMMAND "
|
||||||
|
command find -L . \\( -path '*/\\.*' -o -fstype 'dev' -o -fstype 'proc' \\) -prune \
|
||||||
|
-o -type d -print 2> /dev/null | sed 1d | cut -b3-"
|
||||||
# Fish hangs if the command before pipe redirects (2> /dev/null)
|
# Fish hangs if the command before pipe redirects (2> /dev/null)
|
||||||
command find -L . \( -path '*/\.*' -o -fstype 'dev' -o -fstype 'proc' \) \
|
eval "$FZF_ALT_C_COMMAND | "(__fzfcmd)" +m $FZF_ALT_C_OPTS > $TMPDIR/fzf.result"
|
||||||
-prune -o -type d -print 2> /dev/null | sed 1d | cut -b3- | eval (__fzfcmd) +m > $TMPDIR/fzf.result
|
|
||||||
[ (cat $TMPDIR/fzf.result | wc -l) -gt 0 ]
|
[ (cat $TMPDIR/fzf.result | wc -l) -gt 0 ]
|
||||||
and cd (cat $TMPDIR/fzf.result)
|
and cd (cat $TMPDIR/fzf.result)
|
||||||
commandline -f repaint
|
commandline -f repaint
|
||||||
@@ -54,14 +58,14 @@ function fzf_key_bindings
|
|||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
bind \ct '__fzf_ctrl_t'
|
bind \ct fzf-file-widget
|
||||||
bind \cr '__fzf_ctrl_r'
|
bind \cr fzf-history-widget
|
||||||
bind \ec '__fzf_alt_c'
|
bind \ec fzf-cd-widget
|
||||||
|
|
||||||
if bind -M insert > /dev/null 2>&1
|
if bind -M insert > /dev/null 2>&1
|
||||||
bind -M insert \ct '__fzf_ctrl_t'
|
bind -M insert \ct fzf-file-widget
|
||||||
bind -M insert \cr '__fzf_ctrl_r'
|
bind -M insert \cr fzf-history-widget
|
||||||
bind -M insert \ec '__fzf_alt_c'
|
bind -M insert \ec fzf-cd-widget
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
|
@@ -1,54 +1,65 @@
|
|||||||
# Key bindings
|
# Key bindings
|
||||||
# ------------
|
# ------------
|
||||||
|
if [[ $- == *i* ]]; then
|
||||||
|
|
||||||
# CTRL-T - Paste the selected file path(s) into the command line
|
# CTRL-T - Paste the selected file path(s) into the command line
|
||||||
__fsel() {
|
__fsel() {
|
||||||
command find -L . \( -path '*/\.*' -o -fstype 'dev' -o -fstype 'proc' \) -prune \
|
local cmd="${FZF_CTRL_T_COMMAND:-"command find -L . \\( -path '*/\\.*' -o -fstype 'dev' -o -fstype 'proc' \\) -prune \
|
||||||
-o -type f -print \
|
-o -type f -print \
|
||||||
-o -type d -print \
|
-o -type d -print \
|
||||||
-o -type l -print 2> /dev/null | sed 1d | cut -b3- | $(__fzfcmd) -m | while read item; do
|
-o -type l -print 2> /dev/null | sed 1d | cut -b3-"}"
|
||||||
printf '%q ' "$item"
|
setopt localoptions pipefail 2> /dev/null
|
||||||
|
eval "$cmd | $(__fzfcmd) -m $FZF_CTRL_T_OPTS" | while read item; do
|
||||||
|
echo -n "${(q)item} "
|
||||||
done
|
done
|
||||||
|
local ret=$?
|
||||||
echo
|
echo
|
||||||
|
return $ret
|
||||||
}
|
}
|
||||||
|
|
||||||
__fzfcmd() {
|
__fzfcmd() {
|
||||||
[ ${FZF_TMUX:-1} -eq 1 ] && echo "fzf-tmux -d${FZF_TMUX_HEIGHT:-40%}" || echo "fzf"
|
[ ${FZF_TMUX:-1} -eq 1 ] && echo "fzf-tmux -d${FZF_TMUX_HEIGHT:-40%}" || echo "fzf"
|
||||||
}
|
}
|
||||||
|
|
||||||
if [[ $- =~ i ]]; then
|
|
||||||
|
|
||||||
fzf-file-widget() {
|
fzf-file-widget() {
|
||||||
LBUFFER="${LBUFFER}$(__fsel)"
|
LBUFFER="${LBUFFER}$(__fsel)"
|
||||||
|
local ret=$?
|
||||||
zle redisplay
|
zle redisplay
|
||||||
|
typeset -f zle-line-init >/dev/null && zle zle-line-init
|
||||||
|
return $ret
|
||||||
}
|
}
|
||||||
zle -N fzf-file-widget
|
zle -N fzf-file-widget
|
||||||
bindkey '^T' fzf-file-widget
|
bindkey '^T' fzf-file-widget
|
||||||
|
|
||||||
# ALT-C - cd into the selected directory
|
# ALT-C - cd into the selected directory
|
||||||
fzf-cd-widget() {
|
fzf-cd-widget() {
|
||||||
cd "${$(command find -L . \( -path '*/\.*' -o -fstype 'dev' -o -fstype 'proc' \) -prune \
|
local cmd="${FZF_ALT_C_COMMAND:-"command find -L . \\( -path '*/\\.*' -o -fstype 'dev' -o -fstype 'proc' \\) -prune \
|
||||||
-o -type d -print 2> /dev/null | sed 1d | cut -b3- | $(__fzfcmd) +m):-.}"
|
-o -type d -print 2> /dev/null | sed 1d | cut -b3-"}"
|
||||||
|
setopt localoptions pipefail 2> /dev/null
|
||||||
|
cd "${$(eval "$cmd | $(__fzfcmd) +m $FZF_ALT_C_OPTS"):-.}"
|
||||||
|
local ret=$?
|
||||||
zle reset-prompt
|
zle reset-prompt
|
||||||
|
typeset -f zle-line-init >/dev/null && zle zle-line-init
|
||||||
|
return $ret
|
||||||
}
|
}
|
||||||
zle -N fzf-cd-widget
|
zle -N fzf-cd-widget
|
||||||
bindkey '\ec' fzf-cd-widget
|
bindkey '\ec' fzf-cd-widget
|
||||||
|
|
||||||
# CTRL-R - Paste the selected command from history into the command line
|
# CTRL-R - Paste the selected command from history into the command line
|
||||||
fzf-history-widget() {
|
fzf-history-widget() {
|
||||||
local selected restore_no_bang_hist
|
local selected num
|
||||||
if selected=$(fc -l 1 | $(__fzfcmd) +s --tac +m -n2..,.. --tiebreak=index --toggle-sort=ctrl-r -q "$LBUFFER"); then
|
setopt localoptions noglobsubst pipefail 2> /dev/null
|
||||||
num=$(echo "$selected" | head -n1 | awk '{print $1}' | sed 's/[^0-9]//g')
|
selected=( $(fc -l 1 | eval "$(__fzfcmd) +s --tac +m -n2..,.. --tiebreak=index --toggle-sort=ctrl-r $FZF_CTRL_R_OPTS -q ${(q)LBUFFER}") )
|
||||||
|
local ret=$?
|
||||||
|
if [ -n "$selected" ]; then
|
||||||
|
num=$selected[1]
|
||||||
if [ -n "$num" ]; then
|
if [ -n "$num" ]; then
|
||||||
LBUFFER=!$num
|
zle vi-fetch-history -n $num
|
||||||
if setopt | grep nobanghist > /dev/null; then
|
|
||||||
restore_no_bang_hist=1
|
|
||||||
unsetopt no_bang_hist
|
|
||||||
fi
|
|
||||||
zle expand-history
|
|
||||||
[ -n "$restore_no_bang_hist" ] && setopt no_bang_hist
|
|
||||||
fi
|
fi
|
||||||
fi
|
fi
|
||||||
zle redisplay
|
zle redisplay
|
||||||
|
typeset -f zle-line-init >/dev/null && zle zle-line-init
|
||||||
|
return $ret
|
||||||
}
|
}
|
||||||
zle -N fzf-history-widget
|
zle -N fzf-history-widget
|
||||||
bindkey '^R' fzf-history-widget
|
bindkey '^R' fzf-history-widget
|
||||||
|
src/Dockerfile.android (new file, 40 lines)
@@ -0,0 +1,40 @@
FROM ubuntu:14.04
MAINTAINER Junegunn Choi <junegunn.c@gmail.com>

# apt-get
RUN apt-get update && apt-get -y upgrade && \
    apt-get install -y --force-yes git curl build-essential

# Install Go 1.4
RUN cd / && curl \
    https://storage.googleapis.com/golang/go1.4.2.linux-amd64.tar.gz | \
    tar -xz && mv go go1.4 && \
    sed -i 's@#define PTHREAD_KEYS_MAX 128@@' /go1.4/src/runtime/cgo/gcc_android_arm.c

ENV GOROOT /go1.4
ENV PATH /go1.4/bin:$PATH

RUN cd / && \
    curl -O http://dl.google.com/android/ndk/android-ndk-r10e-linux-x86_64.bin && \
    chmod 755 /android-ndk* && /android-ndk-r10e-linux-x86_64.bin && \
    mv android-ndk-r10e /android-ndk

RUN cd /android-ndk && bash ./build/tools/make-standalone-toolchain.sh --platform=android-21 --install-dir=/ndk --arch=arm

ENV NDK_CC /ndk/bin/arm-linux-androideabi-gcc

RUN cd $GOROOT/src && \
    CC_FOR_TARGET=$NDK_CC GOOS=android GOARCH=arm GOARM=7 ./make.bash

RUN cd / && curl \
    http://ftp.gnu.org/gnu/ncurses/ncurses-5.9.tar.gz | \
    tar -xz && cd /ncurses-5.9 && \
    ./configure CC=$NDK_CC CFLAGS="-fPIE -march=armv7-a -mfpu=neon -mhard-float -Wl,--no-warn-mismatch" LDFLAGS="-march=armv7-a -Wl,--no-warn-mismatch" --host=arm-linux --enable-overwrite --enable-const --without-cxx-binding --without-shared --without-debug --enable-widec --enable-ext-colors --enable-ext-mouse --enable-pc-files --with-pkg-config-libdir=$PKG_CONFIG_LIBDIR --without-manpages --without-ada --disable-shared --without-tests --prefix=/ndk/sysroot/usr --with-default-terminfo-dirs=/usr/share/terminfo --with-terminfo-dirs=/usr/share/terminfo ac_cv_header_locale_h=n ac_cv_func_getpwent=no ac_cv_func_getpwnam=no ac_cv_func_getpwuid=no && \
    sed -i 's@#define HAVE_LOCALE_H 1@/* #undef HAVE_LOCALE_H */@' include/ncurses_cfg.h && \
    make && \
    sed -i '0,/echo.*/{s/echo.*/exit 0/}' misc/run_tic.sh && \
    make install && \
    mv /ndk/sysroot/usr/lib/libncursesw.a /ndk/sysroot/usr/lib/libncurses.a

# Default CMD
CMD cd /fzf/src && /bin/bash
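This image is driven by the new Makefile targets further down in this change set; assuming Docker is available on the host, the Android binary should be producible with something like:

```sh
# From the fzf repository; 'make android' depends on 'docker-android',
# so the sandbox image is built automatically before the cross-compilation.
cd src
make android
```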
@@ -2,6 +2,7 @@ FROM base/archlinux:2014.07.03
 MAINTAINER Junegunn Choi <junegunn.c@gmail.com>

 # apt-get
+RUN pacman-key --populate archlinux && pacman-key --refresh-keys
 RUN pacman-db-upgrade && pacman -Syu --noconfirm base-devel git

 # Install Go 1.4
@@ -9,7 +10,6 @@ RUN cd / && curl \
     https://storage.googleapis.com/golang/go1.4.2.linux-amd64.tar.gz | \
     tar -xz && mv go go1.4

-ENV GOPATH /go
 ENV GOROOT /go1.4
 ENV PATH /go1.4/bin:$PATH

@@ -19,9 +19,6 @@ RUN echo '[multilib]' >> /etc/pacman.conf && \
     pacman-db-upgrade && yes | pacman -Sy gcc-multilib lib32-ncurses && \
     cd $GOROOT/src && GOARCH=386 ./make.bash

-# Volume
-VOLUME /go

 # Default CMD
-CMD cd /go/src/github.com/junegunn/fzf/src && /bin/bash
+CMD cd /fzf/src && /bin/bash
@@ -1,21 +1,32 @@
-FROM centos:centos7
+FROM centos:centos6
 MAINTAINER Junegunn Choi <junegunn.c@gmail.com>

 # yum
-RUN yum install -y git gcc make tar ncurses-devel
+RUN yum install -y git gcc make tar glibc-devel glibc-devel.i686 \
+    ncurses-devel ncurses-static ncurses-devel.i686 \
+    gpm-devel gpm-static libgcc.i686

 # Install Go 1.4
 RUN cd / && curl \
     https://storage.googleapis.com/golang/go1.4.2.linux-amd64.tar.gz | \
     tar -xz && mv go go1.4

-ENV GOPATH /go
-ENV GOROOT /go1.4
-ENV PATH /go1.4/bin:$PATH
+# Install Go 1.7
+RUN cd / && curl \
+    https://storage.googleapis.com/golang/go1.7.linux-amd64.tar.gz | \
+    tar -xz && mv go go1.7

-# Volume
-VOLUME /go
+# Install RPMs for building static 32-bit binary
+RUN curl ftp://ftp.pbone.net/mirror/ftp.centos.org/6.8/os/i386/Packages/ncurses-static-5.7-4.20090207.el6.i686.rpm -o rpm && rpm -i rpm && \
+    curl ftp://ftp.pbone.net/mirror/ftp.centos.org/6.8/os/i386/Packages/gpm-static-1.20.6-12.el6.i686.rpm -o rpm && rpm -i rpm
+
+ENV GOROOT_BOOTSTRAP /go1.4
+ENV GOROOT /go1.7
+ENV PATH /go1.7/bin:$PATH
+
+# For i386 build
+RUN cd $GOROOT/src && GOARCH=386 ./make.bash

 # Default CMD
-CMD cd /go/src/github.com/junegunn/fzf/src && /bin/bash
+CMD cd /fzf/src && /bin/bash
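This CentOS 6 image backs the `linux` Makefile target, which builds the static release binaries inside the container. Assuming Docker is installed:

```sh
# Roughly: docker run ... junegunn/centos-sandbox 'make TAGS=static release'
cd src
make linux
```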
@@ -3,14 +3,13 @@ MAINTAINER Junegunn Choi <junegunn.c@gmail.com>

 # apt-get
 RUN apt-get update && apt-get -y upgrade && \
-    apt-get install -y --force-yes git curl build-essential libncurses-dev
+    apt-get install -y --force-yes git curl build-essential libncurses-dev libgpm-dev

 # Install Go 1.4
 RUN cd / && curl \
     https://storage.googleapis.com/golang/go1.4.2.linux-amd64.tar.gz | \
     tar -xz && mv go go1.4

-ENV GOPATH /go
 ENV GOROOT /go1.4
 ENV PATH /go1.4/bin:$PATH

@@ -18,9 +17,6 @@ ENV PATH /go1.4/bin:$PATH
 RUN apt-get install -y lib32ncurses5-dev && \
     cd $GOROOT/src && GOARCH=386 ./make.bash

-# Volume
-VOLUME /go

 # Default CMD
-CMD cd /go/src/github.com/junegunn/fzf/src && /bin/bash
+CMD cd /fzf/src && /bin/bash
@@ -1,6 +1,6 @@
 The MIT License (MIT)

-Copyright (c) 2015 Junegunn Choi
+Copyright (c) 2016 Junegunn Choi

 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
src/Makefile (126 lines)
@@ -1,74 +1,116 @@
|
|||||||
ifndef GOPATH
|
ifndef GOOS
|
||||||
$(error GOPATH is undefined)
|
|
||||||
endif
|
|
||||||
|
|
||||||
UNAME_S := $(shell uname -s)
|
UNAME_S := $(shell uname -s)
|
||||||
ifeq ($(UNAME_S),Darwin)
|
ifeq ($(UNAME_S),Darwin)
|
||||||
GOOS := darwin
|
GOOS := darwin
|
||||||
else ifeq ($(UNAME_S),Linux)
|
else ifeq ($(UNAME_S),Linux)
|
||||||
GOOS := linux
|
GOOS := linux
|
||||||
endif
|
endif
|
||||||
|
|
||||||
ifneq ($(shell uname -m),x86_64)
|
|
||||||
$(error "Build on $(UNAME_M) is not supported, yet.")
|
|
||||||
endif
|
endif
|
||||||
|
|
||||||
SOURCES := $(wildcard *.go */*.go)
|
SOURCES := $(wildcard *.go */*.go)
|
||||||
BINDIR := ../bin
|
ROOTDIR := $(shell dirname $(realpath $(lastword $(MAKEFILE_LIST))))
|
||||||
|
BINDIR := $(shell dirname $(ROOTDIR))/bin
|
||||||
|
GOPATH := $(shell dirname $(ROOTDIR))/gopath
|
||||||
|
SRCDIR := $(GOPATH)/src/github.com/junegunn/fzf/src
|
||||||
|
DOCKEROPTS := -i -t -v $(ROOTDIR):/fzf/src
|
||||||
BINARY32 := fzf-$(GOOS)_386
|
BINARY32 := fzf-$(GOOS)_386
|
||||||
BINARY64 := fzf-$(GOOS)_amd64
|
BINARY64 := fzf-$(GOOS)_amd64
|
||||||
VERSION = $(shell fzf/$(BINARY64) --version)
|
BINARYARM7 := fzf-$(GOOS)_arm7
|
||||||
RELEASE32 = fzf-$(VERSION)-$(GOOS)_386
|
VERSION := $(shell awk -F= '/version =/ {print $$2}' constants.go | tr -d "\" ")
|
||||||
RELEASE64 = fzf-$(VERSION)-$(GOOS)_amd64
|
RELEASE32 := fzf-$(VERSION)-$(GOOS)_386
|
||||||
|
RELEASE64 := fzf-$(VERSION)-$(GOOS)_amd64
|
||||||
|
RELEASEARM7 := fzf-$(VERSION)-$(GOOS)_arm7
|
||||||
|
export GOPATH
|
||||||
|
|
||||||
all: release
|
UNAME_M := $(shell uname -m)
|
||||||
|
ifeq ($(UNAME_M),x86_64)
|
||||||
|
BINARY := $(BINARY64)
|
||||||
|
else ifeq ($(UNAME_M),i686)
|
||||||
|
BINARY := $(BINARY32)
|
||||||
|
else
|
||||||
|
$(error "Build on $(UNAME_M) is not supported, yet.")
|
||||||
|
endif
|
||||||
|
|
||||||
release: build
|
all: fzf/$(BINARY)
|
||||||
cd fzf && \
|
|
||||||
cp $(BINARY32) $(RELEASE32) && tar -czf $(RELEASE32).tgz $(RELEASE32) && \
|
|
||||||
cp $(BINARY64) $(RELEASE64) && tar -czf $(RELEASE64).tgz $(RELEASE64) && \
|
|
||||||
rm $(RELEASE32) $(RELEASE64)
|
|
||||||
|
|
||||||
build: test fzf/$(BINARY32) fzf/$(BINARY64)
|
release: test fzf/$(BINARY32) fzf/$(BINARY64)
|
||||||
|
-cd fzf && cp $(BINARY32) $(RELEASE32) && tar -czf $(RELEASE32).tgz $(RELEASE32)
|
||||||
|
cd fzf && cp $(BINARY64) $(RELEASE64) && tar -czf $(RELEASE64).tgz $(RELEASE64) && \
|
||||||
|
rm -f $(RELEASE32) $(RELEASE64)
|
||||||
|
|
||||||
test:
|
$(SRCDIR):
|
||||||
-    go get
-    go test -v ./...
+    mkdir -p $(shell dirname $(SRCDIR))
+    ln -s $(ROOTDIR) $(SRCDIR)
+
+deps: $(SRCDIR) $(SOURCES)
+    cd $(SRCDIR) && go get
+
+android-build: $(SRCDIR)
+    cd $(SRCDIR) && GOARCH=arm GOARM=7 CGO_ENABLED=1 go get
+    cd $(SRCDIR)/fzf && GOARCH=arm GOARM=7 CGO_ENABLED=1 go build -a -ldflags="-w -extldflags=-pie" -o $(BINARYARM7)
+    cd $(SRCDIR)/fzf && cp $(BINARYARM7) $(RELEASEARM7) && tar -czf $(RELEASEARM7).tgz $(RELEASEARM7) && \
+        rm -f $(RELEASEARM7)
+
+test: deps
+    SHELL=/bin/sh go test -v ./...

 install: $(BINDIR)/fzf

 uninstall:
-    rm -f $(BINDIR)/fzf $(BINDIR)/$(BINARY64)
+    rm -f $(BINDIR)/fzf $(BINDIR)/$(BINARY)

 clean:
-    cd fzf && rm -f $(BINARY32) $(BINARY64) $(RELEASE32).tgz $(RELEASE64).tgz
+    cd fzf && rm -f fzf-*

-fzf/$(BINARY32): $(SOURCES)
-    cd fzf && GOARCH=386 CGO_ENABLED=1 go build -o $(BINARY32)
+fzf/$(BINARY32): deps
+    cd fzf && GOARCH=386 CGO_ENABLED=1 go build -a -ldflags -w -tags "$(TAGS)" -o $(BINARY32)

-fzf/$(BINARY64): $(SOURCES)
-    cd fzf && go build -o $(BINARY64)
+fzf/$(BINARY64): deps
+    cd fzf && go build -a -ldflags -w -tags "$(TAGS)" -o $(BINARY64)

-$(BINDIR)/fzf: fzf/$(BINARY64) | $(BINDIR)
-    cp -f fzf/$(BINARY64) $(BINDIR)
-    cd $(BINDIR) && ln -sf $(BINARY64) fzf
+$(BINDIR)/fzf: fzf/$(BINARY) | $(BINDIR)
+    cp -f fzf/$(BINARY) $(BINDIR)
+    cd $(BINDIR) && ln -sf $(BINARY) fzf

 $(BINDIR):
     mkdir -p $@

-# Linux distribution to build fzf on
-DISTRO := arch
+docker-arch:
+    docker build -t junegunn/arch-sandbox - < Dockerfile.arch

-docker:
-    docker build -t junegunn/$(DISTRO)-sandbox - < Dockerfile.$(DISTRO)
+docker-ubuntu:
+    docker build -t junegunn/ubuntu-sandbox - < Dockerfile.ubuntu

-linux: docker
-    docker run -i -t -v $(GOPATH):/go junegunn/$(DISTRO)-sandbox \
-        /bin/bash -ci 'cd /go/src/github.com/junegunn/fzf/src; make'
+docker-centos:
+    docker build -t junegunn/centos-sandbox - < Dockerfile.centos

-$(DISTRO): docker
-    docker run -i -t -v $(GOPATH):/go junegunn/$(DISTRO)-sandbox \
-        sh -c 'cd /go/src/github.com/junegunn/fzf/src; /bin/bash'
+docker-android:
+    docker build -t junegunn/android-sandbox - < Dockerfile.android

-.PHONY: all build release test install uninstall clean docker linux $(DISTRO)
+arch: docker-arch
+    docker run $(DOCKEROPTS) junegunn/$@-sandbox \
+        sh -c 'cd /fzf/src; /bin/bash'
+
+ubuntu: docker-ubuntu
+    docker run $(DOCKEROPTS) junegunn/$@-sandbox \
+        sh -c 'cd /fzf/src; /bin/bash'
+
+centos: docker-centos
+    docker run $(DOCKEROPTS) junegunn/$@-sandbox \
+        sh -c 'cd /fzf/src; /bin/bash'
+
+linux: docker-centos
+    docker run $(DOCKEROPTS) junegunn/centos-sandbox \
+        /bin/bash -ci 'cd /fzf/src; make TAGS=static release'
+
+ubuntu-android: docker-android
+    docker run $(DOCKEROPTS) junegunn/android-sandbox \
+        sh -c 'cd /fzf/src; /bin/bash'
+
+android: docker-android
+    docker run $(DOCKEROPTS) junegunn/android-sandbox \
+        /bin/bash -ci 'cd /fzf/src; GOOS=android make android-build'
+
+.PHONY: all deps release test install uninstall clean \
+    linux arch ubuntu centos docker-arch docker-ubuntu docker-centos \
+    android-build docker-android ubuntu-android android
@@ -47,39 +47,12 @@ proportional to the number of CPU cores. On my MacBook Pro (Mid 2012), the new
 version was shown to be an order of magnitude faster on certain cases. It also
 starts much faster though the difference may not be noticeable.

-Differences with Ruby version
------------------------------
-
-The Go version is designed to be perfectly compatible with the previous Ruby
-version. The only behavioral difference is that the new version ignores the
-numeric argument to `--sort=N` option and always sorts the result regardless
-of the number of matches. The value was introduced to limit the response time
-of the query, but the Go version is blazingly fast (almost instant response
-even for 1M+ items) so I decided that it's no longer required.
-
-System requirements
--------------------
-
-Currently, prebuilt binaries are provided only for OS X and Linux. The install
-script will fall back to the legacy Ruby version on the other systems, but if
-you have Go 1.4 installed, you can try building it yourself.
-
-However, as pointed out in [golang.org/doc/install][req], the Go version may
-not run on CentOS/RHEL 5.x, and if that's the case, the install script will
-choose the Ruby version instead.
-
-The Go version depends on [ncurses][ncurses] and some Unix system calls, so it
-shouldn't run natively on Windows at the moment. But it won't be impossible to
-support Windows by falling back to a cross-platform alternative such as
-[termbox][termbox] only on Windows. If you're interested in making fzf work on
-Windows, please let me know.
-
 Build
 -----

 ```sh
 # Build fzf executables and tarballs
-make
+make release

 # Install the executable to ../bin directory
 make install
@@ -88,16 +61,22 @@ make install
 make linux
 ```

-Contribution
-------------
+Test
+----

-For the time being, I will not add or accept any new features until we can be
-sure that the implementation is stable and we have a sufficient number of test
-cases. However, fixes for obvious bugs and new test cases are welcome.
+Unit tests can be run with `make test`. Integration tests are written in Ruby
+script that should be run on tmux.

-I also care much about the performance of the implementation, so please make
-sure that your change does not result in performance regression. And please be
-noted that we don't have a quantitative measure of the performance yet.
+```sh
+# Unit tests
+make test
+
+# Install the executable to ../bin directory
+make install
+
+# Integration tests
+ruby ../test/test_go.rb
+```

 Third-party libraries used
 --------------------------
648 src/algo/algo.go
@@ -1,39 +1,506 @@
 package algo

+/*
+
+Algorithm
+---------
+
+FuzzyMatchV1 finds the first "fuzzy" occurrence of the pattern within the given
+text in O(n) time where n is the length of the text. Once the position of the
+last character is located, it traverses backwards to see if there's a shorter
+substring that matches the pattern.
+
+  a_____b___abc__  To find "abc"
+  *-----*-----*>   1. Forward scan
+           <***    2. Backward scan
+
+The algorithm is simple and fast, but as it only sees the first occurrence,
+it is not guaranteed to find the occurrence with the highest score.
+
+  a_____b__c__abc
+  *-----*--*  ***
+
+FuzzyMatchV2 implements a modified version of Smith-Waterman algorithm to find
+the optimal solution (highest score) according to the scoring criteria. Unlike
+the original algorithm, omission or mismatch of a character in the pattern is
+not allowed.
+
+Performance
+-----------
+
+The new V2 algorithm is slower than V1 as it examines all occurrences of the
+pattern instead of stopping immediately after finding the first one. The time
+complexity of the algorithm is O(nm) if a match is found and O(n) otherwise
+where n is the length of the item and m is the length of the pattern. Thus, the
+performance overhead may not be noticeable for a query with high selectivity.
+However, if the performance is more important than the quality of the result,
+you can still choose v1 algorithm with --algo=v1.
+
+Scoring criteria
+----------------
+
+- We prefer matches at special positions, such as the start of a word, or
+  uppercase character in camelCase words.
+
+- That is, we prefer an occurrence of the pattern with more characters
+  matching at special positions, even if the total match length is longer.
+    e.g. "fuzzyfinder" vs. "fuzzy-finder" on "ff"
+                            ````````````
+- Also, if the first character in the pattern appears at one of the special
+  positions, the bonus point for the position is multiplied by a constant
+  as it is extremely likely that the first character in the typed pattern
+  has more significance than the rest.
+    e.g. "fo-bar" vs. "foob-r" on "br"
+          ``````
+- But since fzf is still a fuzzy finder, not an acronym finder, we should also
+  consider the total length of the matched substring. This is why we have the
+  gap penalty. The gap penalty increases as the length of the gap (distance
+  between the matching characters) increases, so the effect of the bonus is
+  eventually cancelled at some point.
+    e.g. "fuzzyfinder" vs. "fuzzy-blurry-finder" on "ff"
+          ```````````
+- Consequently, it is crucial to find the right balance between the bonus
+  and the gap penalty. The parameters were chosen that the bonus is cancelled
+  when the gap size increases beyond 8 characters.
+
+- The bonus mechanism can have the undesirable side effect where consecutive
+  matches are ranked lower than the ones with gaps.
+    e.g. "foobar" vs. "foo-bar" on "foob"
+                       ```````
+- To correct this anomaly, we also give extra bonus point to each character
+  in a consecutive matching chunk.
+    e.g. "foobar" vs. "foo-bar" on "foob"
+          ``````
+- The amount of consecutive bonus is primarily determined by the bonus of the
+  first character in the chunk.
+    e.g. "foobar" vs. "out-of-bound" on "oob"
+                       ````````````
+*/
+
 import (
+    "fmt"
+    "strings"
     "unicode"

     "github.com/junegunn/fzf/src/util"
 )

-/*
- * String matching algorithms here do not use strings.ToLower to avoid
- * performance penalty. And they assume pattern runes are given in lowercase
- * letters when caseSensitive is false.
- *
- * In short: They try to do as little work as possible.
- */
+var DEBUG bool

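To make the scoring criteria in the comment above concrete, here is a rough back-of-the-envelope calculation in Go. It hard-codes the constant values that appear further down in this file (scoreMatch = 16, bonusBoundary = 8, scoreGapStart = -3, scoreGapExtention = -1, bonusFirstCharMultiplier = 2) and tallies the score of the pattern "ff" against "fuzzyfinder" and "fuzzy-finder" by hand; it is an illustrative sketch of the arithmetic, not a call into the actual matcher.

```go
package main

import "fmt"

// Constants mirroring the values defined in algo.go (repeated here for illustration).
const (
    scoreMatch               = 16
    scoreGapStart            = -3
    scoreGapExtention        = -1
    bonusBoundary            = scoreMatch / 2
    bonusFirstCharMultiplier = 2
)

// gap returns the penalty for a gap of n non-matching characters:
// the first one opens the gap, the rest extend it.
func gap(n int) int {
    if n == 0 {
        return 0
    }
    return scoreGapStart + (n-1)*scoreGapExtention
}

func main() {
    // "ff" on "fuzzyfinder": only the first 'f' (start of string) sits at a
    // special position; the second 'f' follows a lowercase letter.
    plain := (scoreMatch + bonusBoundary*bonusFirstCharMultiplier) + // first 'f'
        gap(len("uzzy")) + // 4-character gap
        scoreMatch // second 'f', no bonus

    // "ff" on "fuzzy-finder": the gap is one character longer, but the second
    // 'f' starts a word after '-', so it earns a boundary bonus.
    dashed := (scoreMatch + bonusBoundary*bonusFirstCharMultiplier) +
        gap(len("uzzy-")) + // 5-character gap
        (scoreMatch + bonusBoundary) // second 'f' at a word boundary

    fmt.Println(plain, dashed) // 42 49: the hyphenated candidate ranks higher
}
```

The boundary bonus on the second 'f' more than offsets the one-character-longer gap, which is exactly the trade-off the gap penalty is meant to keep in check.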
// FuzzyMatch performs fuzzy-match
|
func indexAt(index int, max int, forward bool) int {
|
||||||
func FuzzyMatch(caseSensitive bool, runes *[]rune, pattern []rune) (int, int) {
|
if forward {
|
||||||
if len(pattern) == 0 {
|
return index
|
||||||
return 0, 0
|
}
|
||||||
|
return max - index - 1
|
||||||
|
}
|
||||||
|
|
||||||
|
// Result contains the results of running a match function.
|
||||||
|
type Result struct {
|
||||||
|
// TODO int32 should suffice
|
||||||
|
Start int
|
||||||
|
End int
|
||||||
|
Score int
|
||||||
|
}
|
||||||
|
|
||||||
|
const (
|
||||||
|
scoreMatch = 16
|
||||||
|
scoreGapStart = -3
|
||||||
|
scoreGapExtention = -1
|
||||||
|
|
||||||
|
// We prefer matches at the beginning of a word, but the bonus should not be
|
||||||
|
// too great to prevent the longer acronym matches from always winning over
|
||||||
|
// shorter fuzzy matches. The bonus point here was specifically chosen that
|
||||||
|
// the bonus is cancelled when the gap between the acronyms grows over
|
||||||
|
// 8 characters, which is approximately the average length of the words found
|
||||||
|
// in web2 dictionary and my file system.
|
||||||
|
bonusBoundary = scoreMatch / 2
|
||||||
|
|
||||||
|
// Although bonus point for non-word characters is non-contextual, we need it
|
||||||
|
// for computing bonus points for consecutive chunks starting with a non-word
|
||||||
|
// character.
|
||||||
|
bonusNonWord = scoreMatch / 2
|
||||||
|
|
||||||
|
// Edge-triggered bonus for matches in camelCase words.
|
||||||
|
// Compared to word-boundary case, they don't accompany single-character gaps
|
||||||
|
// (e.g. FooBar vs. foo-bar), so we deduct bonus point accordingly.
|
||||||
|
bonusCamel123 = bonusBoundary + scoreGapExtention
|
||||||
|
|
||||||
|
// Minimum bonus point given to characters in consecutive chunks.
|
||||||
|
// Note that bonus points for consecutive matches shouldn't have needed if we
|
||||||
|
// used fixed match score as in the original algorithm.
|
||||||
|
bonusConsecutive = -(scoreGapStart + scoreGapExtention)
|
||||||
|
|
||||||
|
// The first character in the typed pattern usually has more significance
|
||||||
|
// than the rest so it's important that it appears at special positions where
|
||||||
|
// bonus points are given. e.g. "to-go" vs. "ongoing" on "og" or on "ogo".
|
||||||
|
// The amount of the extra bonus should be limited so that the gap penalty is
|
||||||
|
// still respected.
|
||||||
|
bonusFirstCharMultiplier = 2
|
||||||
|
)
|
||||||
|
|
||||||
|
type charClass int
|
||||||
|
|
||||||
|
const (
|
||||||
|
charNonWord charClass = iota
|
||||||
|
charLower
|
||||||
|
charUpper
|
||||||
|
charLetter
|
||||||
|
charNumber
|
||||||
|
)
|
||||||
|
|
||||||
|
func posArray(withPos bool, len int) *[]int {
|
||||||
|
if withPos {
|
||||||
|
pos := make([]int, 0, len)
|
||||||
|
return &pos
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func alloc16(offset int, slab *util.Slab, size int, clear bool) (int, []int16) {
|
||||||
|
if slab != nil && cap(slab.I16) > offset+size {
|
||||||
|
slice := slab.I16[offset : offset+size]
|
||||||
|
if clear {
|
||||||
|
for idx := range slice {
|
||||||
|
slice[idx] = 0
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return offset + size, slice
|
||||||
|
}
|
||||||
|
return offset, make([]int16, size)
|
||||||
|
}
|
||||||
|
|
||||||
|
func alloc32(offset int, slab *util.Slab, size int, clear bool) (int, []int32) {
|
||||||
|
if slab != nil && cap(slab.I32) > offset+size {
|
||||||
|
slice := slab.I32[offset : offset+size]
|
||||||
|
if clear {
|
||||||
|
for idx := range slice {
|
||||||
|
slice[idx] = 0
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return offset + size, slice
|
||||||
|
}
|
||||||
|
return offset, make([]int32, size)
|
||||||
|
}
|
||||||
|
|
||||||
|
func charClassOfAscii(char rune) charClass {
|
||||||
|
if char >= 'a' && char <= 'z' {
|
||||||
|
return charLower
|
||||||
|
} else if char >= 'A' && char <= 'Z' {
|
||||||
|
return charUpper
|
||||||
|
} else if char >= '0' && char <= '9' {
|
||||||
|
return charNumber
|
||||||
|
}
|
||||||
|
return charNonWord
|
||||||
|
}
|
||||||
|
|
||||||
|
func charClassOfNonAscii(char rune) charClass {
|
||||||
|
if unicode.IsLower(char) {
|
||||||
|
return charLower
|
||||||
|
} else if unicode.IsUpper(char) {
|
||||||
|
return charUpper
|
||||||
|
} else if unicode.IsNumber(char) {
|
||||||
|
return charNumber
|
||||||
|
} else if unicode.IsLetter(char) {
|
||||||
|
return charLetter
|
||||||
|
}
|
||||||
|
return charNonWord
|
||||||
|
}
|
||||||
|
|
||||||
|
func charClassOf(char rune) charClass {
|
||||||
|
if char <= unicode.MaxASCII {
|
||||||
|
return charClassOfAscii(char)
|
||||||
|
}
|
||||||
|
return charClassOfNonAscii(char)
|
||||||
|
}
|
||||||
|
|
||||||
|
func bonusFor(prevClass charClass, class charClass) int16 {
|
||||||
|
if prevClass == charNonWord && class != charNonWord {
|
||||||
|
// Word boundary
|
||||||
|
return bonusBoundary
|
||||||
|
} else if prevClass == charLower && class == charUpper ||
|
||||||
|
prevClass != charNumber && class == charNumber {
|
||||||
|
// camelCase letter123
|
||||||
|
return bonusCamel123
|
||||||
|
} else if class == charNonWord {
|
||||||
|
return bonusNonWord
|
||||||
|
}
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
func bonusAt(input util.Chars, idx int) int16 {
|
||||||
|
if idx == 0 {
|
||||||
|
return bonusBoundary
|
||||||
|
}
|
||||||
|
return bonusFor(charClassOf(input.Get(idx-1)), charClassOf(input.Get(idx)))
|
||||||
|
}
|
||||||
|
|
||||||
|
type Algo func(caseSensitive bool, forward bool, input util.Chars, pattern []rune, withPos bool, slab *util.Slab) (Result, *[]int)
|
||||||
|
|
||||||
|
func FuzzyMatchV2(caseSensitive bool, forward bool, input util.Chars, pattern []rune, withPos bool, slab *util.Slab) (Result, *[]int) {
|
||||||
|
// Assume that pattern is given in lowercase if case-insensitive.
|
||||||
|
// First check if there's a match and calculate bonus for each position.
|
||||||
|
// If the input string is too long, consider finding the matching chars in
|
||||||
|
// this phase as well (non-optimal alignment).
|
||||||
|
N := input.Length()
|
||||||
|
M := len(pattern)
|
||||||
|
switch M {
|
||||||
|
case 0:
|
||||||
|
return Result{0, 0, 0}, posArray(withPos, M)
|
||||||
|
case 1:
|
||||||
|
return ExactMatchNaive(caseSensitive, forward, input, pattern[0:1], withPos, slab)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Since O(nm) algorithm can be prohibitively expensive for large input,
|
||||||
|
// we fall back to the greedy algorithm.
|
||||||
|
if slab != nil && N*M > cap(slab.I16) {
|
||||||
|
return FuzzyMatchV1(caseSensitive, forward, input, pattern, withPos, slab)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Reuse pre-allocated integer slice to avoid unnecessary sweeping of garbages
|
||||||
|
offset := 0
|
||||||
|
// Bonus point for each position
|
||||||
|
offset, B := alloc16(offset, slab, N, false)
|
||||||
|
// The first occurrence of each character in the pattern
|
||||||
|
offset, F := alloc16(offset, slab, M, false)
|
||||||
|
// Rune array
|
||||||
|
_, T := alloc32(0, slab, N, false)
|
||||||
|
|
||||||
|
// Phase 1. Check if there's a match and calculate bonus for each point
|
||||||
|
pidx, lastIdx, prevClass := 0, 0, charNonWord
|
||||||
|
for idx := 0; idx < N; idx++ {
|
||||||
|
char := input.Get(idx)
|
||||||
|
var class charClass
|
||||||
|
if char <= unicode.MaxASCII {
|
||||||
|
class = charClassOfAscii(char)
|
||||||
|
} else {
|
||||||
|
class = charClassOfNonAscii(char)
|
||||||
|
}
|
||||||
|
|
||||||
|
if !caseSensitive && class == charUpper {
|
||||||
|
if char <= unicode.MaxASCII {
|
||||||
|
char += 32
|
||||||
|
} else {
|
||||||
|
char = unicode.To(unicode.LowerCase, char)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
T[idx] = char
|
||||||
|
B[idx] = bonusFor(prevClass, class)
|
||||||
|
prevClass = class
|
||||||
|
|
||||||
|
if pidx < M {
|
||||||
|
if char == pattern[pidx] {
|
||||||
|
lastIdx = idx
|
||||||
|
F[pidx] = int16(idx)
|
||||||
|
pidx++
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
if char == pattern[M-1] {
|
||||||
|
lastIdx = idx
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if pidx != M {
|
||||||
|
return Result{-1, -1, 0}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Phase 2. Fill in score matrix (H)
|
||||||
|
// Unlike the original algorithm, we do not allow omission.
|
||||||
|
width := lastIdx - int(F[0]) + 1
|
||||||
|
offset, H := alloc16(offset, slab, width*M, false)
|
||||||
|
|
||||||
|
// Possible length of consecutive chunk at each position.
|
||||||
|
offset, C := alloc16(offset, slab, width*M, false)
|
||||||
|
|
||||||
|
maxScore, maxScorePos := int16(0), 0
|
||||||
|
for i := 0; i < M; i++ {
|
||||||
|
I := i * width
|
||||||
|
inGap := false
|
||||||
|
for j := int(F[i]); j <= lastIdx; j++ {
|
||||||
|
j0 := j - int(F[0])
|
||||||
|
var s1, s2, consecutive int16
|
||||||
|
|
||||||
|
if j > int(F[i]) {
|
||||||
|
if inGap {
|
||||||
|
s2 = H[I+j0-1] + scoreGapExtention
|
||||||
|
} else {
|
||||||
|
s2 = H[I+j0-1] + scoreGapStart
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if pattern[i] == T[j] {
|
||||||
|
var diag int16
|
||||||
|
if i > 0 && j0 > 0 {
|
||||||
|
diag = H[I-width+j0-1]
|
||||||
|
}
|
||||||
|
s1 = diag + scoreMatch
|
||||||
|
b := B[j]
|
||||||
|
if i > 0 {
|
||||||
|
// j > 0 if i > 0
|
||||||
|
consecutive = C[I-width+j0-1] + 1
|
||||||
|
// Break consecutive chunk
|
||||||
|
if b == bonusBoundary {
|
||||||
|
consecutive = 1
|
||||||
|
} else if consecutive > 1 {
|
||||||
|
b = util.Max16(b, util.Max16(bonusConsecutive, B[j-int(consecutive)+1]))
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
consecutive = 1
|
||||||
|
b *= bonusFirstCharMultiplier
|
||||||
|
}
|
||||||
|
if s1+b < s2 {
|
||||||
|
s1 += B[j]
|
||||||
|
consecutive = 0
|
||||||
|
} else {
|
||||||
|
s1 += b
|
||||||
|
}
|
||||||
|
}
|
||||||
|
C[I+j0] = consecutive
|
||||||
|
|
||||||
|
inGap = s1 < s2
|
||||||
|
score := util.Max16(util.Max16(s1, s2), 0)
|
||||||
|
if i == M-1 && (forward && score > maxScore || !forward && score >= maxScore) {
|
||||||
|
maxScore, maxScorePos = score, j
|
||||||
|
}
|
||||||
|
H[I+j0] = score
|
||||||
|
}
|
||||||
|
|
||||||
|
if DEBUG {
|
||||||
|
if i == 0 {
|
||||||
|
fmt.Print(" ")
|
||||||
|
for j := int(F[i]); j <= lastIdx; j++ {
|
||||||
|
fmt.Printf(" " + string(input.Get(j)) + " ")
|
||||||
|
}
|
||||||
|
fmt.Println()
|
||||||
|
}
|
||||||
|
fmt.Print(string(pattern[i]) + " ")
|
||||||
|
for idx := int(F[0]); idx < int(F[i]); idx++ {
|
||||||
|
fmt.Print(" 0 ")
|
||||||
|
}
|
||||||
|
for idx := int(F[i]); idx <= lastIdx; idx++ {
|
||||||
|
fmt.Printf("%2d ", H[i*width+idx-int(F[0])])
|
||||||
|
}
|
||||||
|
fmt.Println()
|
||||||
|
|
||||||
|
fmt.Print(" ")
|
||||||
|
for idx, p := range C[I : I+width] {
|
||||||
|
if idx+int(F[0]) < int(F[i]) {
|
||||||
|
p = 0
|
||||||
|
}
|
||||||
|
fmt.Printf("%2d ", p)
|
||||||
|
}
|
||||||
|
fmt.Println()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Phase 3. (Optional) Backtrace to find character positions
|
||||||
|
pos := posArray(withPos, M)
|
||||||
|
j := int(F[0])
|
||||||
|
if withPos {
|
||||||
|
i := M - 1
|
||||||
|
j = maxScorePos
|
||||||
|
preferMatch := true
|
||||||
|
for {
|
||||||
|
I := i * width
|
||||||
|
j0 := j - int(F[0])
|
||||||
|
s := H[I+j0]
|
||||||
|
|
||||||
|
var s1, s2 int16
|
||||||
|
if i > 0 && j >= int(F[i]) {
|
||||||
|
s1 = H[I-width+j0-1]
|
||||||
|
}
|
||||||
|
if j > int(F[i]) {
|
||||||
|
s2 = H[I+j0-1]
|
||||||
|
}
|
||||||
|
|
||||||
|
if s > s1 && (s > s2 || s == s2 && preferMatch) {
|
||||||
|
*pos = append(*pos, j)
|
||||||
|
if i == 0 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
i--
|
||||||
|
}
|
||||||
|
preferMatch = C[I+j0] > 1 || I+width+j0+1 < len(C) && C[I+width+j0+1] > 0
|
||||||
|
j--
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Start offset we return here is only relevant when begin tiebreak is used.
|
||||||
|
// However finding the accurate offset requires backtracking, and we don't
|
||||||
|
// want to pay extra cost for the option that has lost its importance.
|
||||||
|
return Result{j, maxScorePos + 1, int(maxScore)}, pos
|
||||||
|
}
|
||||||
|
|
||||||
|
// Implement the same sorting criteria as V2
|
||||||
|
func calculateScore(caseSensitive bool, text util.Chars, pattern []rune, sidx int, eidx int, withPos bool) (int, *[]int) {
|
||||||
|
pidx, score, inGap, consecutive, firstBonus := 0, 0, false, 0, int16(0)
|
||||||
|
pos := posArray(withPos, len(pattern))
|
||||||
|
prevClass := charNonWord
|
||||||
|
if sidx > 0 {
|
||||||
|
prevClass = charClassOf(text.Get(sidx - 1))
|
||||||
|
}
|
||||||
|
for idx := sidx; idx < eidx; idx++ {
|
||||||
|
char := text.Get(idx)
|
||||||
|
class := charClassOf(char)
|
||||||
|
if !caseSensitive {
|
||||||
|
if char >= 'A' && char <= 'Z' {
|
||||||
|
char += 32
|
||||||
|
} else if char > unicode.MaxASCII {
|
||||||
|
char = unicode.To(unicode.LowerCase, char)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if char == pattern[pidx] {
|
||||||
|
if withPos {
|
||||||
|
*pos = append(*pos, idx)
|
||||||
|
}
|
||||||
|
score += scoreMatch
|
||||||
|
bonus := bonusFor(prevClass, class)
|
||||||
|
if consecutive == 0 {
|
||||||
|
firstBonus = bonus
|
||||||
|
} else {
|
||||||
|
// Break consecutive chunk
|
||||||
|
if bonus == bonusBoundary {
|
||||||
|
firstBonus = bonus
|
||||||
|
}
|
||||||
|
bonus = util.Max16(util.Max16(bonus, firstBonus), bonusConsecutive)
|
||||||
|
}
|
||||||
|
if pidx == 0 {
|
||||||
|
score += int(bonus * bonusFirstCharMultiplier)
|
||||||
|
} else {
|
||||||
|
score += int(bonus)
|
||||||
|
}
|
||||||
|
inGap = false
|
||||||
|
consecutive++
|
||||||
|
pidx++
|
||||||
|
} else {
|
||||||
|
if inGap {
|
||||||
|
score += scoreGapExtention
|
||||||
|
} else {
|
||||||
|
score += scoreGapStart
|
||||||
|
}
|
||||||
|
inGap = true
|
||||||
|
consecutive = 0
|
||||||
|
firstBonus = 0
|
||||||
|
}
|
||||||
|
prevClass = class
|
||||||
|
}
|
||||||
|
return score, pos
|
||||||
|
}
|
||||||
|
|
||||||
|
// FuzzyMatchV1 performs fuzzy-match
|
||||||
|
func FuzzyMatchV1(caseSensitive bool, forward bool, text util.Chars, pattern []rune, withPos bool, slab *util.Slab) (Result, *[]int) {
|
||||||
|
if len(pattern) == 0 {
|
||||||
|
return Result{0, 0, 0}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// 0. (FIXME) How to find the shortest match?
|
|
||||||
// a_____b__c__abc
|
|
||||||
// ^^^^^^^^^^ ^^^
|
|
||||||
// 1. forward scan (abc)
|
|
||||||
// *-----*-----*>
|
|
||||||
// a_____b___abc__
|
|
||||||
// 2. reverse scan (cba)
|
|
||||||
// a_____b___abc__
|
|
||||||
// <***
|
|
||||||
pidx := 0
|
pidx := 0
|
||||||
sidx := -1
|
sidx := -1
|
||||||
eidx := -1
|
eidx := -1
|
||||||
|
|
||||||
for index, char := range *runes {
|
lenRunes := text.Length()
|
||||||
|
lenPattern := len(pattern)
|
||||||
|
|
||||||
|
for index := 0; index < lenRunes; index++ {
|
||||||
|
char := text.Get(indexAt(index, lenRunes, forward))
|
||||||
// This is considerably faster than blindly applying strings.ToLower to the
|
// This is considerably faster than blindly applying strings.ToLower to the
|
||||||
// whole string
|
// whole string
|
||||||
if !caseSensitive {
|
if !caseSensitive {
|
||||||
@@ -46,11 +513,12 @@ func FuzzyMatch(caseSensitive bool, runes *[]rune, pattern []rune) (int, int) {
|
|||||||
char = unicode.To(unicode.LowerCase, char)
|
char = unicode.To(unicode.LowerCase, char)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if char == pattern[pidx] {
|
pchar := pattern[indexAt(pidx, lenPattern, forward)]
|
||||||
|
if char == pchar {
|
||||||
if sidx < 0 {
|
if sidx < 0 {
|
||||||
sidx = index
|
sidx = index
|
||||||
}
|
}
|
||||||
if pidx++; pidx == len(pattern) {
|
if pidx++; pidx == lenPattern {
|
||||||
eidx = index + 1
|
eidx = index + 1
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
@@ -60,7 +528,8 @@ func FuzzyMatch(caseSensitive bool, runes *[]rune, pattern []rune) (int, int) {
|
|||||||
if sidx >= 0 && eidx >= 0 {
|
if sidx >= 0 && eidx >= 0 {
|
||||||
pidx--
|
pidx--
|
||||||
for index := eidx - 1; index >= sidx; index-- {
|
for index := eidx - 1; index >= sidx; index-- {
|
||||||
char := (*runes)[index]
|
tidx := indexAt(index, lenRunes, forward)
|
||||||
|
char := text.Get(tidx)
|
||||||
if !caseSensitive {
|
if !caseSensitive {
|
||||||
if char >= 'A' && char <= 'Z' {
|
if char >= 'A' && char <= 'Z' {
|
||||||
char += 32
|
char += 32
|
||||||
@@ -68,16 +537,25 @@ func FuzzyMatch(caseSensitive bool, runes *[]rune, pattern []rune) (int, int) {
|
|||||||
char = unicode.To(unicode.LowerCase, char)
|
char = unicode.To(unicode.LowerCase, char)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if char == pattern[pidx] {
|
|
||||||
|
pidx_ := indexAt(pidx, lenPattern, forward)
|
||||||
|
pchar := pattern[pidx_]
|
||||||
|
if char == pchar {
|
||||||
if pidx--; pidx < 0 {
|
if pidx--; pidx < 0 {
|
||||||
sidx = index
|
sidx = index
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return sidx, eidx
|
|
||||||
|
if !forward {
|
||||||
|
sidx, eidx = lenRunes-eidx, lenRunes-sidx
|
||||||
}
|
}
|
||||||
return -1, -1
|
|
||||||
|
score, pos := calculateScore(caseSensitive, text, pattern, sidx, eidx, withPos)
|
||||||
|
return Result{sidx, eidx, score}, pos
|
||||||
|
}
|
||||||
|
return Result{-1, -1, 0}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// ExactMatchNaive is a basic string searching algorithm that handles case
|
// ExactMatchNaive is a basic string searching algorithm that handles case
|
||||||
@@ -85,22 +563,28 @@ func FuzzyMatch(caseSensitive bool, runes *[]rune, pattern []rune) (int, int) {
|
|||||||
// of strings.ToLower + strings.Index for typical fzf use cases where input
|
// of strings.ToLower + strings.Index for typical fzf use cases where input
|
||||||
// strings and patterns are not very long.
|
// strings and patterns are not very long.
|
||||||
//
|
//
|
||||||
// We might try to implement better algorithms in the future:
|
// Since 0.15.0, this function searches for the match with the highest
|
||||||
// http://en.wikipedia.org/wiki/String_searching_algorithm
|
// bonus point, instead of stopping immediately after finding the first match.
|
||||||
func ExactMatchNaive(caseSensitive bool, runes *[]rune, pattern []rune) (int, int) {
|
// The solution is much cheaper since there is only one possible alignment of
|
||||||
|
// the pattern.
|
||||||
|
func ExactMatchNaive(caseSensitive bool, forward bool, text util.Chars, pattern []rune, withPos bool, slab *util.Slab) (Result, *[]int) {
|
||||||
if len(pattern) == 0 {
|
if len(pattern) == 0 {
|
||||||
return 0, 0
|
return Result{0, 0, 0}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
numRunes := len(*runes)
|
lenRunes := text.Length()
|
||||||
plen := len(pattern)
|
lenPattern := len(pattern)
|
||||||
if numRunes < plen {
|
|
||||||
return -1, -1
|
if lenRunes < lenPattern {
|
||||||
|
return Result{-1, -1, 0}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// For simplicity, only look at the bonus at the first character position
|
||||||
pidx := 0
|
pidx := 0
|
||||||
for index := 0; index < numRunes; index++ {
|
bestPos, bonus, bestBonus := -1, int16(0), int16(-1)
|
||||||
char := (*runes)[index]
|
for index := 0; index < lenRunes; index++ {
|
||||||
|
index_ := indexAt(index, lenRunes, forward)
|
||||||
|
char := text.Get(index_)
|
||||||
if !caseSensitive {
|
if !caseSensitive {
|
||||||
if char >= 'A' && char <= 'Z' {
|
if char >= 'A' && char <= 'Z' {
|
||||||
char += 32
|
char += 32
|
||||||
@@ -108,54 +592,108 @@ func ExactMatchNaive(caseSensitive bool, runes *[]rune, pattern []rune) (int, in
|
|||||||
char = unicode.To(unicode.LowerCase, char)
|
char = unicode.To(unicode.LowerCase, char)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if pattern[pidx] == char {
|
pidx_ := indexAt(pidx, lenPattern, forward)
|
||||||
|
pchar := pattern[pidx_]
|
||||||
|
if pchar == char {
|
||||||
|
if pidx_ == 0 {
|
||||||
|
bonus = bonusAt(text, index_)
|
||||||
|
}
|
||||||
pidx++
|
pidx++
|
||||||
if pidx == plen {
|
if pidx == lenPattern {
|
||||||
return index - plen + 1, index + 1
|
if bonus > bestBonus {
|
||||||
|
bestPos, bestBonus = index, bonus
|
||||||
|
}
|
||||||
|
if bonus == bonusBoundary {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
index -= pidx - 1
|
||||||
|
pidx, bonus = 0, 0
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
index -= pidx
|
index -= pidx
|
||||||
pidx = 0
|
pidx, bonus = 0, 0
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return -1, -1
|
if bestPos >= 0 {
|
||||||
|
var sidx, eidx int
|
||||||
|
if forward {
|
||||||
|
sidx = bestPos - lenPattern + 1
|
||||||
|
eidx = bestPos + 1
|
||||||
|
} else {
|
||||||
|
sidx = lenRunes - (bestPos + 1)
|
||||||
|
eidx = lenRunes - (bestPos - lenPattern + 1)
|
||||||
|
}
|
||||||
|
score, _ := calculateScore(caseSensitive, text, pattern, sidx, eidx, false)
|
||||||
|
return Result{sidx, eidx, score}, nil
|
||||||
|
}
|
||||||
|
return Result{-1, -1, 0}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// PrefixMatch performs prefix-match
|
// PrefixMatch performs prefix-match
|
||||||
func PrefixMatch(caseSensitive bool, runes *[]rune, pattern []rune) (int, int) {
|
func PrefixMatch(caseSensitive bool, forward bool, text util.Chars, pattern []rune, withPos bool, slab *util.Slab) (Result, *[]int) {
|
||||||
if len(*runes) < len(pattern) {
|
if len(pattern) == 0 {
|
||||||
return -1, -1
|
return Result{0, 0, 0}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if text.Length() < len(pattern) {
|
||||||
|
return Result{-1, -1, 0}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
for index, r := range pattern {
|
for index, r := range pattern {
|
||||||
char := (*runes)[index]
|
char := text.Get(index)
|
||||||
if !caseSensitive {
|
if !caseSensitive {
|
||||||
char = unicode.ToLower(char)
|
char = unicode.ToLower(char)
|
||||||
}
|
}
|
||||||
if char != r {
|
if char != r {
|
||||||
return -1, -1
|
return Result{-1, -1, 0}, nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return 0, len(pattern)
|
lenPattern := len(pattern)
|
||||||
|
score, _ := calculateScore(caseSensitive, text, pattern, 0, lenPattern, false)
|
||||||
|
return Result{0, lenPattern, score}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// SuffixMatch performs suffix-match
|
// SuffixMatch performs suffix-match
|
||||||
func SuffixMatch(caseSensitive bool, input *[]rune, pattern []rune) (int, int) {
|
func SuffixMatch(caseSensitive bool, forward bool, text util.Chars, pattern []rune, withPos bool, slab *util.Slab) (Result, *[]int) {
|
||||||
runes := util.TrimRight(input)
|
lenRunes := text.Length()
|
||||||
trimmedLen := len(runes)
|
trimmedLen := lenRunes - text.TrailingWhitespaces()
|
||||||
|
if len(pattern) == 0 {
|
||||||
|
return Result{trimmedLen, trimmedLen, 0}, nil
|
||||||
|
}
|
||||||
diff := trimmedLen - len(pattern)
|
diff := trimmedLen - len(pattern)
|
||||||
if diff < 0 {
|
if diff < 0 {
|
||||||
return -1, -1
|
return Result{-1, -1, 0}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
for index, r := range pattern {
|
for index, r := range pattern {
|
||||||
char := runes[index+diff]
|
char := text.Get(index + diff)
|
||||||
if !caseSensitive {
|
if !caseSensitive {
|
||||||
char = unicode.ToLower(char)
|
char = unicode.ToLower(char)
|
||||||
}
|
}
|
||||||
if char != r {
|
if char != r {
|
||||||
return -1, -1
|
return Result{-1, -1, 0}, nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return trimmedLen - len(pattern), trimmedLen
|
lenPattern := len(pattern)
|
||||||
|
sidx := trimmedLen - lenPattern
|
||||||
|
eidx := trimmedLen
|
||||||
|
score, _ := calculateScore(caseSensitive, text, pattern, sidx, eidx, false)
|
||||||
|
return Result{sidx, eidx, score}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// EqualMatch performs equal-match
|
||||||
|
func EqualMatch(caseSensitive bool, forward bool, text util.Chars, pattern []rune, withPos bool, slab *util.Slab) (Result, *[]int) {
|
||||||
|
lenPattern := len(pattern)
|
||||||
|
if text.Length() != lenPattern {
|
||||||
|
return Result{-1, -1, 0}, nil
|
||||||
|
}
|
||||||
|
runesStr := text.ToString()
|
||||||
|
if !caseSensitive {
|
||||||
|
runesStr = strings.ToLower(runesStr)
|
||||||
|
}
|
||||||
|
if runesStr == string(pattern) {
|
||||||
|
return Result{0, lenPattern, (scoreMatch+bonusBoundary)*lenPattern +
|
||||||
|
(bonusFirstCharMultiplier-1)*bonusBoundary}, nil
|
||||||
|
}
|
||||||
|
return Result{-1, -1, 0}, nil
|
||||||
}
|
}
|
||||||
|
@@ -1,52 +1,156 @@
|
|||||||
package algo
|
package algo
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"sort"
|
||||||
"strings"
|
"strings"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
|
"github.com/junegunn/fzf/src/util"
|
||||||
)
|
)
|
||||||
|
|
||||||
func assertMatch(t *testing.T, fun func(bool, *[]rune, []rune) (int, int), caseSensitive bool, input string, pattern string, sidx int, eidx int) {
|
func assertMatch(t *testing.T, fun Algo, caseSensitive, forward bool, input, pattern string, sidx int, eidx int, score int) {
|
||||||
if !caseSensitive {
|
if !caseSensitive {
|
||||||
pattern = strings.ToLower(pattern)
|
pattern = strings.ToLower(pattern)
|
||||||
}
|
}
|
||||||
runes := []rune(input)
|
res, pos := fun(caseSensitive, forward, util.RunesToChars([]rune(input)), []rune(pattern), true, nil)
|
||||||
s, e := fun(caseSensitive, &runes, []rune(pattern))
|
var start, end int
|
||||||
if s != sidx {
|
if pos == nil || len(*pos) == 0 {
|
||||||
t.Errorf("Invalid start index: %d (expected: %d, %s / %s)", s, sidx, input, pattern)
|
start = res.Start
|
||||||
|
end = res.End
|
||||||
|
} else {
|
||||||
|
sort.Ints(*pos)
|
||||||
|
start = (*pos)[0]
|
||||||
|
end = (*pos)[len(*pos)-1] + 1
|
||||||
}
|
}
|
||||||
if e != eidx {
|
if start != sidx {
|
||||||
t.Errorf("Invalid end index: %d (expected: %d, %s / %s)", e, eidx, input, pattern)
|
t.Errorf("Invalid start index: %d (expected: %d, %s / %s)", start, sidx, input, pattern)
|
||||||
|
}
|
||||||
|
if end != eidx {
|
||||||
|
t.Errorf("Invalid end index: %d (expected: %d, %s / %s)", end, eidx, input, pattern)
|
||||||
|
}
|
||||||
|
if res.Score != score {
|
||||||
|
t.Errorf("Invalid score: %d (expected: %d, %s / %s)", res.Score, score, input, pattern)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestFuzzyMatch(t *testing.T) {
|
func TestFuzzyMatch(t *testing.T) {
|
||||||
assertMatch(t, FuzzyMatch, false, "fooBarbaz", "oBZ", 2, 9)
|
for _, fn := range []Algo{FuzzyMatchV1, FuzzyMatchV2} {
|
||||||
assertMatch(t, FuzzyMatch, true, "fooBarbaz", "oBZ", -1, -1)
|
for _, forward := range []bool{true, false} {
|
||||||
assertMatch(t, FuzzyMatch, true, "fooBarbaz", "oBz", 2, 9)
|
assertMatch(t, fn, false, forward, "fooBarbaz1", "oBZ", 2, 9,
|
||||||
assertMatch(t, FuzzyMatch, true, "fooBarbaz", "fooBarbazz", -1, -1)
|
scoreMatch*3+bonusCamel123+scoreGapStart+scoreGapExtention*3)
|
||||||
|
assertMatch(t, fn, false, forward, "foo bar baz", "fbb", 0, 9,
|
||||||
|
scoreMatch*3+bonusBoundary*bonusFirstCharMultiplier+
|
||||||
|
bonusBoundary*2+2*scoreGapStart+4*scoreGapExtention)
|
||||||
|
assertMatch(t, fn, false, forward, "/AutomatorDocument.icns", "rdoc", 9, 13,
|
||||||
|
scoreMatch*4+bonusCamel123+bonusConsecutive*2)
|
||||||
|
assertMatch(t, fn, false, forward, "/man1/zshcompctl.1", "zshc", 6, 10,
|
||||||
|
scoreMatch*4+bonusBoundary*bonusFirstCharMultiplier+bonusBoundary*3)
|
||||||
|
assertMatch(t, fn, false, forward, "/.oh-my-zsh/cache", "zshc", 8, 13,
|
||||||
|
scoreMatch*4+bonusBoundary*bonusFirstCharMultiplier+bonusBoundary*3+scoreGapStart)
|
||||||
|
assertMatch(t, fn, false, forward, "ab0123 456", "12356", 3, 10,
|
||||||
|
scoreMatch*5+bonusConsecutive*3+scoreGapStart+scoreGapExtention)
|
||||||
|
assertMatch(t, fn, false, forward, "abc123 456", "12356", 3, 10,
|
||||||
|
scoreMatch*5+bonusCamel123*bonusFirstCharMultiplier+bonusCamel123*2+bonusConsecutive+scoreGapStart+scoreGapExtention)
|
||||||
|
assertMatch(t, fn, false, forward, "foo/bar/baz", "fbb", 0, 9,
|
||||||
|
scoreMatch*3+bonusBoundary*bonusFirstCharMultiplier+
|
||||||
|
bonusBoundary*2+2*scoreGapStart+4*scoreGapExtention)
|
||||||
|
assertMatch(t, fn, false, forward, "fooBarBaz", "fbb", 0, 7,
|
||||||
|
scoreMatch*3+bonusBoundary*bonusFirstCharMultiplier+
|
||||||
|
bonusCamel123*2+2*scoreGapStart+2*scoreGapExtention)
|
||||||
|
assertMatch(t, fn, false, forward, "foo barbaz", "fbb", 0, 8,
|
||||||
|
scoreMatch*3+bonusBoundary*bonusFirstCharMultiplier+bonusBoundary+
|
||||||
|
scoreGapStart*2+scoreGapExtention*3)
|
||||||
|
assertMatch(t, fn, false, forward, "fooBar Baz", "foob", 0, 4,
|
||||||
|
scoreMatch*4+bonusBoundary*bonusFirstCharMultiplier+bonusBoundary*3)
|
||||||
|
assertMatch(t, fn, false, forward, "xFoo-Bar Baz", "foo-b", 1, 6,
|
||||||
|
scoreMatch*5+bonusCamel123*bonusFirstCharMultiplier+bonusCamel123*2+
|
||||||
|
bonusNonWord+bonusBoundary)
|
||||||
|
|
||||||
|
assertMatch(t, fn, true, forward, "fooBarbaz", "oBz", 2, 9,
|
||||||
|
scoreMatch*3+bonusCamel123+scoreGapStart+scoreGapExtention*3)
|
||||||
|
assertMatch(t, fn, true, forward, "Foo/Bar/Baz", "FBB", 0, 9,
|
||||||
|
scoreMatch*3+bonusBoundary*(bonusFirstCharMultiplier+2)+
|
||||||
|
scoreGapStart*2+scoreGapExtention*4)
|
||||||
|
assertMatch(t, fn, true, forward, "FooBarBaz", "FBB", 0, 7,
|
||||||
|
scoreMatch*3+bonusBoundary*bonusFirstCharMultiplier+bonusCamel123*2+
|
||||||
|
scoreGapStart*2+scoreGapExtention*2)
|
||||||
|
assertMatch(t, fn, true, forward, "FooBar Baz", "FooB", 0, 4,
|
||||||
|
scoreMatch*4+bonusBoundary*bonusFirstCharMultiplier+bonusBoundary*2+
|
||||||
|
util.Max(bonusCamel123, bonusBoundary))
|
||||||
|
|
||||||
|
// Consecutive bonus updated
|
||||||
|
assertMatch(t, fn, true, forward, "foo-bar", "o-ba", 2, 6,
|
||||||
|
scoreMatch*4+bonusBoundary*3)
|
||||||
|
|
||||||
|
// Non-match
|
||||||
|
assertMatch(t, fn, true, forward, "fooBarbaz", "oBZ", -1, -1, 0)
|
||||||
|
assertMatch(t, fn, true, forward, "Foo Bar Baz", "fbb", -1, -1, 0)
|
||||||
|
assertMatch(t, fn, true, forward, "fooBarbaz", "fooBarbazz", -1, -1, 0)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestFuzzyMatchBackward(t *testing.T) {
|
||||||
|
assertMatch(t, FuzzyMatchV1, false, true, "foobar fb", "fb", 0, 4,
|
||||||
|
scoreMatch*2+bonusBoundary*bonusFirstCharMultiplier+
|
||||||
|
scoreGapStart+scoreGapExtention)
|
||||||
|
assertMatch(t, FuzzyMatchV1, false, false, "foobar fb", "fb", 7, 9,
|
||||||
|
scoreMatch*2+bonusBoundary*bonusFirstCharMultiplier+bonusBoundary)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestExactMatchNaive(t *testing.T) {
|
func TestExactMatchNaive(t *testing.T) {
|
||||||
assertMatch(t, ExactMatchNaive, false, "fooBarbaz", "oBA", 2, 5)
|
for _, dir := range []bool{true, false} {
|
||||||
assertMatch(t, ExactMatchNaive, true, "fooBarbaz", "oBA", -1, -1)
|
assertMatch(t, ExactMatchNaive, true, dir, "fooBarbaz", "oBA", -1, -1, 0)
|
||||||
assertMatch(t, ExactMatchNaive, true, "fooBarbaz", "fooBarbazz", -1, -1)
|
assertMatch(t, ExactMatchNaive, true, dir, "fooBarbaz", "fooBarbazz", -1, -1, 0)
|
||||||
|
|
||||||
|
assertMatch(t, ExactMatchNaive, false, dir, "fooBarbaz", "oBA", 2, 5,
|
||||||
|
scoreMatch*3+bonusCamel123+bonusConsecutive)
|
||||||
|
assertMatch(t, ExactMatchNaive, false, dir, "/AutomatorDocument.icns", "rdoc", 9, 13,
|
||||||
|
scoreMatch*4+bonusCamel123+bonusConsecutive*2)
|
||||||
|
assertMatch(t, ExactMatchNaive, false, dir, "/man1/zshcompctl.1", "zshc", 6, 10,
|
||||||
|
scoreMatch*4+bonusBoundary*(bonusFirstCharMultiplier+3))
|
||||||
|
assertMatch(t, ExactMatchNaive, false, dir, "/.oh-my-zsh/cache", "zsh/c", 8, 13,
|
||||||
|
scoreMatch*5+bonusBoundary*(bonusFirstCharMultiplier+4))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestExactMatchNaiveBackward(t *testing.T) {
|
||||||
|
assertMatch(t, ExactMatchNaive, false, true, "foobar foob", "oo", 1, 3,
|
||||||
|
scoreMatch*2+bonusConsecutive)
|
||||||
|
assertMatch(t, ExactMatchNaive, false, false, "foobar foob", "oo", 8, 10,
|
||||||
|
scoreMatch*2+bonusConsecutive)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestPrefixMatch(t *testing.T) {
|
func TestPrefixMatch(t *testing.T) {
|
||||||
assertMatch(t, PrefixMatch, false, "fooBarbaz", "Foo", 0, 3)
|
score := (scoreMatch+bonusBoundary)*3 + bonusBoundary*(bonusFirstCharMultiplier-1)
|
||||||
assertMatch(t, PrefixMatch, true, "fooBarbaz", "Foo", -1, -1)
|
|
||||||
assertMatch(t, PrefixMatch, false, "fooBarbaz", "baz", -1, -1)
|
for _, dir := range []bool{true, false} {
|
||||||
|
assertMatch(t, PrefixMatch, true, dir, "fooBarbaz", "Foo", -1, -1, 0)
|
||||||
|
assertMatch(t, PrefixMatch, false, dir, "fooBarBaz", "baz", -1, -1, 0)
|
||||||
|
assertMatch(t, PrefixMatch, false, dir, "fooBarbaz", "Foo", 0, 3, score)
|
||||||
|
assertMatch(t, PrefixMatch, false, dir, "foOBarBaZ", "foo", 0, 3, score)
|
||||||
|
assertMatch(t, PrefixMatch, false, dir, "f-oBarbaz", "f-o", 0, 3, score)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestSuffixMatch(t *testing.T) {
|
func TestSuffixMatch(t *testing.T) {
|
||||||
assertMatch(t, SuffixMatch, false, "fooBarbaz", "Foo", -1, -1)
|
for _, dir := range []bool{true, false} {
|
||||||
assertMatch(t, SuffixMatch, false, "fooBarbaz", "baz", 6, 9)
|
assertMatch(t, SuffixMatch, true, dir, "fooBarbaz", "Baz", -1, -1, 0)
|
||||||
assertMatch(t, SuffixMatch, true, "fooBarbaz", "Baz", -1, -1)
|
assertMatch(t, SuffixMatch, false, dir, "fooBarbaz", "Foo", -1, -1, 0)
|
||||||
|
|
||||||
|
assertMatch(t, SuffixMatch, false, dir, "fooBarbaz", "baz", 6, 9,
|
||||||
|
scoreMatch*3+bonusConsecutive*2)
|
||||||
|
assertMatch(t, SuffixMatch, false, dir, "fooBarBaZ", "baz", 6, 9,
|
||||||
|
(scoreMatch+bonusCamel123)*3+bonusCamel123*(bonusFirstCharMultiplier-1))
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestEmptyPattern(t *testing.T) {
|
func TestEmptyPattern(t *testing.T) {
|
||||||
assertMatch(t, FuzzyMatch, true, "foobar", "", 0, 0)
|
for _, dir := range []bool{true, false} {
|
||||||
assertMatch(t, ExactMatchNaive, true, "foobar", "", 0, 0)
|
assertMatch(t, FuzzyMatchV1, true, dir, "foobar", "", 0, 0, 0)
|
||||||
assertMatch(t, PrefixMatch, true, "foobar", "", 0, 0)
|
assertMatch(t, FuzzyMatchV2, true, dir, "foobar", "", 0, 0, 0)
|
||||||
assertMatch(t, SuffixMatch, true, "foobar", "", 6, 6)
|
assertMatch(t, ExactMatchNaive, true, dir, "foobar", "", 0, 0, 0)
|
||||||
|
assertMatch(t, PrefixMatch, true, dir, "foobar", "", 0, 0, 0)
|
||||||
|
assertMatch(t, SuffixMatch, true, dir, "foobar", "", 6, 6, 0)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
29 src/ansi.go
@@ -36,16 +36,22 @@ func init() {
     ansiRegex = regexp.MustCompile("\x1b\\[[0-9;]*[mK]")
 }

-func extractColor(str *string) (*string, []ansiOffset) {
+func extractColor(str string, state *ansiState, proc func(string, *ansiState) bool) (string, *[]ansiOffset, *ansiState) {
     var offsets []ansiOffset

     var output bytes.Buffer
-    var state *ansiState
+
+    if state != nil {
+        offsets = append(offsets, ansiOffset{[2]int32{0, 0}, *state})
+    }

     idx := 0
-    for _, offset := range ansiRegex.FindAllStringIndex(*str, -1) {
-        output.WriteString((*str)[idx:offset[0]])
-        newState := interpretCode((*str)[offset[0]:offset[1]], state)
+    for _, offset := range ansiRegex.FindAllStringIndex(str, -1) {
+        prev := str[idx:offset[0]]
+        output.WriteString(prev)
+        if proc != nil && !proc(prev, state) {
+            return "", nil, nil
+        }
+        newState := interpretCode(str[offset[0]:offset[1]], state)

         if !newState.equals(state) {
             if state != nil {
@@ -67,7 +73,7 @@ func extractColor(str *string) (*string, []ansiOffset) {
         idx = offset[1]
     }

-    rest := (*str)[idx:]
+    rest := str[idx:]
     if len(rest) > 0 {
         output.WriteString(rest)
         if state != nil {
@@ -75,8 +81,13 @@ func extractColor(str *string) (*string, []ansiOffset) {
             (&offsets[len(offsets)-1]).offset[1] = int32(utf8.RuneCount(output.Bytes()))
         }
     }
-    outputStr := output.String()
-    return &outputStr, offsets
+    if proc != nil {
+        proc(rest, state)
+    }
+    if len(offsets) == 0 {
+        return output.String(), nil, state
+    }
+    return output.String(), &offsets, state
 }

 func interpretCode(ansiCode string, prevState *ansiState) *ansiState {
115 src/ansi_test.go
@@ -14,94 +14,139 @@ func TestExtractColor(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
src := "hello world"
|
src := "hello world"
|
||||||
|
var state *ansiState
|
||||||
clean := "\x1b[0m"
|
clean := "\x1b[0m"
|
||||||
check := func(assertion func(ansiOffsets []ansiOffset)) {
|
check := func(assertion func(ansiOffsets *[]ansiOffset, state *ansiState)) {
|
||||||
output, ansiOffsets := extractColor(&src)
|
output, ansiOffsets, newState := extractColor(src, state, nil)
|
||||||
if *output != "hello world" {
|
state = newState
|
||||||
|
if output != "hello world" {
|
||||||
t.Errorf("Invalid output: {}", output)
|
t.Errorf("Invalid output: {}", output)
|
||||||
}
|
}
|
||||||
fmt.Println(src, ansiOffsets, clean)
|
fmt.Println(src, ansiOffsets, clean)
|
||||||
assertion(ansiOffsets)
|
assertion(ansiOffsets, state)
|
||||||
}
|
}
|
||||||
|
|
||||||
check(func(offsets []ansiOffset) {
|
check(func(offsets *[]ansiOffset, state *ansiState) {
|
||||||
if len(offsets) > 0 {
|
if offsets != nil {
|
||||||
t.Fail()
|
t.Fail()
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
|
|
||||||
|
state = nil
|
||||||
src = "\x1b[0mhello world"
|
src = "\x1b[0mhello world"
|
||||||
check(func(offsets []ansiOffset) {
|
check(func(offsets *[]ansiOffset, state *ansiState) {
|
||||||
if len(offsets) > 0 {
|
if offsets != nil {
|
||||||
t.Fail()
|
t.Fail()
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
|
|
||||||
|
state = nil
|
||||||
src = "\x1b[1mhello world"
|
src = "\x1b[1mhello world"
|
||||||
check(func(offsets []ansiOffset) {
|
check(func(offsets *[]ansiOffset, state *ansiState) {
|
||||||
if len(offsets) != 1 {
|
if len(*offsets) != 1 {
|
||||||
t.Fail()
|
t.Fail()
|
||||||
}
|
}
|
||||||
assert(offsets[0], 0, 11, -1, -1, true)
|
assert((*offsets)[0], 0, 11, -1, -1, true)
|
||||||
})
|
})
|
||||||
|
|
||||||
|
state = nil
|
||||||
src = "\x1b[1mhello \x1b[mworld"
|
src = "\x1b[1mhello \x1b[mworld"
|
||||||
check(func(offsets []ansiOffset) {
|
check(func(offsets *[]ansiOffset, state *ansiState) {
|
||||||
if len(offsets) != 1 {
|
if len(*offsets) != 1 {
|
||||||
t.Fail()
|
t.Fail()
|
||||||
}
|
}
|
||||||
assert(offsets[0], 0, 6, -1, -1, true)
|
assert((*offsets)[0], 0, 6, -1, -1, true)
|
||||||
})
|
})
|
||||||
|
|
||||||
|
state = nil
|
||||||
src = "\x1b[1mhello \x1b[Kworld"
|
src = "\x1b[1mhello \x1b[Kworld"
|
||||||
check(func(offsets []ansiOffset) {
|
check(func(offsets *[]ansiOffset, state *ansiState) {
|
||||||
if len(offsets) != 1 {
|
if len(*offsets) != 1 {
|
||||||
t.Fail()
|
t.Fail()
|
||||||
}
|
}
|
||||||
assert(offsets[0], 0, 11, -1, -1, true)
|
assert((*offsets)[0], 0, 11, -1, -1, true)
|
||||||
})
|
})
|
||||||
|
|
||||||
|
state = nil
|
||||||
src = "hello \x1b[34;45;1mworld"
|
src = "hello \x1b[34;45;1mworld"
|
||||||
check(func(offsets []ansiOffset) {
|
check(func(offsets *[]ansiOffset, state *ansiState) {
|
||||||
if len(offsets) != 1 {
|
if len(*offsets) != 1 {
|
||||||
t.Fail()
|
t.Fail()
|
||||||
}
|
}
|
||||||
assert(offsets[0], 6, 11, 4, 5, true)
|
assert((*offsets)[0], 6, 11, 4, 5, true)
|
||||||
})
|
})
|
||||||
|
|
||||||
|
state = nil
|
||||||
src = "hello \x1b[34;45;1mwor\x1b[34;45;1mld"
|
src = "hello \x1b[34;45;1mwor\x1b[34;45;1mld"
|
||||||
check(func(offsets []ansiOffset) {
|
check(func(offsets *[]ansiOffset, state *ansiState) {
|
||||||
if len(offsets) != 1 {
|
if len(*offsets) != 1 {
|
||||||
t.Fail()
|
t.Fail()
|
||||||
}
|
}
|
||||||
assert(offsets[0], 6, 11, 4, 5, true)
|
assert((*offsets)[0], 6, 11, 4, 5, true)
|
||||||
})
|
})
|
||||||
|
|
||||||
|
state = nil
|
||||||
src = "hello \x1b[34;45;1mwor\x1b[0mld"
|
src = "hello \x1b[34;45;1mwor\x1b[0mld"
|
||||||
check(func(offsets []ansiOffset) {
|
check(func(offsets *[]ansiOffset, state *ansiState) {
|
||||||
if len(offsets) != 1 {
|
if len(*offsets) != 1 {
|
||||||
t.Fail()
|
t.Fail()
|
||||||
}
|
}
|
||||||
assert(offsets[0], 6, 9, 4, 5, true)
|
assert((*offsets)[0], 6, 9, 4, 5, true)
|
||||||
})
|
})
|
||||||
|
|
||||||
|
state = nil
|
||||||
src = "hello \x1b[34;48;5;233;1mwo\x1b[38;5;161mr\x1b[0ml\x1b[38;5;161md"
|
src = "hello \x1b[34;48;5;233;1mwo\x1b[38;5;161mr\x1b[0ml\x1b[38;5;161md"
|
||||||
check(func(offsets []ansiOffset) {
|
check(func(offsets *[]ansiOffset, state *ansiState) {
|
||||||
if len(offsets) != 3 {
|
if len(*offsets) != 3 {
|
||||||
t.Fail()
|
t.Fail()
|
||||||
}
|
}
|
||||||
assert(offsets[0], 6, 8, 4, 233, true)
|
assert((*offsets)[0], 6, 8, 4, 233, true)
|
||||||
assert(offsets[1], 8, 9, 161, 233, true)
|
assert((*offsets)[1], 8, 9, 161, 233, true)
|
||||||
assert(offsets[2], 10, 11, 161, -1, false)
|
assert((*offsets)[2], 10, 11, 161, -1, false)
|
||||||
})
|
})
|
||||||
|
|
||||||
// {38,48};5;{38,48}
|
// {38,48};5;{38,48}
|
||||||
|
state = nil
|
||||||
src = "hello \x1b[38;5;38;48;5;48;1mwor\x1b[38;5;48;48;5;38ml\x1b[0md"
|
src = "hello \x1b[38;5;38;48;5;48;1mwor\x1b[38;5;48;48;5;38ml\x1b[0md"
|
||||||
check(func(offsets []ansiOffset) {
|
check(func(offsets *[]ansiOffset, state *ansiState) {
|
||||||
if len(offsets) != 2 {
|
if len(*offsets) != 2 {
|
||||||
t.Fail()
|
t.Fail()
|
||||||
}
|
}
|
||||||
assert(offsets[0], 6, 9, 38, 48, true)
|
assert((*offsets)[0], 6, 9, 38, 48, true)
|
||||||
assert(offsets[1], 9, 10, 48, 38, true)
|
assert((*offsets)[1], 9, 10, 48, 38, true)
|
||||||
|
})
|
||||||
|
|
||||||
|
src = "hello \x1b[32;1mworld"
|
||||||
|
check(func(offsets *[]ansiOffset, state *ansiState) {
|
||||||
|
if len(*offsets) != 1 {
|
||||||
|
t.Fail()
|
||||||
|
}
|
||||||
|
if state.fg != 2 || state.bg != -1 || !state.bold {
|
||||||
|
t.Fail()
|
||||||
|
}
|
||||||
|
assert((*offsets)[0], 6, 11, 2, -1, true)
|
||||||
|
})
|
||||||
|
|
||||||
|
src = "hello world"
|
||||||
|
check(func(offsets *[]ansiOffset, state *ansiState) {
|
||||||
|
if len(*offsets) != 1 {
|
||||||
|
t.Fail()
|
||||||
|
}
|
||||||
|
if state.fg != 2 || state.bg != -1 || !state.bold {
|
||||||
|
t.Fail()
|
||||||
|
}
|
||||||
|
assert((*offsets)[0], 0, 11, 2, -1, true)
|
||||||
|
})
|
||||||
|
|
||||||
|
src = "hello \x1b[0;38;5;200;48;5;100mworld"
|
||||||
|
check(func(offsets *[]ansiOffset, state *ansiState) {
|
||||||
|
if len(*offsets) != 2 {
|
||||||
|
t.Fail()
|
||||||
|
}
|
||||||
|
if state.fg != 200 || state.bg != 100 || state.bold {
|
||||||
|
t.Fail()
|
||||||
|
}
|
||||||
|
assert((*offsets)[0], 0, 6, 2, -1, true)
|
||||||
|
assert((*offsets)[1], 6, 11, 200, 100, false)
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
@@ -3,7 +3,7 @@ package fzf
 import "sync"

 // queryCache associates strings to lists of items
-type queryCache map[string][]*Item
+type queryCache map[string][]*Result

 // ChunkCache associates Chunk and query string to lists of items
 type ChunkCache struct {
@@ -17,7 +17,7 @@ func NewChunkCache() ChunkCache {
 }

 // Add adds the list to the cache
-func (cc *ChunkCache) Add(chunk *Chunk, key string, list []*Item) {
+func (cc *ChunkCache) Add(chunk *Chunk, key string, list []*Result) {
     if len(key) == 0 || !chunk.IsFull() || len(list) > queryCacheMax {
         return
     }
@@ -34,7 +34,7 @@ func (cc *ChunkCache) Add(chunk *Chunk, key string, list []*Item) {
 }

 // Find is called to lookup ChunkCache
-func (cc *ChunkCache) Find(chunk *Chunk, key string) ([]*Item, bool) {
+func (cc *ChunkCache) Find(chunk *Chunk, key string) ([]*Result, bool) {
     if len(key) == 0 || !chunk.IsFull() {
         return nil, false
     }

@@ -7,8 +7,8 @@ func TestChunkCache(t *testing.T) {
     chunk2 := make(Chunk, chunkSize)
     chunk1p := &Chunk{}
     chunk2p := &chunk2
-    items1 := []*Item{&Item{}}
-    items2 := []*Item{&Item{}, &Item{}}
+    items1 := []*Result{&Result{}}
+    items2 := []*Result{&Result{}, &Result{}}
     cache.Add(chunk1p, "foo", items1)
     cache.Add(chunk2p, "foo", items1)
     cache.Add(chunk2p, "bar", items2)
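As a reading aid for the two cache types above, here is a minimal, self-contained sketch of the idea: match results are cached per chunk, keyed by the query string, behind a mutex. The type and function names below are illustrative stand-ins, not the actual fzf types.

```go
package main

import (
    "fmt"
    "sync"
)

type chunk []string // stand-in for *Chunk
type result string  // stand-in for *Result

// chunkCache maps a chunk pointer to a per-query cache of results.
type chunkCache struct {
    mutex sync.Mutex
    cache map[*chunk]map[string][]result
}

func newChunkCache() chunkCache {
    return chunkCache{cache: make(map[*chunk]map[string][]result)}
}

func (cc *chunkCache) add(c *chunk, key string, list []result) {
    cc.mutex.Lock()
    defer cc.mutex.Unlock()
    qc, ok := cc.cache[c]
    if !ok {
        qc = make(map[string][]result)
        cc.cache[c] = qc
    }
    qc[key] = list
}

func (cc *chunkCache) find(c *chunk, key string) ([]result, bool) {
    cc.mutex.Lock()
    defer cc.mutex.Unlock()
    if qc, ok := cc.cache[c]; ok {
        list, found := qc[key]
        return list, found
    }
    return nil, false
}

func main() {
    c := &chunk{"foo", "foobar"}
    cc := newChunkCache()
    cc.add(c, "fo", []result{"foo", "foobar"})
    fmt.Println(cc.find(c, "fo")) // [foo foobar] true
}
```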
@@ -7,7 +7,7 @@ type Chunk []*Item // >>> []Item

 // ItemBuilder is a closure type that builds Item object from a pointer to a
 // string and an integer
-type ItemBuilder func(*string, int) *Item
+type ItemBuilder func([]byte, int) *Item

 // ChunkList is a list of Chunks
 type ChunkList struct {
@@ -26,8 +26,13 @@ func NewChunkList(trans ItemBuilder) *ChunkList {
         trans: trans}
 }

-func (c *Chunk) push(trans ItemBuilder, data *string, index int) {
-    *c = append(*c, trans(data, index))
+func (c *Chunk) push(trans ItemBuilder, data []byte, index int) bool {
+    item := trans(data, index)
+    if item != nil {
+        *c = append(*c, item)
+        return true
+    }
+    return false
 }

 // IsFull returns true if the Chunk is full
@@ -48,7 +53,7 @@ func CountItems(cs []*Chunk) int {
 }

 // Push adds the item to the list
-func (cl *ChunkList) Push(data string) {
+func (cl *ChunkList) Push(data []byte) bool {
     cl.mutex.Lock()
     defer cl.mutex.Unlock()

@@ -57,8 +62,11 @@ func (cl *ChunkList) Push(data string) {
         cl.chunks = append(cl.chunks, &newChunk)
     }

-    cl.lastChunk().push(cl.trans, &data, cl.count)
+    if cl.lastChunk().push(cl.trans, data, cl.count) {
         cl.count++
+        return true
+    }
+    return false
 }

 // Snapshot returns immutable snapshot of the ChunkList
@@ -3,11 +3,16 @@ package fzf
|
|||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
|
"github.com/junegunn/fzf/src/util"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestChunkList(t *testing.T) {
|
func TestChunkList(t *testing.T) {
|
||||||
cl := NewChunkList(func(s *string, i int) *Item {
|
// FIXME global
|
||||||
return &Item{text: s, rank: Rank{0, 0, uint32(i * 2)}}
|
sortCriteria = []criterion{byScore, byLength}
|
||||||
|
|
||||||
|
cl := NewChunkList(func(s []byte, i int) *Item {
|
||||||
|
return &Item{text: util.ToChars(s), index: int32(i * 2)}
|
||||||
})
|
})
|
||||||
|
|
||||||
// Snapshot
|
// Snapshot
|
||||||
@@ -17,8 +22,8 @@ func TestChunkList(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Add some data
|
// Add some data
|
||||||
cl.Push("hello")
|
cl.Push([]byte("hello"))
|
||||||
cl.Push("world")
|
cl.Push([]byte("world"))
|
||||||
|
|
||||||
// Previously created snapshot should remain the same
|
// Previously created snapshot should remain the same
|
||||||
if len(snapshot) > 0 {
|
if len(snapshot) > 0 {
|
||||||
@@ -36,8 +41,8 @@ func TestChunkList(t *testing.T) {
|
|||||||
if len(*chunk1) != 2 {
|
if len(*chunk1) != 2 {
|
||||||
t.Error("Snapshot should contain only two items")
|
t.Error("Snapshot should contain only two items")
|
||||||
}
|
}
|
||||||
if *(*chunk1)[0].text != "hello" || (*chunk1)[0].rank.index != 0 ||
|
if (*chunk1)[0].text.ToString() != "hello" || (*chunk1)[0].index != 0 ||
|
||||||
*(*chunk1)[1].text != "world" || (*chunk1)[1].rank.index != 2 {
|
(*chunk1)[1].text.ToString() != "world" || (*chunk1)[1].index != 2 {
|
||||||
t.Error("Invalid data")
|
t.Error("Invalid data")
|
||||||
}
|
}
|
||||||
if chunk1.IsFull() {
|
if chunk1.IsFull() {
|
||||||
@@ -46,7 +51,7 @@ func TestChunkList(t *testing.T) {
|
|||||||
|
|
||||||
// Add more data
|
// Add more data
|
||||||
for i := 0; i < chunkSize*2; i++ {
|
for i := 0; i < chunkSize*2; i++ {
|
||||||
cl.Push(fmt.Sprintf("item %d", i))
|
cl.Push([]byte(fmt.Sprintf("item %d", i)))
|
||||||
}
|
}
|
||||||
|
|
||||||
// Previous snapshot should remain the same
|
// Previous snapshot should remain the same
|
||||||
@@ -64,8 +69,8 @@ func TestChunkList(t *testing.T) {
|
|||||||
t.Error("Unexpected number of items")
|
t.Error("Unexpected number of items")
|
||||||
}
|
}
|
||||||
|
|
||||||
cl.Push("hello")
|
cl.Push([]byte("hello"))
|
||||||
cl.Push("world")
|
cl.Push([]byte("world"))
|
||||||
|
|
||||||
lastChunkCount := len(*snapshot[len(snapshot)-1])
|
lastChunkCount := len(*snapshot[len(snapshot)-1])
|
||||||
if lastChunkCount != 2 {
|
if lastChunkCount != 2 {
|
||||||
@@ -8,30 +8,45 @@ import (

const (
// Current version
- Version = "0.9.13"
+ version = "0.15.0"

// Core
coordinatorDelayMax time.Duration = 100 * time.Millisecond
coordinatorDelayStep time.Duration = 10 * time.Millisecond

// Reader
- defaultCommand = `find * -path '*/\.*' -prune -o -type f -print -o -type l -print 2> /dev/null`
+ defaultCommand = `find . -path '*/\.*' -prune -o -type f -print -o -type l -print 2> /dev/null | sed s/^..//`
+ readerBufferSize = 64 * 1024

// Terminal
- initialDelay = 100 * time.Millisecond
+ initialDelay = 20 * time.Millisecond
+ initialDelayTac = 100 * time.Millisecond
spinnerDuration = 200 * time.Millisecond
+ maxPatternLength = 100

// Matcher
+ numPartitionsMultiplier = 8
+ maxPartitions = 32
progressMinDuration = 200 * time.Millisecond

// Capacity of each chunk
chunkSize int = 100

+ // Pre-allocated memory slices to minimize GC
+ slab16Size int = 100 * 1024 // 200KB * 32 = 12.8MB
+ slab32Size int = 2048 // 8KB * 32 = 256KB

// Do not cache results of low selectivity queries
queryCacheMax int = chunkSize / 5

// Not to cache mergers with large lists
mergerCacheMax int = 100000

+ // History
+ defaultHistoryMax int = 1000

+ // Jump labels
+ defaultJumpLabels string = "asdfghjklqwertyuiopzxcvbnm1234567890ASDFGHJKLQWERTYUIOPZXCVBNM`~;:,<.>/?'\"!@#$%^&*()[{]}-_=+"
)

// fzf events
@@ -41,5 +56,13 @@ const (
EvtSearchNew
EvtSearchProgress
EvtSearchFin
+ EvtHeader
EvtClose
)

+ const (
+ exitOk = 0
+ exitNoMatch = 1
+ exitError = 2
+ exitInterrupt = 130
+ )
src/core.go (134 changed lines)

@@ -3,7 +3,7 @@ Package fzf implements fzf, a command-line fuzzy finder.

The MIT License (MIT)

- Copyright (c) 2015 Junegunn Choi
+ Copyright (c) 2016 Junegunn Choi

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
@@ -28,82 +28,93 @@ package fzf
import (
"fmt"
"os"
- "runtime"
"time"

"github.com/junegunn/fzf/src/util"
)

- func initProcs() {
- runtime.GOMAXPROCS(runtime.NumCPU())
- }

/*
Reader -> EvtReadFin
Reader -> EvtReadNew -> Matcher (restart)
Terminal -> EvtSearchNew:bool -> Matcher (restart)
Matcher -> EvtSearchProgress -> Terminal (update info)
Matcher -> EvtSearchFin -> Terminal (update list)
+ Matcher -> EvtHeader -> Terminal (update header)
*/

// Run starts fzf
func Run(opts *Options) {
- initProcs()

sort := opts.Sort > 0
- rankTiebreak = opts.Tiebreak
+ sortCriteria = opts.Criteria

if opts.Version {
- fmt.Println(Version)
- os.Exit(0)
+ fmt.Println(version)
+ os.Exit(exitOk)
}

// Event channel
eventBox := util.NewEventBox()

// ANSI code processor
- ansiProcessor := func(data *string) (*string, []ansiOffset) {
- // By default, we do nothing
- return data, nil
+ ansiProcessor := func(data []byte) (util.Chars, *[]ansiOffset) {
+ return util.ToChars(data), nil
+ }
+ ansiProcessorRunes := func(data []rune) (util.Chars, *[]ansiOffset) {
+ return util.RunesToChars(data), nil
}
if opts.Ansi {
if opts.Theme != nil {
- ansiProcessor = func(data *string) (*string, []ansiOffset) {
- return extractColor(data)
+ var state *ansiState
+ ansiProcessor = func(data []byte) (util.Chars, *[]ansiOffset) {
+ trimmed, offsets, newState := extractColor(string(data), state, nil)
+ state = newState
+ return util.RunesToChars([]rune(trimmed)), offsets
}
} else {
// When color is disabled but ansi option is given,
// we simply strip out ANSI codes from the input
- ansiProcessor = func(data *string) (*string, []ansiOffset) {
- trimmed, _ := extractColor(data)
- return trimmed, nil
+ ansiProcessor = func(data []byte) (util.Chars, *[]ansiOffset) {
+ trimmed, _, _ := extractColor(string(data), nil, nil)
+ return util.RunesToChars([]rune(trimmed)), nil
}
}
+ ansiProcessorRunes = func(data []rune) (util.Chars, *[]ansiOffset) {
+ return ansiProcessor([]byte(string(data)))
+ }
}

// Chunk list
var chunkList *ChunkList
+ header := make([]string, 0, opts.HeaderLines)
if len(opts.WithNth) == 0 {
- chunkList = NewChunkList(func(data *string, index int) *Item {
- data, colors := ansiProcessor(data)
+ chunkList = NewChunkList(func(data []byte, index int) *Item {
+ if len(header) < opts.HeaderLines {
+ header = append(header, string(data))
+ eventBox.Set(EvtHeader, header)
+ return nil
+ }
+ chars, colors := ansiProcessor(data)
return &Item{
- text: data,
- index: uint32(index),
- colors: colors,
- rank: Rank{0, 0, uint32(index)}}
+ index: int32(index),
+ text: chars,
+ colors: colors}
})
} else {
- chunkList = NewChunkList(func(data *string, index int) *Item {
- tokens := Tokenize(data, opts.Delimiter)
+ chunkList = NewChunkList(func(data []byte, index int) *Item {
+ tokens := Tokenize(util.ToChars(data), opts.Delimiter)
trans := Transform(tokens, opts.WithNth)
+ if len(header) < opts.HeaderLines {
+ header = append(header, string(joinTokens(trans)))
+ eventBox.Set(EvtHeader, header)
+ return nil
+ }
+ textRunes := joinTokens(trans)
item := Item{
- text: joinTokens(trans),
- origText: data,
- index: uint32(index),
- colors: nil,
- rank: Rank{0, 0, uint32(index)}}
+ index: int32(index),
+ origText: &data,
+ colors: nil}

- trimmed, colors := ansiProcessor(item.text)
+ trimmed, colors := ansiProcessorRunes(textRunes)
item.text = trimmed
item.colors = colors
return &item
@@ -113,33 +124,52 @@ func Run(opts *Options) {
// Reader
streamingFilter := opts.Filter != nil && !sort && !opts.Tac && !opts.Sync
if !streamingFilter {
- reader := Reader{func(str string) { chunkList.Push(str) }, eventBox}
+ reader := Reader{func(data []byte) bool {
+ return chunkList.Push(data)
+ }, eventBox, opts.ReadZero}
go reader.ReadSource()
}

// Matcher
+ forward := true
+ for _, cri := range opts.Criteria[1:] {
+ if cri == byEnd {
+ forward = false
+ break
+ }
+ if cri == byBegin {
+ break
+ }
+ }
patternBuilder := func(runes []rune) *Pattern {
return BuildPattern(
- opts.Mode, opts.Case, opts.Nth, opts.Delimiter, runes)
+ opts.Fuzzy, opts.FuzzyAlgo, opts.Extended, opts.Case, forward,
+ opts.Filter == nil, opts.Nth, opts.Delimiter, runes)
}
matcher := NewMatcher(patternBuilder, sort, opts.Tac, eventBox)

// Filtering mode
if opts.Filter != nil {
if opts.PrintQuery {
- fmt.Println(*opts.Filter)
+ opts.Printer(*opts.Filter)
}

pattern := patternBuilder([]rune(*opts.Filter))

+ found := false
if streamingFilter {
+ slab := util.MakeSlab(slab16Size, slab32Size)
reader := Reader{
- func(str string) {
- item := chunkList.trans(&str, 0)
- if pattern.MatchItem(item) {
- fmt.Println(*item.text)
+ func(runes []byte) bool {
+ item := chunkList.trans(runes, 0)
+ if item != nil {
+ if result, _, _ := pattern.MatchItem(item, false, slab); result != nil {
+ opts.Printer(item.text.ToString())
+ found = true
+ }
}
- }, eventBox}
+ }
+ return false
+ }, eventBox, opts.ReadZero}
reader.ReadSource()
} else {
eventBox.Unwatch(EvtReadNew)
@@ -150,10 +180,14 @@ func Run(opts *Options) {
chunks: snapshot,
pattern: pattern})
for i := 0; i < merger.Length(); i++ {
- fmt.Println(merger.Get(i).AsString())
+ opts.Printer(merger.Get(i).item.AsString(opts.Ansi))
+ found = true
}
}
- os.Exit(0)
+ if found {
+ os.Exit(exitOk)
+ }
+ os.Exit(exitNoMatch)
}

// Synchronous search
@@ -206,6 +240,9 @@ func Run(opts *Options) {
terminal.UpdateProgress(val)
}

+ case EvtHeader:
+ terminal.UpdateHeader(value.([]string))

case EvtSearchFin:
switch val := value.(type) {
case *Merger:
@@ -217,15 +254,18 @@ func Run(opts *Options) {
} else if val.final {
if opts.Exit0 && count == 0 || opts.Select1 && count == 1 {
if opts.PrintQuery {
- fmt.Println(opts.Query)
+ opts.Printer(opts.Query)
}
if len(opts.Expect) > 0 {
- fmt.Println()
+ opts.Printer("")
}
for i := 0; i < count; i++ {
- fmt.Println(val.Get(i).AsString())
+ opts.Printer(val.Get(i).item.AsString(opts.Ansi))
}
- os.Exit(0)
+ if count > 0 {
+ os.Exit(exitOk)
+ }
+ os.Exit(exitNoMatch)
}
deferred = false
terminal.startChan <- true
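
The exit-code constants introduced above (exitOk, exitNoMatch, exitError, exitInterrupt) make the non-interactive --filter mode scriptable. A small illustration, assuming fzf is on PATH; the wrapper program and its input are hypothetical, only the exit-status convention comes from the diff:

    package main

    import (
        "fmt"
        "os/exec"
        "strings"
    )

    func main() {
        // fzf --filter runs non-interactively; its exit status follows the
        // constants above: 0 = something matched, 1 = no match, 2 = error,
        // 130 = interrupted.
        cmd := exec.Command("fzf", "--filter", "needle")
        cmd.Stdin = strings.NewReader("haystack\nneedle in haystack\n")
        out, err := cmd.Output()
        code := 0
        if exitErr, ok := err.(*exec.ExitError); ok {
            code = exitErr.ExitCode()
        } else if err != nil {
            fmt.Println("could not run fzf:", err)
            return
        }
        fmt.Printf("exit=%d output=%q\n", code, string(out))
    }
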
@@ -3,18 +3,21 @@ package curses
/*
#include <ncurses.h>
#include <locale.h>
- #cgo LDFLAGS: -lncurses
- void swapOutput() {
- FILE* temp = stdout;
- stdout = stderr;
- stderr = temp;
+ #cgo !static LDFLAGS: -lncurses
+ #cgo static LDFLAGS: -l:libncursesw.a -l:libtinfo.a -l:libgpm.a -ldl
+ #cgo android static LDFLAGS: -l:libncurses.a -fPIE -march=armv7-a -mfpu=neon -mhard-float -Wl,--no-warn-mismatch
+ SCREEN *c_newterm () {
+ return newterm(NULL, stderr, stdin);
}

*/
import "C"

import (
+ "fmt"
"os"
- "os/signal"
+ "strings"
"syscall"
"time"
"unicode/utf8"
@@ -54,8 +57,10 @@ const (

Invalid
Mouse
+ DoubleClick

BTab
+ BSpace

Del
PgUp
@@ -75,7 +80,16 @@ const (
F2
F3
F4
+ F5
+ F6
+ F7
+ F8
+ F9
+ F10

+ AltEnter
+ AltSpace
+ AltSlash
AltBS
AltA
AltB
@@ -98,11 +112,15 @@ const (
ColInfo
ColCursor
ColSelected
- ColUser
+ ColHeader
+ ColBorder
+ ColUser // Should be the last entry
)

const (
doubleClickDuration = 500 * time.Millisecond
+ colDefault = -1
+ colUndefined = -2
)

type ColorTheme struct {
@@ -118,6 +136,8 @@ type ColorTheme struct {
Info int16
Cursor int16
Selected int16
+ Header int16
+ Border int16
}

type Event struct {
@@ -142,6 +162,7 @@ var (
_colorMap map[int]int
_prevDownTime time.Time
_clickY []int
+ _screen *C.SCREEN
Default16 *ColorTheme
Dark256 *ColorTheme
Light256 *ColorTheme
@@ -151,6 +172,49 @@ var (
DarkBG int
)

+ type Window struct {
+ win *C.WINDOW
+ Top int
+ Left int
+ Width int
+ Height int
+ }

+ func NewWindow(top int, left int, width int, height int, border bool) *Window {
+ win := C.newwin(C.int(height), C.int(width), C.int(top), C.int(left))
+ if border {
+ attr := _color(ColBorder, false)
+ C.wattron(win, attr)
+ C.box(win, 0, 0)
+ C.wattroff(win, attr)
+ }
+ return &Window{
+ win: win,
+ Top: top,
+ Left: left,
+ Width: width,
+ Height: height,
+ }
+ }

+ func EmptyTheme() *ColorTheme {
+ return &ColorTheme{
+ UseDefault: true,
+ Fg: colUndefined,
+ Bg: colUndefined,
+ DarkBg: colUndefined,
+ Prompt: colUndefined,
+ Match: colUndefined,
+ Current: colUndefined,
+ CurrentMatch: colUndefined,
+ Spinner: colUndefined,
+ Info: colUndefined,
+ Cursor: colUndefined,
+ Selected: colUndefined,
+ Header: colUndefined,
+ Border: colUndefined}
+ }

func init() {
_prevDownTime = time.Unix(0, 0)
_clickY = []int{}
@@ -167,7 +231,9 @@ func init() {
Spinner: C.COLOR_GREEN,
Info: C.COLOR_WHITE,
Cursor: C.COLOR_RED,
- Selected: C.COLOR_MAGENTA}
+ Selected: C.COLOR_MAGENTA,
+ Header: C.COLOR_CYAN,
+ Border: C.COLOR_BLACK}
Dark256 = &ColorTheme{
UseDefault: true,
Fg: 15,
@@ -180,7 +246,9 @@ func init() {
Spinner: 148,
Info: 144,
Cursor: 161,
- Selected: 168}
+ Selected: 168,
+ Header: 109,
+ Border: 59}
Light256 = &ColorTheme{
UseDefault: true,
Fg: 15,
@@ -193,7 +261,9 @@ func init() {
Spinner: 65,
Info: 101,
Cursor: 161,
- Selected: 168}
+ Selected: 168,
+ Header: 31,
+ Border: 145}
}

func attrColored(pair int, bold bool) C.int {
@@ -254,69 +324,79 @@ func Init(theme *ColorTheme, black bool, mouse bool) {
// syscall.Dup2(int(in.Fd()), int(os.Stdin.Fd()))
}

- C.swapOutput()

C.setlocale(C.LC_ALL, C.CString(""))
- C.initscr()
+ _screen = C.c_newterm()
+ if _screen == nil {
+ fmt.Println("Invalid $TERM: " + os.Getenv("TERM"))
+ os.Exit(2)
+ }
+ C.set_term(_screen)
if mouse {
C.mousemask(C.ALL_MOUSE_EVENTS, nil)
}
- C.cbreak()
C.noecho()
C.raw() // stty dsusp undef

- intChan := make(chan os.Signal, 1)
- signal.Notify(intChan, os.Interrupt, os.Kill)
- go func() {
- <-intChan
- Close()
- os.Exit(1)
- }()

if theme != nil {
C.start_color()
- initPairs(theme, black)
+ var baseTheme *ColorTheme
+ if C.tigetnum(C.CString("colors")) >= 256 {
+ baseTheme = Dark256
+ } else {
+ baseTheme = Default16
+ }
+ initPairs(baseTheme, theme, black)
_color = attrColored
} else {
_color = attrMono
}
}

- func initPairs(theme *ColorTheme, black bool) {
- fg := C.short(theme.Fg)
- bg := C.short(theme.Bg)
+ func override(a int16, b int16) C.short {
+ if b == colUndefined {
+ return C.short(a)
+ }
+ return C.short(b)
+ }

+ func initPairs(baseTheme *ColorTheme, theme *ColorTheme, black bool) {
+ fg := override(baseTheme.Fg, theme.Fg)
+ bg := override(baseTheme.Bg, theme.Bg)
if black {
bg = C.COLOR_BLACK
} else if theme.UseDefault {
- fg = -1
- bg = -1
+ fg = colDefault
+ bg = colDefault
C.use_default_colors()
}
if theme.UseDefault {
- FG = -1
- BG = -1
+ FG = colDefault
+ BG = colDefault
} else {
FG = int(fg)
BG = int(bg)
- C.assume_default_colors(C.int(theme.Fg), C.int(bg))
+ C.assume_default_colors(C.int(override(baseTheme.Fg, theme.Fg)), C.int(bg))
}

- CurrentFG = int(theme.Current)
- DarkBG = int(theme.DarkBg)
- darkBG := C.short(DarkBG)
- C.init_pair(ColPrompt, C.short(theme.Prompt), bg)
- C.init_pair(ColMatch, C.short(theme.Match), bg)
- C.init_pair(ColCurrent, C.short(theme.Current), darkBG)
- C.init_pair(ColCurrentMatch, C.short(theme.CurrentMatch), darkBG)
- C.init_pair(ColSpinner, C.short(theme.Spinner), bg)
- C.init_pair(ColInfo, C.short(theme.Info), bg)
- C.init_pair(ColCursor, C.short(theme.Cursor), darkBG)
- C.init_pair(ColSelected, C.short(theme.Selected), darkBG)
+ currentFG := override(baseTheme.Current, theme.Current)
+ darkBG := override(baseTheme.DarkBg, theme.DarkBg)
+ CurrentFG = int(currentFG)
+ DarkBG = int(darkBG)
+ C.init_pair(ColPrompt, override(baseTheme.Prompt, theme.Prompt), bg)
+ C.init_pair(ColMatch, override(baseTheme.Match, theme.Match), bg)
+ C.init_pair(ColCurrent, currentFG, darkBG)
+ C.init_pair(ColCurrentMatch, override(baseTheme.CurrentMatch, theme.CurrentMatch), darkBG)
+ C.init_pair(ColSpinner, override(baseTheme.Spinner, theme.Spinner), bg)
+ C.init_pair(ColInfo, override(baseTheme.Info, theme.Info), bg)
+ C.init_pair(ColCursor, override(baseTheme.Cursor, theme.Cursor), darkBG)
+ C.init_pair(ColSelected, override(baseTheme.Selected, theme.Selected), darkBG)
+ C.init_pair(ColHeader, override(baseTheme.Header, theme.Header), bg)
+ C.init_pair(ColBorder, override(baseTheme.Border, theme.Border), bg)
}

func Close() {
C.endwin()
- C.swapOutput()
+ C.delscreen(_screen)
}

func GetBytes() []byte {
@@ -367,7 +447,9 @@ func mouseSequence(sz *int) Event {
97, 101, 105, 113: // scroll-down / shift / cmd / ctrl
mod := _buf[3] >= 100
s := 1 - int(_buf[3]%2)*2
- return Event{Mouse, 0, &MouseEvent{0, 0, s, false, false, mod}}
+ x := int(_buf[4] - 33)
+ y := int(_buf[5] - 33)
+ return Event{Mouse, 0, &MouseEvent{y, x, s, false, false, mod}}
}
return Event{Invalid, 0, nil}
}
@@ -378,6 +460,12 @@ func escSequence(sz *int) Event {
}
*sz = 2
switch _buf[1] {
+ case 13:
+ return Event{AltEnter, 0, nil}
+ case 32:
+ return Event{AltSpace, 0, nil}
+ case 47:
+ return Event{AltSlash, 0, nil}
case 98:
return Event{AltB, 0, nil}
case 100:
@@ -423,6 +511,20 @@ func escSequence(sz *int) Event {
*sz = 4
switch _buf[2] {
case 50:
+ if len(_buf) == 5 && _buf[4] == 126 {
+ *sz = 5
+ switch _buf[3] {
+ case 48:
+ return Event{F9, 0, nil}
+ case 49:
+ return Event{F10, 0, nil}
+ }
+ }
+ // Bracketed paste mode \e[200~ / \e[201
+ if _buf[3] == 48 && (_buf[4] == 48 || _buf[4] == 49) && _buf[5] == 126 {
+ *sz = 6
+ return Event{Invalid, 0, nil}
+ }
return Event{Invalid, 0, nil} // INS
case 51:
return Event{Del, 0, nil}
@@ -436,6 +538,21 @@ func escSequence(sz *int) Event {
switch _buf[3] {
case 126:
return Event{Home, 0, nil}
+ case 53, 55, 56, 57:
+ if len(_buf) == 5 && _buf[4] == 126 {
+ *sz = 5
+ switch _buf[3] {
+ case 53:
+ return Event{F5, 0, nil}
+ case 55:
+ return Event{F6, 0, nil}
+ case 56:
+ return Event{F7, 0, nil}
+ case 57:
+ return Event{F8, 0, nil}
+ }
+ }
+ return Event{Invalid, 0, nil}
case 59:
if len(_buf) != 6 {
return Event{Invalid, 0, nil}
@@ -481,10 +598,14 @@ func GetChar() Event {
}()

switch _buf[0] {
- case CtrlC, CtrlG, CtrlQ:
+ case CtrlC:
return Event{CtrlC, 0, nil}
+ case CtrlG:
+ return Event{CtrlG, 0, nil}
+ case CtrlQ:
+ return Event{CtrlQ, 0, nil}
case 127:
- return Event{CtrlH, 0, nil}
+ return Event{BSpace, 0, nil}
case ESC:
return escSequence(&sz)
}
@@ -501,24 +622,37 @@ func GetChar() Event {
return Event{Rune, r, nil}
}

- func Move(y int, x int) {
- C.move(C.int(y), C.int(x))
+ func (w *Window) Close() {
+ C.delwin(w.win)
}

- func MoveAndClear(y int, x int) {
- Move(y, x)
- C.clrtoeol()
+ func (w *Window) Enclose(y int, x int) bool {
+ return bool(C.wenclose(w.win, C.int(y), C.int(x)))
}

- func Print(text string) {
- C.addstr(C.CString(text))
+ func (w *Window) Move(y int, x int) {
+ C.wmove(w.win, C.int(y), C.int(x))
}

- func CPrint(pair int, bold bool, text string) {
+ func (w *Window) MoveAndClear(y int, x int) {
+ w.Move(y, x)
+ C.wclrtoeol(w.win)
+ }

+ func (w *Window) Print(text string) {
+ C.waddstr(w.win, C.CString(strings.Map(func(r rune) rune {
+ if r < 32 {
+ return -1
+ }
+ return r
+ }, text)))
+ }

+ func (w *Window) CPrint(pair int, bold bool, text string) {
attr := _color(pair, bold)
- C.attron(attr)
- Print(text)
- C.attroff(attr)
+ C.wattron(w.win, attr)
+ w.Print(text)
+ C.wattroff(w.win, attr)
}

func Clear() {
@@ -533,6 +667,30 @@ func Refresh() {
C.refresh()
}

+ func (w *Window) Erase() {
+ C.werase(w.win)
+ }

+ func (w *Window) Fill(str string) bool {
+ return C.waddstr(w.win, C.CString(str)) == C.OK
+ }

+ func (w *Window) CFill(str string, fg int, bg int, bold bool) bool {
+ attr := _color(PairFor(fg, bg), bold)
+ C.wattron(w.win, attr)
+ ret := w.Fill(str)
+ C.wattroff(w.win, attr)
+ return ret
+ }

+ func (w *Window) Refresh() {
+ C.wnoutrefresh(w.win)
+ }

+ func DoUpdate() {
+ C.doupdate()
+ }

func PairFor(fg int, bg int) int {
key := (fg << 8) + bg
if found, prs := _colorMap[key]; prs {
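
A quick note on the override helper added above: a color from the user's theme wins only if it was actually set; colUndefined falls back to the base theme (Dark256 or Default16). A standalone sketch of the same precedence rule, outside of cgo:

    package main

    import "fmt"

    const colUndefined = -2

    // override mirrors the helper above: the user-supplied value wins unless
    // it was left undefined, in which case the base theme's value is used.
    func override(base int16, user int16) int16 {
        if user == colUndefined {
            return base
        }
        return user
    }

    func main() {
        baseFg := int16(15)                         // e.g. Dark256.Fg
        fmt.Println(override(baseFg, colUndefined)) // 15: fall back to the base theme
        fmt.Println(override(baseFg, 231))          // 231: explicit user color wins
    }
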
src/history.go (new file, 96 lines)
@@ -0,0 +1,96 @@
+ package fzf
+
+ import (
+ "errors"
+ "io/ioutil"
+ "os"
+ "strings"
+ )
+
+ // History struct represents input history
+ type History struct {
+ path string
+ lines []string
+ modified map[int]string
+ maxSize int
+ cursor int
+ }
+
+ // NewHistory returns the pointer to a new History struct
+ func NewHistory(path string, maxSize int) (*History, error) {
+ fmtError := func(e error) error {
+ if os.IsPermission(e) {
+ return errors.New("permission denied: " + path)
+ }
+ return errors.New("invalid history file: " + e.Error())
+ }
+
+ // Read history file
+ data, err := ioutil.ReadFile(path)
+ if err != nil {
+ // If it doesn't exist, check if we can create a file with the name
+ if os.IsNotExist(err) {
+ data = []byte{}
+ if err := ioutil.WriteFile(path, data, 0600); err != nil {
+ return nil, fmtError(err)
+ }
+ } else {
+ return nil, fmtError(err)
+ }
+ }
+ // Split lines and limit the maximum number of lines
+ lines := strings.Split(strings.Trim(string(data), "\n"), "\n")
+ if len(lines[len(lines)-1]) > 0 {
+ lines = append(lines, "")
+ }
+ return &History{
+ path: path,
+ maxSize: maxSize,
+ lines: lines,
+ modified: make(map[int]string),
+ cursor: len(lines) - 1}, nil
+ }
+
+ func (h *History) append(line string) error {
+ // We don't append empty lines
+ if len(line) == 0 {
+ return nil
+ }
+
+ lines := append(h.lines[:len(h.lines)-1], line)
+ if len(lines) > h.maxSize {
+ lines = lines[len(lines)-h.maxSize : len(lines)]
+ }
+ h.lines = append(lines, "")
+ return ioutil.WriteFile(h.path, []byte(strings.Join(h.lines, "\n")), 0600)
+ }
+
+ func (h *History) override(str string) {
+ // You can update the history but they're not written to the file
+ if h.cursor == len(h.lines)-1 {
+ h.lines[h.cursor] = str
+ } else if h.cursor < len(h.lines)-1 {
+ h.modified[h.cursor] = str
+ }
+ }
+
+ func (h *History) current() string {
+ if str, prs := h.modified[h.cursor]; prs {
+ return str
+ }
+ return h.lines[h.cursor]
+ }
+
+ func (h *History) previous() string {
+ if h.cursor > 0 {
+ h.cursor--
+ }
+ return h.current()
+ }
+
+ func (h *History) next() string {
+ if h.cursor < len(h.lines)-1 {
+ h.cursor++
+ }
+ return h.current()
+ }
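
The new History type keeps the saved queries plus one trailing empty line that stands for the query currently being typed, with the cursor starting on that empty line. A simplified, self-contained sketch of the previous/next cursor walk (the modified-entries map and the file persistence are omitted):

    package main

    import "fmt"

    type history struct {
        lines  []string // saved entries plus one trailing in-progress line
        cursor int
    }

    func (h *history) current() string { return h.lines[h.cursor] }

    func (h *history) previous() string {
        if h.cursor > 0 {
            h.cursor--
        }
        return h.current()
    }

    func (h *history) next() string {
        if h.cursor < len(h.lines)-1 {
            h.cursor++
        }
        return h.current()
    }

    func main() {
        h := history{lines: []string{"first query", "second query", ""}}
        h.cursor = len(h.lines) - 1 // start on the empty in-progress line
        fmt.Println(h.previous())   // second query
        fmt.Println(h.previous())   // first query
        fmt.Println(h.previous())   // first query (already at the oldest entry)
        fmt.Println(h.next())       // second query
    }
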
src/history_test.go (new file, 59 lines)
@@ -0,0 +1,59 @@
+ package fzf
+
+ import (
+ "os/user"
+ "testing"
+ )
+
+ func TestHistory(t *testing.T) {
+ maxHistory := 50
+
+ // Invalid arguments
+ user, _ := user.Current()
+ paths := []string{"/etc", "/proc"}
+ if user.Name != "root" {
+ paths = append(paths, "/etc/sudoers")
+ }
+ for _, path := range paths {
+ if _, e := NewHistory(path, maxHistory); e == nil {
+ t.Error("Error expected for: " + path)
+ }
+ }
+ { // Append lines
+ h, _ := NewHistory("/tmp/fzf-history", maxHistory)
+ for i := 0; i < maxHistory+10; i++ {
+ h.append("foobar")
+ }
+ }
+ { // Read lines
+ h, _ := NewHistory("/tmp/fzf-history", maxHistory)
+ if len(h.lines) != maxHistory+1 {
+ t.Errorf("Expected: %d, actual: %d\n", maxHistory+1, len(h.lines))
+ }
+ for i := 0; i < maxHistory; i++ {
+ if h.lines[i] != "foobar" {
+ t.Error("Expected: foobar, actual: " + h.lines[i])
+ }
+ }
+ }
+ { // Append lines
+ h, _ := NewHistory("/tmp/fzf-history", maxHistory)
+ h.append("barfoo")
+ h.append("")
+ h.append("foobarbaz")
+ }
+ { // Read lines again
+ h, _ := NewHistory("/tmp/fzf-history", maxHistory)
+ if len(h.lines) != maxHistory+1 {
+ t.Errorf("Expected: %d, actual: %d\n", maxHistory+1, len(h.lines))
+ }
+ compare := func(idx int, exp string) {
+ if h.lines[idx] != exp {
+ t.Errorf("Expected: %s, actual: %s\n", exp, h.lines[idx])
+ }
+ }
+ compare(maxHistory-3, "foobar")
+ compare(maxHistory-2, "barfoo")
+ compare(maxHistory-1, "foobarbaz")
+ }
+ }
src/item.go (250 changed lines)
@@ -1,247 +1,39 @@
package fzf

import (
- "math"
-
- "github.com/junegunn/fzf/src/curses"
+ "github.com/junegunn/fzf/src/util"
)

- // Offset holds two 32-bit integers denoting the offsets of a matched substring
- type Offset [2]int32
-
- type colorOffset struct {
- offset [2]int32
- color int
- bold bool
- }
-
// Item represents each input line
type Item struct {
- text *string
- origText *string
- transformed *[]Token
- index uint32
- offsets []Offset
- colors []ansiOffset
- rank Rank
+ index int32
+ text util.Chars
+ origText *[]byte
+ colors *[]ansiOffset
+ transformed []Token
}

- // Rank is used to sort the search result
- type Rank struct {
- matchlen uint16
- tiebreak uint16
- index uint32
+ // Index returns ordinal index of the Item
+ func (item *Item) Index() int32 {
+ return item.index
}

- // Tiebreak criterion to use. Never changes once fzf is started.
- var rankTiebreak tiebreak
-
- // Rank calculates rank of the Item
- func (i *Item) Rank(cache bool) Rank {
- if cache && (i.rank.matchlen > 0 || i.rank.tiebreak > 0) {
- return i.rank
+ // Colors returns ansiOffsets of the Item
+ func (item *Item) Colors() []ansiOffset {
+ if item.colors == nil {
+ return []ansiOffset{}
}
- matchlen := 0
- prevEnd := 0
- minBegin := math.MaxUint16
- for _, offset := range i.offsets {
- begin := int(offset[0])
- end := int(offset[1])
- if prevEnd > begin {
- begin = prevEnd
- }
- if end > prevEnd {
- prevEnd = end
- }
- if end > begin {
- if begin < minBegin {
- minBegin = begin
- }
- matchlen += end - begin
- }
- }
- var tiebreak uint16
- switch rankTiebreak {
- case byLength:
- tiebreak = uint16(len(*i.text))
- case byBegin:
- // We can't just look at i.offsets[0][0] because it can be an inverse term
- tiebreak = uint16(minBegin)
- case byEnd:
- if prevEnd > 0 {
- tiebreak = uint16(1 + len(*i.text) - prevEnd)
- } else {
- // Empty offsets due to inverse terms.
- tiebreak = 1
- }
- case byIndex:
- tiebreak = 1
- }
- rank := Rank{uint16(matchlen), tiebreak, i.index}
- if cache {
- i.rank = rank
- }
- return rank
+ return *item.colors
}

// AsString returns the original string
- func (i *Item) AsString() string {
- if i.origText != nil {
- return *i.origText
+ func (item *Item) AsString(stripAnsi bool) string {
+ if item.origText != nil {
+ if stripAnsi {
+ trimmed, _, _ := extractColor(string(*item.origText), nil, nil)
+ return trimmed
}
- return *i.text
+ return string(*item.origText)
}
+ return item.text.ToString()
- func (item *Item) colorOffsets(color int, bold bool, current bool) []colorOffset {
- if len(item.colors) == 0 {
- var offsets []colorOffset
- for _, off := range item.offsets {
- offsets = append(offsets, colorOffset{offset: off, color: color, bold: bold})
- }
- return offsets
- }
-
- // Find max column
- var maxCol int32
- for _, off := range item.offsets {
- if off[1] > maxCol {
- maxCol = off[1]
- }
- }
- for _, ansi := range item.colors {
- if ansi.offset[1] > maxCol {
- maxCol = ansi.offset[1]
- }
- }
- cols := make([]int, maxCol)
-
- for colorIndex, ansi := range item.colors {
- for i := ansi.offset[0]; i < ansi.offset[1]; i++ {
- cols[i] = colorIndex + 1 // XXX
- }
- }
-
- for _, off := range item.offsets {
- for i := off[0]; i < off[1]; i++ {
- cols[i] = -1
- }
- }
-
- // sort.Sort(ByOrder(offsets))
-
- // Merge offsets
- // ------------ ---- -- ----
- // ++++++++ ++++++++++
- // --++++++++-- --++++++++++---
- curr := 0
- start := 0
- var offsets []colorOffset
- add := func(idx int) {
- if curr != 0 && idx > start {
- if curr == -1 {
- offsets = append(offsets, colorOffset{
- offset: Offset{int32(start), int32(idx)}, color: color, bold: bold})
- } else {
- ansi := item.colors[curr-1]
- fg := ansi.color.fg
- if fg == -1 {
- if current {
- fg = curses.CurrentFG
- } else {
- fg = curses.FG
- }
- }
- bg := ansi.color.bg
- if bg == -1 {
- if current {
- bg = curses.DarkBG
- } else {
- bg = curses.BG
- }
- }
- offsets = append(offsets, colorOffset{
- offset: Offset{int32(start), int32(idx)},
- color: curses.PairFor(fg, bg),
- bold: ansi.color.bold || bold})
- }
- }
- }
- for idx, col := range cols {
- if col != curr {
- add(idx)
- start = idx
- curr = col
- }
- }
- add(int(maxCol))
- return offsets
- }
-
- // ByOrder is for sorting substring offsets
- type ByOrder []Offset
-
- func (a ByOrder) Len() int {
- return len(a)
- }
-
- func (a ByOrder) Swap(i, j int) {
- a[i], a[j] = a[j], a[i]
- }
-
- func (a ByOrder) Less(i, j int) bool {
- ioff := a[i]
- joff := a[j]
- return (ioff[0] < joff[0]) || (ioff[0] == joff[0]) && (ioff[1] <= joff[1])
- }
-
- // ByRelevance is for sorting Items
- type ByRelevance []*Item
-
- func (a ByRelevance) Len() int {
- return len(a)
- }
-
- func (a ByRelevance) Swap(i, j int) {
- a[i], a[j] = a[j], a[i]
- }
-
- func (a ByRelevance) Less(i, j int) bool {
- irank := a[i].Rank(true)
- jrank := a[j].Rank(true)
-
- return compareRanks(irank, jrank, false)
- }
-
- // ByRelevanceTac is for sorting Items
- type ByRelevanceTac []*Item
-
- func (a ByRelevanceTac) Len() int {
- return len(a)
- }
-
- func (a ByRelevanceTac) Swap(i, j int) {
- a[i], a[j] = a[j], a[i]
- }
-
- func (a ByRelevanceTac) Less(i, j int) bool {
- irank := a[i].Rank(true)
- jrank := a[j].Rank(true)
-
- return compareRanks(irank, jrank, true)
- }
-
- func compareRanks(irank Rank, jrank Rank, tac bool) bool {
- if irank.matchlen < jrank.matchlen {
- return true
- } else if irank.matchlen > jrank.matchlen {
- return false
- }
-
- if irank.tiebreak < jrank.tiebreak {
- return true
- } else if irank.tiebreak > jrank.tiebreak {
- return false
- }
-
- return (irank.index <= jrank.index) != tac
}
src/item_test.go (109 changed lines)
@@ -1,104 +1,23 @@
package fzf

import (
- "sort"
"testing"

- "github.com/junegunn/fzf/src/curses"
+ "github.com/junegunn/fzf/src/util"
)

- func TestOffsetSort(t *testing.T) {
- offsets := []Offset{
- Offset{3, 5}, Offset{2, 7},
- Offset{1, 3}, Offset{2, 9}}
- sort.Sort(ByOrder(offsets))
-
- if offsets[0][0] != 1 || offsets[0][1] != 3 ||
- offsets[1][0] != 2 || offsets[1][1] != 7 ||
- offsets[2][0] != 2 || offsets[2][1] != 9 ||
- offsets[3][0] != 3 || offsets[3][1] != 5 {
- t.Error("Invalid order:", offsets)
+ func TestStringPtr(t *testing.T) {
+ orig := []byte("\x1b[34mfoo")
+ text := []byte("\x1b[34mbar")
+ item := Item{origText: &orig, text: util.ToChars(text)}
+ if item.AsString(true) != "foo" || item.AsString(false) != string(orig) {
+ t.Fail()
+ }
+ if item.AsString(true) != "foo" {
+ t.Fail()
+ }
+ item.origText = nil
+ if item.AsString(true) != string(text) || item.AsString(false) != string(text) {
+ t.Fail()
}
}

- func TestRankComparison(t *testing.T) {
- if compareRanks(Rank{3, 0, 5}, Rank{2, 0, 7}, false) ||
- !compareRanks(Rank{3, 0, 5}, Rank{3, 0, 6}, false) ||
- !compareRanks(Rank{1, 2, 3}, Rank{1, 3, 2}, false) ||
- !compareRanks(Rank{0, 0, 0}, Rank{0, 0, 0}, false) {
- t.Error("Invalid order")
- }
-
- if compareRanks(Rank{3, 0, 5}, Rank{2, 0, 7}, true) ||
- !compareRanks(Rank{3, 0, 5}, Rank{3, 0, 6}, false) ||
- !compareRanks(Rank{1, 2, 3}, Rank{1, 3, 2}, true) ||
- !compareRanks(Rank{0, 0, 0}, Rank{0, 0, 0}, false) {
- t.Error("Invalid order (tac)")
- }
- }
-
- // Match length, string length, index
- func TestItemRank(t *testing.T) {
- strs := []string{"foo", "foobar", "bar", "baz"}
- item1 := Item{text: &strs[0], index: 1, offsets: []Offset{}}
- rank1 := item1.Rank(true)
- if rank1.matchlen != 0 || rank1.tiebreak != 3 || rank1.index != 1 {
- t.Error(item1.Rank(true))
- }
- // Only differ in index
- item2 := Item{text: &strs[0], index: 0, offsets: []Offset{}}
-
- items := []*Item{&item1, &item2}
- sort.Sort(ByRelevance(items))
- if items[0] != &item2 || items[1] != &item1 {
- t.Error(items)
- }
-
- items = []*Item{&item2, &item1, &item1, &item2}
- sort.Sort(ByRelevance(items))
- if items[0] != &item2 || items[1] != &item2 ||
- items[2] != &item1 || items[3] != &item1 {
- t.Error(items)
- }
-
- // Sort by relevance
- item3 := Item{text: &strs[1], rank: Rank{0, 0, 2}, offsets: []Offset{Offset{1, 3}, Offset{5, 7}}}
- item4 := Item{text: &strs[1], rank: Rank{0, 0, 2}, offsets: []Offset{Offset{1, 2}, Offset{6, 7}}}
- item5 := Item{text: &strs[2], rank: Rank{0, 0, 2}, offsets: []Offset{Offset{1, 3}, Offset{5, 7}}}
- item6 := Item{text: &strs[2], rank: Rank{0, 0, 2}, offsets: []Offset{Offset{1, 2}, Offset{6, 7}}}
- items = []*Item{&item1, &item2, &item3, &item4, &item5, &item6}
- sort.Sort(ByRelevance(items))
- if items[0] != &item2 || items[1] != &item1 ||
- items[2] != &item6 || items[3] != &item4 ||
- items[4] != &item5 || items[5] != &item3 {
- t.Error(items)
- }
- }
-
- func TestColorOffset(t *testing.T) {
- // ------------ 20 ---- -- ----
- // ++++++++ ++++++++++
- // --++++++++-- --++++++++++---
- item := Item{
- offsets: []Offset{Offset{5, 15}, Offset{25, 35}},
- colors: []ansiOffset{
- ansiOffset{[2]int32{0, 20}, ansiState{1, 5, false}},
- ansiOffset{[2]int32{22, 27}, ansiState{2, 6, true}},
- ansiOffset{[2]int32{30, 32}, ansiState{3, 7, false}},
- ansiOffset{[2]int32{33, 40}, ansiState{4, 8, true}}}}
- // [{[0 5] 9 false} {[5 15] 99 false} {[15 20] 9 false} {[22 25] 10 true} {[25 35] 99 false} {[35 40] 11 true}]
-
- offsets := item.colorOffsets(99, false, true)
- assert := func(idx int, b int32, e int32, c int, bold bool) {
- o := offsets[idx]
- if o.offset[0] != b || o.offset[1] != e || o.color != c || o.bold != bold {
- t.Error(o)
- }
- }
- assert(0, 0, 5, curses.ColUser, false)
- assert(1, 5, 15, 99, false)
- assert(2, 15, 20, curses.ColUser, false)
- assert(3, 22, 25, curses.ColUser+1, true)
- assert(4, 25, 35, 99, false)
- assert(5, 35, 40, curses.ColUser+2, true)
- }
@@ -26,6 +26,7 @@ type Matcher struct {
eventBox *util.EventBox
reqBox *util.EventBox
partitions int
+ slab []*util.Slab
mergerCache map[string]*Merger
}

@@ -37,13 +38,15 @@ const (
// NewMatcher returns a new Matcher
func NewMatcher(patternBuilder func([]rune) *Pattern,
sort bool, tac bool, eventBox *util.EventBox) *Matcher {
+ partitions := util.Min(numPartitionsMultiplier*runtime.NumCPU(), maxPartitions)
return &Matcher{
patternBuilder: patternBuilder,
sort: sort,
tac: tac,
eventBox: eventBox,
reqBox: util.NewEventBox(),
- partitions: runtime.NumCPU(),
+ partitions: partitions,
+ slab: make([]*util.Slab, partitions),
mergerCache: make(map[string]*Merger)}
}

@@ -96,7 +99,7 @@ func (m *Matcher) Loop() {
}

if !cancelled {
- if merger.Cacheable() {
+ if merger.cacheable() {
m.mergerCache[patternString] = merger
}
merger.final = request.final
@@ -106,18 +109,19 @@ func (m *Matcher) Loop() {
}

func (m *Matcher) sliceChunks(chunks []*Chunk) [][]*Chunk {
- perSlice := len(chunks) / m.partitions
+ partitions := m.partitions
+ perSlice := len(chunks) / partitions

- // No need to parallelize
if perSlice == 0 {
- return [][]*Chunk{chunks}
+ partitions = len(chunks)
+ perSlice = 1
}

- slices := make([][]*Chunk, m.partitions)
- for i := 0; i < m.partitions; i++ {
+ slices := make([][]*Chunk, partitions)
+ for i := 0; i < partitions; i++ {
start := i * perSlice
end := start + perSlice
- if i == m.partitions-1 {
+ if i == partitions-1 {
end = len(chunks)
}
slices[i] = chunks[start:end]
@@ -127,7 +131,7 @@ func (m *Matcher) sliceChunks(chunks []*Chunk) [][]*Chunk {

type partialResult struct {
index int
- matches []*Item
+ matches []*Result
}

func (m *Matcher) scan(request MatchRequest) (*Merger, bool) {
@@ -152,17 +156,26 @@ func (m *Matcher) scan(request MatchRequest) (*Merger, bool) {

for idx, chunks := range slices {
waitGroup.Add(1)
- go func(idx int, chunks []*Chunk) {
+ if m.slab[idx] == nil {
+ m.slab[idx] = util.MakeSlab(slab16Size, slab32Size)
+ }
+ go func(idx int, slab *util.Slab, chunks []*Chunk) {
defer func() { waitGroup.Done() }()
- sliceMatches := []*Item{}
- for _, chunk := range chunks {
- matches := request.pattern.Match(chunk)
- sliceMatches = append(sliceMatches, matches...)
+ count := 0
+ allMatches := make([][]*Result, len(chunks))
+ for idx, chunk := range chunks {
+ matches := request.pattern.Match(chunk, slab)
+ allMatches[idx] = matches
+ count += len(matches)
if cancelled.Get() {
return
}
countChan <- len(matches)
}
+ sliceMatches := make([]*Result, 0, count)
+ for _, matches := range allMatches {
+ sliceMatches = append(sliceMatches, matches...)
+ }
if m.sort {
if m.tac {
sort.Sort(ByRelevanceTac(sliceMatches))
@@ -171,7 +184,7 @@ func (m *Matcher) scan(request MatchRequest) (*Merger, bool) {
}
}
resultChan <- partialResult{idx, sliceMatches}
- }(idx, chunks)
+ }(idx, m.slab[idx], chunks)
}

wait := func() bool {
@@ -199,12 +212,12 @@ func (m *Matcher) scan(request MatchRequest) (*Merger, bool) {
}
}

- partialResults := make([][]*Item, numSlices)
- for range slices {
+ partialResults := make([][]*Result, numSlices)
+ for _ = range slices {
partialResult := <-resultChan
partialResults[partialResult.index] = partialResult.matches
}
- return NewMerger(partialResults, m.sort, m.tac), false
+ return NewMerger(pattern, partialResults, m.sort, m.tac), false
}

// Reset is called to interrupt/signal the ongoing search
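
The reworked sliceChunks above caps the number of partitions and, when there are fewer chunks than partitions, gives each chunk its own slice so no goroutine is spawned for an empty slice. A standalone sketch of the same partitioning arithmetic over plain ints:

    package main

    import "fmt"

    // sliceInts splits chunks into at most `partitions` contiguous slices;
    // when there are fewer chunks than partitions, each chunk gets its own
    // slice, and the last slice absorbs any remainder.
    func sliceInts(chunks []int, partitions int) [][]int {
        perSlice := len(chunks) / partitions
        if perSlice == 0 {
            partitions = len(chunks)
            perSlice = 1
        }
        slices := make([][]int, partitions)
        for i := 0; i < partitions; i++ {
            start := i * perSlice
            end := start + perSlice
            if i == partitions-1 {
                end = len(chunks) // last slice absorbs the remainder
            }
            slices[i] = chunks[start:end]
        }
        return slices
    }

    func main() {
        fmt.Println(sliceInts([]int{1, 2, 3, 4, 5, 6, 7}, 3)) // [[1 2] [3 4] [5 6 7]]
        fmt.Println(sliceInts([]int{1, 2}, 8))                // [[1] [2]]
    }
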
|
@@ -2,14 +2,15 @@ package fzf

 import "fmt"

-// Merger with no data
-var EmptyMerger = NewMerger([][]*Item{}, false, false)
+// EmptyMerger is a Merger with no data
+var EmptyMerger = NewMerger(nil, [][]*Result{}, false, false)

 // Merger holds a set of locally sorted lists of items and provides the view of
 // a single, globally-sorted list
 type Merger struct {
-    lists   [][]*Item
-    merged  []*Item
+    pattern *Pattern
+    lists   [][]*Result
+    merged  []*Result
     chunks  *[]*Chunk
     cursors []int
     sorted  bool
@@ -22,6 +23,7 @@ type Merger struct {
 // original order
 func PassMerger(chunks *[]*Chunk, tac bool) *Merger {
     mg := Merger{
+        pattern: nil,
         chunks:  chunks,
         tac:     tac,
         count:   0}
@@ -33,10 +35,11 @@ func PassMerger(chunks *[]*Chunk, tac bool) *Merger {
 }

 // NewMerger returns a new Merger
-func NewMerger(lists [][]*Item, sorted bool, tac bool) *Merger {
+func NewMerger(pattern *Pattern, lists [][]*Result, sorted bool, tac bool) *Merger {
     mg := Merger{
+        pattern: pattern,
         lists:   lists,
-        merged:  []*Item{},
+        merged:  []*Result{},
         chunks:  nil,
         cursors: make([]int, len(lists)),
         sorted:  sorted,
@@ -55,14 +58,14 @@ func (mg *Merger) Length() int {
     return mg.count
 }

-// Get returns the pointer to the Item object indexed by the given integer
-func (mg *Merger) Get(idx int) *Item {
+// Get returns the pointer to the Result object indexed by the given integer
+func (mg *Merger) Get(idx int) *Result {
     if mg.chunks != nil {
         if mg.tac {
             idx = mg.count - idx - 1
         }
         chunk := (*mg.chunks)[idx/chunkSize]
-        return (*chunk)[idx%chunkSize]
+        return &Result{item: (*chunk)[idx%chunkSize]}
     }

     if mg.sorted {
@@ -82,13 +85,13 @@ func (mg *Merger) Get(idx int) *Item {
     panic(fmt.Sprintf("Index out of bounds (unsorted, %d/%d)", idx, mg.count))
 }

-func (mg *Merger) Cacheable() bool {
+func (mg *Merger) cacheable() bool {
     return mg.count < mergerCacheMax
 }

-func (mg *Merger) mergedGet(idx int) *Item {
+func (mg *Merger) mergedGet(idx int) *Result {
     for i := len(mg.merged); i <= idx; i++ {
-        minRank := Rank{0, 0, 0}
+        minRank := minRank()
         minIdx := -1
         for listIdx, list := range mg.lists {
             cursor := mg.cursors[listIdx]
@@ -97,7 +100,7 @@ func (mg *Merger) mergedGet(idx int) *Item {
                 continue
             }
             if cursor >= 0 {
-                rank := list[cursor].Rank(false)
+                rank := list[cursor].rank
                 if minIdx < 0 || compareRanks(rank, minRank, mg.tac) {
                     minRank = rank
                     minIdx = listIdx
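The lazy k-way merge that mergedGet performs can be sketched independently of fzf's types. The version below uses plain ints in place of Result ranks and is only an illustration of the cursor-per-list idea, not the library's implementation.

    package main

    import "fmt"

    // mergeSorted merges locally sorted lists by repeatedly taking the list
    // whose current head has the smallest rank, as mergedGet does above.
    func mergeSorted(lists [][]int) []int {
        cursors := make([]int, len(lists))
        var merged []int
        for {
            minIdx, minRank := -1, 0
            for listIdx, list := range lists {
                cursor := cursors[listIdx]
                if cursor >= len(list) {
                    continue // this list is exhausted
                }
                if minIdx < 0 || list[cursor] < minRank {
                    minRank, minIdx = list[cursor], listIdx
                }
            }
            if minIdx < 0 {
                return merged // all lists exhausted
            }
            merged = append(merged, minRank)
            cursors[minIdx]++
        }
    }

    func main() {
        fmt.Println(mergeSorted([][]int{{1, 4, 9}, {2, 3}, {5}})) // [1 2 3 4 5 9]
    }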
@@ -5,6 +5,8 @@ import (
     "math/rand"
     "sort"
     "testing"
+
+    "github.com/junegunn/fzf/src/util"
 )

 func assert(t *testing.T, cond bool, msg ...string) {
@@ -13,18 +15,11 @@ func assert(t *testing.T, cond bool, msg ...string) {
     }
 }

-func randItem() *Item {
+func randResult() *Result {
     str := fmt.Sprintf("%d", rand.Uint32())
-    offsets := make([]Offset, rand.Int()%3)
-    for idx := range offsets {
-        sidx := int32(rand.Uint32() % 20)
-        eidx := sidx + int32(rand.Uint32()%20)
-        offsets[idx] = Offset{sidx, eidx}
-    }
-    return &Item{
-        text:    &str,
-        index:   rand.Uint32(),
-        offsets: offsets}
+    return &Result{
+        item: &Item{text: util.RunesToChars([]rune(str))},
+        rank: rank{index: rand.Int31()}}
 }

 func TestEmptyMerger(t *testing.T) {
@@ -34,23 +29,23 @@ func TestEmptyMerger(t *testing.T) {
     assert(t, len(EmptyMerger.merged) == 0, "Invalid merged list")
 }

-func buildLists(partiallySorted bool) ([][]*Item, []*Item) {
+func buildLists(partiallySorted bool) ([][]*Result, []*Result) {
     numLists := 4
-    lists := make([][]*Item, numLists)
+    lists := make([][]*Result, numLists)
     cnt := 0
     for i := 0; i < numLists; i++ {
-        numItems := rand.Int() % 20
-        cnt += numItems
-        lists[i] = make([]*Item, numItems)
-        for j := 0; j < numItems; j++ {
-            item := randItem()
+        numResults := rand.Int() % 20
+        cnt += numResults
+        lists[i] = make([]*Result, numResults)
+        for j := 0; j < numResults; j++ {
+            item := randResult()
             lists[i][j] = item
         }
         if partiallySorted {
             sort.Sort(ByRelevance(lists[i]))
         }
     }
-    items := []*Item{}
+    items := []*Result{}
     for _, list := range lists {
         items = append(items, list...)
     }
@@ -62,7 +57,7 @@ func TestMergerUnsorted(t *testing.T) {
     cnt := len(items)

     // Not sorted: same order
-    mg := NewMerger(lists, false, false)
+    mg := NewMerger(nil, lists, false, false)
     assert(t, cnt == mg.Length(), "Invalid Length")
     for i := 0; i < cnt; i++ {
         assert(t, items[i] == mg.Get(i), "Invalid Get")
@@ -74,7 +69,7 @@ func TestMergerSorted(t *testing.T) {
     cnt := len(items)

     // Sorted sorted order
-    mg := NewMerger(lists, true, false)
+    mg := NewMerger(nil, lists, true, false)
     assert(t, cnt == mg.Length(), "Invalid Length")
     sort.Sort(ByRelevance(items))
     for i := 0; i < cnt; i++ {
@@ -84,7 +79,7 @@ func TestMergerSorted(t *testing.T) {
     }

     // Inverse order
-    mg2 := NewMerger(lists, true, false)
+    mg2 := NewMerger(nil, lists, true, false)
     for i := cnt - 1; i >= 0; i-- {
         if items[i] != mg2.Get(i) {
             t.Error("Not sorted", items[i], mg2.Get(i))
src/options.go (746 lines changed): file diff suppressed because it is too large
@@ -1,17 +1,67 @@
|
|||||||
package fzf
|
package fzf
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"fmt"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/junegunn/fzf/src/curses"
|
"github.com/junegunn/fzf/src/curses"
|
||||||
|
"github.com/junegunn/fzf/src/util"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestDelimiterRegex(t *testing.T) {
|
func TestDelimiterRegex(t *testing.T) {
|
||||||
rx := delimiterRegexp("*")
|
// Valid regex
|
||||||
tokens := rx.FindAllString("-*--*---**---", -1)
|
delim := delimiterRegexp(".")
|
||||||
if tokens[0] != "-*" || tokens[1] != "--*" || tokens[2] != "---*" ||
|
if delim.regex == nil || delim.str != nil {
|
||||||
tokens[3] != "*" || tokens[4] != "---" {
|
t.Error(delim)
|
||||||
t.Errorf("%s %s %d", rx, tokens, len(tokens))
|
}
|
||||||
|
// Broken regex -> string
|
||||||
|
delim = delimiterRegexp("[0-9")
|
||||||
|
if delim.regex != nil || *delim.str != "[0-9" {
|
||||||
|
t.Error(delim)
|
||||||
|
}
|
||||||
|
// Valid regex
|
||||||
|
delim = delimiterRegexp("[0-9]")
|
||||||
|
if delim.regex.String() != "[0-9]" || delim.str != nil {
|
||||||
|
t.Error(delim)
|
||||||
|
}
|
||||||
|
// Tab character
|
||||||
|
delim = delimiterRegexp("\t")
|
||||||
|
if delim.regex != nil || *delim.str != "\t" {
|
||||||
|
t.Error(delim)
|
||||||
|
}
|
||||||
|
// Tab expression
|
||||||
|
delim = delimiterRegexp("\\t")
|
||||||
|
if delim.regex != nil || *delim.str != "\t" {
|
||||||
|
t.Error(delim)
|
||||||
|
}
|
||||||
|
// Tabs -> regex
|
||||||
|
delim = delimiterRegexp("\t+")
|
||||||
|
if delim.regex == nil || delim.str != nil {
|
||||||
|
t.Error(delim)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestDelimiterRegexString(t *testing.T) {
|
||||||
|
delim := delimiterRegexp("*")
|
||||||
|
tokens := Tokenize(util.RunesToChars([]rune("-*--*---**---")), delim)
|
||||||
|
if delim.regex != nil ||
|
||||||
|
tokens[0].text.ToString() != "-*" ||
|
||||||
|
tokens[1].text.ToString() != "--*" ||
|
||||||
|
tokens[2].text.ToString() != "---*" ||
|
||||||
|
tokens[3].text.ToString() != "*" ||
|
||||||
|
tokens[4].text.ToString() != "---" {
|
||||||
|
t.Errorf("%s %s %d", delim, tokens, len(tokens))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestDelimiterRegexRegex(t *testing.T) {
|
||||||
|
delim := delimiterRegexp("--\\*")
|
||||||
|
tokens := Tokenize(util.RunesToChars([]rune("-*--*---**---")), delim)
|
||||||
|
if delim.str != nil ||
|
||||||
|
tokens[0].text.ToString() != "-*--*" ||
|
||||||
|
tokens[1].text.ToString() != "---*" ||
|
||||||
|
tokens[2].text.ToString() != "*---" {
|
||||||
|
t.Errorf("%s %d", tokens, len(tokens))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -47,14 +97,16 @@ func TestIrrelevantNth(t *testing.T) {
|
|||||||
opts := defaultOptions()
|
opts := defaultOptions()
|
||||||
words := []string{"--nth", "..", "-x"}
|
words := []string{"--nth", "..", "-x"}
|
||||||
parseOptions(opts, words)
|
parseOptions(opts, words)
|
||||||
|
postProcessOptions(opts)
|
||||||
if len(opts.Nth) != 0 {
|
if len(opts.Nth) != 0 {
|
||||||
t.Errorf("nth should be empty: %s", opts.Nth)
|
t.Errorf("nth should be empty: %s", opts.Nth)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
for _, words := range [][]string{[]string{"--nth", "..,3"}, []string{"--nth", "3,1.."}, []string{"--nth", "..-1,1"}} {
|
for _, words := range [][]string{[]string{"--nth", "..,3", "+x"}, []string{"--nth", "3,1..", "+x"}, []string{"--nth", "..-1,1", "+x"}} {
|
||||||
{
|
{
|
||||||
opts := defaultOptions()
|
opts := defaultOptions()
|
||||||
parseOptions(opts, words)
|
parseOptions(opts, words)
|
||||||
|
postProcessOptions(opts)
|
||||||
if len(opts.Nth) != 0 {
|
if len(opts.Nth) != 0 {
|
||||||
t.Errorf("nth should be empty: %s", opts.Nth)
|
t.Errorf("nth should be empty: %s", opts.Nth)
|
||||||
}
|
}
|
||||||
@@ -63,6 +115,7 @@ func TestIrrelevantNth(t *testing.T) {
|
|||||||
opts := defaultOptions()
|
opts := defaultOptions()
|
||||||
words = append(words, "-x")
|
words = append(words, "-x")
|
||||||
parseOptions(opts, words)
|
parseOptions(opts, words)
|
||||||
|
postProcessOptions(opts)
|
||||||
if len(opts.Nth) != 2 {
|
if len(opts.Nth) != 2 {
|
||||||
t.Errorf("nth should not be empty: %s", opts.Nth)
|
t.Errorf("nth should not be empty: %s", opts.Nth)
|
||||||
}
|
}
|
||||||
@@ -71,63 +124,103 @@ func TestIrrelevantNth(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestParseKeys(t *testing.T) {
|
func TestParseKeys(t *testing.T) {
|
||||||
keys := parseKeyChords("ctrl-z,alt-z,f2,@,Alt-a,!,ctrl-G,J,g", "")
|
pairs := parseKeyChords("ctrl-z,alt-z,f2,@,Alt-a,!,ctrl-G,J,g,ALT-enter,alt-SPACE", "")
|
||||||
check := func(key int, expected int) {
|
check := func(i int, s string) {
|
||||||
if key != expected {
|
if pairs[i] != s {
|
||||||
t.Errorf("%d != %d", key, expected)
|
t.Errorf("%s != %s", pairs[i], s)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
check(len(keys), 9)
|
if len(pairs) != 11 {
|
||||||
check(keys[0], curses.CtrlZ)
|
t.Error(11)
|
||||||
check(keys[1], curses.AltZ)
|
}
|
||||||
check(keys[2], curses.F2)
|
check(curses.CtrlZ, "ctrl-z")
|
||||||
check(keys[3], curses.AltZ+'@')
|
check(curses.AltZ, "alt-z")
|
||||||
check(keys[4], curses.AltA)
|
check(curses.F2, "f2")
|
||||||
check(keys[5], curses.AltZ+'!')
|
check(curses.AltZ+'@', "@")
|
||||||
check(keys[6], curses.CtrlA+'g'-'a')
|
check(curses.AltA, "Alt-a")
|
||||||
check(keys[7], curses.AltZ+'J')
|
check(curses.AltZ+'!', "!")
|
||||||
check(keys[8], curses.AltZ+'g')
|
check(curses.CtrlA+'g'-'a', "ctrl-G")
|
||||||
|
check(curses.AltZ+'J', "J")
|
||||||
|
check(curses.AltZ+'g', "g")
|
||||||
|
check(curses.AltEnter, "ALT-enter")
|
||||||
|
check(curses.AltSpace, "alt-SPACE")
|
||||||
|
|
||||||
|
// Synonyms
|
||||||
|
pairs = parseKeyChords("enter,Return,space,tab,btab,esc,up,down,left,right", "")
|
||||||
|
if len(pairs) != 9 {
|
||||||
|
t.Error(9)
|
||||||
|
}
|
||||||
|
check(curses.CtrlM, "Return")
|
||||||
|
check(curses.AltZ+' ', "space")
|
||||||
|
check(curses.Tab, "tab")
|
||||||
|
check(curses.BTab, "btab")
|
||||||
|
check(curses.ESC, "esc")
|
||||||
|
check(curses.Up, "up")
|
||||||
|
check(curses.Down, "down")
|
||||||
|
check(curses.Left, "left")
|
||||||
|
check(curses.Right, "right")
|
||||||
|
|
||||||
|
pairs = parseKeyChords("Tab,Ctrl-I,PgUp,page-up,pgdn,Page-Down,Home,End,Alt-BS,Alt-BSpace,shift-left,shift-right,btab,shift-tab,return,Enter,bspace", "")
|
||||||
|
if len(pairs) != 11 {
|
||||||
|
t.Error(11)
|
||||||
|
}
|
||||||
|
check(curses.Tab, "Ctrl-I")
|
||||||
|
check(curses.PgUp, "page-up")
|
||||||
|
check(curses.PgDn, "Page-Down")
|
||||||
|
check(curses.Home, "Home")
|
||||||
|
check(curses.End, "End")
|
||||||
|
check(curses.AltBS, "Alt-BSpace")
|
||||||
|
check(curses.SLeft, "shift-left")
|
||||||
|
check(curses.SRight, "shift-right")
|
||||||
|
check(curses.BTab, "shift-tab")
|
||||||
|
check(curses.CtrlM, "Enter")
|
||||||
|
check(curses.BSpace, "bspace")
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestParseKeysWithComma(t *testing.T) {
|
func TestParseKeysWithComma(t *testing.T) {
|
||||||
check := func(key int, expected int) {
|
checkN := func(a int, b int) {
|
||||||
if key != expected {
|
if a != b {
|
||||||
t.Errorf("%d != %d", key, expected)
|
t.Errorf("%d != %d", a, b)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
check := func(pairs map[int]string, i int, s string) {
|
||||||
|
if pairs[i] != s {
|
||||||
|
t.Errorf("%s != %s", pairs[i], s)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
keys := parseKeyChords(",", "")
|
pairs := parseKeyChords(",", "")
|
||||||
check(len(keys), 1)
|
checkN(len(pairs), 1)
|
||||||
check(keys[0], curses.AltZ+',')
|
check(pairs, curses.AltZ+',', ",")
|
||||||
|
|
||||||
keys = parseKeyChords(",,a,b", "")
|
pairs = parseKeyChords(",,a,b", "")
|
||||||
check(len(keys), 3)
|
checkN(len(pairs), 3)
|
||||||
check(keys[0], curses.AltZ+'a')
|
check(pairs, curses.AltZ+'a', "a")
|
||||||
check(keys[1], curses.AltZ+'b')
|
check(pairs, curses.AltZ+'b', "b")
|
||||||
check(keys[2], curses.AltZ+',')
|
check(pairs, curses.AltZ+',', ",")
|
||||||
|
|
||||||
keys = parseKeyChords("a,b,,", "")
|
pairs = parseKeyChords("a,b,,", "")
|
||||||
check(len(keys), 3)
|
checkN(len(pairs), 3)
|
||||||
check(keys[0], curses.AltZ+'a')
|
check(pairs, curses.AltZ+'a', "a")
|
||||||
check(keys[1], curses.AltZ+'b')
|
check(pairs, curses.AltZ+'b', "b")
|
||||||
check(keys[2], curses.AltZ+',')
|
check(pairs, curses.AltZ+',', ",")
|
||||||
|
|
||||||
keys = parseKeyChords("a,,,b", "")
|
pairs = parseKeyChords("a,,,b", "")
|
||||||
check(len(keys), 3)
|
checkN(len(pairs), 3)
|
||||||
check(keys[0], curses.AltZ+'a')
|
check(pairs, curses.AltZ+'a', "a")
|
||||||
check(keys[1], curses.AltZ+'b')
|
check(pairs, curses.AltZ+'b', "b")
|
||||||
check(keys[2], curses.AltZ+',')
|
check(pairs, curses.AltZ+',', ",")
|
||||||
|
|
||||||
keys = parseKeyChords("a,,,b,c", "")
|
pairs = parseKeyChords("a,,,b,c", "")
|
||||||
check(len(keys), 4)
|
checkN(len(pairs), 4)
|
||||||
check(keys[0], curses.AltZ+'a')
|
check(pairs, curses.AltZ+'a', "a")
|
||||||
check(keys[1], curses.AltZ+'b')
|
check(pairs, curses.AltZ+'b', "b")
|
||||||
check(keys[2], curses.AltZ+'c')
|
check(pairs, curses.AltZ+'c', "c")
|
||||||
check(keys[3], curses.AltZ+',')
|
check(pairs, curses.AltZ+',', ",")
|
||||||
|
|
||||||
keys = parseKeyChords(",,,", "")
|
pairs = parseKeyChords(",,,", "")
|
||||||
check(len(keys), 1)
|
checkN(len(pairs), 1)
|
||||||
check(keys[0], curses.AltZ+',')
|
check(pairs, curses.AltZ+',', ",")
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestBind(t *testing.T) {
|
func TestBind(t *testing.T) {
|
||||||
@@ -136,23 +229,43 @@ func TestBind(t *testing.T) {
|
|||||||
t.Errorf("%d != %d", action, expected)
|
t.Errorf("%d != %d", action, expected)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
keymap := defaultKeymap()
|
checkString := func(action string, expected string) {
|
||||||
check(actBeginningOfLine, keymap[curses.CtrlA])
|
if action != expected {
|
||||||
keymap, toggleSort :=
|
t.Errorf("%d != %d", action, expected)
|
||||||
parseKeymap(keymap, false,
|
|
||||||
"ctrl-a:kill-line,ctrl-b:toggle-sort,c:page-up,alt-z:page-down")
|
|
||||||
if !toggleSort {
|
|
||||||
t.Errorf("toggleSort not set")
|
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
keymap := defaultKeymap()
|
||||||
|
execmap := make(map[int]string)
|
||||||
|
check(actBeginningOfLine, keymap[curses.CtrlA])
|
||||||
|
parseKeymap(keymap, execmap,
|
||||||
|
"ctrl-a:kill-line,ctrl-b:toggle-sort,c:page-up,alt-z:page-down,"+
|
||||||
|
"f1:execute(ls {}),f2:execute/echo {}, {}, {}/,f3:execute[echo '({})'],f4:execute;less {};,"+
|
||||||
|
"alt-a:execute@echo (,),[,],/,:,;,%,{}@,alt-b:execute;echo (,),[,],/,:,@,%,{};"+
|
||||||
|
",,:abort,::accept,X:execute:\nfoobar,Y:execute(baz)")
|
||||||
check(actKillLine, keymap[curses.CtrlA])
|
check(actKillLine, keymap[curses.CtrlA])
|
||||||
check(actToggleSort, keymap[curses.CtrlB])
|
check(actToggleSort, keymap[curses.CtrlB])
|
||||||
check(actPageUp, keymap[curses.AltZ+'c'])
|
check(actPageUp, keymap[curses.AltZ+'c'])
|
||||||
|
check(actAbort, keymap[curses.AltZ+','])
|
||||||
|
check(actAccept, keymap[curses.AltZ+':'])
|
||||||
check(actPageDown, keymap[curses.AltZ])
|
check(actPageDown, keymap[curses.AltZ])
|
||||||
|
check(actExecute, keymap[curses.F1])
|
||||||
|
check(actExecute, keymap[curses.F2])
|
||||||
|
check(actExecute, keymap[curses.F3])
|
||||||
|
check(actExecute, keymap[curses.F4])
|
||||||
|
checkString("ls {}", execmap[curses.F1])
|
||||||
|
checkString("echo {}, {}, {}", execmap[curses.F2])
|
||||||
|
checkString("echo '({})'", execmap[curses.F3])
|
||||||
|
checkString("less {}", execmap[curses.F4])
|
||||||
|
checkString("echo (,),[,],/,:,;,%,{}", execmap[curses.AltA])
|
||||||
|
checkString("echo (,),[,],/,:,@,%,{}", execmap[curses.AltB])
|
||||||
|
checkString("\nfoobar,Y:execute(baz)", execmap[curses.AltZ+'X'])
|
||||||
|
|
||||||
keymap, toggleSort = parseKeymap(keymap, false, "f1:abort")
|
for idx, char := range []rune{'~', '!', '@', '#', '$', '%', '^', '&', '*', '|', ';', '/'} {
|
||||||
if toggleSort {
|
parseKeymap(keymap, execmap, fmt.Sprintf("%d:execute%cfoobar%c", idx%10, char, char))
|
||||||
t.Errorf("toggleSort set")
|
checkString("foobar", execmap[curses.AltZ+int([]rune(fmt.Sprintf("%d", idx%10))[0])])
|
||||||
}
|
}
|
||||||
|
|
||||||
|
parseKeymap(keymap, execmap, "f1:abort")
|
||||||
check(actAbort, keymap[curses.F1])
|
check(actAbort, keymap[curses.F1])
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -201,3 +314,93 @@ func TestColorSpec(t *testing.T) {
|
|||||||
t.Errorf("using default colors")
|
t.Errorf("using default colors")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestParseNilTheme(t *testing.T) {
|
||||||
|
var theme *curses.ColorTheme
|
||||||
|
newTheme := parseTheme(theme, "prompt:12")
|
||||||
|
if newTheme != nil {
|
||||||
|
t.Errorf("color is disabled. keep it that way.")
|
||||||
|
}
|
||||||
|
newTheme = parseTheme(theme, "prompt:12,dark,prompt:13")
|
||||||
|
if newTheme.Prompt != 13 {
|
||||||
|
t.Errorf("color should now be enabled and customized")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestDefaultCtrlNP(t *testing.T) {
|
||||||
|
check := func(words []string, key int, expected actionType) {
|
||||||
|
opts := defaultOptions()
|
||||||
|
parseOptions(opts, words)
|
||||||
|
postProcessOptions(opts)
|
||||||
|
if opts.Keymap[key] != expected {
|
||||||
|
t.Error()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
check([]string{}, curses.CtrlN, actDown)
|
||||||
|
check([]string{}, curses.CtrlP, actUp)
|
||||||
|
|
||||||
|
check([]string{"--bind=ctrl-n:accept"}, curses.CtrlN, actAccept)
|
||||||
|
check([]string{"--bind=ctrl-p:accept"}, curses.CtrlP, actAccept)
|
||||||
|
|
||||||
|
hist := "--history=/tmp/foo"
|
||||||
|
check([]string{hist}, curses.CtrlN, actNextHistory)
|
||||||
|
check([]string{hist}, curses.CtrlP, actPreviousHistory)
|
||||||
|
|
||||||
|
check([]string{hist, "--bind=ctrl-n:accept"}, curses.CtrlN, actAccept)
|
||||||
|
check([]string{hist, "--bind=ctrl-n:accept"}, curses.CtrlP, actPreviousHistory)
|
||||||
|
|
||||||
|
check([]string{hist, "--bind=ctrl-p:accept"}, curses.CtrlN, actNextHistory)
|
||||||
|
check([]string{hist, "--bind=ctrl-p:accept"}, curses.CtrlP, actAccept)
|
||||||
|
}
|
||||||
|
|
||||||
|
func optsFor(words ...string) *Options {
|
||||||
|
opts := defaultOptions()
|
||||||
|
parseOptions(opts, words)
|
||||||
|
postProcessOptions(opts)
|
||||||
|
return opts
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestToggle(t *testing.T) {
|
||||||
|
opts := optsFor()
|
||||||
|
if opts.ToggleSort {
|
||||||
|
t.Error()
|
||||||
|
}
|
||||||
|
|
||||||
|
opts = optsFor("--bind=a:toggle-sort")
|
||||||
|
if !opts.ToggleSort {
|
||||||
|
t.Error()
|
||||||
|
}
|
||||||
|
|
||||||
|
opts = optsFor("--bind=a:toggle-sort", "--bind=a:up")
|
||||||
|
if opts.ToggleSort {
|
||||||
|
t.Error()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestPreviewOpts(t *testing.T) {
|
||||||
|
opts := optsFor()
|
||||||
|
if !(opts.Preview.command == "" &&
|
||||||
|
opts.Preview.hidden == false &&
|
||||||
|
opts.Preview.position == posRight &&
|
||||||
|
opts.Preview.size.percent == true &&
|
||||||
|
opts.Preview.size.size == 50) {
|
||||||
|
t.Error()
|
||||||
|
}
|
||||||
|
opts = optsFor("--preview", "cat {}", "--preview-window=left:15:hidden")
|
||||||
|
if !(opts.Preview.command == "cat {}" &&
|
||||||
|
opts.Preview.hidden == true &&
|
||||||
|
opts.Preview.position == posLeft &&
|
||||||
|
opts.Preview.size.percent == false &&
|
||||||
|
opts.Preview.size.size == 15+2) {
|
||||||
|
t.Error(opts.Preview)
|
||||||
|
}
|
||||||
|
|
||||||
|
opts = optsFor("--preview-window=left:15:hidden", "--preview-window=down")
|
||||||
|
if !(opts.Preview.command == "" &&
|
||||||
|
opts.Preview.hidden == false &&
|
||||||
|
opts.Preview.position == posDown &&
|
||||||
|
opts.Preview.size.percent == true &&
|
||||||
|
opts.Preview.size.size == 50) {
|
||||||
|
t.Error(opts.Preview)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
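The parseKeyChords tests above treat the parsed result as a map from numeric key code to the exact chord string the user typed. A simplified stand-alone sketch of that idea follows; the key-code bases and handled spellings are illustrative only and do not match fzf's curses constants or its full set of synonyms.

    package main

    import (
        "fmt"
        "strings"
    )

    // Illustrative key-code bases; fzf uses constants from its curses package instead.
    const (
        ctrlA = 1
        altZ  = 256
    )

    // parseChords maps each chord in a comma-separated list to a numeric key code,
    // keeping the original spelling as the value (as the tests above expect).
    func parseChords(str string) map[int]string {
        chords := make(map[int]string)
        for _, key := range strings.Split(str, ",") {
            if len(key) == 0 {
                continue // consecutive commas; fzf treats "," itself specially
            }
            lkey := strings.ToLower(key)
            switch {
            case strings.HasPrefix(lkey, "ctrl-") && len(lkey) == 6:
                chords[ctrlA+int(lkey[5])-'a'] = key
            case strings.HasPrefix(lkey, "alt-") && len(lkey) == 5:
                chords[altZ+int(key[4])] = key
            case len(key) == 1:
                chords[altZ+int(key[0])] = key
            }
        }
        return chords
    }

    func main() {
        fmt.Println(parseChords("ctrl-z,Alt-a,J"))
    }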
src/pattern.go (262 lines changed)
@@ -2,10 +2,10 @@ package fzf
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"regexp"
|
"regexp"
|
||||||
"sort"
|
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"github.com/junegunn/fzf/src/algo"
|
"github.com/junegunn/fzf/src/algo"
|
||||||
|
"github.com/junegunn/fzf/src/util"
|
||||||
)
|
)
|
||||||
|
|
||||||
// fuzzy
|
// fuzzy
|
||||||
@@ -24,6 +24,7 @@ const (
|
|||||||
termExact
|
termExact
|
||||||
termPrefix
|
termPrefix
|
||||||
termSuffix
|
termSuffix
|
||||||
|
termEqual
|
||||||
)
|
)
|
||||||
|
|
||||||
type term struct {
|
type term struct {
|
||||||
@@ -34,16 +35,21 @@ type term struct {
|
|||||||
origText []rune
|
origText []rune
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type termSet []term
|
||||||
|
|
||||||
// Pattern represents search pattern
|
// Pattern represents search pattern
|
||||||
type Pattern struct {
|
type Pattern struct {
|
||||||
mode Mode
|
fuzzy bool
|
||||||
|
fuzzyAlgo algo.Algo
|
||||||
|
extended bool
|
||||||
caseSensitive bool
|
caseSensitive bool
|
||||||
|
forward bool
|
||||||
text []rune
|
text []rune
|
||||||
terms []term
|
termSets []termSet
|
||||||
hasInvTerm bool
|
cacheable bool
|
||||||
delimiter *regexp.Regexp
|
delimiter Delimiter
|
||||||
nth []Range
|
nth []Range
|
||||||
procFun map[termType]func(bool, *[]rune, []rune) (int, int)
|
procFun map[termType]algo.Algo
|
||||||
}
|
}
|
||||||
|
|
||||||
var (
|
var (
|
||||||
@@ -60,7 +66,7 @@ func init() {
|
|||||||
|
|
||||||
func clearPatternCache() {
|
func clearPatternCache() {
|
||||||
// We can uniquely identify the pattern for a given string since
|
// We can uniquely identify the pattern for a given string since
|
||||||
// mode and caseMode do not change while the program is running
|
// search mode and caseMode do not change while the program is running
|
||||||
_patternCache = make(map[string]*Pattern)
|
_patternCache = make(map[string]*Pattern)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -69,14 +75,13 @@ func clearChunkCache() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// BuildPattern builds Pattern object from the given arguments
|
// BuildPattern builds Pattern object from the given arguments
|
||||||
func BuildPattern(mode Mode, caseMode Case,
|
func BuildPattern(fuzzy bool, fuzzyAlgo algo.Algo, extended bool, caseMode Case, forward bool,
|
||||||
nth []Range, delimiter *regexp.Regexp, runes []rune) *Pattern {
|
cacheable bool, nth []Range, delimiter Delimiter, runes []rune) *Pattern {
|
||||||
|
|
||||||
var asString string
|
var asString string
|
||||||
switch mode {
|
if extended {
|
||||||
case ModeExtended, ModeExtendedExact:
|
|
||||||
asString = strings.Trim(string(runes), " ")
|
asString = strings.Trim(string(runes), " ")
|
||||||
default:
|
} else {
|
||||||
asString = string(runes)
|
asString = string(runes)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -85,18 +90,23 @@ func BuildPattern(mode Mode, caseMode Case,
|
|||||||
return cached
|
return cached
|
||||||
}
|
}
|
||||||
|
|
||||||
caseSensitive, hasInvTerm := true, false
|
caseSensitive := true
|
||||||
terms := []term{}
|
termSets := []termSet{}
|
||||||
|
|
||||||
switch mode {
|
if extended {
|
||||||
case ModeExtended, ModeExtendedExact:
|
termSets = parseTerms(fuzzy, caseMode, asString)
|
||||||
terms = parseTerms(mode, caseMode, asString)
|
Loop:
|
||||||
for _, term := range terms {
|
for _, termSet := range termSets {
|
||||||
if term.inv {
|
for idx, term := range termSet {
|
||||||
hasInvTerm = true
|
// If the query contains inverse search terms or OR operators,
|
||||||
|
// we cannot cache the search scope
|
||||||
|
if !cacheable || idx > 0 || term.inv {
|
||||||
|
cacheable = false
|
||||||
|
break Loop
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
default:
|
}
|
||||||
|
} else {
|
||||||
lowerString := strings.ToLower(asString)
|
lowerString := strings.ToLower(asString)
|
||||||
caseSensitive = caseMode == CaseRespect ||
|
caseSensitive = caseMode == CaseRespect ||
|
||||||
caseMode == CaseSmart && lowerString != asString
|
caseMode == CaseSmart && lowerString != asString
|
||||||
@@ -106,16 +116,20 @@ func BuildPattern(mode Mode, caseMode Case,
|
|||||||
}
|
}
|
||||||
|
|
||||||
ptr := &Pattern{
|
ptr := &Pattern{
|
||||||
mode: mode,
|
fuzzy: fuzzy,
|
||||||
|
fuzzyAlgo: fuzzyAlgo,
|
||||||
|
extended: extended,
|
||||||
caseSensitive: caseSensitive,
|
caseSensitive: caseSensitive,
|
||||||
|
forward: forward,
|
||||||
text: []rune(asString),
|
text: []rune(asString),
|
||||||
terms: terms,
|
termSets: termSets,
|
||||||
hasInvTerm: hasInvTerm,
|
cacheable: cacheable,
|
||||||
nth: nth,
|
nth: nth,
|
||||||
delimiter: delimiter,
|
delimiter: delimiter,
|
||||||
procFun: make(map[termType]func(bool, *[]rune, []rune) (int, int))}
|
procFun: make(map[termType]algo.Algo)}
|
||||||
|
|
||||||
ptr.procFun[termFuzzy] = algo.FuzzyMatch
|
ptr.procFun[termFuzzy] = fuzzyAlgo
|
||||||
|
ptr.procFun[termEqual] = algo.EqualMatch
|
||||||
ptr.procFun[termExact] = algo.ExactMatchNaive
|
ptr.procFun[termExact] = algo.ExactMatchNaive
|
||||||
ptr.procFun[termPrefix] = algo.PrefixMatch
|
ptr.procFun[termPrefix] = algo.PrefixMatch
|
||||||
ptr.procFun[termSuffix] = algo.SuffixMatch
|
ptr.procFun[termSuffix] = algo.SuffixMatch
|
||||||
@@ -124,9 +138,11 @@ func BuildPattern(mode Mode, caseMode Case,
|
|||||||
return ptr
|
return ptr
|
||||||
}
|
}
|
||||||
|
|
||||||
func parseTerms(mode Mode, caseMode Case, str string) []term {
|
func parseTerms(fuzzy bool, caseMode Case, str string) []termSet {
|
||||||
tokens := _splitRegex.Split(str, -1)
|
tokens := _splitRegex.Split(str, -1)
|
||||||
terms := []term{}
|
sets := []termSet{}
|
||||||
|
set := termSet{}
|
||||||
|
switchSet := false
|
||||||
for _, token := range tokens {
|
for _, token := range tokens {
|
||||||
typ, inv, text := termFuzzy, false, token
|
typ, inv, text := termFuzzy, false, token
|
||||||
lowerText := strings.ToLower(text)
|
lowerText := strings.ToLower(text)
|
||||||
@@ -136,46 +152,68 @@ func parseTerms(mode Mode, caseMode Case, str string) []term {
|
|||||||
text = lowerText
|
text = lowerText
|
||||||
}
|
}
|
||||||
origText := []rune(text)
|
origText := []rune(text)
|
||||||
if mode == ModeExtendedExact {
|
if !fuzzy {
|
||||||
typ = termExact
|
typ = termExact
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if text == "|" {
|
||||||
|
switchSet = false
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
if strings.HasPrefix(text, "!") {
|
if strings.HasPrefix(text, "!") {
|
||||||
inv = true
|
inv = true
|
||||||
text = text[1:]
|
text = text[1:]
|
||||||
}
|
}
|
||||||
|
|
||||||
if strings.HasPrefix(text, "'") {
|
if strings.HasPrefix(text, "'") {
|
||||||
if mode == ModeExtended {
|
// Flip exactness
|
||||||
|
if fuzzy {
|
||||||
typ = termExact
|
typ = termExact
|
||||||
text = text[1:]
|
text = text[1:]
|
||||||
|
} else {
|
||||||
|
typ = termFuzzy
|
||||||
|
text = text[1:]
|
||||||
}
|
}
|
||||||
} else if strings.HasPrefix(text, "^") {
|
} else if strings.HasPrefix(text, "^") {
|
||||||
|
if strings.HasSuffix(text, "$") {
|
||||||
|
typ = termEqual
|
||||||
|
text = text[1 : len(text)-1]
|
||||||
|
} else {
|
||||||
typ = termPrefix
|
typ = termPrefix
|
||||||
text = text[1:]
|
text = text[1:]
|
||||||
|
}
|
||||||
} else if strings.HasSuffix(text, "$") {
|
} else if strings.HasSuffix(text, "$") {
|
||||||
typ = termSuffix
|
typ = termSuffix
|
||||||
text = text[:len(text)-1]
|
text = text[:len(text)-1]
|
||||||
}
|
}
|
||||||
|
|
||||||
if len(text) > 0 {
|
if len(text) > 0 {
|
||||||
terms = append(terms, term{
|
if switchSet {
|
||||||
|
sets = append(sets, set)
|
||||||
|
set = termSet{}
|
||||||
|
}
|
||||||
|
set = append(set, term{
|
||||||
typ: typ,
|
typ: typ,
|
||||||
inv: inv,
|
inv: inv,
|
||||||
text: []rune(text),
|
text: []rune(text),
|
||||||
caseSensitive: caseSensitive,
|
caseSensitive: caseSensitive,
|
||||||
origText: origText})
|
origText: origText})
|
||||||
|
switchSet = true
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return terms
|
if len(set) > 0 {
|
||||||
|
sets = append(sets, set)
|
||||||
|
}
|
||||||
|
return sets
|
||||||
}
|
}
|
||||||
|
|
||||||
// IsEmpty returns true if the pattern is effectively empty
|
// IsEmpty returns true if the pattern is effectively empty
|
||||||
func (p *Pattern) IsEmpty() bool {
|
func (p *Pattern) IsEmpty() bool {
|
||||||
if p.mode == ModeFuzzy {
|
if !p.extended {
|
||||||
return len(p.text) == 0
|
return len(p.text) == 0
|
||||||
}
|
}
|
||||||
return len(p.terms) == 0
|
return len(p.termSets) == 0
|
||||||
}
|
}
|
||||||
|
|
||||||
// AsString returns the search query in string type
|
// AsString returns the search query in string type
|
||||||
@@ -185,32 +223,30 @@ func (p *Pattern) AsString() string {
|
|||||||
|
|
||||||
// CacheKey is used to build string to be used as the key of result cache
|
// CacheKey is used to build string to be used as the key of result cache
|
||||||
func (p *Pattern) CacheKey() string {
|
func (p *Pattern) CacheKey() string {
|
||||||
if p.mode == ModeFuzzy {
|
if !p.extended {
|
||||||
return p.AsString()
|
return p.AsString()
|
||||||
}
|
}
|
||||||
cacheableTerms := []string{}
|
cacheableTerms := []string{}
|
||||||
for _, term := range p.terms {
|
for _, termSet := range p.termSets {
|
||||||
if term.inv {
|
if len(termSet) == 1 && !termSet[0].inv && (p.fuzzy || termSet[0].typ == termExact) {
|
||||||
continue
|
cacheableTerms = append(cacheableTerms, string(termSet[0].origText))
|
||||||
}
|
}
|
||||||
cacheableTerms = append(cacheableTerms, string(term.origText))
|
|
||||||
}
|
}
|
||||||
return strings.Join(cacheableTerms, " ")
|
return strings.Join(cacheableTerms, " ")
|
||||||
}
|
}
|
||||||
|
|
||||||
// Match returns the list of matches Items in the given Chunk
|
// Match returns the list of matches Items in the given Chunk
|
||||||
func (p *Pattern) Match(chunk *Chunk) []*Item {
|
func (p *Pattern) Match(chunk *Chunk, slab *util.Slab) []*Result {
|
||||||
space := chunk
|
|
||||||
|
|
||||||
// ChunkCache: Exact match
|
// ChunkCache: Exact match
|
||||||
cacheKey := p.CacheKey()
|
cacheKey := p.CacheKey()
|
||||||
if !p.hasInvTerm { // Because we're excluding Inv-term from cache key
|
if p.cacheable {
|
||||||
if cached, found := _cache.Find(chunk, cacheKey); found {
|
if cached, found := _cache.Find(chunk, cacheKey); found {
|
||||||
return cached
|
return cached
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// ChunkCache: Prefix/suffix match
|
// Prefix/suffix cache
|
||||||
|
var space []*Result
|
||||||
Loop:
|
Loop:
|
||||||
for idx := 1; idx < len(cacheKey); idx++ {
|
for idx := 1; idx < len(cacheKey); idx++ {
|
||||||
// [---------| ] | [ |---------]
|
// [---------| ] | [ |---------]
|
||||||
@@ -220,34 +256,33 @@ Loop:
|
|||||||
suffix := cacheKey[idx:]
|
suffix := cacheKey[idx:]
|
||||||
for _, substr := range [2]*string{&prefix, &suffix} {
|
for _, substr := range [2]*string{&prefix, &suffix} {
|
||||||
if cached, found := _cache.Find(chunk, *substr); found {
|
if cached, found := _cache.Find(chunk, *substr); found {
|
||||||
cachedChunk := Chunk(cached)
|
space = cached
|
||||||
space = &cachedChunk
|
|
||||||
break Loop
|
break Loop
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
matches := p.matchChunk(space)
|
matches := p.matchChunk(chunk, space, slab)
|
||||||
|
|
||||||
if !p.hasInvTerm {
|
if p.cacheable {
|
||||||
_cache.Add(chunk, cacheKey, matches)
|
_cache.Add(chunk, cacheKey, matches)
|
||||||
}
|
}
|
||||||
return matches
|
return matches
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p *Pattern) matchChunk(chunk *Chunk) []*Item {
|
func (p *Pattern) matchChunk(chunk *Chunk, space []*Result, slab *util.Slab) []*Result {
|
||||||
matches := []*Item{}
|
matches := []*Result{}
|
||||||
if p.mode == ModeFuzzy {
|
|
||||||
|
if space == nil {
|
||||||
for _, item := range *chunk {
|
for _, item := range *chunk {
|
||||||
if sidx, eidx := p.fuzzyMatch(item); sidx >= 0 {
|
if match, _, _ := p.MatchItem(item, false, slab); match != nil {
|
||||||
matches = append(matches,
|
matches = append(matches, match)
|
||||||
dupItem(item, []Offset{Offset{int32(sidx), int32(eidx)}}))
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
for _, item := range *chunk {
|
for _, result := range space {
|
||||||
if offsets := p.extendedMatch(item); len(offsets) == len(p.terms) {
|
if match, _, _ := p.MatchItem(result.item, false, slab); match != nil {
|
||||||
matches = append(matches, dupItem(item, offsets))
|
matches = append(matches, match)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -255,74 +290,105 @@ func (p *Pattern) matchChunk(chunk *Chunk) []*Item {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// MatchItem returns true if the Item is a match
|
// MatchItem returns true if the Item is a match
|
||||||
func (p *Pattern) MatchItem(item *Item) bool {
|
func (p *Pattern) MatchItem(item *Item, withPos bool, slab *util.Slab) (*Result, []Offset, *[]int) {
|
||||||
if p.mode == ModeFuzzy {
|
if p.extended {
|
||||||
sidx, _ := p.fuzzyMatch(item)
|
if offsets, bonus, trimLen, pos := p.extendedMatch(item, withPos, slab); len(offsets) == len(p.termSets) {
|
||||||
return sidx >= 0
|
return buildResult(item, offsets, bonus, trimLen), offsets, pos
|
||||||
}
|
}
|
||||||
offsets := p.extendedMatch(item)
|
return nil, nil, nil
|
||||||
return len(offsets) == len(p.terms)
|
}
|
||||||
|
offset, bonus, trimLen, pos := p.basicMatch(item, withPos, slab)
|
||||||
|
if sidx := offset[0]; sidx >= 0 {
|
||||||
|
offsets := []Offset{offset}
|
||||||
|
return buildResult(item, offsets, bonus, trimLen), offsets, pos
|
||||||
|
}
|
||||||
|
return nil, nil, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func dupItem(item *Item, offsets []Offset) *Item {
|
func (p *Pattern) basicMatch(item *Item, withPos bool, slab *util.Slab) (Offset, int, int, *[]int) {
|
||||||
sort.Sort(ByOrder(offsets))
|
|
||||||
return &Item{
|
|
||||||
text: item.text,
|
|
||||||
origText: item.origText,
|
|
||||||
transformed: item.transformed,
|
|
||||||
index: item.index,
|
|
||||||
offsets: offsets,
|
|
||||||
colors: item.colors,
|
|
||||||
rank: Rank{0, 0, item.index}}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *Pattern) fuzzyMatch(item *Item) (int, int) {
|
|
||||||
input := p.prepareInput(item)
|
input := p.prepareInput(item)
|
||||||
return p.iter(algo.FuzzyMatch, input, p.caseSensitive, p.text)
|
if p.fuzzy {
|
||||||
|
return p.iter(p.fuzzyAlgo, input, p.caseSensitive, p.forward, p.text, withPos, slab)
|
||||||
|
}
|
||||||
|
return p.iter(algo.ExactMatchNaive, input, p.caseSensitive, p.forward, p.text, withPos, slab)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p *Pattern) extendedMatch(item *Item) []Offset {
|
func (p *Pattern) extendedMatch(item *Item, withPos bool, slab *util.Slab) ([]Offset, int, int, *[]int) {
|
||||||
input := p.prepareInput(item)
|
input := p.prepareInput(item)
|
||||||
offsets := []Offset{}
|
offsets := []Offset{}
|
||||||
for _, term := range p.terms {
|
var totalScore int
|
||||||
|
var totalTrimLen int
|
||||||
|
var allPos *[]int
|
||||||
|
if withPos {
|
||||||
|
allPos = &[]int{}
|
||||||
|
}
|
||||||
|
for _, termSet := range p.termSets {
|
||||||
|
var offset Offset
|
||||||
|
var currentScore int
|
||||||
|
var trimLen int
|
||||||
|
matched := false
|
||||||
|
for _, term := range termSet {
|
||||||
pfun := p.procFun[term.typ]
|
pfun := p.procFun[term.typ]
|
||||||
if sidx, eidx := p.iter(pfun, input, term.caseSensitive, term.text); sidx >= 0 {
|
off, score, tLen, pos := p.iter(pfun, input, term.caseSensitive, p.forward, term.text, withPos, slab)
|
||||||
|
if sidx := off[0]; sidx >= 0 {
|
||||||
if term.inv {
|
if term.inv {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
offset, currentScore, trimLen = off, score, tLen
|
||||||
|
matched = true
|
||||||
|
if withPos {
|
||||||
|
if pos != nil {
|
||||||
|
*allPos = append(*allPos, *pos...)
|
||||||
|
} else {
|
||||||
|
for idx := off[0]; idx < off[1]; idx++ {
|
||||||
|
*allPos = append(*allPos, int(idx))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
break
|
break
|
||||||
}
|
|
||||||
offsets = append(offsets, Offset{int32(sidx), int32(eidx)})
|
|
||||||
} else if term.inv {
|
} else if term.inv {
|
||||||
offsets = append(offsets, Offset{0, 0})
|
offset, currentScore, trimLen = Offset{0, 0}, 0, 0
|
||||||
|
matched = true
|
||||||
|
continue
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return offsets
|
if matched {
|
||||||
|
offsets = append(offsets, offset)
|
||||||
|
totalScore += currentScore
|
||||||
|
totalTrimLen += trimLen
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return offsets, totalScore, totalTrimLen, allPos
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p *Pattern) prepareInput(item *Item) *[]Token {
|
func (p *Pattern) prepareInput(item *Item) []Token {
|
||||||
if item.transformed != nil {
|
if item.transformed != nil {
|
||||||
return item.transformed
|
return item.transformed
|
||||||
}
|
}
|
||||||
|
|
||||||
var ret *[]Token
|
var ret []Token
|
||||||
if len(p.nth) > 0 {
|
if len(p.nth) == 0 {
|
||||||
|
ret = []Token{Token{text: &item.text, prefixLength: 0, trimLength: int32(item.text.TrimLength())}}
|
||||||
|
} else {
|
||||||
tokens := Tokenize(item.text, p.delimiter)
|
tokens := Tokenize(item.text, p.delimiter)
|
||||||
ret = Transform(tokens, p.nth)
|
ret = Transform(tokens, p.nth)
|
||||||
} else {
|
|
||||||
runes := []rune(*item.text)
|
|
||||||
trans := []Token{Token{text: &runes, prefixLength: 0}}
|
|
||||||
ret = &trans
|
|
||||||
}
|
}
|
||||||
item.transformed = ret
|
item.transformed = ret
|
||||||
return ret
|
return ret
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p *Pattern) iter(pfun func(bool, *[]rune, []rune) (int, int),
|
func (p *Pattern) iter(pfun algo.Algo, tokens []Token, caseSensitive bool, forward bool, pattern []rune, withPos bool, slab *util.Slab) (Offset, int, int, *[]int) {
|
||||||
tokens *[]Token, caseSensitive bool, pattern []rune) (int, int) {
|
for _, part := range tokens {
|
||||||
for _, part := range *tokens {
|
if res, pos := pfun(caseSensitive, forward, *part.text, pattern, withPos, slab); res.Start >= 0 {
|
||||||
prefixLength := part.prefixLength
|
sidx := int32(res.Start) + part.prefixLength
|
||||||
if sidx, eidx := pfun(caseSensitive, part.text, pattern); sidx >= 0 {
|
eidx := int32(res.End) + part.prefixLength
|
||||||
return sidx + prefixLength, eidx + prefixLength
|
if pos != nil {
|
||||||
|
for idx := range *pos {
|
||||||
|
(*pos)[idx] += int(part.prefixLength)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return -1, -1
|
return Offset{sidx, eidx}, res.Score, int(part.trimLength), pos
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return Offset{-1, -1}, 0, -1, nil
|
||||||
}
|
}
|
||||||
|
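The parseTerms change above groups terms into termSets, with a bare | acting as an OR between adjacent terms while whitespace remains an AND. A rough stand-alone illustration of that grouping logic (simplified string tokens instead of fzf's term structs):

    package main

    import (
        "fmt"
        "strings"
    )

    // splitIntoSets groups whitespace-separated tokens so that tokens joined by
    // "|" land in the same set (OR group), mirroring the switchSet logic above.
    func splitIntoSets(query string) [][]string {
        var sets [][]string
        var set []string
        switchSet := false
        for _, token := range strings.Fields(query) {
            if token == "|" {
                switchSet = false // the next token joins the current set
                continue
            }
            if switchSet {
                sets = append(sets, set)
                set = nil
            }
            set = append(set, token)
            switchSet = true
        }
        if len(set) > 0 {
            sets = append(sets, set)
        }
        return sets
    }

    func main() {
        // "foo" AND ("bar" OR "baz")
        fmt.Println(splitIntoSets("foo bar | baz")) // [[foo] [bar baz]]
    }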
@@ -1,26 +1,40 @@
|
|||||||
package fzf
|
package fzf
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"reflect"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/junegunn/fzf/src/algo"
|
"github.com/junegunn/fzf/src/algo"
|
||||||
|
"github.com/junegunn/fzf/src/util"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
var slab *util.Slab
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
slab = util.MakeSlab(slab16Size, slab32Size)
|
||||||
|
}
|
||||||
|
|
||||||
func TestParseTermsExtended(t *testing.T) {
|
func TestParseTermsExtended(t *testing.T) {
|
||||||
terms := parseTerms(ModeExtended, CaseSmart,
|
terms := parseTerms(true, CaseSmart,
|
||||||
"aaa 'bbb ^ccc ddd$ !eee !'fff !^ggg !hhh$")
|
"| aaa 'bbb ^ccc ddd$ !eee !'fff !^ggg !hhh$ | ^iii$ ^xxx | 'yyy | | zzz$ | !ZZZ |")
|
||||||
if len(terms) != 8 ||
|
if len(terms) != 9 ||
|
||||||
terms[0].typ != termFuzzy || terms[0].inv ||
|
terms[0][0].typ != termFuzzy || terms[0][0].inv ||
|
||||||
terms[1].typ != termExact || terms[1].inv ||
|
terms[1][0].typ != termExact || terms[1][0].inv ||
|
||||||
terms[2].typ != termPrefix || terms[2].inv ||
|
terms[2][0].typ != termPrefix || terms[2][0].inv ||
|
||||||
terms[3].typ != termSuffix || terms[3].inv ||
|
terms[3][0].typ != termSuffix || terms[3][0].inv ||
|
||||||
terms[4].typ != termFuzzy || !terms[4].inv ||
|
terms[4][0].typ != termFuzzy || !terms[4][0].inv ||
|
||||||
terms[5].typ != termExact || !terms[5].inv ||
|
terms[5][0].typ != termExact || !terms[5][0].inv ||
|
||||||
terms[6].typ != termPrefix || !terms[6].inv ||
|
terms[6][0].typ != termPrefix || !terms[6][0].inv ||
|
||||||
terms[7].typ != termSuffix || !terms[7].inv {
|
terms[7][0].typ != termSuffix || !terms[7][0].inv ||
|
||||||
|
terms[7][1].typ != termEqual || terms[7][1].inv ||
|
||||||
|
terms[8][0].typ != termPrefix || terms[8][0].inv ||
|
||||||
|
terms[8][1].typ != termExact || terms[8][1].inv ||
|
||||||
|
terms[8][2].typ != termSuffix || terms[8][2].inv ||
|
||||||
|
terms[8][3].typ != termFuzzy || !terms[8][3].inv {
|
||||||
t.Errorf("%s", terms)
|
t.Errorf("%s", terms)
|
||||||
}
|
}
|
||||||
for idx, term := range terms {
|
for idx, termSet := range terms[:8] {
|
||||||
|
term := termSet[0]
|
||||||
if len(term.text) != 3 {
|
if len(term.text) != 3 {
|
||||||
t.Errorf("%s", term)
|
t.Errorf("%s", term)
|
||||||
}
|
}
|
||||||
@@ -28,26 +42,31 @@ func TestParseTermsExtended(t *testing.T) {
|
|||||||
t.Errorf("%s", term)
|
t.Errorf("%s", term)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
for _, term := range terms[8] {
|
||||||
|
if len(term.origText) != 4 {
|
||||||
|
t.Errorf("%s", term)
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestParseTermsExtendedExact(t *testing.T) {
|
func TestParseTermsExtendedExact(t *testing.T) {
|
||||||
terms := parseTerms(ModeExtendedExact, CaseSmart,
|
terms := parseTerms(false, CaseSmart,
|
||||||
"aaa 'bbb ^ccc ddd$ !eee !'fff !^ggg !hhh$")
|
"aaa 'bbb ^ccc ddd$ !eee !'fff !^ggg !hhh$")
|
||||||
if len(terms) != 8 ||
|
if len(terms) != 8 ||
|
||||||
terms[0].typ != termExact || terms[0].inv || len(terms[0].text) != 3 ||
|
terms[0][0].typ != termExact || terms[0][0].inv || len(terms[0][0].text) != 3 ||
|
||||||
terms[1].typ != termExact || terms[1].inv || len(terms[1].text) != 4 ||
|
terms[1][0].typ != termFuzzy || terms[1][0].inv || len(terms[1][0].text) != 3 ||
|
||||||
terms[2].typ != termPrefix || terms[2].inv || len(terms[2].text) != 3 ||
|
terms[2][0].typ != termPrefix || terms[2][0].inv || len(terms[2][0].text) != 3 ||
|
||||||
terms[3].typ != termSuffix || terms[3].inv || len(terms[3].text) != 3 ||
|
terms[3][0].typ != termSuffix || terms[3][0].inv || len(terms[3][0].text) != 3 ||
|
||||||
terms[4].typ != termExact || !terms[4].inv || len(terms[4].text) != 3 ||
|
terms[4][0].typ != termExact || !terms[4][0].inv || len(terms[4][0].text) != 3 ||
|
||||||
terms[5].typ != termExact || !terms[5].inv || len(terms[5].text) != 4 ||
|
terms[5][0].typ != termFuzzy || !terms[5][0].inv || len(terms[5][0].text) != 3 ||
|
||||||
terms[6].typ != termPrefix || !terms[6].inv || len(terms[6].text) != 3 ||
|
terms[6][0].typ != termPrefix || !terms[6][0].inv || len(terms[6][0].text) != 3 ||
|
||||||
terms[7].typ != termSuffix || !terms[7].inv || len(terms[7].text) != 3 {
|
terms[7][0].typ != termSuffix || !terms[7][0].inv || len(terms[7][0].text) != 3 {
|
||||||
t.Errorf("%s", terms)
|
t.Errorf("%s", terms)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestParseTermsEmpty(t *testing.T) {
|
func TestParseTermsEmpty(t *testing.T) {
|
||||||
terms := parseTerms(ModeExtended, CaseSmart, "' $ ^ !' !^ !$")
|
terms := parseTerms(true, CaseSmart, "' $ ^ !' !^ !$")
|
||||||
if len(terms) != 0 {
|
if len(terms) != 0 {
|
||||||
t.Errorf("%s", terms)
|
t.Errorf("%s", terms)
|
||||||
}
|
}
|
||||||
@@ -56,29 +75,51 @@ func TestParseTermsEmpty(t *testing.T) {
|
|||||||
func TestExact(t *testing.T) {
|
func TestExact(t *testing.T) {
|
||||||
defer clearPatternCache()
|
defer clearPatternCache()
|
||||||
clearPatternCache()
|
clearPatternCache()
|
||||||
pattern := BuildPattern(ModeExtended, CaseSmart,
|
pattern := BuildPattern(true, algo.FuzzyMatchV2, true, CaseSmart, true, true,
|
||||||
[]Range{}, nil, []rune("'abc"))
|
[]Range{}, Delimiter{}, []rune("'abc"))
|
||||||
runes := []rune("aabbcc abc")
|
res, pos := algo.ExactMatchNaive(
|
||||||
sidx, eidx := algo.ExactMatchNaive(pattern.caseSensitive, &runes, pattern.terms[0].text)
|
pattern.caseSensitive, pattern.forward, util.RunesToChars([]rune("aabbcc abc")), pattern.termSets[0][0].text, true, nil)
|
||||||
if sidx != 7 || eidx != 10 {
|
if res.Start != 7 || res.End != 10 {
|
||||||
t.Errorf("%s / %d / %d", pattern.terms, sidx, eidx)
|
t.Errorf("%s / %d / %d", pattern.termSets, res.Start, res.End)
|
||||||
}
|
}
|
||||||
|
if pos != nil {
|
||||||
|
t.Errorf("pos is expected to be nil")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestEqual(t *testing.T) {
|
||||||
|
defer clearPatternCache()
|
||||||
|
clearPatternCache()
|
||||||
|
pattern := BuildPattern(true, algo.FuzzyMatchV2, true, CaseSmart, true, true, []Range{}, Delimiter{}, []rune("^AbC$"))
|
||||||
|
|
||||||
|
match := func(str string, sidxExpected int, eidxExpected int) {
|
||||||
|
res, pos := algo.EqualMatch(
|
||||||
|
pattern.caseSensitive, pattern.forward, util.RunesToChars([]rune(str)), pattern.termSets[0][0].text, true, nil)
|
||||||
|
if res.Start != sidxExpected || res.End != eidxExpected {
|
||||||
|
t.Errorf("%s / %d / %d", pattern.termSets, res.Start, res.End)
|
||||||
|
}
|
||||||
|
if pos != nil {
|
||||||
|
t.Errorf("pos is expected to be nil")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
match("ABC", -1, -1)
|
||||||
|
match("AbC", 0, 3)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestCaseSensitivity(t *testing.T) {
|
func TestCaseSensitivity(t *testing.T) {
|
||||||
defer clearPatternCache()
|
defer clearPatternCache()
|
||||||
clearPatternCache()
|
clearPatternCache()
|
||||||
pat1 := BuildPattern(ModeFuzzy, CaseSmart, []Range{}, nil, []rune("abc"))
|
pat1 := BuildPattern(true, algo.FuzzyMatchV2, false, CaseSmart, true, true, []Range{}, Delimiter{}, []rune("abc"))
|
||||||
clearPatternCache()
|
clearPatternCache()
|
||||||
pat2 := BuildPattern(ModeFuzzy, CaseSmart, []Range{}, nil, []rune("Abc"))
|
pat2 := BuildPattern(true, algo.FuzzyMatchV2, false, CaseSmart, true, true, []Range{}, Delimiter{}, []rune("Abc"))
|
||||||
clearPatternCache()
|
clearPatternCache()
|
||||||
pat3 := BuildPattern(ModeFuzzy, CaseIgnore, []Range{}, nil, []rune("abc"))
|
pat3 := BuildPattern(true, algo.FuzzyMatchV2, false, CaseIgnore, true, true, []Range{}, Delimiter{}, []rune("abc"))
|
||||||
clearPatternCache()
|
clearPatternCache()
|
||||||
pat4 := BuildPattern(ModeFuzzy, CaseIgnore, []Range{}, nil, []rune("Abc"))
|
pat4 := BuildPattern(true, algo.FuzzyMatchV2, false, CaseIgnore, true, true, []Range{}, Delimiter{}, []rune("Abc"))
|
||||||
clearPatternCache()
|
clearPatternCache()
|
||||||
pat5 := BuildPattern(ModeFuzzy, CaseRespect, []Range{}, nil, []rune("abc"))
|
pat5 := BuildPattern(true, algo.FuzzyMatchV2, false, CaseRespect, true, true, []Range{}, Delimiter{}, []rune("abc"))
|
||||||
clearPatternCache()
|
clearPatternCache()
|
||||||
pat6 := BuildPattern(ModeFuzzy, CaseRespect, []Range{}, nil, []rune("Abc"))
|
pat6 := BuildPattern(true, algo.FuzzyMatchV2, false, CaseRespect, true, true, []Range{}, Delimiter{}, []rune("Abc"))
|
||||||
|
|
||||||
if string(pat1.text) != "abc" || pat1.caseSensitive != false ||
|
if string(pat1.text) != "abc" || pat1.caseSensitive != false ||
|
||||||
string(pat2.text) != "Abc" || pat2.caseSensitive != true ||
|
string(pat2.text) != "Abc" || pat2.caseSensitive != true ||
|
||||||
@@ -91,26 +132,57 @@ func TestCaseSensitivity(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestOrigTextAndTransformed(t *testing.T) {
|
func TestOrigTextAndTransformed(t *testing.T) {
|
||||||
strptr := func(str string) *string {
|
pattern := BuildPattern(true, algo.FuzzyMatchV2, true, CaseSmart, true, true, []Range{}, Delimiter{}, []rune("jg"))
|
||||||
return &str
|
tokens := Tokenize(util.RunesToChars([]rune("junegunn")), Delimiter{})
|
||||||
}
|
|
||||||
pattern := BuildPattern(ModeExtended, CaseSmart, []Range{}, nil, []rune("jg"))
|
|
||||||
tokens := Tokenize(strptr("junegunn"), nil)
|
|
||||||
trans := Transform(tokens, []Range{Range{1, 1}})
|
trans := Transform(tokens, []Range{Range{1, 1}})
|
||||||
|
|
||||||
for _, mode := range []Mode{ModeFuzzy, ModeExtended} {
|
origBytes := []byte("junegunn.choi")
|
||||||
|
for _, extended := range []bool{false, true} {
|
||||||
chunk := Chunk{
|
chunk := Chunk{
|
||||||
&Item{
|
&Item{
|
||||||
text: strptr("junegunn"),
|
text: util.RunesToChars([]rune("junegunn")),
|
||||||
origText: strptr("junegunn.choi"),
|
origText: &origBytes,
|
||||||
transformed: trans},
|
transformed: trans},
|
||||||
}
|
}
|
||||||
pattern.mode = mode
|
pattern.extended = extended
|
||||||
matches := pattern.matchChunk(&chunk)
|
matches := pattern.matchChunk(&chunk, nil, slab) // No cache
|
||||||
if *matches[0].text != "junegunn" || *matches[0].origText != "junegunn.choi" ||
|
if !(matches[0].item.text.ToString() == "junegunn" &&
|
||||||
matches[0].offsets[0][0] != 0 || matches[0].offsets[0][1] != 5 ||
|
string(*matches[0].item.origText) == "junegunn.choi" &&
|
||||||
matches[0].transformed != trans {
|
reflect.DeepEqual(matches[0].item.transformed, trans)) {
|
||||||
t.Error("Invalid match result", matches)
|
t.Error("Invalid match result", matches)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
match, offsets, pos := pattern.MatchItem(chunk[0], true, slab)
|
||||||
|
if !(match.item.text.ToString() == "junegunn" &&
|
||||||
|
string(*match.item.origText) == "junegunn.choi" &&
|
||||||
|
offsets[0][0] == 0 && offsets[0][1] == 5 &&
|
||||||
|
reflect.DeepEqual(match.item.transformed, trans)) {
|
||||||
|
t.Error("Invalid match result", match, offsets, extended)
|
||||||
|
}
|
||||||
|
if !((*pos)[0] == 4 && (*pos)[1] == 0) {
|
||||||
|
t.Error("Invalid pos array", *pos)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestCacheKey(t *testing.T) {
|
||||||
|
test := func(extended bool, patStr string, expected string, cacheable bool) {
|
||||||
|
pat := BuildPattern(true, algo.FuzzyMatchV2, extended, CaseSmart, true, true, []Range{}, Delimiter{}, []rune(patStr))
|
||||||
|
if pat.CacheKey() != expected {
|
||||||
|
t.Errorf("Expected: %s, actual: %s", expected, pat.CacheKey())
|
||||||
|
}
|
||||||
|
if pat.cacheable != cacheable {
|
||||||
|
t.Errorf("Expected: %s, actual: %s (%s)", cacheable, pat.cacheable, patStr)
|
||||||
|
}
|
||||||
|
clearPatternCache()
|
||||||
|
}
|
||||||
|
test(false, "foo !bar", "foo !bar", true)
|
||||||
|
test(false, "foo | bar !baz", "foo | bar !baz", true)
|
||||||
|
test(true, "foo bar baz", "foo bar baz", true)
|
||||||
|
test(true, "foo !bar", "foo", false)
|
||||||
|
test(true, "foo !bar baz", "foo baz", false)
|
||||||
|
test(true, "foo | bar baz", "baz", false)
|
||||||
|
test(true, "foo | bar | baz", "", false)
|
||||||
|
test(true, "foo | bar !baz", "", false)
|
||||||
|
test(true, "| | | foo", "foo", true)
|
||||||
|
}
|
||||||
|
@@ -4,15 +4,15 @@ import (
     "bufio"
     "io"
     "os"
-    "os/exec"

     "github.com/junegunn/fzf/src/util"
 )

 // Reader reads from command or standard input
 type Reader struct {
-    pusher   func(string)
+    pusher   func([]byte) bool
     eventBox *util.EventBox
+    delimNil bool
 }

 // ReadSource reads data from the default command or from standard input
@@ -30,32 +30,27 @@ func (r *Reader) ReadSource() {
 }

 func (r *Reader) feed(src io.Reader) {
-    reader := bufio.NewReader(src)
-    eof := false
-Loop:
-    for !eof {
-        buf := []byte{}
-        iter := 0 // TODO: max size?
-        for {
-            // "ReadLine either returns a non-nil line or it returns an error, never both"
-            line, isPrefix, err := reader.ReadLine()
-            eof = err == io.EOF
-            if eof {
-                break
-            } else if err != nil {
-                break Loop
-            }
-            iter++
-            buf = append(buf, line...)
-            if !isPrefix {
-                break
-            }
-        }
-        if iter > 0 {
-            r.pusher(string(buf))
-            r.eventBox.Set(EvtReadNew, nil)
-        }
-    }
+    delim := byte('\n')
+    if r.delimNil {
+        delim = '\000'
+    }
+    reader := bufio.NewReaderSize(src, readerBufferSize)
+
+    for {
+        // ReadBytes returns err != nil if and only if the returned data does not
+        // end in delim.
+        bytea, err := reader.ReadBytes(delim)
+        if len(bytea) > 0 {
+            if err == nil {
+                bytea = bytea[:len(bytea)-1]
+            }
+            if r.pusher(bytea) {
+                r.eventBox.Set(EvtReadNew, nil)
+            }
+        }
+        if err != nil {
+            break
+        }
+    }
 }
@@ -63,7 +58,7 @@ func (r *Reader) readFromStdin() {
 }

 func (r *Reader) readFromCommand(cmd string) {
-    listCommand := exec.Command("sh", "-c", cmd)
+    listCommand := util.ExecCommand(cmd)
     out, err := listCommand.StdoutPipe()
     if err != nil {
         return
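The rewritten feed loop above reads delimiter-terminated records with bufio.Reader.ReadBytes instead of line-oriented ReadLine. A small stand-alone sketch of the same pattern, reading from an in-memory buffer rather than a command or standard input:

    package main

    import (
        "bufio"
        "bytes"
        "fmt"
    )

    func main() {
        // NUL-delimited input, as produced by e.g. `find -print0`.
        src := bytes.NewBufferString("foo\x00bar\x00baz")
        delim := byte('\000')
        reader := bufio.NewReader(src)

        for {
            // ReadBytes returns err != nil iff the data does not end in delim,
            // so the last record (with no trailing NUL) still comes through.
            record, err := reader.ReadBytes(delim)
            if len(record) > 0 {
                if err == nil {
                    record = record[:len(record)-1] // strip the delimiter
                }
                fmt.Printf("%s\n", record)
            }
            if err != nil {
                break
            }
        }
    }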
@@ -10,7 +10,7 @@ func TestReadFromCommand(t *testing.T) {
     strs := []string{}
     eb := util.NewEventBox()
     reader := Reader{
-        pusher:   func(s string) { strs = append(strs, s) },
+        pusher:   func(s []byte) bool { strs = append(strs, string(s)); return true },
         eventBox: eb}

     // Check EventBox
src/result.go (new file, 240 lines)
@@ -0,0 +1,240 @@
+package fzf
+
+import (
+    "math"
+    "sort"
+
+    "github.com/junegunn/fzf/src/curses"
+    "github.com/junegunn/fzf/src/util"
+)
+
+// Offset holds two 32-bit integers denoting the offsets of a matched substring
+type Offset [2]int32
+
+type colorOffset struct {
+    offset [2]int32
+    color  int
+    bold   bool
+    index  int32
+}
+
+type rank struct {
+    points [4]uint16
+    index  int32
+}
+
+type Result struct {
+    item *Item
+    rank rank
+}
+
+func buildResult(item *Item, offsets []Offset, score int, trimLen int) *Result {
+    if len(offsets) > 1 {
+        sort.Sort(ByOrder(offsets))
+    }
+
+    result := Result{item: item, rank: rank{index: item.index}}
+    numChars := item.text.Length()
+    minBegin := math.MaxUint16
+    maxEnd := 0
+    validOffsetFound := false
+    for _, offset := range offsets {
+        b, e := int(offset[0]), int(offset[1])
+        if b < e {
+            minBegin = util.Min(b, minBegin)
+            maxEnd = util.Max(e, maxEnd)
+            validOffsetFound = true
+        }
+    }
+
+    for idx, criterion := range sortCriteria {
+        val := uint16(math.MaxUint16)
+        switch criterion {
+        case byScore:
+            // Higher is better
+            val = math.MaxUint16 - util.AsUint16(score)
+        case byLength:
+            // If offsets is empty, trimLen will be 0, but we don't care
+            val = util.AsUint16(trimLen)
+        case byBegin:
+            if validOffsetFound {
+                whitePrefixLen := 0
+                for idx := 0; idx < numChars; idx++ {
+                    r := item.text.Get(idx)
+                    whitePrefixLen = idx
+                    if idx == minBegin || r != ' ' && r != '\t' {
+                        break
+                    }
+                }
+                val = util.AsUint16(minBegin - whitePrefixLen)
+            }
+        case byEnd:
+            if validOffsetFound {
+                val = util.AsUint16(1 + numChars - maxEnd)
+            }
+        }
+        result.rank.points[idx] = val
+    }
+
+    return &result
+}
+
+// Sort criteria to use. Never changes once fzf is started.
+var sortCriteria []criterion
+
+// Index returns ordinal index of the Item
+func (result *Result) Index() int32 {
+    return result.item.index
+}
+
+func minRank() rank {
+    return rank{index: 0, points: [4]uint16{math.MaxUint16, 0, 0, 0}}
+}
+
+func (result *Result) colorOffsets(matchOffsets []Offset, color int, bold bool, current bool) []colorOffset {
+    itemColors := result.item.Colors()
+
+    if len(itemColors) == 0 {
+        var offsets []colorOffset
+        for _, off := range matchOffsets {
+
+            offsets = append(offsets, colorOffset{offset: [2]int32{off[0], off[1]}, color: color, bold: bold})
+        }
+        return offsets
+    }
+
+    // Find max column
+    var maxCol int32
+    for _, off := range matchOffsets {
+        if off[1] > maxCol {
+            maxCol = off[1]
+        }
+    }
+    for _, ansi := range itemColors {
+        if ansi.offset[1] > maxCol {
+            maxCol = ansi.offset[1]
+        }
+    }
+    cols := make([]int, maxCol)
+
+    for colorIndex, ansi := range itemColors {
+        for i := ansi.offset[0]; i < ansi.offset[1]; i++ {
+            cols[i] = colorIndex + 1 // XXX
+        }
+    }
+
+    for _, off := range matchOffsets {
+        for i := off[0]; i < off[1]; i++ {
+            cols[i] = -1
+        }
+    }
+
+    // sort.Sort(ByOrder(offsets))
+
+    // Merge offsets
+    // ------------ ---- -- ----
+    // ++++++++ ++++++++++
+    // --++++++++-- --++++++++++---
+    curr := 0
+    start := 0
+    var colors []colorOffset
+    add := func(idx int) {
+        if curr != 0 && idx > start {
+            if curr == -1 {
+                colors = append(colors, colorOffset{
+                    offset: [2]int32{int32(start), int32(idx)}, color: color, bold: bold})
+            } else {
+                ansi := itemColors[curr-1]
+                fg := ansi.color.fg
+                if fg == -1 {
+                    if current {
+                        fg = curses.CurrentFG
+                    } else {
+                        fg = curses.FG
+                    }
+                }
+                bg := ansi.color.bg
+                if bg == -1 {
+                    if current {
+                        bg = curses.DarkBG
+                    } else {
+                        bg = curses.BG
+                    }
+                }
+                colors = append(colors, colorOffset{
+                    offset: [2]int32{int32(start), int32(idx)},
+                    color:  curses.PairFor(fg, bg),
+                    bold:   ansi.color.bold || bold})
+            }
+        }
+    }
+    for idx, col := range cols {
+        if col != curr {
+            add(idx)
+            start = idx
|
||||||
|
curr = col
|
||||||
|
}
|
||||||
|
}
|
||||||
|
add(int(maxCol))
|
||||||
|
return colors
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByOrder is for sorting substring offsets
|
||||||
|
type ByOrder []Offset
|
||||||
|
|
||||||
|
func (a ByOrder) Len() int {
|
||||||
|
return len(a)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a ByOrder) Swap(i, j int) {
|
||||||
|
a[i], a[j] = a[j], a[i]
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a ByOrder) Less(i, j int) bool {
|
||||||
|
ioff := a[i]
|
||||||
|
joff := a[j]
|
||||||
|
return (ioff[0] < joff[0]) || (ioff[0] == joff[0]) && (ioff[1] <= joff[1])
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByRelevance is for sorting Items
|
||||||
|
type ByRelevance []*Result
|
||||||
|
|
||||||
|
func (a ByRelevance) Len() int {
|
||||||
|
return len(a)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a ByRelevance) Swap(i, j int) {
|
||||||
|
a[i], a[j] = a[j], a[i]
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a ByRelevance) Less(i, j int) bool {
|
||||||
|
return compareRanks((*a[i]).rank, (*a[j]).rank, false)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByRelevanceTac is for sorting Items
|
||||||
|
type ByRelevanceTac []*Result
|
||||||
|
|
||||||
|
func (a ByRelevanceTac) Len() int {
|
||||||
|
return len(a)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a ByRelevanceTac) Swap(i, j int) {
|
||||||
|
a[i], a[j] = a[j], a[i]
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a ByRelevanceTac) Less(i, j int) bool {
|
||||||
|
return compareRanks((*a[i]).rank, (*a[j]).rank, true)
|
||||||
|
}
|
||||||
|
|
||||||
|
func compareRanks(irank rank, jrank rank, tac bool) bool {
|
||||||
|
for idx := 0; idx < 4; idx++ {
|
||||||
|
left := irank.points[idx]
|
||||||
|
right := jrank.points[idx]
|
||||||
|
if left < right {
|
||||||
|
return true
|
||||||
|
} else if left > right {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return (irank.index <= jrank.index) != tac
|
||||||
|
}
|
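compareRanks above is a plain lexicographic comparison over the four packed criteria, with the item index as the final tie-breaker (inverted when --tac is in effect). A small standalone sketch of the same idea, independent of fzf's Result and Item types:

package main

import (
	"fmt"
	"sort"
)

// rank mirrors the shape used in result.go: up to four 16-bit criteria
// compared lexicographically, with the item index as the last tie-breaker.
type rank struct {
	points [4]uint16
	index  int32
}

// less reports whether i sorts before j; tac flips the index tie-breaker,
// the way --tac reverses the original input order for equal matches.
func less(i, j rank, tac bool) bool {
	for k := 0; k < 4; k++ {
		if i.points[k] < j.points[k] {
			return true
		} else if i.points[k] > j.points[k] {
			return false
		}
	}
	return (i.index <= j.index) != tac
}

func main() {
	ranks := []rank{
		{points: [4]uint16{2, 9, 0, 0}, index: 3},
		{points: [4]uint16{1, 5, 0, 0}, index: 7},
		{points: [4]uint16{1, 5, 0, 0}, index: 2},
	}
	sort.Slice(ranks, func(a, b int) bool { return less(ranks[a], ranks[b], false) })
	// lower points win; equal points fall back to the smaller index
	fmt.Println(ranks)
}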
src/result_test.go  (new file, 119 lines)
@@ -0,0 +1,119 @@
package fzf

import (
	"math"
	"sort"
	"testing"

	"github.com/junegunn/fzf/src/curses"
	"github.com/junegunn/fzf/src/util"
)

func TestOffsetSort(t *testing.T) {
	offsets := []Offset{
		Offset{3, 5}, Offset{2, 7},
		Offset{1, 3}, Offset{2, 9}}
	sort.Sort(ByOrder(offsets))

	if offsets[0][0] != 1 || offsets[0][1] != 3 ||
		offsets[1][0] != 2 || offsets[1][1] != 7 ||
		offsets[2][0] != 2 || offsets[2][1] != 9 ||
		offsets[3][0] != 3 || offsets[3][1] != 5 {
		t.Error("Invalid order:", offsets)
	}
}

func TestRankComparison(t *testing.T) {
	rank := func(vals ...uint16) rank {
		return rank{
			points: [4]uint16{vals[0], vals[1], vals[2], vals[3]},
			index:  int32(vals[4])}
	}
	if compareRanks(rank(3, 0, 0, 0, 5), rank(2, 0, 0, 0, 7), false) ||
		!compareRanks(rank(3, 0, 0, 0, 5), rank(3, 0, 0, 0, 6), false) ||
		!compareRanks(rank(1, 2, 0, 0, 3), rank(1, 3, 0, 0, 2), false) ||
		!compareRanks(rank(0, 0, 0, 0, 0), rank(0, 0, 0, 0, 0), false) {
		t.Error("Invalid order")
	}

	if compareRanks(rank(3, 0, 0, 0, 5), rank(2, 0, 0, 0, 7), true) ||
		!compareRanks(rank(3, 0, 0, 0, 5), rank(3, 0, 0, 0, 6), false) ||
		!compareRanks(rank(1, 2, 0, 0, 3), rank(1, 3, 0, 0, 2), true) ||
		!compareRanks(rank(0, 0, 0, 0, 0), rank(0, 0, 0, 0, 0), false) {
		t.Error("Invalid order (tac)")
	}
}

// Match length, string length, index
func TestResultRank(t *testing.T) {
	// FIXME global
	sortCriteria = []criterion{byScore, byLength}

	strs := [][]rune{[]rune("foo"), []rune("foobar"), []rune("bar"), []rune("baz")}
	item1 := buildResult(&Item{text: util.RunesToChars(strs[0]), index: 1}, []Offset{}, 2, 3)
	if item1.rank.points[0] != math.MaxUint16-2 || // Bonus
		item1.rank.points[1] != 3 || // Length
		item1.rank.points[2] != 0 || // Unused
		item1.rank.points[3] != 0 || // Unused
		item1.item.index != 1 {
		t.Error(item1.rank)
	}
	// Only differ in index
	item2 := buildResult(&Item{text: util.RunesToChars(strs[0])}, []Offset{}, 2, 3)

	items := []*Result{item1, item2}
	sort.Sort(ByRelevance(items))
	if items[0] != item2 || items[1] != item1 {
		t.Error(items)
	}

	items = []*Result{item2, item1, item1, item2}
	sort.Sort(ByRelevance(items))
	if items[0] != item2 || items[1] != item2 ||
		items[2] != item1 || items[3] != item1 {
		t.Error(items, item1, item1.item.index, item2, item2.item.index)
	}

	// Sort by relevance
	item3 := buildResult(&Item{index: 2}, []Offset{Offset{1, 3}, Offset{5, 7}}, 3, 0)
	item4 := buildResult(&Item{index: 2}, []Offset{Offset{1, 2}, Offset{6, 7}}, 4, 0)
	item5 := buildResult(&Item{index: 2}, []Offset{Offset{1, 3}, Offset{5, 7}}, 5, 0)
	item6 := buildResult(&Item{index: 2}, []Offset{Offset{1, 2}, Offset{6, 7}}, 6, 0)
	items = []*Result{item1, item2, item3, item4, item5, item6}
	sort.Sort(ByRelevance(items))
	if !(items[0] == item6 && items[1] == item5 &&
		items[2] == item4 && items[3] == item3 &&
		items[4] == item2 && items[5] == item1) {
		t.Error(items, item1, item2, item3, item4, item5, item6)
	}
}

func TestColorOffset(t *testing.T) {
	// ------------ 20 ---- -- ----
	//   ++++++++      ++++++++++
	// --++++++++--  --++++++++++---

	offsets := []Offset{Offset{5, 15}, Offset{25, 35}}
	item := Result{
		item: &Item{
			colors: &[]ansiOffset{
				ansiOffset{[2]int32{0, 20}, ansiState{1, 5, false}},
				ansiOffset{[2]int32{22, 27}, ansiState{2, 6, true}},
				ansiOffset{[2]int32{30, 32}, ansiState{3, 7, false}},
				ansiOffset{[2]int32{33, 40}, ansiState{4, 8, true}}}}}
	// [{[0 5] 9 false} {[5 15] 99 false} {[15 20] 9 false} {[22 25] 10 true} {[25 35] 99 false} {[35 40] 11 true}]

	colors := item.colorOffsets(offsets, 99, false, true)
	assert := func(idx int, b int32, e int32, c int, bold bool) {
		o := colors[idx]
		if o.offset[0] != b || o.offset[1] != e || o.color != c || o.bold != bold {
			t.Error(o)
		}
	}
	assert(0, 0, 5, curses.ColUser, false)
	assert(1, 5, 15, 99, false)
	assert(2, 15, 20, curses.ColUser, false)
	assert(3, 22, 25, curses.ColUser+1, true)
	assert(4, 25, 35, 99, false)
	assert(5, 35, 40, curses.ColUser+2, true)
}
src/terminal.go  (805 lines changed; diff suppressed because it is too large)
src/tokenizer.go  (122 lines changed)
@@ -18,8 +18,15 @@ type Range struct {

 // Token contains the tokenized part of the strings and its prefix length
 type Token struct {
-	text         *[]rune
-	prefixLength int
+	text         *util.Chars
+	prefixLength int32
+	trimLength   int32
+}
+
+// Delimiter for tokenizing the input
+type Delimiter struct {
+	regex *regexp.Regexp
+	str   *string
 }

 func newRange(begin int, end int) Range {
@@ -68,16 +75,14 @@ func ParseRange(str *string) (Range, bool) {
 	return newRange(n, n), true
 }

-func withPrefixLengths(tokens []string, begin int) []Token {
+func withPrefixLengths(tokens []util.Chars, begin int) []Token {
 	ret := make([]Token, len(tokens))

 	prefixLength := begin
 	for idx, token := range tokens {
-		// Need to define a new local variable instead of the reused token to take
-		// the pointer to it
-		runes := []rune(token)
-		ret[idx] = Token{text: &runes, prefixLength: prefixLength}
-		prefixLength += len([]rune(token))
+		// NOTE: &tokens[idx] instead of &tokens
+		ret[idx] = Token{&tokens[idx], int32(prefixLength), int32(token.TrimLength())}
+		prefixLength += token.Length()
 	}
 	return ret
 }
@@ -88,88 +93,104 @@ const (
 	awkWhite
 )

-func awkTokenizer(input *string) ([]string, int) {
+func awkTokenizer(input util.Chars) ([]util.Chars, int) {
 	// 9, 32
-	ret := []string{}
-	str := []rune{}
+	ret := []util.Chars{}
 	prefixLength := 0
 	state := awkNil
-	for _, r := range []rune(*input) {
+	numChars := input.Length()
+	begin := 0
+	end := 0
+	for idx := 0; idx < numChars; idx++ {
+		r := input.Get(idx)
 		white := r == 9 || r == 32
 		switch state {
 		case awkNil:
 			if white {
 				prefixLength++
 			} else {
-				state = awkBlack
-				str = append(str, r)
+				state, begin, end = awkBlack, idx, idx+1
 			}
 		case awkBlack:
-			str = append(str, r)
+			end = idx + 1
 			if white {
 				state = awkWhite
 			}
 		case awkWhite:
 			if white {
-				str = append(str, r)
+				end = idx + 1
 			} else {
-				ret = append(ret, string(str))
-				state = awkBlack
-				str = []rune{r}
+				ret = append(ret, input.Slice(begin, end))
+				state, begin, end = awkBlack, idx, idx+1
 			}
 		}
 	}
-	if len(str) > 0 {
-		ret = append(ret, string(str))
+	if begin < end {
+		ret = append(ret, input.Slice(begin, end))
 	}
 	return ret, prefixLength
 }

 // Tokenize tokenizes the given string with the delimiter
-func Tokenize(str *string, delimiter *regexp.Regexp) []Token {
-	if delimiter == nil {
+func Tokenize(text util.Chars, delimiter Delimiter) []Token {
+	if delimiter.str == nil && delimiter.regex == nil {
 		// AWK-style (\S+\s*)
-		tokens, prefixLength := awkTokenizer(str)
+		tokens, prefixLength := awkTokenizer(text)
 		return withPrefixLengths(tokens, prefixLength)
 	}
-	tokens := delimiter.FindAllString(*str, -1)
-	return withPrefixLengths(tokens, 0)
-}

-func joinTokens(tokens *[]Token) *string {
-	ret := ""
-	for _, token := range *tokens {
-		ret += string(*token.text)
-	}
-	return &ret
+	if delimiter.str != nil {
+		return withPrefixLengths(text.Split(*delimiter.str), 0)
+	}
+
+	// FIXME performance
+	var tokens []string
+	if delimiter.regex != nil {
+		str := text.ToString()
+		for len(str) > 0 {
+			loc := delimiter.regex.FindStringIndex(str)
+			if loc == nil {
+				loc = []int{0, len(str)}
+			}
+			last := util.Max(loc[1], 1)
+			tokens = append(tokens, str[:last])
+			str = str[last:]
+		}
+	}
+	asRunes := make([]util.Chars, len(tokens))
+	for i, token := range tokens {
+		asRunes[i] = util.RunesToChars([]rune(token))
+	}
+	return withPrefixLengths(asRunes, 0)
 }

-func joinTokensAsRunes(tokens *[]Token) *[]rune {
+func joinTokens(tokens []Token) []rune {
 	ret := []rune{}
-	for _, token := range *tokens {
-		ret = append(ret, *token.text...)
+	for _, token := range tokens {
+		ret = append(ret, token.text.ToRunes()...)
 	}
-	return &ret
+	return ret
 }

 // Transform is used to transform the input when --with-nth option is given
-func Transform(tokens []Token, withNth []Range) *[]Token {
+func Transform(tokens []Token, withNth []Range) []Token {
 	transTokens := make([]Token, len(withNth))
 	numTokens := len(tokens)
 	for idx, r := range withNth {
-		part := []rune{}
+		parts := []*util.Chars{}
 		minIdx := 0
 		if r.begin == r.end {
 			idx := r.begin
 			if idx == rangeEllipsis {
-				part = append(part, *joinTokensAsRunes(&tokens)...)
+				chars := util.RunesToChars(joinTokens(tokens))
+				parts = append(parts, &chars)
 			} else {
 				if idx < 0 {
 					idx += numTokens + 1
 				}
 				if idx >= 1 && idx <= numTokens {
 					minIdx = idx - 1
-					part = append(part, *tokens[idx-1].text...)
+					parts = append(parts, tokens[idx-1].text)
 				}
 			}
 		} else {
@@ -196,17 +217,32 @@ func Transform(tokens []Token, withNth []Range) []Token {
 			minIdx = util.Max(0, begin-1)
 			for idx := begin; idx <= end; idx++ {
 				if idx >= 1 && idx <= numTokens {
-					part = append(part, *tokens[idx-1].text...)
+					parts = append(parts, tokens[idx-1].text)
 				}
 			}
 		}
-		var prefixLength int
+		// Merge multiple parts
+		var merged util.Chars
+		switch len(parts) {
+		case 0:
+			merged = util.RunesToChars([]rune{})
+		case 1:
+			merged = *parts[0]
+		default:
+			runes := []rune{}
+			for _, part := range parts {
+				runes = append(runes, part.ToRunes()...)
+			}
+			merged = util.RunesToChars(runes)
+		}
+
+		var prefixLength int32
 		if minIdx < numTokens {
 			prefixLength = tokens[minIdx].prefixLength
 		} else {
 			prefixLength = 0
 		}
-		transTokens[idx] = Token{&part, prefixLength}
+		transTokens[idx] = Token{&merged, prefixLength, int32(merged.TrimLength())}
 	}
-	return &transTokens
+	return transTokens
 }

src/tokenizer_test.go
@@ -1,6 +1,10 @@
 package fzf

-import "testing"
+import (
+	"testing"
+
+	"github.com/junegunn/fzf/src/util"
+)

 func TestParseRange(t *testing.T) {
 	{
@@ -43,14 +47,23 @@ func TestParseRange(t *testing.T) {
 func TestTokenize(t *testing.T) {
 	// AWK-style
 	input := "  abc:  def:  ghi  "
-	tokens := Tokenize(&input, nil)
-	if string(*tokens[0].text) != "abc:  " || tokens[0].prefixLength != 2 {
+	tokens := Tokenize(util.RunesToChars([]rune(input)), Delimiter{})
+	if tokens[0].text.ToString() != "abc:  " || tokens[0].prefixLength != 2 || tokens[0].trimLength != 4 {
 		t.Errorf("%s", tokens)
 	}

 	// With delimiter
-	tokens = Tokenize(&input, delimiterRegexp(":"))
-	if string(*tokens[0].text) != "  abc:" || tokens[0].prefixLength != 0 {
+	tokens = Tokenize(util.RunesToChars([]rune(input)), delimiterRegexp(":"))
+	if tokens[0].text.ToString() != "  abc:" || tokens[0].prefixLength != 0 || tokens[0].trimLength != 4 {
+		t.Errorf("%s", tokens)
+	}
+
+	// With delimiter regex
+	tokens = Tokenize(util.RunesToChars([]rune(input)), delimiterRegexp("\\s+"))
+	if tokens[0].text.ToString() != "  " || tokens[0].prefixLength != 0 || tokens[0].trimLength != 0 ||
+		tokens[1].text.ToString() != "abc:  " || tokens[1].prefixLength != 2 || tokens[1].trimLength != 4 ||
+		tokens[2].text.ToString() != "def:  " || tokens[2].prefixLength != 8 || tokens[2].trimLength != 4 ||
+		tokens[3].text.ToString() != "ghi  " || tokens[3].prefixLength != 14 || tokens[3].trimLength != 3 {
 		t.Errorf("%s", tokens)
 	}
 }
@@ -58,39 +71,39 @@ func TestTokenize(t *testing.T) {
 func TestTransform(t *testing.T) {
 	input := "  abc:  def:  ghi:  jkl"
 	{
-		tokens := Tokenize(&input, nil)
+		tokens := Tokenize(util.RunesToChars([]rune(input)), Delimiter{})
 		{
 			ranges := splitNth("1,2,3")
 			tx := Transform(tokens, ranges)
-			if *joinTokens(tx) != "abc:  def:  ghi:  " {
-				t.Errorf("%s", *tx)
+			if string(joinTokens(tx)) != "abc:  def:  ghi:  " {
+				t.Errorf("%s", tx)
 			}
 		}
 		{
 			ranges := splitNth("1..2,3,2..,1")
 			tx := Transform(tokens, ranges)
-			if *joinTokens(tx) != "abc:  def:  ghi:  def:  ghi:  jklabc:  " ||
-				len(*tx) != 4 ||
-				string(*(*tx)[0].text) != "abc:  def:  " || (*tx)[0].prefixLength != 2 ||
-				string(*(*tx)[1].text) != "ghi:  " || (*tx)[1].prefixLength != 14 ||
-				string(*(*tx)[2].text) != "def:  ghi:  jkl" || (*tx)[2].prefixLength != 8 ||
-				string(*(*tx)[3].text) != "abc:  " || (*tx)[3].prefixLength != 2 {
-				t.Errorf("%s", *tx)
+			if string(joinTokens(tx)) != "abc:  def:  ghi:  def:  ghi:  jklabc:  " ||
+				len(tx) != 4 ||
+				tx[0].text.ToString() != "abc:  def:  " || tx[0].prefixLength != 2 ||
+				tx[1].text.ToString() != "ghi:  " || tx[1].prefixLength != 14 ||
+				tx[2].text.ToString() != "def:  ghi:  jkl" || tx[2].prefixLength != 8 ||
+				tx[3].text.ToString() != "abc:  " || tx[3].prefixLength != 2 {
+				t.Errorf("%s", tx)
 			}
 		}
 	}
 	{
-		tokens := Tokenize(&input, delimiterRegexp(":"))
+		tokens := Tokenize(util.RunesToChars([]rune(input)), delimiterRegexp(":"))
 		{
 			ranges := splitNth("1..2,3,2..,1")
 			tx := Transform(tokens, ranges)
-			if *joinTokens(tx) != "  abc:  def:  ghi:  def:  ghi:  jkl  abc:" ||
-				len(*tx) != 4 ||
-				string(*(*tx)[0].text) != "  abc:  def:" || (*tx)[0].prefixLength != 0 ||
-				string(*(*tx)[1].text) != "  ghi:" || (*tx)[1].prefixLength != 12 ||
-				string(*(*tx)[2].text) != "  def:  ghi:  jkl" || (*tx)[2].prefixLength != 6 ||
-				string(*(*tx)[3].text) != "  abc:" || (*tx)[3].prefixLength != 0 {
-				t.Errorf("%s", *tx)
+			if string(joinTokens(tx)) != "  abc:  def:  ghi:  def:  ghi:  jkl  abc:" ||
+				len(tx) != 4 ||
+				tx[0].text.ToString() != "  abc:  def:" || tx[0].prefixLength != 0 ||
+				tx[1].text.ToString() != "  ghi:" || tx[1].prefixLength != 12 ||
+				tx[2].text.ToString() != "  def:  ghi:  jkl" || tx[2].prefixLength != 6 ||
+				tx[3].text.ToString() != "  abc:" || tx[3].prefixLength != 0 {
+				t.Errorf("%s", tx)
 			}
 		}
 	}
 }
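The default tokenizer still follows the AWK-style rule (\S+\s*): each field keeps its trailing whitespace, and prefixLength records how many characters precede the field in the original line. A rough standalone sketch of that bookkeeping; it deliberately skips awkTokenizer's state machine and the util.Chars type, so treat the names as illustrative only:

package main

import "fmt"

// token pairs a field with the number of characters that precede it in the
// original line, the same bookkeeping Token.prefixLength provides.
type token struct {
	text         string
	prefixLength int
}

// awkTokenize splits on runs of spaces/tabs but keeps the trailing
// whitespace attached to each field, like fzf's default tokenizer.
func awkTokenize(input string) []token {
	runes := []rune(input)
	var tokens []token
	i := 0
	// leading whitespace only contributes to the first field's prefix
	for i < len(runes) && (runes[i] == ' ' || runes[i] == '\t') {
		i++
	}
	begin := i
	for i < len(runes) {
		// a new field starts where non-whitespace follows whitespace
		if (runes[i] != ' ' && runes[i] != '\t') && i > begin &&
			(runes[i-1] == ' ' || runes[i-1] == '\t') {
			tokens = append(tokens, token{string(runes[begin:i]), begin})
			begin = i
		}
		i++
	}
	if begin < len(runes) {
		tokens = append(tokens, token{string(runes[begin:]), begin})
	}
	return tokens
}

func main() {
	for _, t := range awkTokenize("  abc:  def:  ghi  ") {
		fmt.Printf("%q prefix=%d\n", t.text, t.prefixLength)
	}
}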
@@ -2,6 +2,7 @@
 # http://www.rubydoc.info/github/rest-client/rest-client/RestClient
 require 'rest_client'
+require 'json'

 if ARGV.length < 3
   puts "usage: #$0 <token> <version> <files...>"
src/util/chars.go  (new file, 156 lines)
@@ -0,0 +1,156 @@
package util

import (
	"unicode/utf8"
)

type Chars struct {
	runes []rune
	bytes []byte
}

// ToChars converts byte array into rune array
func ToChars(bytea []byte) Chars {
	var runes []rune
	ascii := true
	numBytes := len(bytea)
	for i := 0; i < numBytes; {
		if bytea[i] < utf8.RuneSelf {
			if !ascii {
				runes = append(runes, rune(bytea[i]))
			}
			i++
		} else {
			if ascii {
				ascii = false
				runes = make([]rune, i, numBytes)
				for j := 0; j < i; j++ {
					runes[j] = rune(bytea[j])
				}
			}
			r, sz := utf8.DecodeRune(bytea[i:])
			i += sz
			runes = append(runes, r)
		}
	}
	if ascii {
		return Chars{bytes: bytea}
	}
	return Chars{runes: runes}
}

func RunesToChars(runes []rune) Chars {
	return Chars{runes: runes}
}

func (chars *Chars) Get(i int) rune {
	if chars.runes != nil {
		return chars.runes[i]
	}
	return rune(chars.bytes[i])
}

func (chars *Chars) Length() int {
	if chars.runes != nil {
		return len(chars.runes)
	}
	return len(chars.bytes)
}

// TrimLength returns the length after trimming leading and trailing whitespaces
func (chars *Chars) TrimLength() int {
	var i int
	len := chars.Length()
	for i = len - 1; i >= 0; i-- {
		char := chars.Get(i)
		if char != ' ' && char != '\t' {
			break
		}
	}
	// Completely empty
	if i < 0 {
		return 0
	}

	var j int
	for j = 0; j < len; j++ {
		char := chars.Get(j)
		if char != ' ' && char != '\t' {
			break
		}
	}
	return i - j + 1
}

func (chars *Chars) TrailingWhitespaces() int {
	whitespaces := 0
	for i := chars.Length() - 1; i >= 0; i-- {
		char := chars.Get(i)
		if char != ' ' && char != '\t' {
			break
		}
		whitespaces++
	}
	return whitespaces
}

func (chars *Chars) ToString() string {
	if chars.runes != nil {
		return string(chars.runes)
	}
	return string(chars.bytes)
}

func (chars *Chars) ToRunes() []rune {
	if chars.runes != nil {
		return chars.runes
	}
	runes := make([]rune, len(chars.bytes))
	for idx, b := range chars.bytes {
		runes[idx] = rune(b)
	}
	return runes
}

func (chars *Chars) Slice(b int, e int) Chars {
	if chars.runes != nil {
		return Chars{runes: chars.runes[b:e]}
	}
	return Chars{bytes: chars.bytes[b:e]}
}

func (chars *Chars) Split(delimiter string) []Chars {
	delim := []rune(delimiter)
	numChars := chars.Length()
	numDelim := len(delim)
	begin := 0
	ret := make([]Chars, 0, 1)

	for index := 0; index < numChars; {
		if index+numDelim <= numChars {
			match := true
			for off, d := range delim {
				if chars.Get(index+off) != d {
					match = false
					break
				}
			}
			// Found the delimiter
			if match {
				incr := Max(numDelim, 1)
				ret = append(ret, chars.Slice(begin, index+incr))
				index += incr
				begin = index
				continue
			}
		} else {
			// Impossible to find the delimiter in the remaining substring
			break
		}
		index++
	}
	if begin < numChars || len(ret) == 0 {
		ret = append(ret, chars.Slice(begin, numChars))
	}
	return ret
}
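util.Chars keeps ASCII-only input as the original byte slice and falls back to a []rune copy otherwise, so Length and Get stay O(1) in both cases. A stripped-down sketch of that dual representation; it is illustrative only, and unlike the real ToChars above it takes the lazy route of converting through a string:

package main

import (
	"fmt"
	"unicode/utf8"
)

// chars stores ASCII-only input as raw bytes and anything else as runes.
// Most candidate lines are ASCII, so the rune conversion is paid only when
// a multi-byte character is actually present.
type chars struct {
	bytes []byte // set when the input is pure ASCII
	runes []rune // set otherwise
}

func toChars(b []byte) chars {
	for _, c := range b {
		if c >= utf8.RuneSelf {
			return chars{runes: []rune(string(b))}
		}
	}
	return chars{bytes: b}
}

func (c *chars) Length() int {
	if c.runes != nil {
		return len(c.runes)
	}
	return len(c.bytes)
}

func (c *chars) Get(i int) rune {
	if c.runes != nil {
		return c.runes[i]
	}
	return rune(c.bytes[i])
}

func main() {
	ascii := toChars([]byte("foobar"))
	wide := toChars([]byte("한글 abc"))
	fmt.Println(ascii.Length(), wide.Length()) // 6 6
	fmt.Println(string(wide.Get(0)))           // 한
}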
src/util/chars_test.go  (new file, 82 lines)
@@ -0,0 +1,82 @@
package util

import "testing"

func TestToCharsNil(t *testing.T) {
	bs := Chars{bytes: []byte{}}
	if bs.bytes == nil || bs.runes != nil {
		t.Error()
	}
	rs := RunesToChars([]rune{})
	if rs.bytes != nil || rs.runes == nil {
		t.Error()
	}
}

func TestToCharsAscii(t *testing.T) {
	chars := ToChars([]byte("foobar"))
	if chars.ToString() != "foobar" || chars.runes != nil {
		t.Error()
	}
}

func TestCharsLength(t *testing.T) {
	chars := ToChars([]byte("\tabc한글  "))
	if chars.Length() != 8 || chars.TrimLength() != 5 {
		t.Error()
	}
}

func TestCharsToString(t *testing.T) {
	text := "\tabc한글  "
	chars := ToChars([]byte(text))
	if chars.ToString() != text {
		t.Error()
	}
}

func TestTrimLength(t *testing.T) {
	check := func(str string, exp int) {
		chars := ToChars([]byte(str))
		trimmed := chars.TrimLength()
		if trimmed != exp {
			t.Errorf("Invalid TrimLength result for '%s': %d (expected %d)",
				str, trimmed, exp)
		}
	}
	check("hello", 5)
	check("hello ", 5)
	check("hello  ", 5)
	check(" hello", 5)
	check("  hello", 5)
	check(" hello ", 5)
	check("  hello  ", 5)
	check("h   o", 5)
	check("  h   o  ", 5)
	check("    ", 0)
}

func TestSplit(t *testing.T) {
	check := func(str string, delim string, tokens ...string) {
		input := ToChars([]byte(str))
		result := input.Split(delim)
		if len(result) != len(tokens) {
			t.Errorf("Invalid Split result for '%s': %d tokens found (expected %d): %s",
				str, len(result), len(tokens), result)
		}
		for idx, token := range tokens {
			if result[idx].ToString() != token {
				t.Errorf("Invalid Split result for '%s': %s (expected %s)",
					str, result[idx].ToString(), token)
			}
		}
	}
	check("abc:def::", ":", "abc:", "def:", ":")
	check("abc:def::", "-", "abc:def::")
	check("abc", "", "a", "b", "c")
	check("abc", "a", "a", "bc")
	check("abc", "ab", "ab", "c")
	check("abc", "abc", "abc")
	check("abc", "abcd", "abc")
	check("", "abcd", "")
}
src/util/slab.go  (new file, 12 lines)
@@ -0,0 +1,12 @@
package util

type Slab struct {
	I16 []int16
	I32 []int32
}

func MakeSlab(size16 int, size32 int) *Slab {
	return &Slab{
		I16: make([]int16, size16),
		I32: make([]int32, size32)}
}
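Slab is nothing more than a pair of preallocated scratch buffers; presumably the point is to let hot paths reuse int16/int32 slices instead of allocating on every call. A tiny illustrative sketch under that assumption; the score helper and its contents are made up for the example:

package main

import "fmt"

// slab bundles reusable scratch buffers, as in src/util/slab.go.
type slab struct {
	I16 []int16
	I32 []int32
}

func makeSlab(size16, size32 int) *slab {
	return &slab{I16: make([]int16, size16), I32: make([]int32, size32)}
}

// score borrows a slice of the slab instead of allocating a fresh buffer.
// The contents are overwritten on every call, so callers must not keep them.
func score(s *slab, n int) []int16 {
	buf := s.I16[:n]
	for i := range buf {
		buf[i] = int16(i) // stand-in for per-character bonus values
	}
	return buf
}

func main() {
	s := makeSlab(100*1024, 2048)
	fmt.Println(score(s, 5)) // [0 1 2 3 4]
}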
src/util/util.go
@@ -4,24 +4,23 @@ package util
 import "C"

 import (
+	"math"
 	"os"
+	"os/exec"
 	"time"
 )

 // Max returns the largest integer
-func Max(first int, items ...int) int {
-	max := first
-	for _, item := range items {
-		if item > max {
-			max = item
-		}
-	}
-	return max
+func Max(first int, second int) int {
+	if first >= second {
+		return first
+	}
+	return second
 }

-// Max32 returns the smallest 32-bit integer
-func Min32(first int32, second int32) int32 {
-	if first <= second {
+// Max16 returns the largest integer
+func Max16(first int16, second int16) int16 {
+	if first >= second {
 		return first
 	}
 	return second
@@ -35,6 +34,22 @@ func Max32(first int32, second int32) int32 {
 	return second
 }

+// Min returns the smallest integer
+func Min(first int, second int) int {
+	if first <= second {
+		return first
+	}
+	return second
+}
+
+// Min32 returns the smallest 32-bit integer
+func Min32(first int32, second int32) int32 {
+	if first <= second {
+		return first
+	}
+	return second
+}
+
 // Constrain32 limits the given 32-bit integer with the upper and lower bounds
 func Constrain32(val int32, min int32, max int32) int32 {
 	if val < min {
@@ -57,6 +72,15 @@ func Constrain(val int, min int, max int) int {
 	return val
 }

+func AsUint16(val int) uint16 {
+	if val > math.MaxUint16 {
+		return math.MaxUint16
+	} else if val < 0 {
+		return 0
+	}
+	return uint16(val)
+}
+
 // DurWithin limits the given time.Duration with the upper and lower bounds
 func DurWithin(
 	val time.Duration, min time.Duration, max time.Duration) time.Duration {
@@ -69,22 +93,16 @@ func DurWithin(
 	return val
 }

-func Between(val int, min int, max int) bool {
-	return val >= min && val <= max
-}
-
 // IsTty returns true is stdin is a terminal
 func IsTty() bool {
 	return int(C.isatty(C.int(os.Stdin.Fd()))) != 0
 }

-func TrimRight(runes *[]rune) []rune {
-	var i int
-	for i = len(*runes) - 1; i >= 0; i-- {
-		char := (*runes)[i]
-		if char != ' ' && char != '\t' {
-			break
-		}
-	}
-	return (*runes)[0 : i+1]
+// ExecCommand executes the given command with $SHELL
+func ExecCommand(command string) *exec.Cmd {
+	shell := os.Getenv("SHELL")
+	if len(shell) == 0 {
+		shell = "sh"
+	}
+	return exec.Command(shell, "-c", command)
 }
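ExecCommand now resolves the user's $SHELL (falling back to sh) instead of hard-coding sh, and AsUint16 saturates an int into the uint16 range before it is packed into rank.points. A small standalone sketch of both helpers, with lowercase stand-in names rather than the util package itself:

package main

import (
	"fmt"
	"math"
	"os"
	"os/exec"
)

// execCommand runs the command line through $SHELL -c, falling back to sh,
// mirroring what util.ExecCommand does in place of a hard-coded "sh".
func execCommand(command string) *exec.Cmd {
	shell := os.Getenv("SHELL")
	if len(shell) == 0 {
		shell = "sh"
	}
	return exec.Command(shell, "-c", command)
}

// asUint16 clamps an int into the uint16 range, the saturation used when
// packing scores and lengths into 16-bit rank criteria.
func asUint16(val int) uint16 {
	if val > math.MaxUint16 {
		return math.MaxUint16
	} else if val < 0 {
		return 0
	}
	return uint16(val)
}

func main() {
	if out, err := execCommand("echo hello").Output(); err == nil {
		fmt.Print(string(out))
	}
	fmt.Println(asUint16(-5), asUint16(70000)) // 0 65535
}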
src/util/util_test.go
@@ -3,7 +3,7 @@ package util
 import "testing"

 func TestMax(t *testing.T) {
-	if Max(-2, 5, 1, 4, 3) != 5 {
+	if Max(-2, 5) != 5 {
 		t.Error("Invalid result")
 	}
 }
test/fzf.vader  (108 lines changed)
@@ -1,6 +1,8 @@
 Execute (Setup):
   let g:dir = fnamemodify(g:vader_file, ':p:h')
+  unlet! g:fzf_layout g:fzf_action g:fzf_history_dir
   Log 'Test directory: ' . g:dir
+  Save &acd

 Execute (fzf#run with dir option):
   let cwd = getcwd()
@@ -35,6 +37,112 @@ Execute (fzf#run with string source):
   let result = sort(fzf#run({ 'source': 'echo hi', 'options': '-f i' }))
   AssertEqual ['hi'], result

+Execute (fzf#run with dir option and noautochdir):
+  set noacd
+  let cwd = getcwd()
+  call fzf#run({'source': ['/foobar'], 'sink': 'e', 'dir': '/tmp', 'options': '-1'})
+  " No change in working directory
+  AssertEqual cwd, getcwd()
+
+  call fzf#run({'source': ['/foobar'], 'sink': 'tabe', 'dir': '/tmp', 'options': '-1'})
+  AssertEqual cwd, getcwd()
+  tabclose
+  AssertEqual cwd, getcwd()
+
+Execute (Incomplete fzf#run with dir option and autochdir):
+  set acd
+  let cwd = getcwd()
+  call fzf#run({'source': [], 'sink': 'e', 'dir': '/tmp', 'options': '-0'})
+  " No change in working directory even if &acd is set
+  AssertEqual cwd, getcwd()
+
+Execute (fzf#run with dir option and autochdir):
+  set acd
+  let cwd = getcwd()
+  call fzf#run({'source': ['/foobar'], 'sink': 'e', 'dir': '/tmp', 'options': '-1'})
+  " Working directory changed due to &acd
+  AssertEqual '/', getcwd()
+
+Execute (fzf#run with dir option and autochdir when final cwd is same as dir):
+  set acd
+  cd /tmp
+  call fzf#run({'source': ['/foobar'], 'sink': 'e', 'dir': '/', 'options': '-1'})
+  " Working directory changed due to &acd
+  AssertEqual '/', getcwd()
+
+Execute (fzf#wrap):
+  AssertThrows fzf#wrap({'foo': 'bar'})
+
+  let opts = fzf#wrap('foobar')
+  Log opts
+  AssertEqual '~40%', opts.down
+  Assert opts.options =~ '--expect='
+  Assert !has_key(opts, 'sink')
+  Assert has_key(opts, 'sink*')
+
+  let opts = fzf#wrap('foobar', {}, 0)
+  Log opts
+  AssertEqual '~40%', opts.down
+
+  let opts = fzf#wrap('foobar', {}, 1)
+  Log opts
+  Assert !has_key(opts, 'down')
+
+  let opts = fzf#wrap('foobar', {'down': '50%'})
+  Log opts
+  AssertEqual '50%', opts.down
+
+  let opts = fzf#wrap('foobar', {'down': '50%'}, 1)
+  Log opts
+  Assert !has_key(opts, 'down')
+
+  let opts = fzf#wrap('foobar', {'sink': 'e'})
+  Log opts
+  AssertEqual 'e', opts.sink
+  Assert !has_key(opts, 'sink*')
+
+  let opts = fzf#wrap('foobar', {'options': '--reverse'})
+  Log opts
+  Assert opts.options =~ '--expect='
+  Assert opts.options =~ '--reverse'
+
+  let g:fzf_layout = {'window': 'enew'}
+  let opts = fzf#wrap('foobar')
+  Log opts
+  AssertEqual 'enew', opts.window
+
+  let opts = fzf#wrap('foobar', {}, 1)
+  Log opts
+  Assert !has_key(opts, 'window')
+
+  let opts = fzf#wrap('foobar', {'right': '50%'})
+  Log opts
+  Assert !has_key(opts, 'window')
+  AssertEqual '50%', opts.right
+
+  let opts = fzf#wrap('foobar', {'right': '50%'}, 1)
+  Log opts
+  Assert !has_key(opts, 'window')
+  Assert !has_key(opts, 'right')
+
+  let g:fzf_action = {'a': 'tabe'}
+  let opts = fzf#wrap('foobar')
+  Log opts
+  Assert opts.options =~ '--expect=a'
+  Assert !has_key(opts, 'sink')
+  Assert has_key(opts, 'sink*')
+
+  let opts = fzf#wrap('foobar', {'sink': 'e'})
+  Log opts
+  AssertEqual 'e', opts.sink
+  Assert !has_key(opts, 'sink*')
+
+  let g:fzf_history_dir = '/tmp'
+  let opts = fzf#wrap('foobar', {'options': '--color light'})
+  Log opts
+  Assert opts.options =~ '--history /tmp/foobar'
+  Assert opts.options =~ '--color light'
+
 Execute (Cleanup):
   unlet g:dir
   Restore
test/test_go.rb  (937 lines changed; diff suppressed because it is too large)