Mirror of https://github.com/BurntSushi/ripgrep.git (synced 2025-08-19 14:13:49 -07:00)

Compare commits: wincolor-0 ... grep-pcre2 (814 commits)
.github/workflows/ci.yml (vendored, new file, 195 lines)
@@ -0,0 +1,195 @@

name: ci
on:
  pull_request:
  push:
    branches:
    - master
  schedule:
  - cron: '00 01 * * *'
jobs:
  test:
    name: test
    env:
      # For some builds, we use cross to test on 32-bit and big-endian
      # systems.
      CARGO: cargo
      # When CARGO is set to CROSS, this is set to `--target matrix.target`.
      TARGET_FLAGS:
      # When CARGO is set to CROSS, TARGET_DIR includes matrix.target.
      TARGET_DIR: ./target
      # Emit backtraces on panics.
      RUST_BACKTRACE: 1
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
        build:
        # We test ripgrep on a pinned version of Rust, along with the moving
        # targets of 'stable' and 'beta' for good measure.
        - pinned
        - stable
        - beta
        # Our release builds are generated by a nightly compiler to take
        # advantage of the latest optimizations/compile time improvements. So
        # we test all of them here. (We don't do mips releases, but test on
        # mips for big-endian coverage.)
        - nightly
        - nightly-musl
        - nightly-32
        - nightly-mips
        - nightly-arm
        - macos
        - win-msvc
        - win-gnu
        include:
        - build: pinned
          os: ubuntu-18.04
          rust: 1.41.0
        - build: stable
          os: ubuntu-18.04
          rust: stable
        - build: beta
          os: ubuntu-18.04
          rust: beta
        - build: nightly
          os: ubuntu-18.04
          rust: nightly
        - build: nightly-musl
          os: ubuntu-18.04
          rust: nightly
          target: x86_64-unknown-linux-musl
        - build: nightly-32
          os: ubuntu-18.04
          rust: nightly
          target: i686-unknown-linux-gnu
        - build: nightly-mips
          os: ubuntu-18.04
          rust: nightly
          target: mips64-unknown-linux-gnuabi64
        - build: nightly-arm
          os: ubuntu-18.04
          rust: nightly
          # For stripping release binaries:
          # docker run --rm -v $PWD/target:/target:Z \
          #   rustembedded/cross:arm-unknown-linux-gnueabihf \
          #   arm-linux-gnueabihf-strip \
          #   /target/arm-unknown-linux-gnueabihf/debug/rg
          target: arm-unknown-linux-gnueabihf
        - build: macos
          os: macos-latest
          rust: nightly
        - build: win-msvc
          os: windows-2019
          rust: nightly
        - build: win-gnu
          os: windows-2019
          rust: nightly-x86_64-gnu
    steps:
    - name: Checkout repository
      uses: actions/checkout@v1

    - name: Install packages (Ubuntu)
      if: matrix.os == 'ubuntu-18.04'
      run: |
        ci/ubuntu-install-packages

    - name: Install packages (macOS)
      if: matrix.os == 'macos-latest'
      run: |
        ci/macos-install-packages

    - name: Install Rust
      uses: actions-rs/toolchain@v1
      with:
        toolchain: ${{ matrix.rust }}
        profile: minimal
        override: true

    - name: Use Cross
      if: matrix.target != ''
      run: |
        # FIXME: to work around bugs in latest cross release, install master.
        # See: https://github.com/rust-embedded/cross/issues/357
        cargo install --git https://github.com/rust-embedded/cross
        echo "::set-env name=CARGO::cross"
        echo "::set-env name=TARGET_FLAGS::--target ${{ matrix.target }}"
        echo "::set-env name=TARGET_DIR::./target/${{ matrix.target }}"

    - name: Show command used for Cargo
      run: |
        echo "cargo command is: ${{ env.CARGO }}"
        echo "target flag is: ${{ env.TARGET_FLAGS }}"

    - name: Build ripgrep and all crates
      run: ${{ env.CARGO }} build --verbose --all ${{ env.TARGET_FLAGS }}

    - name: Build ripgrep with PCRE2
      run: ${{ env.CARGO }} build --verbose --all --features pcre2 ${{ env.TARGET_FLAGS }}

    # This is useful for debugging problems when the expected build artifacts
    # (like shell completions and man pages) aren't generated.
    - name: Show build.rs stderr
      shell: bash
      run: |
        set +x
        stderr="$(find "${{ env.TARGET_DIR }}/debug" -name stderr -print0 | xargs -0 ls -t | head -n1)"
        if [ -s "$stderr" ]; then
          echo "===== $stderr ===== "
          cat "$stderr"
          echo "====="
        fi
        set -x

    - name: Run tests with PCRE2 (sans cross)
      if: matrix.target == ''
      run: ${{ env.CARGO }} test --verbose --all --features pcre2 ${{ env.TARGET_FLAGS }}

    - name: Run tests without PCRE2 (with cross)
      # These tests should actually work, but they almost double the runtime.
      # Every integration test spins up qemu to run 'rg', and when PCRE2 is
      # enabled, every integration test is run twice: once with the default
      # regex engine and once with PCRE2.
      if: matrix.target != ''
      run: ${{ env.CARGO }} test --verbose --all ${{ env.TARGET_FLAGS }}

    - name: Test for existence of build artifacts (Windows)
      if: matrix.os == 'windows-2019'
      shell: bash
      run: |
        outdir="$(ci/cargo-out-dir "${{ env.TARGET_DIR }}")"
        ls "$outdir/_rg.ps1" && file "$outdir/_rg.ps1"

    - name: Test for existence of build artifacts (Unix)
      if: matrix.os != 'windows-2019'
      shell: bash
      run: |
        outdir="$(ci/cargo-out-dir "${{ env.TARGET_DIR }}")"
        for f in rg.bash rg.fish rg.1; do
          # We could use file -E here, but it isn't supported on macOS.
          ls "$outdir/$f" && file "$outdir/$f"
        done

    - name: Test zsh shell completions (Unix, sans cross)
      # We could test this when using Cross, but we'd have to execute the
      # 'rg' binary (done in test-complete) with qemu, which is a pain and
      # doesn't really gain us much. If shell completion works in one place,
      # it probably works everywhere.
      if: matrix.target == '' && matrix.os != 'windows-2019'
      shell: bash
      run: ci/test-complete

  rustfmt:
    name: rustfmt
    runs-on: ubuntu-18.04
    steps:
    - name: Checkout repository
      uses: actions/checkout@v1
    - name: Install Rust
      uses: actions-rs/toolchain@v1
      with:
        toolchain: stable
        override: true
        profile: minimal
        components: rustfmt
    - name: Check formatting
      run: |
        cargo fmt --all -- --check
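
The `CARGO`/`TARGET_FLAGS` indirection above lets the same build and test steps run either plain `cargo` or `cross`. As a rough, hedged sketch of what one cross-compiled job boils down to locally (it assumes Docker and a nightly toolchain are installed, and the `cross` revision installed from git may differ today):

```bash
# Install cross from git (the FIXME above), then build and test the whole
# workspace for the 32-bit target, mirroring the "nightly-32" CI job.
cargo install --git https://github.com/rust-embedded/cross
cross build --verbose --all --target i686-unknown-linux-gnu
cross test --verbose --all --target i686-unknown-linux-gnu
```
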
.github/workflows/release.yml (vendored, new file, 211 lines)
@@ -0,0 +1,211 @@

# The way this works is a little weird. But basically, the create-release job
# runs purely to initialize the GitHub release itself. Once done, the upload
# URL of the release is saved as an artifact.
#
# The build-release job runs only once create-release is finished. It gets
# the release upload URL by downloading the corresponding artifact (which was
# uploaded by create-release). It then builds the release executables for each
# supported platform and attaches them as release assets to the previously
# created release.
#
# The key here is that we create the release only once.

name: release
on:
  push:
    # Enable when testing release infrastructure on a branch.
    # branches:
    # - ag/release
    tags:
    - '[0-9]+.[0-9]+.[0-9]+'
jobs:
  create-release:
    name: create-release
    runs-on: ubuntu-latest
    env:
      # Set to force version number, e.g., when no tag exists.
      # RG_VERSION: TEST-0.0.0
    steps:
    - name: Create artifacts directory
      run: mkdir artifacts

    - name: Get the release version from the tag
      if: env.RG_VERSION == ''
      run: |
        # Apparently, this is the right way to get a tag name. Really?
        #
        # See: https://github.community/t5/GitHub-Actions/How-to-get-just-the-tag-name/m-p/32167/highlight/true#M1027
        echo "::set-env name=RG_VERSION::${GITHUB_REF#refs/tags/}"
        echo "version is: ${{ env.RG_VERSION }}"

    - name: Create GitHub release
      id: release
      uses: actions/create-release@v1
      env:
        GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
      with:
        tag_name: ${{ env.RG_VERSION }}
        release_name: ripgrep ${{ env.RG_VERSION }}

    - name: Save release upload URL to artifact
      run: echo "${{ steps.release.outputs.upload_url }}" > artifacts/release-upload-url

    - name: Save version number to artifact
      run: echo "${{ env.RG_VERSION }}" > artifacts/release-version

    - name: Upload artifacts
      uses: actions/upload-artifact@v1
      with:
        name: artifacts
        path: artifacts

  build-release:
    name: build-release
    needs: ['create-release']
    runs-on: ${{ matrix.os }}
    env:
      # For some builds, we use cross to test on 32-bit and big-endian
      # systems.
      CARGO: cargo
      # When CARGO is set to CROSS, this is set to `--target matrix.target`.
      TARGET_FLAGS:
      # When CARGO is set to CROSS, TARGET_DIR includes matrix.target.
      TARGET_DIR: ./target
      # Emit backtraces on panics.
      RUST_BACKTRACE: 1
      # Build static releases with PCRE2.
      PCRE2_SYS_STATIC: 1
    strategy:
      matrix:
        build: [linux, linux-arm, macos, win-msvc, win-gnu, win32-msvc]
        include:
        - build: linux
          os: ubuntu-18.04
          rust: nightly
          target: x86_64-unknown-linux-musl
        - build: linux-arm
          os: ubuntu-18.04
          rust: nightly
          target: arm-unknown-linux-gnueabihf
        - build: macos
          os: macos-latest
          rust: nightly
          target: x86_64-apple-darwin
        - build: win-msvc
          os: windows-2019
          rust: nightly
          target: x86_64-pc-windows-msvc
        - build: win-gnu
          os: windows-2019
          rust: nightly-x86_64-gnu
          target: x86_64-pc-windows-gnu
        - build: win32-msvc
          os: windows-2019
          rust: nightly
          target: i686-pc-windows-msvc

    steps:
    - name: Checkout repository
      uses: actions/checkout@v1
      with:
        fetch-depth: 1

    - name: Install packages (Ubuntu)
      if: matrix.os == 'ubuntu-18.04'
      run: |
        ci/ubuntu-install-packages

    - name: Install packages (macOS)
      if: matrix.os == 'macos-latest'
      run: |
        ci/macos-install-packages

    - name: Install Rust
      uses: actions-rs/toolchain@v1
      with:
        toolchain: ${{ matrix.rust }}
        profile: minimal
        override: true
        target: ${{ matrix.target }}

    - name: Use Cross
      # if: matrix.os != 'windows-2019'
      run: |
        # FIXME: to work around bugs in latest cross release, install master.
        # See: https://github.com/rust-embedded/cross/issues/357
        cargo install --git https://github.com/rust-embedded/cross
        echo "::set-env name=CARGO::cross"
        echo "::set-env name=TARGET_FLAGS::--target ${{ matrix.target }}"
        echo "::set-env name=TARGET_DIR::./target/${{ matrix.target }}"

    - name: Show command used for Cargo
      run: |
        echo "cargo command is: ${{ env.CARGO }}"
        echo "target flag is: ${{ env.TARGET_FLAGS }}"
        echo "target dir is: ${{ env.TARGET_DIR }}"

    - name: Get release download URL
      uses: actions/download-artifact@v1
      with:
        name: artifacts
        path: artifacts

    - name: Set release upload URL and release version
      shell: bash
      run: |
        release_upload_url="$(cat artifacts/release-upload-url)"
        echo "::set-env name=RELEASE_UPLOAD_URL::$release_upload_url"
        echo "release upload url: $RELEASE_UPLOAD_URL"
        release_version="$(cat artifacts/release-version)"
        echo "::set-env name=RELEASE_VERSION::$release_version"
        echo "release version: $RELEASE_VERSION"

    - name: Build release binary
      run: ${{ env.CARGO }} build --verbose --release --features pcre2 ${{ env.TARGET_FLAGS }}

    - name: Strip release binary (linux and macos)
      if: matrix.build == 'linux' || matrix.build == 'macos'
      run: strip "target/${{ matrix.target }}/release/rg"

    - name: Strip release binary (arm)
      if: matrix.build == 'linux-arm'
      run: |
        docker run --rm -v \
          "$PWD/target:/target:Z" \
          rustembedded/cross:arm-unknown-linux-gnueabihf \
          arm-linux-gnueabihf-strip \
          /target/arm-unknown-linux-gnueabihf/release/rg

    - name: Build archive
      shell: bash
      run: |
        outdir="$(ci/cargo-out-dir "${{ env.TARGET_DIR }}")"
        staging="ripgrep-${{ env.RELEASE_VERSION }}-${{ matrix.target }}"
        mkdir -p "$staging"/{complete,doc}

        cp {README.md,COPYING,UNLICENSE,LICENSE-MIT} "$staging/"
        cp {CHANGELOG.md,FAQ.md,GUIDE.md} "$staging/doc/"
        cp "$outdir"/{rg.bash,rg.fish,_rg.ps1} "$staging/complete/"
        cp complete/_rg "$staging/complete/"

        if [ "${{ matrix.os }}" = "windows-2019" ]; then
          cp "target/${{ matrix.target }}/release/rg.exe" "$staging/"
          7z a "$staging.zip" "$staging"
          echo "::set-env name=ASSET::$staging.zip"
        else
          # The man page is only generated on Unix systems. ¯\_(ツ)_/¯
          cp "$outdir"/rg.1 "$staging/doc/"
          cp "target/${{ matrix.target }}/release/rg" "$staging/"
          tar czf "$staging.tar.gz" "$staging"
          echo "::set-env name=ASSET::$staging.tar.gz"
        fi

    - name: Upload release archive
      uses: actions/upload-release-asset@v1.0.1
      env:
        GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
      with:
        upload_url: ${{ env.RELEASE_UPLOAD_URL }}
        asset_path: ${{ env.ASSET }}
        asset_name: ${{ env.ASSET }}
        asset_content_type: application/octet-stream
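
The two jobs above hand off state through the `artifacts` upload: `create-release` writes the release upload URL and version number to files, and `build-release` reads them back before attaching assets named `ripgrep-<version>-<target>.tar.gz` (or `.zip` on Windows). As a hedged illustration of consuming such an asset, the version and target below are examples rather than anything this workflow pins:

```bash
# Download, unpack and run a published release asset. Asset names follow the
# ripgrep-<version>-<target> pattern produced by the "Build archive" step.
curl -LO https://github.com/BurntSushi/ripgrep/releases/download/12.0.0/ripgrep-12.0.0-x86_64-unknown-linux-musl.tar.gz
tar xzf ripgrep-12.0.0-x86_64-unknown-linux-musl.tar.gz
./ripgrep-12.0.0-x86_64-unknown-linux-musl/rg --version
```
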
.gitignore (vendored, 9 lines changed)
@@ -6,3 +6,12 @@ target

/ignore/Cargo.lock
/termcolor/Cargo.lock
/wincolor/Cargo.lock
/deployment

# Snapcraft files
stage
prime
parts
*.snap
*.pyc
ripgrep*_source.tar.bz2
.travis.yml (deleted, 73 lines)
@@ -1,73 +0,0 @@

language: rust

env:
  global:
    - PROJECT_NAME=ripgrep
matrix:
  include:
    # Nightly channel.
    # (All *nix releases are done on the nightly channel to take advantage
    # of the regex library's multiple pattern SIMD search.)
    - os: linux
      rust: nightly-2017-03-13
      env: TARGET=i686-unknown-linux-musl
    - os: linux
      rust: nightly-2017-03-13
      env: TARGET=x86_64-unknown-linux-musl
    - os: osx
      rust: nightly-2017-03-13
      env: TARGET=x86_64-apple-darwin
    # Beta channel.
    - os: linux
      rust: beta
      env: TARGET=x86_64-unknown-linux-musl
    - os: linux
      rust: beta
      env: TARGET=x86_64-unknown-linux-gnu
    # Minimum Rust supported channel.
    - os: linux
      rust: 1.12.0
      env: TARGET=x86_64-unknown-linux-gnu
    - os: linux
      rust: 1.12.0
      env: TARGET=x86_64-unknown-linux-musl

before_install:
  - export PATH="$PATH:$HOME/.cargo/bin"

install:
  - bash ci/install.sh

script:
  - bash ci/script.sh

before_deploy:
  - bash ci/before_deploy.sh

deploy:
  provider: releases
  api_key:
secure: "IbSnsbGkxSydR/sozOf1/SRvHplzwRUHzcTjM7BKnr7GccL86gRPUrsrvD103KjQUGWIc1TnK1YTq5M0Onswg/ORDjqa1JEJPkPdPnVh9ipbF7M2De/7IlB4X4qXLKoApn8+bx2x/mfYXu4G+G1/2QdbaKK2yfXZKyjz0YFx+6CNrVCT2Nk8q7aHvOOzAL58vsG8iPDpupuhxlMDDn/UhyOWVInmPPQ0iJR1ZUJN8xJwXvKvBbfp3AhaBiAzkhXHNLgBR8QC5noWWMXnuVDMY3k4f3ic0V+p/qGUCN/nhptuceLxKFicMCYObSZeUzE5RAI0/OBW7l3z2iCoc+TbAnn+JrX/ObJCfzgAOXAU3tLaBFMiqQPGFKjKg1ltSYXomOFP/F7zALjpvFp4lYTBajRR+O3dqaxA9UQuRjw27vOeUpMcga4ZzL4VXFHzrxZKBHN//XIGjYAVhJ1NSSeGpeJV5/+jYzzWKfwSagRxQyVCzMooYFFXzn8Yxdm3PJlmp3GaAogNkdB9qKcrEvRINCelalzALPi0hD/HUDi8DD2PNTCLLMo6VSYtvc685Zbe+KgNzDV1YyTrRCUW6JotrS0r2ULLwnsh40hSB//nNv3XmwNmC/CmW5QAnIGj8cBMF4S2t6ohADIndojdAfNiptmaZOIT6owK7bWMgPMyopo="
  file_glob: true
  file: ${PROJECT_NAME}-${TRAVIS_TAG}-${TARGET}.*
  # don't delete the artifacts from previous phases
  skip_cleanup: true
  # deploy when a new tag is pushed
  on:
    # channel to use to produce the release artifacts
    # NOTE make sure you only release *once* per target
    # TODO you may want to pick a different channel
    condition: $TRAVIS_RUST_VERSION = nightly-2017-03-13
    tags: true

branches:
  only:
    # Pushes and PR to the master branch
    - master
    # IMPORTANT Ruby regex to match tags. Required, or travis won't trigger deploys when a new tag
    # is pushed. This regex matches semantic versions like v1.2.3-rc4+2016.02.22
    - /^\d+\.\d+\.\d+.*$/

notifications:
  email:
    on_success: never
CHANGELOG.md (845 lines changed)
@@ -1,3 +1,848 @@

12.0.0 (2020-03-15)
===================
ripgrep 12 is a new major version release of ripgrep that contains many bug
fixes, several important performance improvements and a few minor new features.

In a near future release, I am hoping to add an
[indexing feature](https://github.com/BurntSushi/ripgrep/issues/1497)
to ripgrep, which will dramatically speed up searching by building an index.
Feedback would very much be appreciated, especially on the user experience,
which will be difficult to get right.

This release has no known breaking changes.

Deprecations:

* The `--no-pcre2-unicode` flag is deprecated. Instead, use the `--no-unicode`
  flag, which applies to both the default regex engine and PCRE2. For now,
  `--no-pcre2-unicode` and `--pcre2-unicode` are aliases to `--no-unicode`
  and `--unicode`, respectively. The `--[no-]pcre2-unicode` flags may be
  removed in a future release.
* The `--auto-hybrid-regex` flag is deprecated. Instead, use the new `--engine`
  flag with the `auto` value.
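
To make the `--auto-hybrid-regex` → `--engine` migration concrete, here is a small hedged illustration; the pattern and path are placeholders, and it assumes a ripgrep build with PCRE2 support:

```bash
# Look-around needs PCRE2; `--engine auto` falls back to it automatically,
# while the default engine would reject this pattern.
rg --engine auto '(?<=fn )\w+' src/main.rs

# Equivalent to the now-deprecated form:
rg --auto-hybrid-regex '(?<=fn )\w+' src/main.rs
```
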
Performance improvements:

* [PERF #1087](https://github.com/BurntSushi/ripgrep/pull/1087):
  ripgrep is smarter when detected literals are whitespace.
* [PERF #1381](https://github.com/BurntSushi/ripgrep/pull/1381):
  Directory traversal is sped up with speculative ignore-file existence checks.
* [PERF cd8ec38a](https://github.com/BurntSushi/ripgrep/commit/cd8ec38a):
  Improve inner literal detection to cover more cases more effectively.
  e.g., ` +Sherlock Holmes +` now has ` Sherlock Holmes ` extracted instead
  of ` `.
* [PERF 6a0e0147](https://github.com/BurntSushi/ripgrep/commit/6a0e0147):
  Improve literal detection when the `-w/--word-regexp` flag is used.
* [PERF ad97e9c9](https://github.com/BurntSushi/ripgrep/commit/ad97e9c9):
  Improve overall performance of the `-w/--word-regexp` flag.

Feature enhancements:

* Added or improved file type filtering for erb, diff, Gradle, HAML, Org,
  Postscript, Skim, Slim, Slime, RPM Spec files, Typoscript, xml.
* [FEATURE #1370](https://github.com/BurntSushi/ripgrep/pull/1370):
  Add `--include-zero` flag that shows files searched without matches.
* [FEATURE #1390](https://github.com/BurntSushi/ripgrep/pull/1390):
  Add `--no-context-separator` flag that always hides context separators.
* [FEATURE #1414](https://github.com/BurntSushi/ripgrep/pull/1414):
  Add `--no-require-git` flag to allow ripgrep to respect gitignores anywhere.
* [FEATURE #1420](https://github.com/BurntSushi/ripgrep/pull/1420):
  Add `--no-ignore-exclude` to disregard rules in `.git/info/exclude` files.
* [FEATURE #1466](https://github.com/BurntSushi/ripgrep/pull/1466):
  Add `--no-ignore-files` flag to disable all `--ignore-file` flags.
* [FEATURE #1488](https://github.com/BurntSushi/ripgrep/pull/1488):
  Add '--engine' flag for easier switching between regex engines.
* [FEATURE 75cbe88f](https://github.com/BurntSushi/ripgrep/commit/75cbe88f):
  Add `--no-unicode` flag. This works on all supported regex engines.
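
A short hedged illustration of two of the flags above; the pattern and paths are placeholders:

```bash
# Print a per-file match count, keeping files that had zero matches
# (without --include-zero, the ":0" entries would be omitted).
rg -c --include-zero 'TODO' src/

# Respect .gitignore rules even when the directory is not a git repository.
rg --no-require-git 'TODO' /some/plain/directory
```
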
Bug fixes:

* [BUG #1291](https://github.com/BurntSushi/ripgrep/issues/1291):
  ripgrep now works in non-existent directories.
* [BUG #1319](https://github.com/BurntSushi/ripgrep/issues/1319):
  Fix match bug due to errant literal detection.
* [**BUG #1335**](https://github.com/BurntSushi/ripgrep/issues/1335):
  Fixes a performance bug when searching plain text files with very long lines.
  This was a serious performance regression in some cases.
* [BUG #1344](https://github.com/BurntSushi/ripgrep/issues/1344):
  Document usage of `--type all`.
* [BUG #1389](https://github.com/BurntSushi/ripgrep/issues/1389):
  Fixes a bug where ripgrep would panic when searching a symlinked directory.
* [BUG #1439](https://github.com/BurntSushi/ripgrep/issues/1439):
  Improve documentation for ripgrep's automatic stdin detection.
* [BUG #1441](https://github.com/BurntSushi/ripgrep/issues/1441):
  Remove CPU features from man page.
* [BUG #1442](https://github.com/BurntSushi/ripgrep/issues/1442),
  [BUG #1478](https://github.com/BurntSushi/ripgrep/issues/1478):
  Improve documentation of the `-g/--glob` flag.
* [BUG #1445](https://github.com/BurntSushi/ripgrep/issues/1445):
  ripgrep now respects ignore rules from .git/info/exclude in worktrees.
* [BUG #1485](https://github.com/BurntSushi/ripgrep/issues/1485):
  Fish shell completions from the release Debian package are now installed to
  `/usr/share/fish/vendor_completions.d/rg.fish`.


11.0.2 (2019-08-01)
===================
ripgrep 11.0.2 is a new patch release that fixes a few bugs, including a
performance regression and a matching bug when using the `-F/--fixed-strings`
flag.

Feature enhancements:

* [FEATURE #1293](https://github.com/BurntSushi/ripgrep/issues/1293):
  Added `--glob-case-insensitive` flag that makes `--glob` behave as `--iglob`.
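
A minimal hedged sketch of the new flag; the file names matched are hypothetical:

```bash
# With --glob-case-insensitive, the glob also selects README.MD, Readme.Md, etc.
rg --glob-case-insensitive -g '*.md' 'installation'
```
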
Bug fixes:

* [BUG #1246](https://github.com/BurntSushi/ripgrep/issues/1246):
  Add translations to README, starting with an unofficial Chinese translation.
* [BUG #1259](https://github.com/BurntSushi/ripgrep/issues/1259):
  Fix bug where the last byte of a `-f file` was stripped if it wasn't a `\n`.
* [BUG #1261](https://github.com/BurntSushi/ripgrep/issues/1261):
  Document that no error is reported when searching for `\n` with `-P/--pcre2`.
* [BUG #1284](https://github.com/BurntSushi/ripgrep/issues/1284):
  Mention `.ignore` and `.rgignore` more prominently in the README.
* [BUG #1292](https://github.com/BurntSushi/ripgrep/issues/1292):
  Fix bug where `--with-filename` was sometimes enabled incorrectly.
* [BUG #1268](https://github.com/BurntSushi/ripgrep/issues/1268):
  Fix major performance regression in GitHub `x86_64-linux` binary release.
* [BUG #1302](https://github.com/BurntSushi/ripgrep/issues/1302):
  Show better error messages when a non-existent preprocessor command is given.
* [BUG #1334](https://github.com/BurntSushi/ripgrep/issues/1334):
  Fix match regression with `-F` flag when patterns contain meta characters.


11.0.1 (2019-04-16)
===================
ripgrep 11.0.1 is a new patch release that fixes a search regression introduced
in the previous 11.0.0 release. In particular, ripgrep can enter an infinite
loop for some search patterns when searching invalid UTF-8.

Bug fixes:

* [BUG #1247](https://github.com/BurntSushi/ripgrep/issues/1247):
  Fix search bug that can cause ripgrep to enter an infinite loop.


11.0.0 (2019-04-15)
===================
ripgrep 11 is a new major version release of ripgrep that contains many bug
fixes, some performance improvements and a few feature enhancements. Notably,
ripgrep's user experience for binary file filtering has been improved. See the
[guide's new section on binary data](GUIDE.md#binary-data) for more details.

This release also marks a change in ripgrep's versioning. Whereas the previous
version was `0.10.0`, this version is `11.0.0`. Moving forward, ripgrep's
major version will be increased a few times per year. ripgrep will continue to
be conservative with respect to backwards compatibility, but may occasionally
introduce breaking changes, which will always be documented in this CHANGELOG.
See [issue 1172](https://github.com/BurntSushi/ripgrep/issues/1172) for a bit
more detail on why this versioning change was made.

This release increases the **minimum supported Rust version** from 1.28.0 to
1.34.0.

**BREAKING CHANGES**:

* ripgrep has tweaked its exit status codes to be more like GNU grep's. Namely,
  if a non-fatal error occurs during a search, then ripgrep will now always
  emit a `2` exit status code, regardless of whether a match is found or not.
  Previously, ripgrep would only emit a `2` exit status code for a catastrophic
  error (e.g., regex syntax error). One exception to this is if ripgrep is run
  with `-q/--quiet`. In that case, if an error occurs and a match is found,
  then ripgrep will exit with a `0` exit status code.
* Supplying the `-u/--unrestricted` flag three times is now equivalent to
  supplying `--no-ignore --hidden --binary`. Previously, `-uuu` was equivalent
  to `--no-ignore --hidden --text`. The difference is that `--binary` disables
  binary file filtering without potentially dumping binary data into your
  terminal. That is, `rg -uuu foo` should now be equivalent to `grep -r foo`.
* The `avx-accel` feature of ripgrep has been removed since it is no longer
  necessary. All uses of AVX in ripgrep are now enabled automatically via
  runtime CPU feature detection. The `simd-accel` feature does remain available
  (only for enabling SIMD for transcoding), however, it does increase
  compilation times substantially at the moment.
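
To make the new exit-status rules concrete, here is a small hedged illustration; the pattern and file names are placeholders:

```bash
rg 'needle' haystack.txt;         echo $?   # 0: at least one match was found
rg 'no-such-needle' haystack.txt; echo $?   # 1: the search ran, but found nothing
rg 'needle' ./does-not-exist;     echo $?   # 2: a search error occurred
```
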
Performance improvements:

* [PERF #497](https://github.com/BurntSushi/ripgrep/issues/497),
  [PERF #838](https://github.com/BurntSushi/ripgrep/issues/838):
  Make `rg -F -f dictionary-of-literals` much faster.

Feature enhancements:

* Added or improved file type filtering for Apache Thrift, ASP, Bazel, Brotli,
  BuildStream, bzip2, C, C++, Cython, gzip, Java, Make, Postscript, QML, Tex,
  XML, xz, zig and zstd.
* [FEATURE #855](https://github.com/BurntSushi/ripgrep/issues/855):
  Add `--binary` flag for disabling binary file filtering.
* [FEATURE #1078](https://github.com/BurntSushi/ripgrep/pull/1078):
  Add `--max-columns-preview` flag for showing a preview of long lines.
* [FEATURE #1099](https://github.com/BurntSushi/ripgrep/pull/1099):
  Add support for Brotli and Zstd to the `-z/--search-zip` flag.
* [FEATURE #1138](https://github.com/BurntSushi/ripgrep/pull/1138):
  Add `--no-ignore-dot` flag for ignoring `.ignore` files.
* [FEATURE #1155](https://github.com/BurntSushi/ripgrep/pull/1155):
  Add `--auto-hybrid-regex` flag for automatically falling back to PCRE2.
* [FEATURE #1159](https://github.com/BurntSushi/ripgrep/pull/1159):
  ripgrep's exit status logic should now match GNU grep. See updated man page.
* [FEATURE #1164](https://github.com/BurntSushi/ripgrep/pull/1164):
  Add `--ignore-file-case-insensitive` for case insensitive ignore globs.
* [FEATURE #1185](https://github.com/BurntSushi/ripgrep/pull/1185):
  Add `-I` flag as a short option for the `--no-filename` flag.
* [FEATURE #1207](https://github.com/BurntSushi/ripgrep/pull/1207):
  Add `none` value to `-E/--encoding` to forcefully disable all transcoding.
* [FEATURE da9d7204](https://github.com/BurntSushi/ripgrep/commit/da9d7204):
  Add `--pcre2-version` for showing PCRE2 version information.
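
A hedged sketch of two of the additions above; it assumes the external `zstd` tool is installed, since `-z/--search-zip` shells out to decompression programs, and the file names are placeholders:

```bash
# Search inside a zstd-compressed log without decompressing it by hand.
rg -z 'connection reset' server.log.zst

# Cap very long lines at 120 columns, but keep a truncated preview of the
# matching line instead of suppressing it entirely.
rg --max-columns 120 --max-columns-preview 'base64,' assets/
```
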
Bug fixes:

* [BUG #306](https://github.com/BurntSushi/ripgrep/issues/306),
  [BUG #855](https://github.com/BurntSushi/ripgrep/issues/855):
  Improve the user experience for ripgrep's binary file filtering.
* [BUG #373](https://github.com/BurntSushi/ripgrep/issues/373),
  [BUG #1098](https://github.com/BurntSushi/ripgrep/issues/1098):
  `**` is now accepted as valid syntax anywhere in a glob.
* [BUG #916](https://github.com/BurntSushi/ripgrep/issues/916):
  ripgrep no longer hangs when searching `/proc` with a zombie process present.
* [BUG #1052](https://github.com/BurntSushi/ripgrep/issues/1052):
  Fix bug where ripgrep could panic when transcoding UTF-16 files.
* [BUG #1055](https://github.com/BurntSushi/ripgrep/issues/1055):
  Suggest `-U/--multiline` when a pattern contains a `\n`.
* [BUG #1063](https://github.com/BurntSushi/ripgrep/issues/1063):
  Always strip a BOM if it's present, even for UTF-8.
* [BUG #1064](https://github.com/BurntSushi/ripgrep/issues/1064):
  Fix inner literal detection that could lead to incorrect matches.
* [BUG #1079](https://github.com/BurntSushi/ripgrep/issues/1079):
  Fixes a bug where the order of globs could result in missing a match.
* [BUG #1089](https://github.com/BurntSushi/ripgrep/issues/1089):
  Fix another bug where ripgrep could panic when transcoding UTF-16 files.
* [BUG #1091](https://github.com/BurntSushi/ripgrep/issues/1091):
  Add note about inverted flags to the man page.
* [BUG #1093](https://github.com/BurntSushi/ripgrep/pull/1093):
  Fix handling of literal slashes in gitignore patterns.
* [BUG #1095](https://github.com/BurntSushi/ripgrep/issues/1095):
  Fix corner cases involving the `--crlf` flag.
* [BUG #1101](https://github.com/BurntSushi/ripgrep/issues/1101):
  Fix AsciiDoc escaping for man page output.
* [BUG #1103](https://github.com/BurntSushi/ripgrep/issues/1103):
  Clarify what `--encoding auto` does.
* [BUG #1106](https://github.com/BurntSushi/ripgrep/issues/1106):
  `--files-with-matches` and `--files-without-match` work with one file.
* [BUG #1121](https://github.com/BurntSushi/ripgrep/issues/1121):
  Fix bug that was triggering Windows antimalware when using the `--files`
  flag.
* [BUG #1125](https://github.com/BurntSushi/ripgrep/issues/1125),
  [BUG #1159](https://github.com/BurntSushi/ripgrep/issues/1159):
  ripgrep shouldn't panic for `rg -h | rg` and should emit correct exit status.
* [BUG #1144](https://github.com/BurntSushi/ripgrep/issues/1144):
  Fixes a bug where line numbers could be wrong on big-endian machines.
* [BUG #1154](https://github.com/BurntSushi/ripgrep/issues/1154):
  Windows files with "hidden" attribute are now treated as hidden.
* [BUG #1173](https://github.com/BurntSushi/ripgrep/issues/1173):
  Fix handling of `**` patterns in gitignore files.
* [BUG #1174](https://github.com/BurntSushi/ripgrep/issues/1174):
  Fix handling of repeated `**` patterns in gitignore files.
* [BUG #1176](https://github.com/BurntSushi/ripgrep/issues/1176):
  Fix bug where `-F`/`-x` weren't applied to patterns given via `-f`.
* [BUG #1189](https://github.com/BurntSushi/ripgrep/issues/1189):
  Document cases where ripgrep may use a lot of memory.
* [BUG #1203](https://github.com/BurntSushi/ripgrep/issues/1203):
  Fix a matching bug related to the suffix literal optimization.
* [BUG 8f14cb18](https://github.com/BurntSushi/ripgrep/commit/8f14cb18):
  Increase the default stack size for PCRE2's JIT.


0.10.0 (2018-09-07)
===================
This is a new minor version release of ripgrep that contains some major new
features, a huge number of bug fixes, and is the first release based on
libripgrep. The entirety of ripgrep's core search and printing code has been
rewritten and generalized so that anyone can make use of it.

Major new features include PCRE2 support, multi-line search and a JSON output
format.

**BREAKING CHANGES**:

* The minimum version of Rust required to compile ripgrep has now changed to
  track the latest stable version of Rust. Patch releases will continue to
  compile with the same version of Rust as the previous patch release, but new
  minor versions will use the current stable version of the Rust compiler as
  its minimum supported version.
* The match semantics of `-w/--word-regexp` have changed slightly. They used
  to be `\b(?:<your pattern>)\b`, but now it's
  `(?:^|\W)(?:<your pattern>)(?:$|\W)`. This matches the behavior of GNU grep
  and is believed to be closer to the intended semantics of the flag. See
  [#389](https://github.com/BurntSushi/ripgrep/issues/389) for more details.

Feature enhancements:

* [FEATURE #162](https://github.com/BurntSushi/ripgrep/issues/162):
  libripgrep is now a thing. The primary crate is
  [`grep`](https://docs.rs/grep).
* [FEATURE #176](https://github.com/BurntSushi/ripgrep/issues/176):
  Add `-U/--multiline` flag that permits matching over multiple lines.
* [FEATURE #188](https://github.com/BurntSushi/ripgrep/issues/188):
  Add `-P/--pcre2` flag that gives support for look-around and backreferences.
* [FEATURE #244](https://github.com/BurntSushi/ripgrep/issues/244):
  Add `--json` flag that prints results in a JSON Lines format.
* [FEATURE #321](https://github.com/BurntSushi/ripgrep/issues/321):
  Add `--one-file-system` flag to skip directories on different file systems.
* [FEATURE #404](https://github.com/BurntSushi/ripgrep/issues/404):
  Add `--sort` and `--sortr` flag for more sorting. Deprecate `--sort-files`.
* [FEATURE #416](https://github.com/BurntSushi/ripgrep/issues/416):
  Add `--crlf` flag to permit `$` to work with carriage returns on Windows.
* [FEATURE #917](https://github.com/BurntSushi/ripgrep/issues/917):
  The `--trim` flag strips prefix whitespace from all lines printed.
* [FEATURE #993](https://github.com/BurntSushi/ripgrep/issues/993):
  Add `--null-data` flag, which makes ripgrep use NUL as a line terminator.
* [FEATURE #997](https://github.com/BurntSushi/ripgrep/issues/997):
  The `--passthru` flag now works with the `--replace` flag.
* [FEATURE #1038-1](https://github.com/BurntSushi/ripgrep/issues/1038):
  Add `--line-buffered` and `--block-buffered` for forcing a buffer strategy.
* [FEATURE #1038-2](https://github.com/BurntSushi/ripgrep/issues/1038):
  Add `--pre-glob` for filtering files through the `--pre` flag.
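
Two of the headline features above, sketched briefly; the pattern, path, and `jq` post-processing are illustrative rather than part of the release notes:

```bash
# -U/--multiline lets a pattern span line boundaries.
rg -U 'fn main\(\)\s*\{\n\s*println!' src/

# --json emits one JSON object per line (JSON Lines); here jq keeps only the
# matched line text from "match" events.
rg --json 'TODO' src/ | jq -r 'select(.type == "match") | .data.lines.text'
```
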
Bug fixes:

* [BUG #2](https://github.com/BurntSushi/ripgrep/issues/2):
  Searching with non-zero context can now use memory maps if appropriate.
* [BUG #200](https://github.com/BurntSushi/ripgrep/issues/200):
  ripgrep will now stop correctly when its output pipe is closed.
* [BUG #389](https://github.com/BurntSushi/ripgrep/issues/389):
  The `-w/--word-regexp` flag now works more intuitively.
* [BUG #643](https://github.com/BurntSushi/ripgrep/issues/643):
  Detection of readable stdin has improved on Windows.
* [BUG #441](https://github.com/BurntSushi/ripgrep/issues/441),
  [BUG #690](https://github.com/BurntSushi/ripgrep/issues/690),
  [BUG #980](https://github.com/BurntSushi/ripgrep/issues/980):
  Matching empty lines now works correctly in several corner cases.
* [BUG #764](https://github.com/BurntSushi/ripgrep/issues/764):
  Color escape sequences now coalesce, which reduces output size.
* [BUG #842](https://github.com/BurntSushi/ripgrep/issues/842):
  Add man page to binary Debian package.
* [BUG #922](https://github.com/BurntSushi/ripgrep/issues/922):
  ripgrep is now more robust with respect to memory maps failing.
* [BUG #937](https://github.com/BurntSushi/ripgrep/issues/937):
  Color escape sequences are no longer emitted for empty matches.
* [BUG #940](https://github.com/BurntSushi/ripgrep/issues/940):
  Context from the `--passthru` flag should not impact process exit status.
* [BUG #984](https://github.com/BurntSushi/ripgrep/issues/984):
  Fixes bug in `ignore` crate where first path was always treated as a symlink.
* [BUG #990](https://github.com/BurntSushi/ripgrep/issues/990):
  Read stderr asynchronously when running a process.
* [BUG #1013](https://github.com/BurntSushi/ripgrep/issues/1013):
  Add compile time and runtime CPU features to `--version` output.
* [BUG #1028](https://github.com/BurntSushi/ripgrep/pull/1028):
  Don't complete bare pattern after `-f` in zsh.


0.9.0 (2018-08-03)
==================
This is a new minor version release of ripgrep that contains some minor new
features and a panoply of bug fixes.

Releases provided on Github for `x86_64` will now work on all target CPUs, and
will also automatically take advantage of features found on modern CPUs (such
as AVX2) for additional optimizations.

This release increases the **minimum supported Rust version** from 1.20.0 to
1.23.0.

It is anticipated that the next release of ripgrep (0.10.0) will provide
multi-line search support and a JSON output format.

**BREAKING CHANGES**:

* When `--count` and `--only-matching` are provided simultaneously, the
  behavior of ripgrep is as if the `--count-matches` flag was given. That is,
  the total number of matches is reported, where there may be multiple matches
  per line. Previously, the behavior of ripgrep was to report the total number
  of matching lines. (Note that this behavior diverges from the behavior of
  GNU grep.)
* Octal syntax is no longer supported. ripgrep previously accepted expressions
  like `\1` as syntax for matching `U+0001`, but ripgrep will now report an
  error instead.
* The `--line-number-width` flag has been removed. Its functionality was not
  carefully considered with all ripgrep output formats.
  See [#795](https://github.com/BurntSushi/ripgrep/issues/795) for more
  details.
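
A small hedged illustration of the `--count`/`--only-matching` change; the sample file is hypothetical:

```bash
printf 'foo foo\nfoo\n' > sample.txt

rg --count foo sample.txt                  # 2: matching lines
rg --count-matches foo sample.txt          # 3: individual matches
rg --count --only-matching foo sample.txt  # now also 3, same as --count-matches
```
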
Feature enhancements:

* Added or improved file type filtering for Android, Bazel, Fuchsia, Haskell,
  Java and Puppet.
* [FEATURE #411](https://github.com/BurntSushi/ripgrep/issues/411):
  Add a `--stats` flag, which emits aggregate statistics after search results.
* [FEATURE #646](https://github.com/BurntSushi/ripgrep/issues/646):
  Add a `--no-ignore-messages` flag, which suppresses parse errors from reading
  `.ignore` and `.gitignore` files.
* [FEATURE #702](https://github.com/BurntSushi/ripgrep/issues/702):
  Support `\u{..}` Unicode escape sequences. (See the sketch after this list.)
* [FEATURE #812](https://github.com/BurntSushi/ripgrep/issues/812):
  Add `-b/--byte-offset` flag that shows the byte offset of each matching line.
* [FEATURE #814](https://github.com/BurntSushi/ripgrep/issues/814):
  Add `--count-matches` flag, which is like `--count`, but for each match.
* [FEATURE #880](https://github.com/BurntSushi/ripgrep/issues/880):
  Add a `--no-column` flag, which disables column numbers in the output.
* [FEATURE #898](https://github.com/BurntSushi/ripgrep/issues/898):
  Add support for `lz4` when using the `-z/--search-zip` flag.
* [FEATURE #924](https://github.com/BurntSushi/ripgrep/issues/924):
  `termcolor` has moved to its own repository:
  https://github.com/BurntSushi/termcolor
* [FEATURE #934](https://github.com/BurntSushi/ripgrep/issues/934):
  Add a new flag, `--no-ignore-global`, that permits disabling global
  gitignores.
* [FEATURE #967](https://github.com/BurntSushi/ripgrep/issues/967):
  Rename `--maxdepth` to `--max-depth` for consistency. Keep `--maxdepth` for
  backwards compatibility.
* [FEATURE #978](https://github.com/BurntSushi/ripgrep/issues/978):
  Add a `--pre` option to filter inputs with an arbitrary program.
* [FEATURE fca9709d](https://github.com/BurntSushi/ripgrep/commit/fca9709d):
  Improve zsh completion.

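Since ripgrep's default engine is Rust's `regex` crate, the new `\u{..}` escape behaves like the crate's own hexadecimal escape. The snippet below is a hypothetical, library-level illustration of the equivalent patterns; the literal characters chosen are arbitrary.

```rust
use regex::Regex;

fn main() {
    // U+00E9 is 'é'. With 0.9.0, `rg '\u{00E9}'` accepts the same escape.
    let re = Regex::new(r"caf\u{00E9}").unwrap();
    assert!(re.is_match("café"));

    // The older `\x{..}` hexadecimal form continues to work as well.
    let re_hex = Regex::new(r"caf\x{E9}").unwrap();
    assert!(re_hex.is_match("café"));

    println!("both escape forms matched");
}
```
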
Bug fixes:

* [BUG #135](https://github.com/BurntSushi/ripgrep/issues/135):
  Release portable binaries that conditionally use SSSE3, AVX2, etc., at
  runtime.
* [BUG #268](https://github.com/BurntSushi/ripgrep/issues/268):
  Print descriptive error message when trying to use look-around or
  backreferences.
* [BUG #395](https://github.com/BurntSushi/ripgrep/issues/395):
  Show comprehensible error messages for regexes like `\s*{`.
* [BUG #526](https://github.com/BurntSushi/ripgrep/issues/526):
  Support backslash escapes in globs.
* [BUG #795](https://github.com/BurntSushi/ripgrep/issues/795):
  Fix problems with `--line-number-width` by removing it.
* [BUG #832](https://github.com/BurntSushi/ripgrep/issues/832):
  Clarify usage instructions for the `-f/--file` flag.
* [BUG #835](https://github.com/BurntSushi/ripgrep/issues/835):
  Fix small performance regression while crawling very large directory trees.
* [BUG #851](https://github.com/BurntSushi/ripgrep/issues/851):
  Fix `-S/--smart-case` detection once and for all.
* [BUG #852](https://github.com/BurntSushi/ripgrep/issues/852):
  Be robust with respect to `ENOMEM` errors returned by `mmap`.
* [BUG #853](https://github.com/BurntSushi/ripgrep/issues/853):
  Upgrade `grep` crate to `regex-syntax 0.6.0`.
* [BUG #893](https://github.com/BurntSushi/ripgrep/issues/893):
  Improve support for git submodules.
* [BUG #900](https://github.com/BurntSushi/ripgrep/issues/900):
  When no patterns are given, ripgrep should never match anything.
* [BUG #907](https://github.com/BurntSushi/ripgrep/issues/907):
  ripgrep will now stop traversing after the first file when `--quiet --files`
  is used.
* [BUG #918](https://github.com/BurntSushi/ripgrep/issues/918):
  Don't skip tar archives when `-z/--search-zip` is used.
* [BUG #934](https://github.com/BurntSushi/ripgrep/issues/934):
  Don't respect gitignore files when searching outside git repositories.
* [BUG #948](https://github.com/BurntSushi/ripgrep/issues/948):
  Use exit code 2 to indicate error, and use exit code 1 to indicate no
  matches. (See the sketch after this list.)
* [BUG #951](https://github.com/BurntSushi/ripgrep/issues/951):
  Add stdin example to ripgrep usage documentation.
* [BUG #955](https://github.com/BurntSushi/ripgrep/issues/955):
  Use buffered writing when not printing to a tty, which fixes a performance
  regression.
* [BUG #957](https://github.com/BurntSushi/ripgrep/issues/957):
  Improve the error message shown for `--path-separator /` in some Windows
  shells.
* [BUG #964](https://github.com/BurntSushi/ripgrep/issues/964):
  Add a `--no-fixed-strings` flag to disable `-F/--fixed-strings`.
* [BUG #988](https://github.com/BurntSushi/ripgrep/issues/988):
  Fix a bug in the `ignore` crate that prevented the use of explicit ignore
  files after disabling all other ignore rules.
* [BUG #995](https://github.com/BurntSushi/ripgrep/issues/995):
  Respect `$XDG_CONFIG_HOME/git/config` for detecting `core.excludesFile`.

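The exit-code convention settled in #948 (0 for at least one match, 1 for no match, 2 for an error) is easy to consume from other programs. The sketch below is a hypothetical wrapper that shells out to `rg` and branches on the code; it assumes an `rg` binary on `PATH`, and the pattern and directory arguments are placeholders.

```rust
use std::process::Command;

fn main() -> std::io::Result<()> {
    // Placeholder pattern and search directory; replace with real arguments.
    let status = Command::new("rg")
        .args(["--quiet", "TODO", "./src"])
        .status()?;

    match status.code() {
        Some(0) => println!("at least one match found"),
        Some(1) => println!("no matches"),
        Some(2) => println!("ripgrep reported an error"),
        other => println!("terminated abnormally: {:?}", other),
    }
    Ok(())
}
```
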
0.8.1 (2018-02-20)
==================
This is a patch release of ripgrep that primarily fixes regressions introduced
in 0.8.0 (#820 and #824) in directory traversal on Windows. These regressions
do not impact non-Windows users.

Feature enhancements:

* Added or improved file type filtering for csv and VHDL.
* [FEATURE #798](https://github.com/BurntSushi/ripgrep/issues/798):
  Add `underline` support to `termcolor` and ripgrep. See documentation on the
  `--colors` flag for details. (A sketch of the library-level API follows.)

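At the library level, underline support in `termcolor` is exposed through `ColorSpec`. The snippet below is a minimal sketch of that API, not ripgrep's own printer code; the color choice and the message are arbitrary.

```rust
use std::io::Write;

use termcolor::{Color, ColorChoice, ColorSpec, StandardStream, WriteColor};

fn main() -> std::io::Result<()> {
    let mut out = StandardStream::stdout(ColorChoice::Auto);

    // Enable underlining alongside a foreground color.
    let mut spec = ColorSpec::new();
    spec.set_fg(Some(Color::Red)).set_underline(true);

    out.set_color(&spec)?;
    write!(out, "underlined and red")?;
    out.reset()?;
    writeln!(out)?;
    Ok(())
}
```
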
Bug fixes:

* [BUG #684](https://github.com/BurntSushi/ripgrep/issues/684):
  Improve documentation for the `--ignore-file` flag.
* [BUG #789](https://github.com/BurntSushi/ripgrep/issues/789):
  Don't show `(rev )` if the revision wasn't available during the build.
* [BUG #791](https://github.com/BurntSushi/ripgrep/issues/791):
  Add man page to ARM release.
* [BUG #797](https://github.com/BurntSushi/ripgrep/issues/797):
  Improve documentation for "intense" setting in `termcolor`.
* [BUG #800](https://github.com/BurntSushi/ripgrep/issues/800):
  Fix a bug in the `ignore` crate for custom ignore files. This had no impact
  on ripgrep.
* [BUG #807](https://github.com/BurntSushi/ripgrep/issues/807):
  Fix a bug where `rg --hidden .` behaved differently from `rg --hidden ./`.
* [BUG #815](https://github.com/BurntSushi/ripgrep/issues/815):
  Clarify a common failure mode in the user guide.
* [BUG #820](https://github.com/BurntSushi/ripgrep/issues/820):
  Fix a bug on Windows where symlinks were followed even if not requested.
* [BUG #824](https://github.com/BurntSushi/ripgrep/issues/824):
  Fix a performance regression in directory traversal on Windows.


0.8.0 (2018-02-11)
==================
This is a new minor version release of ripgrep that satisfies several popular
feature requests (config files, search compressed files, true colors), fixes
many bugs and improves the quality of life for ripgrep maintainers. This
release also includes greatly improved documentation in the form of a
[User Guide](GUIDE.md) and a [FAQ](FAQ.md).

This release increases the **minimum supported Rust version** from 1.17 to
1.20.

**BREAKING CHANGES**:

Note that these are all very minor and unlikely to impact most users.

* In order to support configuration files, flag overrides needed to be
  rethought. In some cases, this changed ripgrep's behavior. For example,
  in ripgrep 0.7.1, `rg foo -s -i` will perform a case sensitive search
  since the `-s/--case-sensitive` flag was defined to always take precedence
  over the `-i/--ignore-case` flag, regardless of position. In ripgrep 0.8.0,
  however, the override rule for all flags has changed to "the most recent
  flag wins among competing flags." That is, `rg foo -s -i` now performs a
  case insensitive search.
* The `-M/--max-columns` flag was tweaked so that specifying a value of `0`
  now makes ripgrep behave as if the flag was absent. This makes it possible
  to set a default value in a configuration file and then override it. The
  previous ripgrep behavior was to suppress all matching non-empty lines.
* In all globs, `[^...]` is now equivalent to `[!...]` (indicating class
  negation). Previously, `^` had no special significance in a character class.
  (A sketch of the equivalence follows this list.)
* For **downstream packagers**, the directory hierarchy in ripgrep's archive
  releases has changed. The root directory now only contains the executable,
  README and license. There is now a new directory called `doc` which contains
  the man page (previously in the root), a user guide (new), a FAQ (new) and
  the CHANGELOG (previously not included in releases). The `complete`
  directory remains the same.

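The `[^...]`/`[!...]` equivalence comes from ripgrep's `globset` crate. The sketch below is a small, hypothetical demonstration using that crate directly; the glob patterns and paths are made up for illustration.

```rust
use globset::Glob;

fn main() -> Result<(), globset::Error> {
    // Both character classes mean "any single character except 't'".
    let bang = Glob::new("src/[!t]*.rs")?.compile_matcher();
    let caret = Glob::new("src/[^t]*.rs")?.compile_matcher();

    for path in ["src/main.rs", "src/types.rs"] {
        println!(
            "{}: [!..] -> {}, [^..] -> {}",
            path,
            bang.is_match(path),
            caret.is_match(path)
        );
    }
    Ok(())
}
```
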
Feature enhancements:

* Added or improved file type filtering for
  Apache Avro, C++, GN, Google Closure Templates, Jupyter notebooks, man pages,
  Protocol Buffers, Smarty and Web IDL.
* [FEATURE #196](https://github.com/BurntSushi/ripgrep/issues/196):
  Support a configuration file. See
  [the new user guide](GUIDE.md#configuration-file)
  for details.
* [FEATURE #261](https://github.com/BurntSushi/ripgrep/issues/261):
  Add extended or "true" color support. Works in Windows 10!
  [See the FAQ for details.](FAQ.md#colors)
* [FEATURE #539](https://github.com/BurntSushi/ripgrep/issues/539):
  Search gzip, bzip2, lzma or xz files when given the `-z/--search-zip` flag.
  (A sketch of the decompress-then-search approach follows this list.)
* [FEATURE #544](https://github.com/BurntSushi/ripgrep/issues/544):
  Add support for line number alignment via a new `--line-number-width` flag.
* [FEATURE #654](https://github.com/BurntSushi/ripgrep/pull/654):
  Support Linuxbrew in ripgrep's Homebrew tap.
* [FEATURE #673](https://github.com/BurntSushi/ripgrep/issues/673):
  Bring back `.rgignore` files. (A higher-precedence, application-specific
  version of `.ignore`.)
* [FEATURE #676](https://github.com/BurntSushi/ripgrep/issues/676):
  Provide ARM binaries. **WARNING:** This will be provided on a best effort
  basis.
* [FEATURE #709](https://github.com/BurntSushi/ripgrep/issues/709):
  Suggest the `-F/--fixed-strings` flag on a regex syntax error.
* [FEATURE #740](https://github.com/BurntSushi/ripgrep/issues/740):
  Add a `--passthru` flag that causes ripgrep to print every line it reads.
* [FEATURE #785](https://github.com/BurntSushi/ripgrep/pull/785):
  Overhaul documentation. Cleaned up README, added user guide and FAQ.
* [FEATURE 7f5c07](https://github.com/BurntSushi/ripgrep/commit/7f5c07434be92103b5bf7e216b9c7494aed2d8cb):
  Add hidden flags for convenient overrides (e.g., `--no-text`).

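For compressed files, `-z` shells out to the relevant decompression tool and searches its output. The sketch below is a simplified, hypothetical version of that idea for a single gzip file: it assumes a `gzip` binary on `PATH`, and the file name and pattern are placeholders.

```rust
use std::io::{BufRead, BufReader};
use std::process::{Command, Stdio};

fn main() -> std::io::Result<()> {
    // Placeholders: the archive to search and the literal pattern to find.
    let path = "logs.txt.gz";
    let pattern = "error";

    // Decompress to stdout with the external gzip binary and search the
    // resulting stream line by line.
    let mut child = Command::new("gzip")
        .args(["-d", "-c", path])
        .stdout(Stdio::piped())
        .spawn()?;

    let stdout = child.stdout.take().expect("stdout was piped");
    for (i, line) in BufReader::new(stdout).lines().enumerate() {
        let line = line?;
        if line.contains(pattern) {
            println!("{}:{}:{}", path, i + 1, line);
        }
    }
    child.wait()?;
    Ok(())
}
```
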
Bug fixes:

* [BUG #553](https://github.com/BurntSushi/ripgrep/issues/553):
  Permit flags to be repeated.
* [BUG #633](https://github.com/BurntSushi/ripgrep/issues/633):
  Fix a bug where ripgrep would panic on Windows while following symlinks.
* [BUG #649](https://github.com/BurntSushi/ripgrep/issues/649):
  Fix handling of `!**/` in `.gitignore`.
* [BUG #663](https://github.com/BurntSushi/ripgrep/issues/663):
  **BREAKING CHANGE:** Support `[^...]` glob syntax (as identical to `[!...]`).
* [BUG #693](https://github.com/BurntSushi/ripgrep/issues/693):
  Don't display context separators when not printing matches.
* [BUG #705](https://github.com/BurntSushi/ripgrep/issues/705):
  Fix a bug that prevented ripgrep from searching OneDrive directories.
* [BUG #717](https://github.com/BurntSushi/ripgrep/issues/717):
  Improve `--smart-case` uppercase character detection.
* [BUG #725](https://github.com/BurntSushi/ripgrep/issues/725):
  Clarify that globs do not override explicitly given paths to search.
* [BUG #742](https://github.com/BurntSushi/ripgrep/pull/742):
  Write ANSI reset code as `\x1B[0m` instead of `\x1B[m`.
* [BUG #747](https://github.com/BurntSushi/ripgrep/issues/747):
  Remove `yarn.lock` from YAML file type.
* [BUG #760](https://github.com/BurntSushi/ripgrep/issues/760):
  ripgrep can now search `/sys/devices/system/cpu/vulnerabilities/*` files.
* [BUG #761](https://github.com/BurntSushi/ripgrep/issues/761):
  Fix handling of gitignore patterns that contain a `/`.
* [BUG #776](https://github.com/BurntSushi/ripgrep/pull/776):
  **BREAKING CHANGE:** `--max-columns=0` now disables the limit.
* [BUG #779](https://github.com/BurntSushi/ripgrep/issues/779):
  Clarify documentation for `--files-without-match`.
* [BUG #780](https://github.com/BurntSushi/ripgrep/issues/780),
  [BUG #781](https://github.com/BurntSushi/ripgrep/issues/781):
  Fix bug where ripgrep missed some matching lines.

Maintenance fixes:

* [MAINT #772](https://github.com/BurntSushi/ripgrep/pull/772):
  Drop `env_logger` in favor of a simpler logger to avoid many new
  dependencies. (A sketch of such a logger follows this list.)
* [MAINT #772](https://github.com/BurntSushi/ripgrep/pull/772):
  Add git revision hash to ripgrep's version string.
* [MAINT #772](https://github.com/BurntSushi/ripgrep/pull/772):
  (Seemingly) improve compile times.
* [MAINT #776](https://github.com/BurntSushi/ripgrep/pull/776):
  Automatically generate man page during build.
* [MAINT #786](https://github.com/BurntSushi/ripgrep/pull/786):
  Remove use of `unsafe` in `globset`. :tada:
* [MAINT e9d448](https://github.com/BurntSushi/ripgrep/commit/e9d448e93bb4e1fb3b0c1afc29adb5af6ed5283d):
  Add an issue template (has already drastically improved bug reports).
* [MAINT ae2d03](https://github.com/BurntSushi/ripgrep/commit/ae2d036dd4ba2a46acac9c2d77c32e7c667eb850):
  Remove the `compile` script.

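A bare-bones logger of the kind referred to in #772 needs nothing beyond the `log` crate's `Log` trait. The sketch below is a hypothetical minimal version, not ripgrep's actual logger: it simply prints every enabled record to stderr.

```rust
use log::{Level, LevelFilter, Log, Metadata, Record, SetLoggerError};

/// A tiny logger that writes everything to stderr, with no environment
/// parsing and no dependencies beyond the `log` facade itself.
struct StderrLogger;

impl Log for StderrLogger {
    fn enabled(&self, metadata: &Metadata) -> bool {
        metadata.level() <= Level::Debug
    }

    fn log(&self, record: &Record) {
        if self.enabled(record.metadata()) {
            eprintln!("{}: {}: {}", record.level(), record.target(), record.args());
        }
    }

    fn flush(&self) {}
}

static LOGGER: StderrLogger = StderrLogger;

fn init() -> Result<(), SetLoggerError> {
    log::set_logger(&LOGGER).map(|()| log::set_max_level(LevelFilter::Debug))
}

fn main() {
    init().expect("logger already set");
    log::info!("hello from the minimal logger");
}
```
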
Friends of ripgrep:

I'd like to extend my gratitude to
[@balajisivaraman](https://github.com/balajisivaraman)
for their recent hard work in a number of areas, and in particular, for
implementing the "search compressed files" feature. Their work in sketching out
a specification for that and other work has been exemplary.

Thanks
[@balajisivaraman](https://github.com/balajisivaraman)!


0.7.1 (2017-10-22)
==================
This is a patch release of ripgrep that includes a fix for a very bad
regression introduced in ripgrep 0.7.0.

Bug fixes:

* [BUG #648](https://github.com/BurntSushi/ripgrep/issues/648):
  Fix a bug where it was very easy to exceed standard file descriptor limits.


0.7.0 (2017-10-20)
==================
This is a new minor version release of ripgrep that includes mostly bug fixes.

ripgrep continues to require Rust 1.17, and there are no known breaking changes
introduced in this release.

Feature enhancements:

* Added or improved file type filtering for config & license files, Elm,
  PureScript, Standard ML, sh, systemd and Terraform.
* [FEATURE #593](https://github.com/BurntSushi/ripgrep/pull/593):
  Using both `-o/--only-matching` and `-r/--replace` does the right thing.

Bug fixes:

* [BUG #200](https://github.com/BurntSushi/ripgrep/issues/200):
  ripgrep will stop when its pipe is closed. (A sketch of the technique
  appears at the end of this section.)
* [BUG #402](https://github.com/BurntSushi/ripgrep/issues/402):
  Fix context printing bug when the `-m/--max-count` flag is used.
* [BUG #521](https://github.com/BurntSushi/ripgrep/issues/521):
  Fix interaction between `-r/--replace` and terminal colors.
* [BUG #559](https://github.com/BurntSushi/ripgrep/issues/559):
  Ignore test that tried reading a non-UTF-8 file path on macOS.
* [BUG #599](https://github.com/BurntSushi/ripgrep/issues/599):
  Fix color escapes on empty matches.
* [BUG #600](https://github.com/BurntSushi/ripgrep/issues/600):
  Avoid expensive (on Windows) file handle check when using `--files`.
* [BUG #618](https://github.com/BurntSushi/ripgrep/issues/618):
  Clarify installation instructions for Ubuntu users.
* [BUG #633](https://github.com/BurntSushi/ripgrep/issues/633):
  Faster symlink loop checking on Windows.

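One common way to stop when the output pipe is closed (#200 above) is to treat a `BrokenPipe` write error as a signal to exit quietly rather than as a failure. The sketch below is a hypothetical standalone illustration of that pattern, not ripgrep's actual printer code.

```rust
use std::io::{self, ErrorKind, Write};

fn main() {
    let stdout = io::stdout();
    let mut out = stdout.lock();

    for i in 0.. {
        // Piping this program into e.g. `head -n 5` closes the pipe early;
        // the resulting BrokenPipe error is treated as a clean shutdown.
        if let Err(err) = writeln!(out, "line {}", i) {
            if err.kind() == ErrorKind::BrokenPipe {
                std::process::exit(0);
            }
            eprintln!("write error: {}", err);
            std::process::exit(2);
        }
    }
}
```
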
0.6.0 (2017-08-23)
==================
This is a new minor version release of ripgrep that includes many bug fixes
and a few new features such as `--iglob` and `-x/--line-regexp`.

Note that this release increases the minimum supported Rust version from 1.12
to 1.17.

Feature enhancements:

* Added or improved file type filtering for BitBake, C++, Cabal, cshtml, Julia,
  Make, msbuild, QMake and Yocto.
* [FEATURE #163](https://github.com/BurntSushi/ripgrep/issues/163):
  Add an `--iglob` flag that is like `-g/--glob`, but matches globs
  case insensitively.
* [FEATURE #520](https://github.com/BurntSushi/ripgrep/pull/518):
  Add `-x/--line-regexp` flag, which requires a match to span an entire line.
  (A sketch of one way to get that behavior follows this list.)
* [FEATURE #551](https://github.com/BurntSushi/ripgrep/pull/551),
  [FEATURE #554](https://github.com/BurntSushi/ripgrep/pull/554):
  `ignore`: add new `matched_path_or_any_parents` method.

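One simple way to get the whole-line matching that `-x/--line-regexp` provides is to wrap the user's pattern so it is anchored at both ends of the line. The sketch below, using the `regex` crate, is a hypothetical illustration of that wrapping, not ripgrep's internal implementation; the pattern and inputs are placeholders.

```rust
use regex::Regex;

fn main() {
    // The pattern a user might pass on the command line.
    let user_pattern = r"fn \w+";

    // Anchor it inside a non-capturing group so alternations like `a|b`
    // stay grouped correctly: `^(?:a|b)$`, not `^a|b$`.
    let whole_line = format!("^(?:{})$", user_pattern);
    let re = Regex::new(&whole_line).unwrap();

    for line in ["fn main", "pub fn main", "fn main() {"] {
        println!("{:?} -> {}", line, re.is_match(line));
    }
}
```
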
Bug fixes:

* [BUG #342](https://github.com/BurntSushi/ripgrep/issues/342):
  Fix invisible text in some PowerShell environments by changing the
  default color scheme on Windows.
* [BUG #413](https://github.com/BurntSushi/ripgrep/issues/413):
  Release binaries on Unix are now `strip`'d by default. This decreases
  binary size by an order of magnitude.
* [BUG #483](https://github.com/BurntSushi/ripgrep/issues/483):
  When `--quiet` is passed, `--files` should be quiet.
* [BUG #488](https://github.com/BurntSushi/ripgrep/pull/488):
  When `--vimgrep` is passed, `--with-filename` should be enabled
  automatically.
* [BUG #493](https://github.com/BurntSushi/ripgrep/issues/493):
  Fix another bug in the implementation of the `-o/--only-matching`
  flag.
* [BUG #499](https://github.com/BurntSushi/ripgrep/pull/499):
  Permit certain flags to override others.
* [BUG #523](https://github.com/BurntSushi/ripgrep/pull/523):
  `wincolor`: Re-fetch Windows console on all calls.
* [BUG #524](https://github.com/BurntSushi/ripgrep/issues/524):
  `--version` now shows enabled compile-time features.
* [BUG #532](https://github.com/BurntSushi/ripgrep/issues/532),
  [BUG #536](https://github.com/BurntSushi/ripgrep/pull/536),
  [BUG #538](https://github.com/BurntSushi/ripgrep/pull/538),
  [BUG #540](https://github.com/BurntSushi/ripgrep/pull/540),
  [BUG #560](https://github.com/BurntSushi/ripgrep/pull/560),
  [BUG #565](https://github.com/BurntSushi/ripgrep/pull/565):
  Improve zsh completion.
* [BUG #578](https://github.com/BurntSushi/ripgrep/pull/578):
  Enable SIMD for `encoding_rs` when appropriate.
* [BUG #580](https://github.com/BurntSushi/ripgrep/issues/580):
  Fix `-w/--word-regexp` in the presence of capturing groups.
* [BUG #581](https://github.com/BurntSushi/ripgrep/issues/581):
  Document that ripgrep may terminate unexpectedly when searching via
  memory maps (which can happen using default settings).

Friends of ripgrep:

I'd like to give a big Thank You to @okdana for their recent hard work on
ripgrep. This includes new features like `--line-regexp`, heroic effort on
zsh auto-completion and thinking through some thorny argv issues with me.

I'd also like to thank @ericbn for their work on improving ripgrep's argv
parsing by allowing some flags to override others.

Thanks @okdana and @ericbn!


0.5.2 (2017-05-11)
==================
Feature enhancements:

* Added or improved file type filtering for Nix.
* [FEATURE #362](https://github.com/BurntSushi/ripgrep/issues/362):
  Add `--regex-size-limit` and `--dfa-size-limit` flags. (A sketch of the
  corresponding `regex` crate settings follows this list.)
* [FEATURE #444](https://github.com/BurntSushi/ripgrep/issues/444):
  Improve error messages for invalid globs.

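The two new limits correspond to settings exposed by Rust's `regex` crate, which ripgrep uses as its default engine. The snippet below is a small, hypothetical demonstration of those settings at the library level; the pattern and the specific byte limits are arbitrary.

```rust
use regex::RegexBuilder;

fn main() {
    // `--regex-size-limit` roughly corresponds to the compiled program size
    // limit, and `--dfa-size-limit` to the lazy DFA cache size limit.
    let result = RegexBuilder::new(r"(foo|bar|baz){1,100}")
        .size_limit(10 * (1 << 20)) // 10 MiB for the compiled regex
        .dfa_size_limit(5 * (1 << 20)) // 5 MiB for the DFA cache
        .build();

    match result {
        Ok(re) => println!("compiled, matches: {}", re.is_match("foofoobar")),
        Err(err) => println!("refused to compile: {}", err),
    }
}
```
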
Bug fixes:

* [BUG #442](https://github.com/BurntSushi/ripgrep/issues/442):
  Fix line wrapping in `--help` output.
* [BUG #451](https://github.com/BurntSushi/ripgrep/issues/451):
  Fix bug with duplicate output when using `-o/--only-matching` flag.


0.5.1 (2017-04-09)
==================
Feature enhancements:

* Added or improved file type filtering for vim.
* [FEATURE #34](https://github.com/BurntSushi/ripgrep/issues/34):
  Add a `-o/--only-matching` flag.
* [FEATURE #377](https://github.com/BurntSushi/ripgrep/issues/377):
  Column numbers can now be customized with a color. (The default is
  no color.)
* [FEATURE #419](https://github.com/BurntSushi/ripgrep/issues/419):
  Added `-0` short flag option for `--null`.

Bug fixes:

* [BUG #381](https://github.com/BurntSushi/ripgrep/issues/381):
  Include license text in all subcrates.
* [BUG #418](https://github.com/BurntSushi/ripgrep/issues/418),
  [BUG #426](https://github.com/BurntSushi/ripgrep/issues/426),
  [BUG #439](https://github.com/BurntSushi/ripgrep/issues/439):
  Fix a few bugs with `-h/--help` output.


0.5.0 (2017-03-12)
==================
This is a new minor version release of ripgrep that includes one minor breaking
change, bug fixes and several new features, including support for text
encodings other than UTF-8.

A notable accomplishment with respect to Rust is that ripgrep proper now only
contains a single `unsafe` use (for accessing the contents of a memory map).

The **breaking change** is:

* [FEATURE #380](https://github.com/BurntSushi/ripgrep/issues/380):
  Line numbers are now hidden by default when ripgrep is printing to a tty
  **and** the only thing searched is stdin.

Feature enhancements:

* Added or improved file type filtering for Ceylon, CSS, Elixir, HTML, log,
  SASS, SVG and Twig.
* [FEATURE #1](https://github.com/BurntSushi/ripgrep/issues/1):
  Add support for additional text encodings, including automatic detection for
  UTF-16 via BOM sniffing. Explicit text encoding support with the
  `-E/--encoding` flag was also added for latin-1, GBK, EUC-JP
  and Shift_JIS, among others. The full list can be found here:
  https://encoding.spec.whatwg.org/#concept-encoding-get
  (A sketch of BOM sniffing follows this list.)
* [FEATURE #129](https://github.com/BurntSushi/ripgrep/issues/129):
  Add a new `-M/--max-columns` flag that omits lines longer than the given
  number of bytes. (Disabled by default!)
* [FEATURE #369](https://github.com/BurntSushi/ripgrep/issues/369):
  A new flag, `--max-filesize`, was added for limiting searches to files with
  a maximum file size.

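BOM sniffing for UTF-16 means peeking at the first two bytes of the input: `FF FE` marks UTF-16LE and `FE FF` marks UTF-16BE. The sketch below is a hypothetical, standard-library-only illustration of the idea (ripgrep itself relies on the `encoding_rs` crate); the sample bytes are hard-coded rather than read from a file.

```rust
fn main() {
    // "hi" encoded as UTF-16LE with a leading byte order mark.
    let bytes: &[u8] = &[0xFF, 0xFE, b'h', 0x00, b'i', 0x00];

    let decoded = match bytes {
        [0xFF, 0xFE, rest @ ..] => {
            // UTF-16LE BOM: decode the remaining bytes as little-endian u16s.
            let units: Vec<u16> = rest
                .chunks_exact(2)
                .map(|c| u16::from_le_bytes([c[0], c[1]]))
                .collect();
            String::from_utf16_lossy(&units)
        }
        [0xFE, 0xFF, rest @ ..] => {
            // UTF-16BE BOM: same idea, big-endian.
            let units: Vec<u16> = rest
                .chunks_exact(2)
                .map(|c| u16::from_be_bytes([c[0], c[1]]))
                .collect();
            String::from_utf16_lossy(&units)
        }
        // No recognized BOM: fall back to treating the input as UTF-8.
        _ => String::from_utf8_lossy(bytes).into_owned(),
    };

    println!("decoded: {:?}", decoded); // decoded: "hi"
}
```
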
Bug fixes:

* [BUG #52](https://github.com/BurntSushi/ripgrep/issues/52),
  [BUG #311](https://github.com/BurntSushi/ripgrep/issues/311):
  Tweak how binary files are detected and handled. (We are slightly less
  conservative and will no longer use memory without bound.)
* [BUG #326](https://github.com/BurntSushi/ripgrep/issues/326):
  When the `--files` flag is given, we should never attempt to parse positional
  arguments as regexes.
* [BUG #327](https://github.com/BurntSushi/ripgrep/issues/327):
  Permit the `--heading` flag to override the `--no-heading` flag.
* [BUG #340](https://github.com/BurntSushi/ripgrep/pull/340):
  Clarify that the `-u/--unrestricted` flags are aliases.
* [BUG #343](https://github.com/BurntSushi/ripgrep/pull/343):
  Global git ignore config should use `$HOME/.config/git/ignore` and not
  `$HOME/git/ignore`.
* [BUG #345](https://github.com/BurntSushi/ripgrep/pull/345):
  Clarify docs for the `-g/--glob` flag.
* [BUG #381](https://github.com/BurntSushi/ripgrep/issues/381):
  Add license files to each sub-crate.
* [BUG #383](https://github.com/BurntSushi/ripgrep/issues/383):
  Use latest version of clap (for argv parsing).
* [BUG #392](https://github.com/BurntSushi/ripgrep/issues/391):
  Fix translation of set globs (e.g., `{foo,bar,quux}`) to regexes.
* [BUG #401](https://github.com/BurntSushi/ripgrep/pull/401):
  Add PowerShell completion file to Windows release.
* [BUG #405](https://github.com/BurntSushi/ripgrep/issues/405):
  Fix bug when excluding absolute paths with the `-g/--glob` flag.


0.4.0
=====
This is a new minor version release of ripgrep that includes a couple very

Cargo.lock (generated): 899 lines changed, @@ -1,370 +1,599 @@

(The old lockfile entries for ripgrep 0.4.0, with clap 2.21.1, regex 0.2.1,
grep 0.1.6, ignore 0.1.8, globset 0.1.4, walkdir 1.0.7, termcolor 0.3.1 and
wincolor 0.1.3, are replaced by the new lockfile for ripgrep 12.0.0, with clap
2.33.0, regex 1.3.5, walkdir 2.3.1, termcolor 1.1.0, ignore 0.4.12, globset
0.4.5, the grep 0.2.5 facade over the grep-cli, grep-matcher, grep-pcre2,
grep-printer, grep-regex and grep-searcher subcrates, plus new dependencies
such as pcre2, bstr, serde, serde_json and jemallocator.)
|
||||
"checksum libc 0.2.21 (registry+https://github.com/rust-lang/crates.io-index)" = "88ee81885f9f04bff991e306fea7c1c60a5f0f9e409e99f6b40e3311a3363135"
|
||||
"checksum log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)" = "5141eca02775a762cc6cd564d8d2c50f67c0ea3a372cbf1c51592b3e029e10ad"
|
||||
"checksum memchr 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "1dbccc0e46f1ea47b9f17e6d67c5a96bd27030519c519c9c91327e31275a47b4"
|
||||
"checksum memmap 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)" = "46f3c7359028b31999287dae4e5047ddfe90a23b7dca2282ce759b491080c99b"
|
||||
"checksum num_cpus 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "a18c392466409c50b87369414a2680c93e739aedeb498eb2bff7d7eb569744e2"
|
||||
"checksum regex 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "4278c17d0f6d62dfef0ab00028feb45bd7d2102843f80763474eeb1be8a10c01"
|
||||
"checksum regex-syntax 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "2f9191b1f57603095f105d317e375d19b1c9c5c3185ea9633a99a6dcbed04457"
|
||||
"checksum same-file 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "d931a44fdaa43b8637009e7632a02adc4f2b2e0733c08caa4cf00e8da4a117a7"
|
||||
"checksum simd 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "63b5847c2d766ca7ce7227672850955802fabd779ba616aeabead4c2c3877023"
|
||||
"checksum strsim 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b4d15c810519a91cf877e7e36e63fe068815c678181439f2f29e2562147c3694"
|
||||
"checksum term_size 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "07b6c1ac5b3fffd75073276bca1ceed01f67a28537097a2a9539e116e50fb21a"
|
||||
"checksum thread-id 3.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "4437c97558c70d129e40629a5b385b3fb1ffac301e63941335e4d354081ec14a"
|
||||
"checksum thread_local 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "c85048c6260d17cf486ceae3282d9fb6b90be220bf5b28c400f5485ffc29f0c7"
|
||||
"checksum unicode-segmentation 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "18127285758f0e2c6cf325bb3f3d138a12fee27de4f23e146cd6a179f26c2cf3"
|
||||
"checksum unicode-width 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "bf3a113775714a22dcb774d8ea3655c53a32debae63a063acc00a91cc586245f"
|
||||
"checksum unreachable 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "1f2ae5ddb18e1c92664717616dd9549dde73f539f01bd7b77c2edb2446bdff91"
|
||||
"checksum utf8-ranges 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "662fab6525a98beff2921d7f61a39e7d59e0b425ebc7d0d9e66d316e55124122"
|
||||
"checksum vec_map 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "f8cdc8b93bd0198ed872357fb2e667f7125646b1762f16d60b2c96350d361897"
|
||||
"checksum void 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "6a02e4885ed3bc0f2de90ea6dd45ebcbb66dacffe03547fadbb0eeae2770887d"
|
||||
"checksum walkdir 1.0.7 (registry+https://github.com/rust-lang/crates.io-index)" = "bb08f9e670fab86099470b97cd2b252d6527f0b3cc1401acdb595ffc9dd288ff"
|
||||
"checksum winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)" = "167dc9d6949a9b857f3451275e911c3f44255842c1f7a76f33c55103a909087a"
|
||||
"checksum winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "2d315eee3b34aca4797b2da6b13ed88266e6d612562a0c46390af8299fc699bc"
|
||||
"checksum aho-corasick 0.7.10 (registry+https://github.com/rust-lang/crates.io-index)" = "8716408b8bc624ed7f65d223ddb9ac2d044c0547b6fa4b0d554f3a9540496ada"
|
||||
"checksum atty 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)" = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8"
|
||||
"checksum autocfg 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "f8aac770f1885fd7e387acedd76065302551364496e46b3dd00860b2f8359b9d"
|
||||
"checksum base64 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b41b7ea54a0c9d92199de89e20e58d49f02f8e699814ef3fdf266f6f748d15c7"
|
||||
"checksum bitflags 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693"
|
||||
"checksum bstr 0.2.12 (registry+https://github.com/rust-lang/crates.io-index)" = "2889e6d50f394968c8bf4240dc3f2a7eb4680844d27308f798229ac9d4725f41"
|
||||
"checksum bytecount 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b0017894339f586ccb943b01b9555de56770c11cda818e7e3d8bd93f4ed7f46e"
|
||||
"checksum byteorder 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "08c48aae112d48ed9f069b33538ea9e3e90aa263cfa3d1c24309612b1f7472de"
|
||||
"checksum cc 1.0.50 (registry+https://github.com/rust-lang/crates.io-index)" = "95e28fa049fda1c330bcf9d723be7663a899c4679724b34c81e9f5a326aab8cd"
|
||||
"checksum cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)" = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822"
|
||||
"checksum clap 2.33.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5067f5bb2d80ef5d68b4c87db81601f0b75bca627bc2ef76b141d7b846a3c6d9"
|
||||
"checksum crossbeam-channel 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "cced8691919c02aac3cb0a1bc2e9b73d89e832bf9a06fc579d4e71b68a2da061"
|
||||
"checksum crossbeam-utils 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)" = "c3c7c73a2d1e9fc0886a08b93e98eb643461230d5f1925e4036204d5f2e261a8"
|
||||
"checksum encoding_rs 0.8.22 (registry+https://github.com/rust-lang/crates.io-index)" = "cd8d03faa7fe0c1431609dfad7bbe827af30f82e1e2ae6f7ee4fca6bd764bc28"
|
||||
"checksum encoding_rs_io 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)" = "1cc3c5651fb62ab8aa3103998dade57efdd028544bd300516baa31840c252a83"
|
||||
"checksum fnv 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)" = "2fad85553e09a6f881f739c29f0b00b0f01357c743266d478b68951ce23285f3"
|
||||
"checksum fs_extra 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5f2a4a2034423744d2cc7ca2068453168dcdb82c438419e639a26bd87839c674"
|
||||
"checksum glob 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "9b919933a397b79c37e33b77bb2aa3dc8eb6e165ad809e58ff75bc7db2e34574"
|
||||
"checksum hermit-abi 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)" = "1010591b26bbfe835e9faeabeb11866061cc7dcebffd56ad7d0942d0e61aefd8"
|
||||
"checksum itoa 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)" = "b8b7a7c0c47db5545ed3fef7468ee7bb5b74691498139e4b3f6a20685dc6dd8e"
|
||||
"checksum jemalloc-sys 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "0d3b9f3f5c9b31aa0f5ed3260385ac205db665baa41d49bb8338008ae94ede45"
|
||||
"checksum jemallocator 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "43ae63fcfc45e99ab3d1b29a46782ad679e98436c3169d15a167a1108a724b69"
|
||||
"checksum lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"
|
||||
"checksum libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)" = "eb147597cdf94ed43ab7a9038716637d2d1bf2bc571da995d0028dec06bd3018"
|
||||
"checksum log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)" = "14b6052be84e6b71ab17edffc2eeabf5c2c3ae1fdb464aae35ac50c67a44e1f7"
|
||||
"checksum maybe-uninit 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "60302e4db3a61da70c0cb7991976248362f30319e88850c487b9b95bbf059e00"
|
||||
"checksum memchr 2.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "3728d817d99e5ac407411fa471ff9800a778d88a24685968b36824eaf4bee400"
|
||||
"checksum memmap 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "6585fd95e7bb50d6cc31e20d4cf9afb4e2ba16c5846fc76793f11218da9c475b"
|
||||
"checksum num_cpus 1.12.0 (registry+https://github.com/rust-lang/crates.io-index)" = "46203554f085ff89c235cd12f7075f3233af9b11ed7c9e16dfe2560d03313ce6"
|
||||
"checksum packed_simd 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "a85ea9fc0d4ac0deb6fe7911d38786b32fc11119afd9e9d38b84ff691ce64220"
|
||||
"checksum pcre2 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "85b30f2f69903b439dd9dc9e824119b82a55bf113b29af8d70948a03c1b11ab1"
|
||||
"checksum pcre2-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "876c72d05059d23a84bd9fcdc3b1d31c50ea7fe00fe1522b4e68cd3608db8d5b"
|
||||
"checksum pkg-config 0.3.17 (registry+https://github.com/rust-lang/crates.io-index)" = "05da548ad6865900e60eaba7f589cc0783590a92e940c26953ff81ddbab2d677"
|
||||
"checksum proc-macro2 1.0.9 (registry+https://github.com/rust-lang/crates.io-index)" = "6c09721c6781493a2a492a96b5a5bf19b65917fe6728884e7c44dd0c60ca3435"
|
||||
"checksum quote 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "2bdc6c187c65bca4260c9011c9e3132efe4909da44726bad24cf7572ae338d7f"
|
||||
"checksum regex 1.3.5 (registry+https://github.com/rust-lang/crates.io-index)" = "8900ebc1363efa7ea1c399ccc32daed870b4002651e0bed86e72d501ebbe0048"
|
||||
"checksum regex-automata 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)" = "ae1ded71d66a4a97f5e961fd0cb25a5f366a42a41570d16a763a69c092c26ae4"
|
||||
"checksum regex-syntax 0.6.17 (registry+https://github.com/rust-lang/crates.io-index)" = "7fe5bd57d1d7414c6b5ed48563a2c855d995ff777729dcd91c369ec7fea395ae"
|
||||
"checksum ryu 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "535622e6be132bccd223f4bb2b8ac8d53cda3c7a6394944d3b2b33fb974f9d76"
|
||||
"checksum same-file 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)" = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502"
|
||||
"checksum serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)" = "414115f25f818d7dfccec8ee535d76949ae78584fc4f79a6f45a904bf8ab4449"
|
||||
"checksum serde_derive 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)" = "128f9e303a5a29922045a830221b8f78ec74a5f544944f3d5984f8ec3895ef64"
|
||||
"checksum serde_json 1.0.48 (registry+https://github.com/rust-lang/crates.io-index)" = "9371ade75d4c2d6cb154141b9752cf3781ec9c05e0e5cf35060e1e70ee7b9c25"
|
||||
"checksum strsim 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)" = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a"
|
||||
"checksum syn 1.0.16 (registry+https://github.com/rust-lang/crates.io-index)" = "123bd9499cfb380418d509322d7a6d52e5315f064fe4b3ad18a53d6b92c07859"
|
||||
"checksum termcolor 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "bb6bfa289a4d7c5766392812c0a1f4c1ba45afa1ad47803c11e1f407d846d75f"
|
||||
"checksum textwrap 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d326610f408c7a4eb6f51c37c330e496b08506c9457c9d34287ecc38809fb060"
|
||||
"checksum thread_local 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "d40c6d1b69745a6ec6fb1ca717914848da4b44ae29d9b3080cbee91d72a69b14"
|
||||
"checksum unicode-width 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)" = "caaa9d531767d1ff2150b9332433f32a24622147e5ebb1f26409d5da67afd479"
|
||||
"checksum unicode-xid 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "826e7639553986605ec5979c7dd957c7895e93eabed50ab2ffa7f6128a75097c"
|
||||
"checksum walkdir 2.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "777182bc735b6424e1a57516d35ed72cb8019d85c8c9bf536dccb3445c1a2f7d"
|
||||
"checksum winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)" = "8093091eeb260906a183e6ae1abdba2ef5ef2257a21801128899c3fc699229c6"
|
||||
"checksum winapi-i686-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
|
||||
"checksum winapi-util 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "4ccfbf554c6ad11084fb7517daca16cfdcaccbdadba4fc336f032a8b12c2ad80"
|
||||
"checksum winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
|
||||
|
||||
109
Cargo.toml
109
Cargo.toml
@@ -1,10 +1,11 @@
|
||||
[package]
|
||||
name = "ripgrep"
|
||||
version = "0.4.0" #:version
|
||||
version = "12.0.0" #:version
|
||||
authors = ["Andrew Gallant <jamslam@gmail.com>"]
|
||||
description = """
|
||||
Line oriented search tool using Rust's regex library. Combines the raw
|
||||
performance of grep with the usability of the silver searcher.
|
||||
ripgrep is a line-oriented search tool that recursively searches your current
|
||||
directory for a regex pattern while respecting your gitignore rules. ripgrep
|
||||
has first class support on Windows, macOS and Linux.
|
||||
"""
|
||||
documentation = "https://github.com/BurntSushi/ripgrep"
|
||||
homepage = "https://github.com/BurntSushi/ripgrep"
|
||||
@@ -12,44 +13,100 @@ repository = "https://github.com/BurntSushi/ripgrep"
|
||||
readme = "README.md"
|
||||
keywords = ["regex", "grep", "egrep", "search", "pattern"]
|
||||
categories = ["command-line-utilities", "text-processing"]
|
||||
license = "Unlicense/MIT"
|
||||
license = "Unlicense OR MIT"
|
||||
exclude = ["HomebrewFormula"]
|
||||
build = "build.rs"
|
||||
autotests = false
|
||||
edition = "2018"
|
||||
|
||||
[badges]
|
||||
travis-ci = { repository = "BurntSushi/ripgrep" }
|
||||
appveyor = { repository = "BurntSushi/ripgrep" }
|
||||
|
||||
[[bin]]
|
||||
bench = false
|
||||
path = "src/main.rs"
|
||||
path = "crates/core/main.rs"
|
||||
name = "rg"
|
||||
|
||||
[[test]]
|
||||
name = "integration"
|
||||
path = "tests/tests.rs"
|
||||
|
||||
[workspace]
|
||||
members = [
|
||||
"crates/globset",
|
||||
"crates/grep",
|
||||
"crates/cli",
|
||||
"crates/matcher",
|
||||
"crates/pcre2",
|
||||
"crates/printer",
|
||||
"crates/regex",
|
||||
"crates/searcher",
|
||||
"crates/ignore",
|
||||
]
|
||||
|
||||
[dependencies]
|
||||
atty = "0.2.2"
|
||||
bytecount = "0.1.4"
|
||||
clap = "2.20.5"
|
||||
encoding_rs = "0.5.0"
|
||||
env_logger = { version = "0.4", default-features = false }
|
||||
grep = { version = "0.1.5", path = "grep" }
|
||||
ignore = { version = "0.1.7", path = "ignore" }
|
||||
lazy_static = "0.2"
|
||||
libc = "0.2"
|
||||
log = "0.3"
|
||||
memchr = "1"
|
||||
memmap = "0.5"
|
||||
num_cpus = "1"
|
||||
regex = "0.2.1"
|
||||
same-file = "0.1.1"
|
||||
termcolor = { version = "0.3.0", path = "termcolor" }
|
||||
bstr = "0.2.12"
|
||||
grep = { version = "0.2.5", path = "crates/grep" }
|
||||
ignore = { version = "0.4.12", path = "crates/ignore" }
|
||||
lazy_static = "1.1.0"
|
||||
log = "0.4.5"
|
||||
num_cpus = "1.8.0"
|
||||
regex = "1.3.5"
|
||||
serde_json = "1.0.23"
|
||||
termcolor = "1.1.0"
|
||||
|
||||
[dependencies.clap]
|
||||
version = "2.33.0"
|
||||
default-features = false
|
||||
features = ["suggestions"]
|
||||
|
||||
[target.'cfg(all(target_env = "musl", target_pointer_width = "64"))'.dependencies.jemallocator]
|
||||
version = "0.3.0"
|
||||
|
||||
[build-dependencies]
|
||||
clap = "2.18"
|
||||
lazy_static = "0.2"
|
||||
lazy_static = "1.1.0"
|
||||
|
||||
[build-dependencies.clap]
|
||||
version = "2.33.0"
|
||||
default-features = false
|
||||
features = ["suggestions"]
|
||||
|
||||
[dev-dependencies]
|
||||
serde = "1.0.77"
|
||||
serde_derive = "1.0.77"
|
||||
walkdir = "2"
|
||||
|
||||
[features]
|
||||
avx-accel = ["bytecount/avx-accel"]
|
||||
simd-accel = ["bytecount/simd-accel", "regex/simd-accel"]
|
||||
simd-accel = ["grep/simd-accel"]
|
||||
pcre2 = ["grep/pcre2"]
|
||||
|
||||
[profile.release]
|
||||
debug = true
|
||||
debug = 1
|
||||
|
||||
[package.metadata.deb]
|
||||
features = ["pcre2"]
|
||||
section = "utils"
|
||||
assets = [
|
||||
["target/release/rg", "usr/bin/", "755"],
|
||||
["COPYING", "usr/share/doc/ripgrep/", "644"],
|
||||
["LICENSE-MIT", "usr/share/doc/ripgrep/", "644"],
|
||||
["UNLICENSE", "usr/share/doc/ripgrep/", "644"],
|
||||
["CHANGELOG.md", "usr/share/doc/ripgrep/CHANGELOG", "644"],
|
||||
["README.md", "usr/share/doc/ripgrep/README", "644"],
|
||||
["FAQ.md", "usr/share/doc/ripgrep/FAQ", "644"],
|
||||
# The man page is automatically generated by ripgrep's build process, so
|
||||
# this file isn't actually commited. Instead, to create a dpkg, either
|
||||
# create a deployment/deb directory and copy the man page to it, or use the
|
||||
# 'ci/build-deb' script.
|
||||
["deployment/deb/rg.1", "usr/share/man/man1/rg.1", "644"],
|
||||
# Similarly for shell completions.
|
||||
["deployment/deb/rg.bash", "usr/share/bash-completion/completions/rg", "644"],
|
||||
["deployment/deb/rg.fish", "usr/share/fish/vendor_completions.d/rg.fish", "644"],
|
||||
["deployment/deb/_rg", "usr/share/zsh/vendor-completions/", "644"],
|
||||
]
|
||||
extended-description = """\
|
||||
ripgrep (rg) recursively searches your current directory for a regex pattern.
|
||||
By default, ripgrep will respect your .gitignore and automatically skip hidden
|
||||
files/directories and binary files.
|
||||
"""
|
||||
|
||||
11
Cross.toml
Normal file
11
Cross.toml
Normal file
@@ -0,0 +1,11 @@
|
||||
[target.x86_64-unknown-linux-musl]
|
||||
image = "burntsushi/cross:x86_64-unknown-linux-musl"
|
||||
|
||||
[target.i686-unknown-linux-gnu]
|
||||
image = "burntsushi/cross:i686-unknown-linux-gnu"
|
||||
|
||||
[target.mips64-unknown-linux-gnuabi64]
|
||||
image = "burntsushi/cross:mips64-unknown-linux-gnuabi64"
|
||||
|
||||
[target.arm-unknown-linux-gnueabihf]
|
||||
image = "burntsushi/cross:arm-unknown-linux-gnueabihf"
|
||||
800
GUIDE.md
Normal file
800
GUIDE.md
Normal file
@@ -0,0 +1,800 @@
|
||||
## User Guide
|
||||
|
||||
This guide is intended to give an elementary description of ripgrep and an
|
||||
overview of its capabilities. This guide assumes that ripgrep is
|
||||
[installed](README.md#installation)
|
||||
and that readers have passing familiarity with using command line tools. This
|
||||
also assumes a Unix-like system, although most commands are probably easily
|
||||
translatable to any command line shell environment.
|
||||
|
||||
|
||||
### Table of Contents
|
||||
|
||||
* [Basics](#basics)
|
||||
* [Recursive search](#recursive-search)
|
||||
* [Automatic filtering](#automatic-filtering)
|
||||
* [Manual filtering: globs](#manual-filtering-globs)
|
||||
* [Manual filtering: file types](#manual-filtering-file-types)
|
||||
* [Replacements](#replacements)
|
||||
* [Configuration file](#configuration-file)
|
||||
* [File encoding](#file-encoding)
|
||||
* [Binary data](#binary-data)
|
||||
* [Common options](#common-options)
|
||||
|
||||
|
||||
### Basics
|
||||
|
||||
ripgrep is a command line tool that searches your files for patterns that
|
||||
you give it. ripgrep behaves as if reading each file line by line. If a line
|
||||
matches the pattern provided to ripgrep, then that line will be printed. If a
|
||||
line does not match the pattern, then the line is not printed.
|
||||
|
||||
The best way to see how this works is with an example. To show an example, we
|
||||
need something to search. Let's try searching ripgrep's source code. First
|
||||
grab a ripgrep source archive from
|
||||
https://github.com/BurntSushi/ripgrep/archive/0.7.1.zip
|
||||
and extract it:
|
||||
|
||||
```
|
||||
$ curl -LO https://github.com/BurntSushi/ripgrep/archive/0.7.1.zip
|
||||
$ unzip 0.7.1.zip
|
||||
$ cd ripgrep-0.7.1
|
||||
$ ls
|
||||
benchsuite grep tests Cargo.toml LICENSE-MIT
|
||||
ci ignore wincolor CHANGELOG.md README.md
|
||||
complete pkg appveyor.yml compile snapcraft.yaml
|
||||
doc src build.rs COPYING UNLICENSE
|
||||
globset termcolor Cargo.lock HomebrewFormula
|
||||
```
|
||||
|
||||
Let's try our first search by looking for all occurrences of the word `fast`
|
||||
in `README.md`:
|
||||
|
||||
```
|
||||
$ rg fast README.md
|
||||
75: faster than both. (N.B. It is not, strictly speaking, a "drop-in" replacement
|
||||
88: color and full Unicode support. Unlike GNU grep, `ripgrep` stays fast while
|
||||
119:### Is it really faster than everything else?
|
||||
124:Summarizing, `ripgrep` is fast because:
|
||||
129: optimizations to make searching very fast.
|
||||
```
|
||||
|
||||
(**Note:** If you see an error message from ripgrep saying that it didn't
|
||||
search any files, then re-run ripgrep with the `--debug` flag. One likely cause
|
||||
of this is that you have a `*` rule in a `$HOME/.gitignore` file.)
|
||||
|
||||
So what happened here? ripgrep read the contents of `README.md`, and for each
|
||||
line that contained `fast`, ripgrep printed it to your terminal. ripgrep also
|
||||
included the line number for each line by default. If your terminal supports
|
||||
colors, then your output might actually look something like this screenshot:
|
||||
|
||||
[](https://burntsushi.net/stuff/ripgrep-guide-sample.png)
|
||||
|
||||
In this example, we searched for something called a "literal" string. This
|
||||
means that our pattern was just some normal text that we asked ripgrep to
|
||||
find. But ripgrep supports the ability to specify patterns via [regular
|
||||
expressions](https://en.wikipedia.org/wiki/Regular_expression). As an example,
|
||||
what if we wanted to find all lines have a word that contains `fast` followed
|
||||
by some number of other letters?
|
||||
|
||||
```
|
||||
$ rg 'fast\w+' README.md
|
||||
75: faster than both. (N.B. It is not, strictly speaking, a "drop-in" replacement
|
||||
119:### Is it really faster than everything else?
|
||||
```
|
||||
|
||||
In this example, we used the pattern `fast\w+`. This pattern tells ripgrep to
|
||||
look for any lines containing the letters `fast` followed by *one or more*
|
||||
word-like characters. Namely, `\w` matches characters that compose words (like
|
||||
`a` and `L` but unlike `.` and ` `). The `+` after the `\w` means, "match the
|
||||
previous pattern one or more times." This means that the word `fast` won't
|
||||
match because there are no word characters following the final `t`. But a word
|
||||
like `faster` will. `faste` would also match!
|
||||
|
||||
Here's a different variation on this same theme:
|
||||
|
||||
```
|
||||
$ rg 'fast\w*' README.md
|
||||
75: faster than both. (N.B. It is not, strictly speaking, a "drop-in" replacement
|
||||
88: color and full Unicode support. Unlike GNU grep, `ripgrep` stays fast while
|
||||
119:### Is it really faster than everything else?
|
||||
124:Summarizing, `ripgrep` is fast because:
|
||||
129: optimizations to make searching very fast.
|
||||
```
|
||||
|
||||
In this case, we used `fast\w*` for our pattern instead of `fast\w+`. The `*`
|
||||
means that it should match *zero* or more times. In this case, ripgrep will
|
||||
print the same lines as the pattern `fast`, but if your terminal supports
|
||||
colors, you'll notice that `faster` will be highlighted instead of just the
|
||||
`fast` prefix.
|
||||
|
||||
It is beyond the scope of this guide to provide a full tutorial on regular
|
||||
expressions, but ripgrep's specific syntax is documented here:
|
||||
https://docs.rs/regex/*/regex/#syntax
|
||||
|
||||
|
||||
### Recursive search
|
||||
|
||||
In the previous section, we showed how to use ripgrep to search a single file.
|
||||
In this section, we'll show how to use ripgrep to search an entire directory
|
||||
of files. In fact, *recursively* searching your current working directory is
|
||||
the default mode of operation for ripgrep, which means doing this is very
|
||||
simple.
|
||||
|
||||
Using our unzipped archive of ripgrep source code, here's how to find all
|
||||
function definitions whose name is `write`:
|
||||
|
||||
```
|
||||
$ rg 'fn write\('
|
||||
src/printer.rs
|
||||
469: fn write(&mut self, buf: &[u8]) {
|
||||
|
||||
termcolor/src/lib.rs
|
||||
227: fn write(&mut self, b: &[u8]) -> io::Result<usize> {
|
||||
250: fn write(&mut self, b: &[u8]) -> io::Result<usize> {
|
||||
428: fn write(&mut self, b: &[u8]) -> io::Result<usize> { self.wtr.write(b) }
|
||||
441: fn write(&mut self, b: &[u8]) -> io::Result<usize> { self.wtr.write(b) }
|
||||
454: fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
|
||||
511: fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
|
||||
848: fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
|
||||
915: fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
|
||||
949: fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
|
||||
1114: fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
|
||||
1348: fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
|
||||
1353: fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
|
||||
```
|
||||
|
||||
(**Note:** We escape the `(` here because `(` has special significance inside
|
||||
regular expressions. You could also use `rg -F 'fn write('` to achieve the
|
||||
same thing, where `-F` interprets your pattern as a literal string instead of
|
||||
a regular expression.)
|
||||
|
||||
In this example, we didn't specify a file at all. Instead, ripgrep defaulted
|
||||
to searching your current directory in the absence of a path. In general,
|
||||
`rg foo` is equivalent to `rg foo ./`.
|
||||
|
||||
This particular search showed us results in both the `src` and `termcolor`
|
||||
directories. The `src` directory is the core ripgrep code where as `termcolor`
|
||||
is a dependency of ripgrep (and is used by other tools). What if we only wanted
|
||||
to search core ripgrep code? Well, that's easy, just specify the directory you
|
||||
want:
|
||||
|
||||
```
|
||||
$ rg 'fn write\(' src
|
||||
src/printer.rs
|
||||
469: fn write(&mut self, buf: &[u8]) {
|
||||
```
|
||||
|
||||
Here, ripgrep limited its search to the `src` directory. Another way of doing
|
||||
this search would be to `cd` into the `src` directory and simply use `rg 'fn
|
||||
write\('` again.
|
||||
|
||||
|
||||
### Automatic filtering
|
||||
|
||||
After recursive search, ripgrep's most important feature is what it *doesn't*
|
||||
search. By default, when you search a directory, ripgrep will ignore all of
|
||||
the following:
|
||||
|
||||
1. Files and directories that match the rules in your `.gitignore` glob
|
||||
pattern.
|
||||
2. Hidden files and directories.
|
||||
3. Binary files. (ripgrep considers any file with a `NUL` byte to be binary.)
|
||||
4. Symbolic links aren't followed.
|
||||
|
||||
All of these things can be toggled using various flags provided by ripgrep:
|
||||
|
||||
1. You can disable `.gitignore` handling with the `--no-ignore` flag.
|
||||
2. Hidden files and directories can be searched with the `--hidden` flag.
|
||||
3. Binary files can be searched via the `--text` (`-a` for short) flag.
|
||||
Be careful with this flag! Binary files may emit control characters to your
|
||||
terminal, which might cause strange behavior.
|
||||
4. ripgrep can follow symlinks with the `--follow` (`-L` for short) flag.
|
||||
|
||||
As a special convenience, ripgrep also provides a flag called `--unrestricted`
|
||||
(`-u` for short). Repeated uses of this flag will cause ripgrep to disable
|
||||
more and more of its filtering. That is, `-u` will disable `.gitignore`
|
||||
handling, `-uu` will search hidden files and directories and `-uuu` will search
|
||||
binary files. This is useful when you're using ripgrep and you aren't sure
|
||||
whether its filtering is hiding results from you. Tacking on a couple `-u`
|
||||
flags is a quick way to find out. (Use the `--debug` flag if you're still
|
||||
perplexed, and if that doesn't help,
|
||||
[file an issue](https://github.com/BurntSushi/ripgrep/issues/new).)
|
||||
|
||||
ripgrep's `.gitignore` handling actually goes a bit beyond just `.gitignore`
|
||||
files. ripgrep will also respect repository specific rules found in
|
||||
`$GIT_DIR/info/exclude`, as well as any global ignore rules in your
|
||||
`core.excludesFile` (which is usually `$XDG_CONFIG_HOME/git/ignore` on
|
||||
Unix-like systems).
|
||||
|
||||
Sometimes you want to search files that are in your `.gitignore`, so it is
|
||||
possible to specify additional ignore rules or overrides in a `.ignore`
|
||||
(application agnostic) or `.rgignore` (ripgrep specific) file.
|
||||
|
||||
For example, let's say you have a `.gitignore` file that looks like this:
|
||||
|
||||
```
|
||||
log/
|
||||
```
|
||||
|
||||
This generally means that any `log` directory won't be tracked by `git`.
|
||||
However, perhaps it contains useful output that you'd like to include in your
|
||||
searches, but you still don't want to track it in `git`. You can achieve this
|
||||
by creating a `.ignore` file in the same directory as the `.gitignore` file
|
||||
with the following contents:
|
||||
|
||||
```
|
||||
!log/
|
||||
```
|
||||
|
||||
ripgrep treats `.ignore` files with higher precedence than `.gitignore` files
|
||||
(and treats `.rgignore` files with higher precedence than `.ignore` files).
|
||||
This means ripgrep will see the `!log/` whitelist rule first and search that
|
||||
directory.
|
||||
|
||||
Like `.gitignore`, a `.ignore` file can be placed in any directory. Its rules
|
||||
will be processed with respect to the directory it resides in, just like
|
||||
`.gitignore`.
|
||||
|
||||
To process `.gitignore` and `.ignore` files case insensitively, use the flag
|
||||
`--ignore-file-case-insensitive`. This is especially useful on case insensitive
|
||||
file systems like those on Windows and macOS. Note though that this can come
|
||||
with a significant performance penalty, and is therefore disabled by default.
|
||||
|
||||
For a more in depth description of how glob patterns in a `.gitignore` file
|
||||
are interpreted, please see `man gitignore`.
|
||||
|
||||
|
||||
### Manual filtering: globs
|
||||
|
||||
In the previous section, we talked about ripgrep's filtering that it does by
|
||||
default. It is "automatic" because it reacts to your environment. That is, it
|
||||
uses already existing `.gitignore` files to produce more relevant search
|
||||
results.
|
||||
|
||||
In addition to automatic filtering, ripgrep also provides more manual or ad hoc
|
||||
filtering. This comes in two varieties: additional glob patterns specified in
|
||||
your ripgrep commands and file type filtering. This section covers glob
|
||||
patterns while the next section covers file type filtering.
|
||||
|
||||
In our ripgrep source code (see [Basics](#basics) for instructions on how to
|
||||
get a source archive to search), let's say we wanted to see which things depend
|
||||
on `clap`, our argument parser.
|
||||
|
||||
We could do this:
|
||||
|
||||
```
|
||||
$ rg clap
|
||||
[lots of results]
|
||||
```
|
||||
|
||||
But this shows us many things, and we're only interested in where we wrote
|
||||
`clap` as a dependency. Instead, we could limit ourselves to TOML files, which
|
||||
is how dependencies are communicated to Rust's build tool, Cargo:
|
||||
|
||||
```
|
||||
$ rg clap -g '*.toml'
|
||||
Cargo.toml
|
||||
35:clap = "2.26"
|
||||
51:clap = "2.26"
|
||||
```
|
||||
|
||||
The `-g '*.toml'` syntax says, "make sure every file searched matches this
|
||||
glob pattern." Note that we put `'*.toml'` in single quotes to prevent our
|
||||
shell from expanding the `*`.
|
||||
|
||||
If we wanted, we could tell ripgrep to search anything *but* `*.toml` files:
|
||||
|
||||
```
|
||||
$ rg clap -g '!*.toml'
|
||||
[lots of results]
|
||||
```
|
||||
|
||||
This will give you a lot of results again as above, but they won't include
|
||||
files ending with `.toml`. Note that the use of a `!` here to mean "negation"
|
||||
is a bit non-standard, but it was chosen to be consistent with how globs in
|
||||
`.gitignore` files are written. (Although, the meaning is reversed. In
|
||||
`.gitignore` files, a `!` prefix means whitelist, and on the command line, a
|
||||
`!` means blacklist.)
|
||||
|
||||
Globs are interpreted in exactly the same way as `.gitignore` patterns. That
|
||||
is, later globs will override earlier globs. For example, the following command
|
||||
will search only `*.toml` files:
|
||||
|
||||
```
|
||||
$ rg clap -g '!*.toml' -g '*.toml'
|
||||
```
|
||||
|
||||
Interestingly, reversing the order of the globs in this case will match
|
||||
nothing, since the presence of at least one non-blacklist glob will institute a
|
||||
requirement that every file searched must match at least one glob. In this
|
||||
case, the blacklist glob takes precedence over the previous glob and prevents
|
||||
any file from being searched at all!
|
||||
|
||||
|
||||
### Manual filtering: file types
|
||||
|
||||
Over time, you might notice that you use the same glob patterns over and over.
|
||||
For example, you might find yourself doing a lot of searches where you only
|
||||
want to see results for Rust files:
|
||||
|
||||
```
|
||||
$ rg 'fn run' -g '*.rs'
|
||||
```
|
||||
|
||||
Instead of writing out the glob every time, you can use ripgrep's support for
|
||||
file types:
|
||||
|
||||
```
|
||||
$ rg 'fn run' --type rust
|
||||
```
|
||||
|
||||
or, more succinctly,
|
||||
|
||||
```
|
||||
$ rg 'fn run' -trust
|
||||
```
|
||||
|
||||
The way the `--type` flag functions is simple. It acts as a name that is
|
||||
assigned to one or more globs that match the relevant files. This lets you
|
||||
write a single type that might encompass a broad range of file extensions. For
|
||||
example, if you wanted to search C files, you'd have to check both C source
|
||||
files and C header files:
|
||||
|
||||
```
|
||||
$ rg 'int main' -g '*.{c,h}'
|
||||
```
|
||||
|
||||
or you could just use the C file type:
|
||||
|
||||
```
|
||||
$ rg 'int main' -tc
|
||||
```
|
||||
|
||||
Just as you can write blacklist globs, you can blacklist file types too:
|
||||
|
||||
```
|
||||
$ rg clap --type-not rust
|
||||
```
|
||||
|
||||
or, more succinctly,
|
||||
|
||||
```
|
||||
$ rg clap -Trust
|
||||
```
|
||||
|
||||
That is, `-t` means "include files of this type" where as `-T` means "exclude
|
||||
files of this type."
|
||||
|
||||
To see the globs that make up a type, run `rg --type-list`:
|
||||
|
||||
```
|
||||
$ rg --type-list | rg '^make:'
|
||||
make: *.mak, *.mk, GNUmakefile, Gnumakefile, Makefile, gnumakefile, makefile
|
||||
```
|
||||
|
||||
By default, ripgrep comes with a bunch of pre-defined types. Generally, these
|
||||
types correspond to well known public formats. But you can define your own
|
||||
types as well. For example, perhaps you frequently search "web" files, which
|
||||
consist of Javascript, HTML and CSS:
|
||||
|
||||
```
|
||||
$ rg --type-add 'web:*.html' --type-add 'web:*.css' --type-add 'web:*.js' -tweb title
|
||||
```
|
||||
|
||||
or, more succinctly,
|
||||
|
||||
```
|
||||
$ rg --type-add 'web:*.{html,css,js}' -tweb title
|
||||
```
|
||||
|
||||
The above command defines a new type, `web`, corresponding to the glob
|
||||
`*.{html,css,js}`. It then applies the new filter with `-tweb` and searches for
|
||||
the pattern `title`. If you ran
|
||||
|
||||
```
|
||||
$ rg --type-add 'web:*.{html,css,js}' --type-list
|
||||
```
|
||||
|
||||
Then you would see your `web` type show up in the list, even though it is not
|
||||
part of ripgrep's built-in types.
|
||||
|
||||
It is important to stress here that the `--type-add` flag only applies to the
|
||||
current command. It does not add a new file type and save it somewhere in a
|
||||
persistent form. If you want a type to be available in every ripgrep command,
|
||||
then you should either create a shell alias:
|
||||
|
||||
```
|
||||
alias rg="rg --type-add 'web:*.{html,css,js}'"
|
||||
```
|
||||
|
||||
or add `--type-add=web:*.{html,css,js}` to your ripgrep configuration file.
|
||||
([Configuration files](#configuration-file) are covered in more detail later.)
|
||||
|
||||
#### The special `all` file type
|
||||
|
||||
A special option supported by the `--type` flag is `all`. `--type all` looks
|
||||
for a match in any of the supported file types listed by `--type-list`,
|
||||
including those added on the command line using `--type-add`. It's equivalent
|
||||
to the command `rg --type agda --type asciidoc --type asm ...`, where `...`
|
||||
stands for a list of `--type` flags for the rest of the types in `--type-list`.
|
||||
|
||||
As an example, let's suppose you have a shell script in your current directory,
|
||||
`my-shell-script`, which includes a shell library, `my-shell-library.bash`.
|
||||
Both `rg --type sh` and `rg --type all` would only search for matches in
|
||||
`my-shell-library.bash`, not `my-shell-script`, because the globs matched
|
||||
by the `sh` file type don't include files without an extension. On the
|
||||
other hand, `rg --type-not all` would search `my-shell-script` but not
|
||||
`my-shell-library.bash`.
|
||||
|
||||
### Replacements
|
||||
|
||||
ripgrep provides a limited ability to modify its output by replacing matched
|
||||
text with some other text. This is easiest to explain with an example. Remember
|
||||
when we searched for the word `fast` in ripgrep's README?
|
||||
|
||||
```
|
||||
$ rg fast README.md
|
||||
75: faster than both. (N.B. It is not, strictly speaking, a "drop-in" replacement
|
||||
88: color and full Unicode support. Unlike GNU grep, `ripgrep` stays fast while
|
||||
119:### Is it really faster than everything else?
|
||||
124:Summarizing, `ripgrep` is fast because:
|
||||
129: optimizations to make searching very fast.
|
||||
```
|
||||
|
||||
What if we wanted to *replace* all occurrences of `fast` with `FAST`? That's
|
||||
easy with ripgrep's `--replace` flag:
|
||||
|
||||
```
|
||||
$ rg fast README.md --replace FAST
|
||||
75: FASTer than both. (N.B. It is not, strictly speaking, a "drop-in" replacement
|
||||
88: color and full Unicode support. Unlike GNU grep, `ripgrep` stays FAST while
|
||||
119:### Is it really FASTer than everything else?
|
||||
124:Summarizing, `ripgrep` is FAST because:
|
||||
129: optimizations to make searching very FAST.
|
||||
```
|
||||
|
||||
or, more succinctly,
|
||||
|
||||
```
|
||||
$ rg fast README.md -r FAST
|
||||
[snip]
|
||||
```
|
||||
|
||||
In essence, the `--replace` flag applies *only* to the matching portion of text
|
||||
in the output. If you instead wanted to replace an entire line of text, then
|
||||
you need to include the entire line in your match. For example:
|
||||
|
||||
```
|
||||
$ rg '^.*fast.*$' README.md -r FAST
|
||||
75:FAST
|
||||
88:FAST
|
||||
119:FAST
|
||||
124:FAST
|
||||
129:FAST
|
||||
```
|
||||
|
||||
Alternatively, you can combine the `--only-matching` (or `-o` for short) with
|
||||
the `--replace` flag to achieve the same result:
|
||||
|
||||
```
|
||||
$ rg fast README.md --only-matching --replace FAST
|
||||
75:FAST
|
||||
88:FAST
|
||||
119:FAST
|
||||
124:FAST
|
||||
129:FAST
|
||||
```
|
||||
|
||||
or, more succinctly,
|
||||
|
||||
```
|
||||
$ rg fast README.md -or FAST
|
||||
[snip]
|
||||
```
|
||||
|
||||
Finally, replacements can include capturing groups. For example, let's say
|
||||
we wanted to find all occurrences of `fast` followed by another word and
|
||||
join them together with a dash. The pattern we might use for that is
|
||||
`fast\s+(\w+)`, which matches `fast`, followed by any amount of whitespace,
|
||||
followed by any number of "word" characters. We put the `\w+` in a "capturing
|
||||
group" (indicated by parentheses) so that we can reference it later in our
|
||||
replacement string. For example:
|
||||
|
||||
```
|
||||
$ rg 'fast\s+(\w+)' README.md -r 'fast-$1'
|
||||
88: color and full Unicode support. Unlike GNU grep, `ripgrep` stays fast-while
|
||||
124:Summarizing, `ripgrep` is fast-because:
|
||||
```
|
||||
|
||||
Our replacement string here, `fast-$1`, consists of `fast-` followed by the
|
||||
contents of the capturing group at index `1`. (Capturing groups actually start
|
||||
at index 0, but the `0`th capturing group always corresponds to the entire
|
||||
match. The capturing group at index `1` always corresponds to the first
|
||||
explicit capturing group found in the regex pattern.)
|
||||
|
||||
Capturing groups can also be named, which is sometimes more convenient than
|
||||
using the indices. For example, the following command is equivalent to the
|
||||
above command:
|
||||
|
||||
```
|
||||
$ rg 'fast\s+(?P<word>\w+)' README.md -r 'fast-$word'
|
||||
88: color and full Unicode support. Unlike GNU grep, `ripgrep` stays fast-while
|
||||
124:Summarizing, `ripgrep` is fast-because:
|
||||
```
|
||||
|
||||
It is important to note that ripgrep **will never modify your files**. The
|
||||
`--replace` flag only controls ripgrep's output. (And there is no flag to let
|
||||
you do a replacement in a file.)
|
||||
|
||||
|
||||
### Configuration file
|
||||
|
||||
It is possible that ripgrep's default options aren't suitable in every case.
|
||||
For that reason, and because shell aliases aren't always convenient, ripgrep
|
||||
supports configuration files.
|
||||
|
||||
Setting up a configuration file is simple. ripgrep will not look in any
|
||||
predetermined directory for a config file automatically. Instead, you need to
|
||||
set the `RIPGREP_CONFIG_PATH` environment variable to the file path of your
|
||||
config file. Once the environment variable is set, open the file and just type
|
||||
in the flags you want set automatically. There are only two rules for
|
||||
describing the format of the config file:
|
||||
|
||||
1. Every line is a shell argument, after trimming whitespace.
|
||||
2. Lines starting with `#` (optionally preceded by any amount of whitespace)
|
||||
are ignored.
|
||||
|
||||
In particular, there is no escaping. Each line is given to ripgrep as a single
|
||||
command line argument verbatim.
|
||||
|
||||
Here's an example of a configuration file, which demonstrates some of the
|
||||
formatting peculiarities:
|
||||
|
||||
```
|
||||
$ cat $HOME/.ripgreprc
|
||||
# Don't let ripgrep vomit really long lines to my terminal, and show a preview.
|
||||
--max-columns=150
|
||||
--max-columns-preview
|
||||
|
||||
# Add my 'web' type.
|
||||
--type-add
|
||||
web:*.{html,css,js}*
|
||||
|
||||
# Using glob patterns to include/exclude files or folders
|
||||
--glob=!git/*
|
||||
|
||||
# or
|
||||
--glob
|
||||
!git/*
|
||||
|
||||
# Set the colors.
|
||||
--colors=line:none
|
||||
--colors=line:style:bold
|
||||
|
||||
# Because who cares about case!?
|
||||
--smart-case
|
||||
```
|
||||
|
||||
When we use a flag that has a value, we either put the flag and the value on
|
||||
the same line but delimited by an `=` sign (e.g., `--max-columns=150`), or we
|
||||
put the flag and the value on two different lines. This is because ripgrep's
|
||||
argument parser knows to treat the single argument `--max-columns=150` as a
|
||||
flag with a value, but if we had written `--max-columns 150` in our
|
||||
configuration file, then ripgrep's argument parser wouldn't know what to do
|
||||
with it.
|
||||
|
||||
Putting the flag and value on different lines is exactly equivalent and is a
|
||||
matter of style.
|
||||
|
||||
Comments are encouraged so that you remember what the config is doing. Empty
|
||||
lines are OK too.
|
||||
|
||||
So let's say you're using the above configuration file, but while you're at a
|
||||
terminal, you really want to be able to see lines longer than 150 columns. What
|
||||
do you do? Thankfully, all you need to do is pass `--max-columns 0` (or `-M0`
|
||||
for short) on the command line, which will override your configuration file's
|
||||
setting. This works because ripgrep's configuration file is *prepended* to the
|
||||
explicit arguments you give it on the command line. Since flags given later
|
||||
override flags given earlier, everything works as expected. This works for most
|
||||
other flags as well, and each flag's documentation states which other flags
|
||||
override it.
|
||||
|
||||
If you're confused about what configuration file ripgrep is reading arguments
|
||||
from, then running ripgrep with the `--debug` flag should help clarify things.
|
||||
The debug output should note what config file is being loaded and the arguments
|
||||
that have been read from the configuration.
|
||||
|
||||
Finally, if you want to make absolutely sure that ripgrep *isn't* reading a
|
||||
configuration file, then you can pass the `--no-config` flag, which will always
|
||||
prevent ripgrep from reading extraneous configuration from the environment,
|
||||
regardless of what other methods of configuration are added to ripgrep in the
|
||||
future.
|
||||
|
||||
|
||||
### File encoding
|
||||
|
||||
[Text encoding](https://en.wikipedia.org/wiki/Character_encoding) is a complex
|
||||
topic, but we can try to summarize its relevancy to ripgrep:
|
||||
|
||||
* Files are generally just a bundle of bytes. There is no reliable way to know
|
||||
their encoding.
|
||||
* Either the encoding of the pattern must match the encoding of the files being
|
||||
searched, or a form of transcoding must be performed that converts either the
|
||||
pattern or the file to the same encoding as the other.
|
||||
* ripgrep tends to work best on plain text files, and among plain text files,
|
||||
the most popular encodings likely consist of ASCII, latin1 or UTF-8. As
|
||||
a special exception, UTF-16 is prevalent in Windows environments
|
||||
|
||||
In light of the above, here is how ripgrep behaves when `--encoding auto` is
|
||||
given, which is the default:
|
||||
|
||||
* All input is assumed to be ASCII compatible (which means every byte that
|
||||
corresponds to an ASCII codepoint actually is an ASCII codepoint). This
|
||||
includes ASCII itself, latin1 and UTF-8.
|
||||
* ripgrep works best with UTF-8. For example, ripgrep's regular expression
|
||||
engine supports Unicode features. Namely, character classes like `\w` will
|
||||
match all word characters by Unicode's definition and `.` will match any
|
||||
Unicode codepoint instead of any byte. These constructions assume UTF-8,
|
||||
so they simply won't match when they come across bytes in a file that aren't
|
||||
UTF-8.
|
||||
* To handle the UTF-16 case, ripgrep will do something called "BOM sniffing"
|
||||
by default. That is, the first three bytes of a file will be read, and if
|
||||
they correspond to a UTF-16 BOM, then ripgrep will transcode the contents of
|
||||
the file from UTF-16 to UTF-8, and then execute the search on the transcoded
|
||||
version of the file. (This incurs a performance penalty since transcoding
|
||||
is slower than regex searching.) If the file contains invalid UTF-16, then
|
||||
the Unicode replacement codepoint is substituted in place of invalid code
|
||||
units.
|
||||
* To handle other cases, ripgrep provides a `-E/--encoding` flag, which permits
|
||||
you to specify an encoding from the
|
||||
[Encoding Standard](https://encoding.spec.whatwg.org/#concept-encoding-get).
|
||||
ripgrep will assume *all* files searched are the encoding specified (unless
|
||||
the file has a BOM) and will perform a transcoding step just like in the
|
||||
UTF-16 case described above.
|
||||
|
||||
By default, ripgrep will not require its input be valid UTF-8. That is, ripgrep
|
||||
can and will search arbitrary bytes. The key here is that if you're searching
|
||||
content that isn't UTF-8, then the usefulness of your pattern will degrade. If
|
||||
you're searching bytes that aren't ASCII compatible, then it's likely the
|
||||
pattern won't find anything. With all that said, this mode of operation is
|
||||
important, because it lets you find ASCII or UTF-8 *within* files that are
|
||||
otherwise arbitrary bytes.
|
||||
|
||||
As a special case, the `-E/--encoding` flag supports the value `none`, which
|
||||
will completely disable all encoding related logic, including BOM sniffing.
|
||||
When `-E/--encoding` is set to `none`, ripgrep will search the raw bytes of
|
||||
the underlying file with no transcoding step. For example, here's how you might
|
||||
search the raw UTF-16 encoding of the string `Шерлок`:
|
||||
|
||||
```
|
||||
$ rg '(?-u)\(\x045\x04@\x04;\x04>\x04:\x04' -E none -a some-utf16-file
|
||||
```
|
||||
|
||||
Of course, that's just an example meant to show how one can drop down into
|
||||
raw bytes. Namely, the simpler command works as you might expect automatically:
|
||||
|
||||
```
|
||||
$ rg 'Шерлок' some-utf16-file
|
||||
```
|
||||
|
||||
Finally, it is possible to disable ripgrep's Unicode support from within the
|
||||
regular expression. For example, let's say you wanted `.` to match any byte
|
||||
rather than any Unicode codepoint. (You might want this while searching a
|
||||
binary file, since `.` by default will not match invalid UTF-8.) You could do
|
||||
this by disabling Unicode via a regular expression flag:
|
||||
|
||||
```
|
||||
$ rg '(?-u:.)'
|
||||
```
|
||||
|
||||
This works for any part of the pattern. For example, the following will find
|
||||
any Unicode word character followed by any ASCII word character followed by
|
||||
another Unicode word character:
|
||||
|
||||
```
|
||||
$ rg '\w(?-u:\w)\w'
|
||||
```
|
||||
|
||||
|
||||
### Binary data
|
||||
|
||||
In addition to skipping hidden files and files in your `.gitignore` by default,
|
||||
ripgrep also attempts to skip binary files. ripgrep does this by default
|
||||
because binary files (like PDFs or images) are typically not things you want to
|
||||
search when searching for regex matches. Moreover, if content in a binary file
|
||||
did match, then it's possible for undesirable binary data to be printed to your
terminal and wreak havoc.

Unfortunately, unlike skipping hidden files and respecting your `.gitignore`
rules, a file cannot as easily be classified as binary. In order to figure out
whether a file is binary, the most effective heuristic that balances
correctness with performance is to simply look for `NUL` bytes. At that point,
the determination is simple: a file is considered "binary" if and only if it
contains a `NUL` byte somewhere in its contents.

The issue is that while most binary files will have a `NUL` byte toward the
beginning of their contents, this is not necessarily true. The `NUL` byte might
be the very last byte in a large file, but that file is still considered
binary. While this leads to a fair amount of complexity inside ripgrep's
implementation, it also results in some unintuitive user experiences.

At a high level, ripgrep operates in three different modes with respect to
binary files:

1. The default mode is to attempt to remove binary files from a search
   completely. This is meant to mirror how ripgrep removes hidden files and
   files in your `.gitignore` automatically. That is, as soon as a file is
   detected as binary, searching stops. If a match was already printed (because
   it was detected long before a `NUL` byte), then ripgrep will print a warning
   message indicating that the search stopped prematurely. This default mode
   **only applies to files searched by ripgrep as a result of recursive
   directory traversal**, which is consistent with ripgrep's other automatic
   filtering. For example, `rg foo .file` will search `.file` even though it
   is hidden. Similarly, `rg foo binary-file` will search `binary-file` in
   "binary" mode automatically.
2. Binary mode is similar to the default mode, except it will not always
   stop searching after it sees a `NUL` byte. Namely, in this mode, ripgrep
   will continue searching a file that is known to be binary until the first
   of two conditions is met: 1) the end of the file has been reached or 2) a
   match is or has been seen. This means that in binary mode, if ripgrep
   reports no matches, then there are no matches in the file. When a match does
   occur, ripgrep prints a message similar to one it prints when in its default
   mode indicating that the search has stopped prematurely. This mode can be
   forcefully enabled for all files with the `--binary` flag. The purpose of
   binary mode is to provide a way to discover matches in all files, but to
   avoid having binary data dumped into your terminal.
3. Text mode completely disables all binary detection and searches all files
   as if they were text. This is useful when searching a file that is
   predominantly text but contains a `NUL` byte, or if you are specifically
   trying to search binary data. This mode can be enabled with the `-a/--text`
   flag. Note that when using this mode on very large binary files, it is
   possible for ripgrep to use a lot of memory.

Unfortunately, there is one additional complexity in ripgrep that can make it
difficult to reason about binary files. That is, the way binary detection works
depends on the way that ripgrep searches your files. Specifically:

* When ripgrep uses memory maps, then binary detection is only performed on the
  first few kilobytes of the file in addition to every matching line.
* When ripgrep doesn't use memory maps, then binary detection is performed on
  all bytes searched.

This means that whether a file is detected as binary or not can change based
on the internal search strategy used by ripgrep. If you prefer to keep
ripgrep's binary file detection consistent, then you can disable memory maps
via the `--no-mmap` flag. (The cost will be a small performance regression when
searching very large files on some platforms.)
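
As a quick sketch of how the three modes and the `--no-mmap` flag fit together
(`binary-file` here is a hypothetical file containing `NUL` bytes):

```
$ rg foobar                  # default: binary files found during directory
                             # traversal are filtered out of the results
$ rg --binary foobar         # binary mode: report whether binary files match,
                             # without dumping binary data to the terminal
$ rg -a foobar binary-file   # text mode: search the (hypothetical) file as text
$ rg --no-mmap foobar        # keep binary detection consistent by disabling
                             # memory maps
```
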
### Common options

ripgrep has a lot of flags. Too many to keep in your head at once. This section
is intended to give you a sampling of some of the most important and frequently
used options that will likely impact how you use ripgrep on a regular basis.

* `-h`: Show ripgrep's condensed help output.
* `--help`: Show ripgrep's longer form help output. (Nearly what you'd find in
  ripgrep's man page, so pipe it into a pager!)
* `-i/--ignore-case`: When searching for a pattern, ignore case differences.
  That is, `rg -i fast` matches `fast`, `fASt`, `FAST`, etc.
* `-S/--smart-case`: This is similar to `--ignore-case`, but disables itself
  if the pattern contains any uppercase letters. Usually this flag is put into
  an alias or a config file.
* `-w/--word-regexp`: Require that all matches of the pattern be surrounded
  by word boundaries. That is, given `pattern`, the `--word-regexp` flag will
  cause ripgrep to behave as if `pattern` were actually `\b(?:pattern)\b`.
* `-c/--count`: Report a count of total matched lines.
* `--files`: Print the files that ripgrep *would* search, but don't actually
  search them.
* `-a/--text`: Search binary files as if they were plain text.
* `-z/--search-zip`: Search compressed files (gzip, bzip2, lzma, xz, lz4,
  brotli, zstd). This is disabled by default.
* `-C/--context`: Show the lines surrounding a match.
* `--sort path`: Force ripgrep to sort its output by file name. (This disables
  parallelism, so it might be slower.)
* `-L/--follow`: Follow symbolic links while recursively searching.
* `-M/--max-columns`: Limit the length of lines printed by ripgrep.
* `--debug`: Shows ripgrep's debug output. This is useful for understanding
  why a particular file might be ignored from search, or what kinds of
  configuration ripgrep is loading from the environment.
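
A few everyday invocations built only from the flags above, as a sketch
(`needle` is a placeholder pattern):

```
$ rg -S -w -C2 needle     # smart case, whole words, 2 lines of context
$ rg -i -c needle         # case insensitive, count matched lines per file
$ rg --sort path --files  # list the files ripgrep would search, sorted by name
$ rg -z -M200 needle      # include compressed files, cap printed line length
```
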

ISSUE_TEMPLATE.md (new file, 53 lines)
@@ -0,0 +1,53 @@
#### What version of ripgrep are you using?

Replace this text with the output of `rg --version`.

#### How did you install ripgrep?

If you installed ripgrep with snap and are getting strange file permission or
file not found errors, then please do not file a bug. Instead, use one of the
GitHub binary releases.

#### What operating system are you using ripgrep on?

Replace this text with your operating system and version.

#### Describe your question, feature request, or bug.

If a question, please describe the problem you're trying to solve and give
as much context as possible.

If a feature request, please describe the behavior you want and the motivation.
Please also provide an example of how ripgrep would be used if your feature
request were added.

If a bug, please see below.

#### If this is a bug, what are the steps to reproduce the behavior?

If possible, please include both your search patterns and the corpus on which
you are searching. Unless the bug is very obvious, it is unlikely to be fixed
if the ripgrep maintainers cannot reproduce it.

If the corpus is too big and you cannot decrease its size, file the bug anyway
and the ripgrep maintainers will help figure out next steps.

#### If this is a bug, what is the actual behavior?

Show the command you ran and the actual output. Include the `--debug` flag in
your invocation of ripgrep.

If the output is large, put it in a gist: https://gist.github.com/

If the output is small, put it in code fences:

```
your
output
goes
here
```

#### If this is a bug, what is the expected behavior?

What do you think ripgrep should have done?

README.md (546 lines changed)
@@ -1,350 +1,361 @@
ripgrep (rg)
------------
`ripgrep` is a line oriented search tool that combines the usability of The
Silver Searcher (similar to `ack`) with the raw speed of GNU grep. `ripgrep`
works by recursively searching your current directory for a regex pattern.
`ripgrep` has first class support on Windows, Mac and Linux, with binary
downloads available for
[every release](https://github.com/BurntSushi/ripgrep/releases).
ripgrep is a line-oriented search tool that recursively searches your current
directory for a regex pattern. By default, ripgrep will respect your .gitignore
and automatically skip hidden files/directories and binary files. ripgrep
has first class support on Windows, macOS and Linux, with binary downloads
available for [every release](https://github.com/BurntSushi/ripgrep/releases).
ripgrep is similar to other popular search tools like The Silver Searcher, ack
and grep.

[](https://travis-ci.org/BurntSushi/ripgrep)
[](https://ci.appveyor.com/project/BurntSushi/ripgrep)
[](https://crates.io/crates/ripgrep)
[](https://github.com/BurntSushi/ripgrep/actions)
[](https://crates.io/crates/ripgrep)
[](https://repology.org/project/ripgrep/badges)

Dual-licensed under MIT or the [UNLICENSE](https://unlicense.org).

### CHANGELOG

Please see the [CHANGELOG](CHANGELOG.md) for a release history.

### Documentation quick links

* [Installation](#installation)
* [User Guide](GUIDE.md)
* [Frequently Asked Questions](FAQ.md)
* [Regex syntax](https://docs.rs/regex/1/regex/#syntax)
* [Configuration files](GUIDE.md#configuration-file)
* [Shell completions](FAQ.md#complete)
* [Building](#building)
* [Translations](#translations)

Dual-licensed under MIT or the [UNLICENSE](http://unlicense.org).

### Screenshot of search results

[](http://burntsushi.net/stuff/ripgrep1.png)
[](https://burntsushi.net/stuff/ripgrep1.png)

### Quick examples comparing tools

This example searches the entire Linux kernel source tree (after running
`make defconfig && make -j8`) for `[A-Z]+_SUSPEND`, where all matches must be
words. Timings were collected on a system with an Intel i7-6900K 3.2 GHz, and
ripgrep was compiled using the `compile` script in this repo.
This example searches the entire
[Linux kernel source tree](https://github.com/BurntSushi/linux)
(after running `make defconfig && make -j8`) for `[A-Z]+_SUSPEND`, where
all matches must be words. Timings were collected on a system with an Intel
i7-6900K 3.2 GHz.

Please remember that a single benchmark is never enough! See my
[blog post on `ripgrep`](http://blog.burntsushi.net/ripgrep/)
[blog post on ripgrep](https://blog.burntsushi.net/ripgrep/)
for a very detailed comparison with more benchmarks and analysis.

| Tool | Command | Line count | Time |
| ---- | ------- | ---------- | ---- |
| ripgrep (Unicode) | `rg -n -w '[A-Z]+_SUSPEND'` | 450 | **0.134s** |
| [The Silver Searcher](https://github.com/ggreer/the_silver_searcher) | `ag -w '[A-Z]+_SUSPEND'` | 450 | 0.753s |
| [git grep](https://www.kernel.org/pub/software/scm/git/docs/git-grep.html) | `LC_ALL=C git grep -E -n -w '[A-Z]+_SUSPEND'` | 450 | 0.823s |
| [git grep (Unicode)](https://www.kernel.org/pub/software/scm/git/docs/git-grep.html) | `LC_ALL=en_US.UTF-8 git grep -E -n -w '[A-Z]+_SUSPEND'` | 450 | 2.880s |
| [sift](https://github.com/svent/sift) | `sift --git -n -w '[A-Z]+_SUSPEND'` | 450 | 3.656s |
| [The Platinum Searcher](https://github.com/monochromegane/the_platinum_searcher) | `pt -w -e '[A-Z]+_SUSPEND'` | 450 | 12.369s |
| [ack](https://github.com/petdance/ack2) | `ack -w '[A-Z]+_SUSPEND'` | 1878 | 16.952s |
| ripgrep (Unicode) | `rg -n -w '[A-Z]+_SUSPEND'` | 452 | **0.136s** |
| [git grep](https://www.kernel.org/pub/software/scm/git/docs/git-grep.html) | `git grep -P -n -w '[A-Z]+_SUSPEND'` | 452 | 0.348s |
| [ugrep (Unicode)](https://github.com/Genivia/ugrep) | `ugrep -r --ignore-files --no-hidden -I -w '[A-Z]+_SUSPEND'` | 452 | 0.506s |
| [git grep](https://www.kernel.org/pub/software/scm/git/docs/git-grep.html) | `LC_ALL=C git grep -E -n -w '[A-Z]+_SUSPEND'` | 452 | 1.150s |
| [The Silver Searcher](https://github.com/ggreer/the_silver_searcher) | `ag -w '[A-Z]+_SUSPEND'` | 452 | 0.654s |
| [ack](https://github.com/beyondgrep/ack3) | `ack -w '[A-Z]+_SUSPEND'` | 452 | 4.054s |
| [git grep (Unicode)](https://www.kernel.org/pub/software/scm/git/docs/git-grep.html) | `LC_ALL=en_US.UTF-8 git grep -E -n -w '[A-Z]+_SUSPEND'` | 452 | 4.205s |

(Yes, `ack` [has](https://github.com/petdance/ack2/issues/445) a
[bug](https://github.com/petdance/ack2/issues/14).)

Here's another benchmark that disregards gitignore files and searches with a
whitelist instead. The corpus is the same as in the previous benchmark, and the
flags passed to each command ensures that they are doing equivalent work:
Here's another benchmark on the same corpus as above that disregards gitignore
files and searches with a whitelist instead. The corpus is the same as in the
previous benchmark, and the flags passed to each command ensure that they are
doing equivalent work:

| Tool | Command | Line count | Time |
| ---- | ------- | ---------- | ---- |
| ripgrep | `rg -L -u -tc -n -w '[A-Z]+_SUSPEND'` | 404 | **0.108s** |
| [ucg](https://github.com/gvansickle/ucg) | `ucg --type=cc -w '[A-Z]+_SUSPEND'` | 392 | 0.219s |
| [GNU grep](https://www.gnu.org/software/grep/) | `egrep -R -n --include='*.c' --include='*.h' -w '[A-Z]+_SUSPEND'` | 404 | 0.733s |
| ripgrep | `rg -uuu -tc -n -w '[A-Z]+_SUSPEND'` | 388 | **0.096s** |
| [ugrep](https://github.com/Genivia/ugrep) | `ugrep -r -n --include='*.c' --include='*.h' -w '[A-Z]+_SUSPEND'` | 388 | 0.493s |
| [GNU grep](https://www.gnu.org/software/grep/) | `egrep -r -n --include='*.c' --include='*.h' -w '[A-Z]+_SUSPEND'` | 388 | 0.806s |

(`ucg` [has slightly different behavior in the presence of symbolic links](https://github.com/gvansickle/ucg/issues/106).)

And finally, a straight up comparison between ripgrep and GNU grep on a single
large file (~9.3GB,
[`OpenSubtitles2016.raw.en.gz`](http://opus.lingfil.uu.se/OpenSubtitles2016/mono/OpenSubtitles2016.raw.en.gz)):
And finally, a straight-up comparison between ripgrep, ugrep and GNU grep on a
single large file cached in memory
(~13GB, [`OpenSubtitles.raw.en.gz`](http://opus.nlpl.eu/download.php?f=OpenSubtitles/v2018/mono/OpenSubtitles.raw.en.gz)):

| Tool | Command | Line count | Time |
| ---- | ------- | ---------- | ---- |
| ripgrep | `rg -w 'Sherlock [A-Z]\w+'` | 5268 | **2.520s** |
| [GNU grep](https://www.gnu.org/software/grep/) | `LC_ALL=C egrep -w 'Sherlock [A-Z]\w+'` | 5268 | 7.143s |
| ripgrep | `rg -w 'Sherlock [A-Z]\w+'` | 7882 | **2.769s** |
| [ugrep](https://github.com/Genivia/ugrep) | `ugrep -w 'Sherlock [A-Z]\w+'` | 7882 | 6.802s |
| [GNU grep](https://www.gnu.org/software/grep/) | `LC_ALL=en_US.UTF-8 egrep -w 'Sherlock [A-Z]\w+'` | 7882 | 9.027s |

In the above benchmark, passing the `-n` flag (for showing line numbers)
increases the times to `3.081s` for ripgrep and `11.403s` for GNU grep.
increases the times to `3.423s` for ripgrep and `13.031s` for GNU grep. ugrep
times are unaffected by the presence or absence of `-n`.

### Why should I use `ripgrep`?

* It can replace both The Silver Searcher and GNU grep because it is faster
  than both. (N.B. It is not, strictly speaking, a "drop-in" replacement for
  both, but the feature sets are far more similar than different.)
* Like The Silver Searcher, `ripgrep` defaults to recursive directory search
  and won't search files ignored by your `.gitignore` files. It also ignores
  hidden and binary files by default. `ripgrep` also implements full support
  for `.gitignore`, where as there are many bugs related to that functionality
  in The Silver Searcher.
* `ripgrep` can search specific types of files. For example, `rg -tpy foo`
### Why should I use ripgrep?

* It can replace many use cases served by other search tools
  because it contains most of their features and is generally faster. (See
  [the FAQ](FAQ.md#posix4ever) for more details on whether ripgrep can truly
  replace grep.)
* Like other tools specialized to code search, ripgrep defaults to recursive
  directory search and won't search files ignored by your
  `.gitignore`/`.ignore`/`.rgignore` files. It also ignores hidden and binary
  files by default. ripgrep also implements full support for `.gitignore`,
  whereas there are many bugs related to that functionality in other code
  search tools claiming to provide the same functionality.
* ripgrep can search specific types of files. For example, `rg -tpy foo`
  limits your search to Python files and `rg -Tjs foo` excludes Javascript
  files from your search. `ripgrep` can be taught about new file types with
  files from your search. ripgrep can be taught about new file types with
  custom matching rules.
* `ripgrep` supports many features found in `grep`, such as showing the context
* ripgrep supports many features found in `grep`, such as showing the context
  of search results, searching multiple patterns, highlighting matches with
  color and full Unicode support. Unlike GNU grep, `ripgrep` stays fast while
  color and full Unicode support. Unlike GNU grep, ripgrep stays fast while
  supporting Unicode (which is always on).
* `ripgrep` supports searching files in text encodings other than UTF-8, such
* ripgrep has optional support for switching its regex engine to use PCRE2.
  Among other things, this makes it possible to use look-around and
  backreferences in your patterns, which are not supported in ripgrep's default
  regex engine. PCRE2 support can be enabled with `-P/--pcre2` (use PCRE2
  always) or `--auto-hybrid-regex` (use PCRE2 only if needed). An alternative
  syntax is provided via the `--engine (default|pcre2|auto-hybrid)` option.
  (A short sketch follows this list.)
* ripgrep supports searching files in text encodings other than UTF-8, such
  as UTF-16, latin-1, GBK, EUC-JP, Shift_JIS and more. (Some support for
  automatically detecting UTF-16 is provided. Other text encodings must be
  specifically specified with the `-E/--encoding` flag.)
* ripgrep supports searching files compressed in a common format (brotli,
  bzip2, gzip, lz4, lzma, xz, or zstandard) with the `-z/--search-zip` flag.
* ripgrep supports arbitrary input preprocessing filters which could be PDF
  text extraction, less supported decompression, decrypting, automatic encoding
  detection and so on.
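
The engine-selection and compression flags called out above can be exercised
directly; a hedged sketch (patterns and file names are illustrative only):

```
$ rg -P '(?<=^fn )\w+' src/        # PCRE2 look-behind requires -P/--pcre2
$ rg --auto-hybrid-regex 'a(?!b)'  # use PCRE2 only when the pattern needs it
$ rg -z 'needle' logs.gz           # search inside a compressed file
```
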

In other words, use `ripgrep` if you like speed, filtering by default, fewer
In other words, use ripgrep if you like speed, filtering by default, fewer
bugs and Unicode support.

### Why shouldn't I use `ripgrep`?

I'd like to try to convince you why you *shouldn't* use `ripgrep`. This should
give you a glimpse at some important downsides or missing features of
`ripgrep`.
### Why shouldn't I use ripgrep?

* `ripgrep` uses a regex engine based on finite automata, so if you want fancy
  regex features such as backreferences or look around, `ripgrep` won't give
  them to you. `ripgrep` does support lots of things though, including, but not
  limited to: lazy quantification (e.g., `a+?`), repetitions (e.g., `a{2,5}`),
  begin/end assertions (e.g., `^\w+$`), word boundaries (e.g., `\bfoo\b`), and
  support for Unicode categories (e.g., `\p{Sc}` to match currency symbols or
  `\p{Lu}` to match any uppercase letter). (Fancier regexes will never be
  supported.)
* `ripgrep` doesn't yet support searching compressed files. (Likely to be
  supported in the future.)
* `ripgrep` doesn't have multiline search. (Unlikely to ever be supported.)
Despite initially not wanting to add every feature under the sun to ripgrep,
over time, ripgrep has grown support for most features found in other file
searching tools. This includes searching for results spanning across multiple
lines, and opt-in support for PCRE2, which provides look-around and
backreference support.

At this point, the primary reasons not to use ripgrep probably consist of one
or more of the following:

* You need a portable and ubiquitous tool. While ripgrep works on Windows,
  macOS and Linux, it is not ubiquitous and it does not conform to any
  standard such as POSIX. The best tool for this job is good old grep.
* There still exists some other feature (or bug) not listed in this README that
  you rely on that's in another tool that isn't in ripgrep.
* There is a performance edge case where ripgrep doesn't do well where another
  tool does do well. (Please file a bug report!)
* ripgrep isn't possible to install on your machine or isn't available for your
  platform. (Please file a bug report!)

In other words, if you like fancy regexes, searching compressed files or
multiline search, then `ripgrep` may not quite meet your needs (yet).

### Is it really faster than everything else?

Yes. A large number of benchmarks with detailed analysis for each is
[available on my blog](http://blog.burntsushi.net/ripgrep/).
Generally, yes. A large number of benchmarks with detailed analysis for each is
[available on my blog](https://blog.burntsushi.net/ripgrep/).

Summarizing, `ripgrep` is fast because:
Summarizing, ripgrep is fast because:

* It is built on top of
  [Rust's regex engine](https://github.com/rust-lang-nursery/regex).
  [Rust's regex engine](https://github.com/rust-lang/regex).
  Rust's regex engine uses finite automata, SIMD and aggressive literal
  optimizations to make searching very fast.
  optimizations to make searching very fast. (PCRE2 support can be opted into
  with the `-P/--pcre2` flag.)
* Rust's regex library maintains performance with full Unicode support by
  building UTF-8 decoding directly into its deterministic finite automaton
  engine.
* It supports searching with either memory maps or by searching incrementally
  with an intermediate buffer. The former is better for single files and the
  latter is better for large directories. `ripgrep` chooses the best searching
  latter is better for large directories. ripgrep chooses the best searching
  strategy for you automatically.
* Applies your ignore patterns in `.gitignore` files using a
  [`RegexSet`](https://doc.rust-lang.org/regex/regex/struct.RegexSet.html).
  [`RegexSet`](https://docs.rs/regex/1/regex/struct.RegexSet.html).
  That means a single file path can be matched against multiple glob patterns
  simultaneously.
* It uses a lock-free parallel recursive directory iterator, courtesy of
  [`crossbeam`](https://docs.rs/crossbeam) and
  [`ignore`](https://docs.rs/ignore).

### Feature comparison

Andy Lester, author of [ack](https://beyondgrep.com/), has published an
excellent table comparing the features of ack, ag, git-grep, GNU grep and
ripgrep: https://beyondgrep.com/feature-comparison/

Note that ripgrep has grown a few significant new features recently that
are not yet present in Andy's table. This includes, but is not limited to,
configuration files, passthru, support for searching compressed files,
multiline search and opt-in fancy regex support via PCRE2.

### Installation

The binary name for `ripgrep` is `rg`.
The binary name for ripgrep is `rg`.

[Binaries for `ripgrep` are available for Windows, Mac and
Linux.](https://github.com/BurntSushi/ripgrep/releases) Linux binaries are
static executables. Windows binaries are available either as built with MinGW
(GNU) or with Microsoft Visual C++ (MSVC). When possible, prefer MSVC over GNU,
but you'll need to have the
[Microsoft VC++ 2015 redistributable](https://www.microsoft.com/en-us/download/details.aspx?id=48145)
**[Archives of precompiled binaries for ripgrep are available for Windows,
macOS and Linux.](https://github.com/BurntSushi/ripgrep/releases)** Users of
platforms not explicitly mentioned below are advised to download one of these
archives.

Linux binaries are static executables. Windows binaries are available either as
built with MinGW (GNU) or with Microsoft Visual C++ (MSVC). When possible,
prefer MSVC over GNU, but you'll need to have the [Microsoft VC++ 2015
redistributable](https://www.microsoft.com/en-us/download/details.aspx?id=48145)
installed.

If you're a **Mac OS X Homebrew** user, then you can install ripgrep either
from homebrew-core, (compiled with rust stable, no SIMD):
If you're a **macOS Homebrew** or a **Linuxbrew** user, then you can install
ripgrep from homebrew-core:

```
$ brew install ripgrep
```

or you can install a binary compiled with rust nightly (including SIMD and all
optimizations) by utilizing a custom tap:
If you're a **MacPorts** user, then you can install ripgrep from the
[official ports](https://www.macports.org/ports.php?by=name&substr=ripgrep):

```
$ brew tap burntsushi/ripgrep https://github.com/BurntSushi/ripgrep.git
$ brew install burntsushi/ripgrep/ripgrep-bin
$ sudo port install ripgrep
```

If you're a **Windows Chocolatey** user, then you can install `ripgrep` from the [official repo](https://chocolatey.org/packages/ripgrep):
If you're a **Windows Chocolatey** user, then you can install ripgrep from the
[official repo](https://chocolatey.org/packages/ripgrep):

```
$ choco install ripgrep
```

If you're an **Arch Linux** user, then you can install `ripgrep` from the official repos:
If you're a **Windows Scoop** user, then you can install ripgrep from the
[official bucket](https://github.com/ScoopInstaller/Main/blob/master/bucket/ripgrep.json):

```
$ scoop install ripgrep
```

If you're an **Arch Linux** user, then you can install ripgrep from the official repos:

```
$ pacman -S ripgrep
```

If you're a **Gentoo** user, you can install `ripgrep` from the [official repo](https://packages.gentoo.org/packages/sys-apps/ripgrep):
If you're a **Gentoo** user, you can install ripgrep from the
[official repo](https://packages.gentoo.org/packages/sys-apps/ripgrep):

```
$ emerge ripgrep
$ emerge sys-apps/ripgrep
```

If you're a **Fedora 24+** user, you can install `ripgrep` from [copr](https://copr.fedorainfracloud.org/coprs/carlgeorge/ripgrep/):
If you're a **Fedora** user, you can install ripgrep from official
repositories.

```
$ dnf copr enable carlgeorge/ripgrep
$ dnf install ripgrep
$ sudo dnf install ripgrep
```

If you're a **RHEL/CentOS 7** user, you can install `ripgrep` from [copr](https://copr.fedorainfracloud.org/coprs/carlgeorge/ripgrep/):
If you're an **openSUSE** user, ripgrep is included in **openSUSE Tumbleweed**
and **openSUSE Leap** since 15.1.

```
$ yum-config-manager --add-repo=https://copr.fedorainfracloud.org/coprs/carlgeorge/ripgrep/repo/epel-7/carlgeorge-ripgrep-epel-7.repo
$ yum install ripgrep
$ sudo zypper install ripgrep
```

If you're a **Nix** user, you can install `ripgrep` from
If you're a **RHEL/CentOS 7/8** user, you can install ripgrep from
[copr](https://copr.fedorainfracloud.org/coprs/carlwgeorge/ripgrep/):

```
$ sudo yum-config-manager --add-repo=https://copr.fedorainfracloud.org/coprs/carlwgeorge/ripgrep/repo/epel-7/carlwgeorge-ripgrep-epel-7.repo
$ sudo yum install ripgrep
```

If you're a **Nix** user, you can install ripgrep from
[nixpkgs](https://github.com/NixOS/nixpkgs/blob/master/pkgs/tools/text/ripgrep/default.nix):

```
$ nix-env --install ripgrep
$ # (Or using the attribute name, which is also `ripgrep`.)
$ # (Or using the attribute name, which is also ripgrep.)
```

If you're a **Rust programmer**, `ripgrep` can be installed with `cargo`. Note
that this requires you to have **Rust 1.12 or newer** installed.
If you're a **Debian** user (or a user of a Debian derivative like **Ubuntu**),
then ripgrep can be installed using a binary `.deb` file provided in each
[ripgrep release](https://github.com/BurntSushi/ripgrep/releases).

```
$ curl -LO https://github.com/BurntSushi/ripgrep/releases/download/11.0.2/ripgrep_11.0.2_amd64.deb
$ sudo dpkg -i ripgrep_11.0.2_amd64.deb
```

If you run Debian Buster (currently Debian stable) or Debian sid, ripgrep is
[officially maintained by Debian](https://tracker.debian.org/pkg/rust-ripgrep).
```
$ sudo apt-get install ripgrep
```

If you're an **Ubuntu Cosmic (18.10)** (or newer) user, ripgrep is
[available](https://launchpad.net/ubuntu/+source/rust-ripgrep) using the same
packaging as Debian:

```
$ sudo apt-get install ripgrep
```

(N.B. Various snaps for ripgrep on Ubuntu are also available, but none of them
seem to work right and generate a number of very strange bug reports that I
don't know how to fix and don't have the time to fix. Therefore, it is no
longer a recommended installation option.)

If you're a **FreeBSD** user, then you can install ripgrep from the
[official ports](https://www.freshports.org/textproc/ripgrep/):

```
# pkg install ripgrep
```

If you're an **OpenBSD** user, then you can install ripgrep from the
[official ports](http://openports.se/textproc/ripgrep):

```
$ doas pkg_add ripgrep
```

If you're a **NetBSD** user, then you can install ripgrep from
[pkgsrc](http://pkgsrc.se/textproc/ripgrep):

```
# pkgin install ripgrep
```

If you're a **Haiku x86_64** user, then you can install ripgrep from the
[official ports](https://github.com/haikuports/haikuports/tree/master/sys-apps/ripgrep):

```
$ pkgman install ripgrep
```

If you're a **Haiku x86_gcc2** user, then you can install ripgrep from the
same port as Haiku x86_64 using the x86 secondary architecture build:

```
$ pkgman install ripgrep_x86
```

If you're a **Rust programmer**, ripgrep can be installed with `cargo`.

* Note that the minimum supported version of Rust for ripgrep is **1.34.0**,
  although ripgrep may work with older versions.
* Note that the binary may be bigger than expected because it contains debug
  symbols. This is intentional. To remove debug symbols and therefore reduce
  the file size, run `strip` on the binary.

```
$ cargo install ripgrep
```

`ripgrep` isn't currently in any other package repositories.
[I'd like to change that](https://github.com/BurntSushi/ripgrep/issues/10).

### Whirlwind tour

The command line usage of `ripgrep` doesn't differ much from other tools that
perform a similar function, so you probably already know how to use `ripgrep`.
The full details can be found in `rg --help`, but let's go on a whirlwind tour.

`ripgrep` detects when it's printing to a terminal, and will automatically
colorize your output and show line numbers, just like The Silver Searcher.
Coloring works on Windows too! Colors can be controlled more granularly with
the `--color` flag.

One last thing before we get started: `ripgrep` assumes UTF-8 *everywhere*. It
can still search files that are invalid UTF-8 (like, say, latin-1), but it will
simply not work on UTF-16 encoded files or other more exotic encodings.
[Support for other encodings may
happen.](https://github.com/BurntSushi/ripgrep/issues/1)

To recursively search the current directory, while respecting all `.gitignore`
files, ignore hidden files and directories and skip binary files:

```
$ rg foobar
```

The above command also respects all `.ignore` files, including in parent
directories. `.ignore` files can be used when `.gitignore` files are
insufficient. In all cases, `.ignore` patterns take precedence over
`.gitignore`.
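
For example, a minimal sketch of using an `.ignore` file to hide a build
directory from ripgrep without touching `.gitignore` (the directory name is
hypothetical):

```
$ echo 'build-output/' >> .ignore
$ rg foobar   # build-output/ (hypothetical) is now skipped, even if
              # .gitignore allows it
```
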

To ignore all ignore files, use `-u`. To additionally search hidden files
and directories, use `-uu`. To additionally search binary files, use `-uuu`.
(In other words, "search everything, dammit!") In particular, `rg -uuu` is
similar to `grep -a -r`.

```
$ rg -uu foobar  # similar to `grep -r`
$ rg -uuu foobar  # similar to `grep -a -r`
```

(Tip: If your ignore files aren't being adhered to like you expect, run your
search with the `--debug` flag.)

Make the search case insensitive with `-i`, invert the search with `-v` or
show the 2 lines before and after every search result with `-C2`.

Force all matches to be surrounded by word boundaries with `-w`.

Search and replace (find first and last names and swap them):

```
$ rg '([A-Z][a-z]+)\s+([A-Z][a-z]+)' --replace '$2, $1'
```

Named groups are supported:

```
$ rg '(?P<first>[A-Z][a-z]+)\s+(?P<last>[A-Z][a-z]+)' --replace '$last, $first'
```

Up the ante with full Unicode support, by matching any uppercase Unicode letter
followed by any sequence of lowercase Unicode letters (good luck doing this
with other search tools!):

```
$ rg '(\p{Lu}\p{Ll}+)\s+(\p{Lu}\p{Ll}+)' --replace '$2, $1'
```

Search only files matching a particular glob:

```
$ rg foo -g 'README.*'
```

<!--*-->

Or exclude files matching a particular glob:

```
$ rg foo -g '!*.min.js'
```

Search and return paths matching a particular glob (i.e., `-g` flag in ag/ack):

```
$ rg -g 'doc*' --files
```

Search only HTML and CSS files:

```
$ rg -thtml -tcss foobar
```

Search everything except for Javascript files:

```
$ rg -Tjs foobar
```

To see a list of types supported, run `rg --type-list`. To add a new type, use
`--type-add`, which must be accompanied by a pattern for searching (`rg` won't
persist your type settings):

```
$ rg --type-add 'foo:*.{foo,foobar}' -tfoo bar
```

The type `foo` will now match any file ending with the `.foo` or `.foobar`
extensions.

### Regex syntax

The syntax supported is
[documented as part of Rust's regex library](https://doc.rust-lang.org/regex/regex/index.html#syntax).

### Shell completions

Shell completion files are included in the release tarball for Bash, Fish, Zsh
and PowerShell.

For **bash**, move `rg.bash-completion` to `$XDG_CONFIG_HOME/bash_completion`
or `/etc/bash_completion.d/`.

For **fish**, move `rg.fish` to `$HOME/.config/fish/completions`.
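
As a sketch, assuming the completion files have been extracted from the release
tarball into the current directory:

```
$ mkdir -p "$XDG_CONFIG_HOME/bash_completion"
$ cp rg.bash-completion "$XDG_CONFIG_HOME/bash_completion/"
$ cp rg.fish "$HOME/.config/fish/completions/"
```
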

### Building

`ripgrep` is written in Rust, so you'll need to grab a
ripgrep is written in Rust, so you'll need to grab a
[Rust installation](https://www.rust-lang.org/) in order to compile it.
`ripgrep` compiles with Rust 1.12 (stable) or newer. Building is easy:
ripgrep compiles with Rust 1.34.0 (stable) or newer. In general, ripgrep tracks
the latest stable release of the Rust compiler.

To build ripgrep:

```
$ git clone https://github.com/BurntSushi/ripgrep
@@ -354,37 +365,68 @@ $ ./target/release/rg --version
0.1.3
```

If you have a Rust nightly compiler, then you can enable optional SIMD
acceleration like so:
If you have a Rust nightly compiler and a recent Intel CPU, then you can enable
additional optional SIMD acceleration like so:

```
RUSTFLAGS="-C target-cpu=native" cargo build --release --features 'simd-accel avx-accel'
RUSTFLAGS="-C target-cpu=native" cargo build --release --features 'simd-accel'
```

If your machine doesn't support AVX instructions, then simply remove
`avx-accel` from the features list. Similarly for SIMD.
The `simd-accel` feature enables SIMD support in certain ripgrep dependencies
(responsible for transcoding). They are not necessary to get SIMD optimizations
for search; those are enabled automatically. Hopefully, some day, the
`simd-accel` feature will similarly become unnecessary. **WARNING:** Currently,
enabling this option can increase compilation times dramatically.

Finally, optional PCRE2 support can be built with ripgrep by enabling the
`pcre2` feature:

```
$ cargo build --release --features 'pcre2'
```

(Tip: use `--features 'pcre2 simd-accel'` to also include compile time SIMD
optimizations, which will only work with a nightly compiler.)

Enabling the PCRE2 feature works with a stable Rust compiler and will
attempt to automatically find and link with your system's PCRE2 library via
`pkg-config`. If one doesn't exist, then ripgrep will build PCRE2 from source
using your system's C compiler and then statically link it into the final
executable. Static linking can be forced even when there is an available PCRE2
system library by either building ripgrep with the MUSL target or by setting
`PCRE2_SYS_STATIC=1`.
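
For instance, a sketch of forcing a statically linked PCRE2 even when a shared
system library is available:

```
$ PCRE2_SYS_STATIC=1 cargo build --release --features 'pcre2'
```
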

ripgrep can be built with the MUSL target on Linux by first installing the MUSL
library on your system (consult your friendly neighborhood package manager).
Then you just need to add MUSL support to your Rust toolchain and rebuild
ripgrep, which yields a fully static executable:

```
$ rustup target add x86_64-unknown-linux-musl
$ cargo build --release --target x86_64-unknown-linux-musl
```

Applying the `--features` flag from above works as expected. If you want to
build a static executable with MUSL and with PCRE2, then you will need to have
`musl-gcc` installed, which might be in a separate package from the actual
MUSL library, depending on your Linux distribution.
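
Putting those pieces together, a hedged sketch of a fully static build with
PCRE2 included (assumes `musl-gcc` is installed as described above):

```
$ rustup target add x86_64-unknown-linux-musl
$ cargo build --release --features 'pcre2' --target x86_64-unknown-linux-musl
```
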

### Running tests

`ripgrep` is relatively well tested, including both unit tests and integration
ripgrep is relatively well-tested, including both unit tests and integration
tests. To run the full test suite, use:

```
$ cargo test
$ cargo test --all
```

from the repository root.

### Known issues

#### I just hit Ctrl+C in the middle of ripgrep's output and now my terminal's foreground color is wrong!
### Translations

Type in `color` on Windows and `echo -ne "\033[0m"` on Unix to restore your
original foreground color.
The following is a list of known translations of ripgrep's documentation. These
are unofficially maintained and may not be up to date.

PR [#187](https://github.com/BurntSushi/ripgrep/pull/187) fixed this, and it
was later deprecated in
[#281](https://github.com/BurntSushi/ripgrep/issues/281). A full explanation is
available [here][msys issue explanation].

[msys issue explanation]: https://github.com/BurntSushi/ripgrep/issues/281#issuecomment-269093893
* [Chinese](https://github.com/chinanf-boy/ripgrep-zh#%E6%9B%B4%E6%96%B0-)

appveyor.yml (deleted file, 71 lines)
@@ -1,71 +0,0 @@
environment:
  global:
    PROJECT_NAME: ripgrep
  matrix:
    - TARGET: i686-pc-windows-gnu
      CHANNEL: stable
    - TARGET: i686-pc-windows-msvc
      CHANNEL: stable
    - TARGET: x86_64-pc-windows-gnu
      CHANNEL: stable
    - TARGET: x86_64-pc-windows-msvc
      CHANNEL: stable

# Install Rust and Cargo
# (Based on from https://github.com/rust-lang/libc/blob/master/appveyor.yml)
install:
  - curl -sSf -o rustup-init.exe https://win.rustup.rs/
  - rustup-init.exe -y --default-host %TARGET%
  - set PATH=%PATH%;C:\Users\appveyor\.cargo\bin
  - if defined MSYS2_BITS set PATH=%PATH%;C:\msys64\mingw%MSYS2_BITS%\bin
  - rustc -V
  - cargo -V

# ???
build: false

# Equivalent to Travis' `script` phase
# TODO modify this phase as you see fit
test_script:
  - cargo test --verbose
  - cargo test --verbose --manifest-path grep/Cargo.toml
  - cargo test --verbose --manifest-path globset/Cargo.toml
  - cargo test --verbose --manifest-path ignore/Cargo.toml
  - cargo test --verbose --manifest-path wincolor/Cargo.toml
  - cargo test --verbose --manifest-path termcolor/Cargo.toml

before_deploy:
  # Generate artifacts for release
  # TODO(burntsushi): How can we enable SSSE3 on Windows?
  - cargo build --release
  - mkdir staging
  - copy target\release\rg.exe staging
  - copy target\release\build\ripgrep-*\out\_rg.ps1 staging
  - cd staging
  # release zipfile will look like 'rust-everywhere-v1.2.3-x86_64-pc-windows-msvc'
  - 7z a ../%PROJECT_NAME%-%APPVEYOR_REPO_TAG_NAME%-%TARGET%.zip *
  - appveyor PushArtifact ../%PROJECT_NAME%-%APPVEYOR_REPO_TAG_NAME%-%TARGET%.zip

deploy:
  description: 'Automatically deployed release'
  # All the zipped artifacts will be deployed
  artifact: /.*\.zip/
  auth_token:
    secure: vv4vBCEosGlyQjaEC1+kraP2P6O4CQSa+Tw50oHWFTGcmuXxaWS0/yEXbxsIRLpw
  provider: GitHub
  # deploy when a new tag is pushed and only on the stable channel
  on:
    # channel to use to produce the release artifacts
    # NOTE make sure you only release *once* per target
    # TODO you may want to pick a different channel
    CHANNEL: stable
    appveyor_repo_tag: true

branches:
  only:
    - /\d+\.\d+\.\d+/
    - master
# - appveyor
# - /\d+\.\d+\.\d+/
# except:
#   - master
@@ -1082,7 +1082,7 @@ def download_subtitles_en(suite_dir):
    if not os.path.exists(en_path):
        if not os.path.exists(en_path_gz):
            run_cmd(['curl', '-LO', SUBTITLES_EN_URL], cwd=subtitle_dir)
        run_cmd(['gunzip', en_path_gz], cwd=subtitle_dir)
        run_cmd(['gunzip', en_path_gz])
    if not os.path.exists(en_path_sample):
        # Get a sample roughly the same size as the Russian corpus so that
        # benchmarks finish in a reasonable time.
@@ -1109,7 +1109,7 @@ def download_subtitles_ru(suite_dir):
    if not os.path.exists(ru_path):
        if not os.path.exists(ru_path_gz):
            run_cmd(['curl', '-LO', SUBTITLES_RU_URL], cwd=subtitle_dir)
        run_cmd(['gunzip', ru_path_gz], cwd=subtitle_dir)
        run_cmd(['gunzip', ru_path_gz])


def has_subtitles_ru(suite_dir):
@@ -1184,6 +1184,7 @@ def collect_benchmarks(suite_dir, filter_pat=None,
                name,
                ' '.join(['--download %s' % n for n in e.missing_names]),
            ))
            continue
        except MissingCommands as e:
            fmt = 'missing commands: %s, skipping benchmark %s ' \
                  '(run with --allow-missing to run incomplete benchmarks)'
@@ -1239,7 +1240,7 @@ def main():
        benchmarks = collect_benchmarks(
            args.dir, filter_pat=args.bench,
            allow_missing_commands=args.allow_missing,
            disabled_cmds=args.disabled.split(','),
            disabled_cmds=(args.disabled or '').split(','),
            warmup_iter=args.warmup_iter, bench_iter=args.bench_iter)
        for b in benchmarks:
            print(b.name)
@@ -1266,7 +1267,7 @@ def main():
        benchmarks = collect_benchmarks(
            args.dir, filter_pat=args.bench,
            allow_missing_commands=args.allow_missing,
            disabled_cmds=args.disabled.split(','),
            disabled_cmds=(args.disabled or '').split(','),
            warmup_iter=args.warmup_iter, bench_iter=args.bench_iter)
        for i, b in enumerate(benchmarks):
            result = b.run()

benchsuite/runs/2018-01-08-archlinux-cheetah/README (new file, 59 lines)
@@ -0,0 +1,59 @@
This directory contains updated benchmarks as of 2018-01-08. They were captured
via the benchsuite script at `benchsuite/benchsuite` from the root of this
repository. The command that was run:

    $ ./benchsuite \
        --dir /tmp/benchsuite \
        --raw runs/2018-01-08-archlinux-cheetah/raw.csv \
        --warmup-iter 1 \
        --bench-iter 5

These results are most directly comparable to the
`2016-09-22-archlinux-cheetah` run in the parent directory.

The versions of each tool are as follows:

    $ grep -V
    grep (GNU grep) 3.1

    $ ag -V
    ag version 2.1.0
    Features:
      +jit +lzma +zlib

    $ sift -V
    sift 0.8.0 (linux/amd64)
    built from commit 2ca94717 (which seems to be 0.9.0)

    $ pt --version
    pt version 2.1.4

    $ ucg -V
    UniversalCodeGrep 0.3.3
    [...]
    Build info

    Repo version: 0.3.3-251-g9b5a3e3

    Compiler info:
      Name ($(CXX)): "g++ -std=gnu++1z"
      Version string: "g++ (GCC) 7.2.1 20171224"

    ISA extensions in use:
      sse4.2: yes
      popcnt: yes

    libpcre info:
      Not linked against libpcre.

    libpcre2-8 info:
      Version: 10.30 2017-08-14
      JIT support built in?: yes
      JIT target architecture: x86 64bit (little endian + unaligned)
      Newline style: LF

The version of ripgrep was compiled from source on commit 85d463c0, with the
simd-accel and avx-accel features enabled:

    $ export RUSTFLAGS="-C target-cpu=native"
    $ cargo build --release --features 'simd-accel avx-accel'

benchsuite/runs/2018-01-08-archlinux-cheetah/raw.csv (new file, 806 lines)
@@ -0,0 +1,806 @@
|
||||
benchmark,warmup_iter,iter,name,command,duration,lines,env
|
||||
linux_alternates,1,5,rg (ignore),rg -n ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.10186767578125,68,
|
||||
linux_alternates,1,5,rg (ignore),rg -n ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.10199356079101562,68,
|
||||
linux_alternates,1,5,rg (ignore),rg -n ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.09750819206237793,68,
|
||||
linux_alternates,1,5,rg (ignore),rg -n ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.09634733200073242,68,
|
||||
linux_alternates,1,5,rg (ignore),rg -n ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.10117292404174805,68,
|
||||
linux_alternates,1,5,ag (ignore),ag -s ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.49642109870910645,68,
|
||||
linux_alternates,1,5,ag (ignore),ag -s ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.48993706703186035,68,
|
||||
linux_alternates,1,5,ag (ignore),ag -s ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.4837028980255127,68,
|
||||
linux_alternates,1,5,ag (ignore),ag -s ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.4773833751678467,68,
|
||||
linux_alternates,1,5,ag (ignore),ag -s ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.558436393737793,68,
|
||||
linux_alternates,1,5,git grep (ignore),git grep -E -I -n ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.2605454921722412,68,LC_ALL=C
|
||||
linux_alternates,1,5,git grep (ignore),git grep -E -I -n ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.26748204231262207,68,LC_ALL=C
|
||||
linux_alternates,1,5,git grep (ignore),git grep -E -I -n ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.26719212532043457,68,LC_ALL=C
|
||||
linux_alternates,1,5,git grep (ignore),git grep -E -I -n ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.2719383239746094,68,LC_ALL=C
|
||||
linux_alternates,1,5,git grep (ignore),git grep -E -I -n ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.26963257789611816,68,LC_ALL=C
|
||||
linux_alternates,1,5,rg (whitelist),rg --no-ignore -n ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.08797001838684082,68,
|
||||
linux_alternates,1,5,rg (whitelist),rg --no-ignore -n ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.09073781967163086,68,
|
||||
linux_alternates,1,5,rg (whitelist),rg --no-ignore -n ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.0914468765258789,68,
|
||||
linux_alternates,1,5,rg (whitelist),rg --no-ignore -n ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.09071612358093262,68,
|
||||
linux_alternates,1,5,rg (whitelist),rg --no-ignore -n ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.0914316177368164,68,
|
||||
linux_alternates,1,5,ucg (whitelist),ucg --nosmart-case ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.1372535228729248,68,
|
||||
linux_alternates,1,5,ucg (whitelist),ucg --nosmart-case ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.13880419731140137,68,
|
||||
linux_alternates,1,5,ucg (whitelist),ucg --nosmart-case ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.13315439224243164,68,
|
||||
linux_alternates,1,5,ucg (whitelist),ucg --nosmart-case ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.1367807388305664,68,
|
||||
linux_alternates,1,5,ucg (whitelist),ucg --nosmart-case ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.13135552406311035,68,
|
||||
linux_alternates_casei,1,5,rg (ignore),rg -n -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.12781810760498047,160,
|
||||
linux_alternates_casei,1,5,rg (ignore),rg -n -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.11988544464111328,160,
|
||||
linux_alternates_casei,1,5,rg (ignore),rg -n -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.1205439567565918,160,
|
||||
linux_alternates_casei,1,5,rg (ignore),rg -n -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.12867259979248047,160,
|
||||
linux_alternates_casei,1,5,rg (ignore),rg -n -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.1215970516204834,160,
|
||||
linux_alternates_casei,1,5,ag (ignore),ag -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.5444357395172119,160,
|
||||
linux_alternates_casei,1,5,ag (ignore),ag -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.5511739253997803,160,
|
||||
linux_alternates_casei,1,5,ag (ignore),ag -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.5382294654846191,160,
|
||||
linux_alternates_casei,1,5,ag (ignore),ag -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.5499558448791504,160,
|
||||
linux_alternates_casei,1,5,ag (ignore),ag -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.6376545429229736,160,
|
||||
linux_alternates_casei,1,5,git grep (ignore),git grep -E -I -n -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.9767155647277832,160,LC_ALL=C
|
||||
linux_alternates_casei,1,5,git grep (ignore),git grep -E -I -n -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.920574426651001,160,LC_ALL=C
|
||||
linux_alternates_casei,1,5,git grep (ignore),git grep -E -I -n -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.9352290630340576,160,LC_ALL=C
|
||||
linux_alternates_casei,1,5,git grep (ignore),git grep -E -I -n -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.8866012096405029,160,LC_ALL=C
|
||||
linux_alternates_casei,1,5,git grep (ignore),git grep -E -I -n -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.9189445972442627,160,LC_ALL=C
|
||||
linux_alternates_casei,1,5,rg (whitelist),rg --no-ignore -n -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.09351730346679688,160,
|
||||
linux_alternates_casei,1,5,rg (whitelist),rg --no-ignore -n -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.09393739700317383,160,
|
||||
linux_alternates_casei,1,5,rg (whitelist),rg --no-ignore -n -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.09986448287963867,160,
|
||||
linux_alternates_casei,1,5,rg (whitelist),rg --no-ignore -n -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.09596824645996094,160,
|
||||
linux_alternates_casei,1,5,rg (whitelist),rg --no-ignore -n -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.09604883193969727,160,
|
||||
linux_alternates_casei,1,5,ucg (whitelist),ucg -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.23943114280700684,160,
|
||||
linux_alternates_casei,1,5,ucg (whitelist),ucg -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.2587015628814697,160,
|
||||
linux_alternates_casei,1,5,ucg (whitelist),ucg -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.2543606758117676,160,
|
||||
linux_alternates_casei,1,5,ucg (whitelist),ucg -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.2490406036376953,160,
|
||||
linux_alternates_casei,1,5,ucg (whitelist),ucg -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.24046540260314941,160,
|
||||
linux_literal,1,5,rg (ignore),rg -n PM_RESUME,0.08253765106201172,16,
|
||||
linux_literal,1,5,rg (ignore),rg -n PM_RESUME,0.08176755905151367,16,
|
||||
linux_literal,1,5,rg (ignore),rg -n PM_RESUME,0.08141684532165527,16,
|
||||
linux_literal,1,5,rg (ignore),rg -n PM_RESUME,0.08108830451965332,16,
|
||||
linux_literal,1,5,rg (ignore),rg -n PM_RESUME,0.08082938194274902,16,
|
||||
linux_literal,1,5,rg (ignore) (mmap),rg -n --mmap PM_RESUME,0.6870582103729248,16,
|
||||
linux_literal,1,5,rg (ignore) (mmap),rg -n --mmap PM_RESUME,0.807842493057251,16,
|
||||
linux_literal,1,5,rg (ignore) (mmap),rg -n --mmap PM_RESUME,0.8129942417144775,16,
|
||||
linux_literal,1,5,rg (ignore) (mmap),rg -n --mmap PM_RESUME,0.7582321166992188,16,
|
||||
linux_literal,1,5,rg (ignore) (mmap),rg -n --mmap PM_RESUME,0.6869800090789795,16,
|
||||
linux_literal,1,5,ag (ignore) (mmap),ag -s PM_RESUME,0.6534101963043213,16,
|
||||
linux_literal,1,5,ag (ignore) (mmap),ag -s PM_RESUME,0.6020612716674805,16,
|
||||
linux_literal,1,5,ag (ignore) (mmap),ag -s PM_RESUME,0.6712157726287842,16,
|
||||
linux_literal,1,5,ag (ignore) (mmap),ag -s PM_RESUME,0.6267571449279785,16,
|
||||
linux_literal,1,5,ag (ignore) (mmap),ag -s PM_RESUME,0.505136251449585,16,
|
||||
linux_literal,1,5,pt (ignore),pt PM_RESUME,0.21415948867797852,16,
|
||||
linux_literal,1,5,pt (ignore),pt PM_RESUME,0.19318318367004395,16,
|
||||
linux_literal,1,5,pt (ignore),pt PM_RESUME,0.21352124214172363,16,
|
||||
linux_literal,1,5,pt (ignore),pt PM_RESUME,0.18979454040527344,16,
|
||||
linux_literal,1,5,pt (ignore),pt PM_RESUME,0.16629600524902344,16,
|
||||
linux_literal,1,5,sift (ignore),sift --binary-skip --exclude-files .* --exclude-files *.pdf -n --git PM_RESUME,0.46967077255249023,16,
|
||||
linux_literal,1,5,sift (ignore),sift --binary-skip --exclude-files .* --exclude-files *.pdf -n --git PM_RESUME,0.46343088150024414,16,
|
||||
linux_literal,1,5,sift (ignore),sift --binary-skip --exclude-files .* --exclude-files *.pdf -n --git PM_RESUME,0.4723978042602539,16,
|
||||
linux_literal,1,5,sift (ignore),sift --binary-skip --exclude-files .* --exclude-files *.pdf -n --git PM_RESUME,0.4741063117980957,16,
|
||||
linux_literal,1,5,sift (ignore),sift --binary-skip --exclude-files .* --exclude-files *.pdf -n --git PM_RESUME,0.4613051414489746,16,
|
||||
linux_literal,1,5,git grep (ignore),git grep -I -n PM_RESUME,0.20196986198425293,16,LC_ALL=C
|
||||
linux_literal,1,5,git grep (ignore),git grep -I -n PM_RESUME,0.18932533264160156,16,LC_ALL=C
|
||||
linux_literal,1,5,git grep (ignore),git grep -I -n PM_RESUME,0.19396305084228516,16,LC_ALL=C
|
||||
linux_literal,1,5,git grep (ignore),git grep -I -n PM_RESUME,0.1952073574066162,16,LC_ALL=C
|
||||
linux_literal,1,5,git grep (ignore),git grep -I -n PM_RESUME,0.20149731636047363,16,LC_ALL=C
|
||||
linux_literal,1,5,rg (whitelist),rg -n --no-ignore -tall PM_RESUME,0.08270478248596191,16,
|
||||
linux_literal,1,5,rg (whitelist),rg -n --no-ignore -tall PM_RESUME,0.08414745330810547,16,
|
||||
linux_literal,1,5,rg (whitelist),rg -n --no-ignore -tall PM_RESUME,0.08627724647521973,16,
|
||||
linux_literal,1,5,rg (whitelist),rg -n --no-ignore -tall PM_RESUME,0.08978700637817383,16,
|
||||
linux_literal,1,5,rg (whitelist),rg -n --no-ignore -tall PM_RESUME,0.0836489200592041,16,
|
||||
linux_literal,1,5,ucg (whitelist),ucg --nosmart-case PM_RESUME,0.15774202346801758,16,
|
||||
linux_literal,1,5,ucg (whitelist),ucg --nosmart-case PM_RESUME,0.16005396842956543,16,
|
||||
linux_literal,1,5,ucg (whitelist),ucg --nosmart-case PM_RESUME,0.15743708610534668,16,
|
||||
linux_literal,1,5,ucg (whitelist),ucg --nosmart-case PM_RESUME,0.16156601905822754,16,
|
||||
linux_literal,1,5,ucg (whitelist),ucg --nosmart-case PM_RESUME,0.1557624340057373,16,
|
||||
linux_literal_casei,1,5,rg (ignore),rg -n -i PM_RESUME,0.1028127670288086,374,
|
||||
linux_literal_casei,1,5,rg (ignore),rg -n -i PM_RESUME,0.10258054733276367,374,
|
||||
linux_literal_casei,1,5,rg (ignore),rg -n -i PM_RESUME,0.10902261734008789,374,
|
||||
linux_literal_casei,1,5,rg (ignore),rg -n -i PM_RESUME,0.10802555084228516,374,
|
||||
linux_literal_casei,1,5,rg (ignore),rg -n -i PM_RESUME,0.10153412818908691,374,
|
||||
linux_literal_casei,1,5,rg (ignore) (mmap),rg -n -i --mmap PM_RESUME,0.7902817726135254,374,
|
||||
linux_literal_casei,1,5,rg (ignore) (mmap),rg -n -i --mmap PM_RESUME,0.7985179424285889,374,
|
||||
linux_literal_casei,1,5,rg (ignore) (mmap),rg -n -i --mmap PM_RESUME,0.8208649158477783,374,
|
||||
linux_literal_casei,1,5,rg (ignore) (mmap),rg -n -i --mmap PM_RESUME,0.7937076091766357,374,
|
||||
linux_literal_casei,1,5,rg (ignore) (mmap),rg -n -i --mmap PM_RESUME,0.7936429977416992,374,
|
||||
linux_literal_casei,1,5,ag (ignore) (mmap),ag -i PM_RESUME,0.5215470790863037,374,
|
||||
linux_literal_casei,1,5,ag (ignore) (mmap),ag -i PM_RESUME,0.46518707275390625,374,
|
||||
linux_literal_casei,1,5,ag (ignore) (mmap),ag -i PM_RESUME,0.4467353820800781,374,
|
||||
linux_literal_casei,1,5,ag (ignore) (mmap),ag -i PM_RESUME,0.4595184326171875,374,
|
||||
linux_literal_casei,1,5,ag (ignore) (mmap),ag -i PM_RESUME,0.4531285762786865,374,
linux_literal_casei,1,5,pt (ignore),pt -i PM_RESUME,14.187762022018433,374,
linux_literal_casei,1,5,pt (ignore),pt -i PM_RESUME,14.178058385848999,374,
linux_literal_casei,1,5,pt (ignore),pt -i PM_RESUME,14.096448421478271,374,
linux_literal_casei,1,5,pt (ignore),pt -i PM_RESUME,14.190524339675903,374,
linux_literal_casei,1,5,pt (ignore),pt -i PM_RESUME,14.231573343276978,374,
linux_literal_casei,1,5,sift (ignore),sift --binary-skip --exclude-files .* --exclude-files *.pdf -n -i --git PM_RESUME,0.4668574333190918,374,
linux_literal_casei,1,5,sift (ignore),sift --binary-skip --exclude-files .* --exclude-files *.pdf -n -i --git PM_RESUME,0.46050214767456055,374,
linux_literal_casei,1,5,sift (ignore),sift --binary-skip --exclude-files .* --exclude-files *.pdf -n -i --git PM_RESUME,0.46228861808776855,374,
linux_literal_casei,1,5,sift (ignore),sift --binary-skip --exclude-files .* --exclude-files *.pdf -n -i --git PM_RESUME,0.44957947731018066,374,
linux_literal_casei,1,5,sift (ignore),sift --binary-skip --exclude-files .* --exclude-files *.pdf -n -i --git PM_RESUME,0.4612581729888916,374,
linux_literal_casei,1,5,git grep (ignore),git grep -I -n -i PM_RESUME,0.1932981014251709,370,LC_ALL=C
linux_literal_casei,1,5,git grep (ignore),git grep -I -n -i PM_RESUME,0.20561552047729492,370,LC_ALL=C
linux_literal_casei,1,5,git grep (ignore),git grep -I -n -i PM_RESUME,0.19516706466674805,370,LC_ALL=C
linux_literal_casei,1,5,git grep (ignore),git grep -I -n -i PM_RESUME,0.20196247100830078,370,LC_ALL=C
linux_literal_casei,1,5,git grep (ignore),git grep -I -n -i PM_RESUME,0.19236421585083008,370,LC_ALL=C
linux_literal_casei,1,5,rg (whitelist),rg -n -i --no-ignore -tall PM_RESUME,0.09555959701538086,370,
linux_literal_casei,1,5,rg (whitelist),rg -n -i --no-ignore -tall PM_RESUME,0.09589338302612305,370,
linux_literal_casei,1,5,rg (whitelist),rg -n -i --no-ignore -tall PM_RESUME,0.09479856491088867,370,
linux_literal_casei,1,5,rg (whitelist),rg -n -i --no-ignore -tall PM_RESUME,0.09741568565368652,370,
linux_literal_casei,1,5,rg (whitelist),rg -n -i --no-ignore -tall PM_RESUME,0.10127615928649902,370,
linux_literal_casei,1,5,ucg (whitelist),ucg -i PM_RESUME,0.15514039993286133,370,
linux_literal_casei,1,5,ucg (whitelist),ucg -i PM_RESUME,0.15668940544128418,370,
linux_literal_casei,1,5,ucg (whitelist),ucg -i PM_RESUME,0.15429425239562988,370,
linux_literal_casei,1,5,ucg (whitelist),ucg -i PM_RESUME,0.15332818031311035,370,
linux_literal_casei,1,5,ucg (whitelist),ucg -i PM_RESUME,0.14861536026000977,370,
linux_literal_default,1,5,rg,rg PM_RESUME,0.08931398391723633,16,
linux_literal_default,1,5,rg,rg PM_RESUME,0.08717465400695801,16,
linux_literal_default,1,5,rg,rg PM_RESUME,0.0879361629486084,16,
linux_literal_default,1,5,rg,rg PM_RESUME,0.08688950538635254,16,
linux_literal_default,1,5,rg,rg PM_RESUME,0.09138607978820801,16,
linux_literal_default,1,5,ag,ag PM_RESUME,0.5342838764190674,16,
linux_literal_default,1,5,ag,ag PM_RESUME,0.47187042236328125,16,
linux_literal_default,1,5,ag,ag PM_RESUME,0.4456596374511719,16,
linux_literal_default,1,5,ag,ag PM_RESUME,0.4507424831390381,16,
linux_literal_default,1,5,ag,ag PM_RESUME,0.44472575187683105,16,
linux_literal_default,1,5,ucg,ucg PM_RESUME,0.15556907653808594,16,
linux_literal_default,1,5,ucg,ucg PM_RESUME,0.1533644199371338,16,
linux_literal_default,1,5,ucg,ucg PM_RESUME,0.15392351150512695,16,
linux_literal_default,1,5,ucg,ucg PM_RESUME,0.1535196304321289,16,
linux_literal_default,1,5,ucg,ucg PM_RESUME,0.15589547157287598,16,
linux_literal_default,1,5,pt,pt PM_RESUME,0.2261514663696289,16,
linux_literal_default,1,5,pt,pt PM_RESUME,0.2731902599334717,16,
linux_literal_default,1,5,pt,pt PM_RESUME,0.2563004493713379,16,
linux_literal_default,1,5,pt,pt PM_RESUME,0.2575085163116455,16,
linux_literal_default,1,5,pt,pt PM_RESUME,0.1724245548248291,16,
linux_literal_default,1,5,sift,sift PM_RESUME,0.13233542442321777,16,
linux_literal_default,1,5,sift,sift PM_RESUME,0.1256580352783203,16,
linux_literal_default,1,5,sift,sift PM_RESUME,0.12435102462768555,16,
linux_literal_default,1,5,sift,sift PM_RESUME,0.1259307861328125,16,
linux_literal_default,1,5,sift,sift PM_RESUME,0.12412142753601074,16,
linux_literal_default,1,5,git grep,git grep PM_RESUME,0.1742086410522461,16,LC_ALL=en_US.UTF-8
linux_literal_default,1,5,git grep,git grep PM_RESUME,0.16890597343444824,16,LC_ALL=en_US.UTF-8
linux_literal_default,1,5,git grep,git grep PM_RESUME,0.16680669784545898,16,LC_ALL=en_US.UTF-8
linux_literal_default,1,5,git grep,git grep PM_RESUME,0.16899871826171875,16,LC_ALL=en_US.UTF-8
linux_literal_default,1,5,git grep,git grep PM_RESUME,0.19794917106628418,16,LC_ALL=en_US.UTF-8
linux_no_literal,1,5,rg (ignore),rg -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.33940672874450684,490,
linux_no_literal,1,5,rg (ignore),rg -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.3274960517883301,490,
linux_no_literal,1,5,rg (ignore),rg -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.32681775093078613,490,
linux_no_literal,1,5,rg (ignore),rg -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.32865071296691895,490,
linux_no_literal,1,5,rg (ignore),rg -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.3240926265716553,490,
linux_no_literal,1,5,rg (ignore) (ASCII),rg -n (?-u)\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.17426586151123047,490,
linux_no_literal,1,5,rg (ignore) (ASCII),rg -n (?-u)\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.17265701293945312,490,
linux_no_literal,1,5,rg (ignore) (ASCII),rg -n (?-u)\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.1703634262084961,490,
linux_no_literal,1,5,rg (ignore) (ASCII),rg -n (?-u)\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.17192435264587402,490,
linux_no_literal,1,5,rg (ignore) (ASCII),rg -n (?-u)\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.1704559326171875,490,
linux_no_literal,1,5,ag (ignore) (ASCII),ag -s \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.8443403244018555,766,
linux_no_literal,1,5,ag (ignore) (ASCII),ag -s \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.6956703662872314,766,
linux_no_literal,1,5,ag (ignore) (ASCII),ag -s \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.6938261985778809,766,
linux_no_literal,1,5,ag (ignore) (ASCII),ag -s \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.695967435836792,766,
linux_no_literal,1,5,ag (ignore) (ASCII),ag -s \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.6945271492004395,766,
linux_no_literal,1,5,pt (ignore) (ASCII),pt -e \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},12.645716428756714,490,
linux_no_literal,1,5,pt (ignore) (ASCII),pt -e \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},12.441533088684082,490,
linux_no_literal,1,5,pt (ignore) (ASCII),pt -e \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},12.472522735595703,490,
linux_no_literal,1,5,pt (ignore) (ASCII),pt -e \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},12.42497444152832,490,
linux_no_literal,1,5,pt (ignore) (ASCII),pt -e \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},12.407486200332642,490,
linux_no_literal,1,5,sift (ignore) (ASCII),sift --binary-skip --exclude-files .* --exclude-files *.pdf -n --git \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},9.091489553451538,490,
linux_no_literal,1,5,sift (ignore) (ASCII),sift --binary-skip --exclude-files .* --exclude-files *.pdf -n --git \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},9.049214124679565,490,
linux_no_literal,1,5,sift (ignore) (ASCII),sift --binary-skip --exclude-files .* --exclude-files *.pdf -n --git \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},8.879419803619385,490,
linux_no_literal,1,5,sift (ignore) (ASCII),sift --binary-skip --exclude-files .* --exclude-files *.pdf -n --git \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},9.07261848449707,490,
linux_no_literal,1,5,sift (ignore) (ASCII),sift --binary-skip --exclude-files .* --exclude-files *.pdf -n --git \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},8.918747901916504,490,
linux_no_literal,1,5,git grep (ignore),git grep -E -I -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},8.334321975708008,490,LC_ALL=en_US.UTF-8
linux_no_literal,1,5,git grep (ignore),git grep -E -I -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},8.993232727050781,490,LC_ALL=en_US.UTF-8
linux_no_literal,1,5,git grep (ignore),git grep -E -I -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},8.622304916381836,490,LC_ALL=en_US.UTF-8
linux_no_literal,1,5,git grep (ignore),git grep -E -I -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},8.35973048210144,490,LC_ALL=en_US.UTF-8
linux_no_literal,1,5,git grep (ignore),git grep -E -I -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},8.39980435371399,490,LC_ALL=en_US.UTF-8
linux_no_literal,1,5,git grep (ignore) (ASCII),git grep -E -I -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},2.0318400859832764,490,LC_ALL=C
linux_no_literal,1,5,git grep (ignore) (ASCII),git grep -E -I -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},1.8587837219238281,490,LC_ALL=C
linux_no_literal,1,5,git grep (ignore) (ASCII),git grep -E -I -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},1.873384714126587,490,LC_ALL=C
linux_no_literal,1,5,git grep (ignore) (ASCII),git grep -E -I -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},1.8111364841461182,490,LC_ALL=C
linux_no_literal,1,5,git grep (ignore) (ASCII),git grep -E -I -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},1.8385357856750488,490,LC_ALL=C
linux_no_literal,1,5,rg (whitelist),rg -n --no-ignore -tall \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.28792643547058105,458,
linux_no_literal,1,5,rg (whitelist),rg -n --no-ignore -tall \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.28545212745666504,458,
linux_no_literal,1,5,rg (whitelist),rg -n --no-ignore -tall \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.28576135635375977,458,
linux_no_literal,1,5,rg (whitelist),rg -n --no-ignore -tall \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.29883813858032227,458,
linux_no_literal,1,5,rg (whitelist),rg -n --no-ignore -tall \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.28493285179138184,458,
linux_no_literal,1,5,rg (whitelist) (ASCII),rg -n --no-ignore -tall (?-u)\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.15974783897399902,458,
linux_no_literal,1,5,rg (whitelist) (ASCII),rg -n --no-ignore -tall (?-u)\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.15943312644958496,458,
linux_no_literal,1,5,rg (whitelist) (ASCII),rg -n --no-ignore -tall (?-u)\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.160233736038208,458,
linux_no_literal,1,5,rg (whitelist) (ASCII),rg -n --no-ignore -tall (?-u)\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.16201996803283691,458,
linux_no_literal,1,5,rg (whitelist) (ASCII),rg -n --no-ignore -tall (?-u)\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.16033530235290527,458,
linux_no_literal,1,5,ucg (whitelist) (ASCII),ucg --nosmart-case \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.4639148712158203,416,
linux_no_literal,1,5,ucg (whitelist) (ASCII),ucg --nosmart-case \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.46042823791503906,416,
linux_no_literal,1,5,ucg (whitelist) (ASCII),ucg --nosmart-case \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.45925426483154297,416,
linux_no_literal,1,5,ucg (whitelist) (ASCII),ucg --nosmart-case \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.477064847946167,416,
linux_no_literal,1,5,ucg (whitelist) (ASCII),ucg --nosmart-case \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.507554292678833,416,
linux_re_literal_suffix,1,5,rg (ignore),rg -n [A-Z]+_RESUME,0.08520364761352539,1652,
linux_re_literal_suffix,1,5,rg (ignore),rg -n [A-Z]+_RESUME,0.08203816413879395,1652,
linux_re_literal_suffix,1,5,rg (ignore),rg -n [A-Z]+_RESUME,0.08355021476745605,1652,
linux_re_literal_suffix,1,5,rg (ignore),rg -n [A-Z]+_RESUME,0.0865166187286377,1652,
linux_re_literal_suffix,1,5,rg (ignore),rg -n [A-Z]+_RESUME,0.08125448226928711,1652,
linux_re_literal_suffix,1,5,ag (ignore),ag -s [A-Z]+_RESUME,0.4846627712249756,1652,
linux_re_literal_suffix,1,5,ag (ignore),ag -s [A-Z]+_RESUME,0.48070311546325684,1652,
linux_re_literal_suffix,1,5,ag (ignore),ag -s [A-Z]+_RESUME,0.4813041687011719,1652,
linux_re_literal_suffix,1,5,ag (ignore),ag -s [A-Z]+_RESUME,0.4755582809448242,1652,
linux_re_literal_suffix,1,5,ag (ignore),ag -s [A-Z]+_RESUME,0.4926290512084961,1652,
linux_re_literal_suffix,1,5,pt (ignore),pt -e [A-Z]+_RESUME,14.124520540237427,1652,
linux_re_literal_suffix,1,5,pt (ignore),pt -e [A-Z]+_RESUME,14.151537656784058,1652,
linux_re_literal_suffix,1,5,pt (ignore),pt -e [A-Z]+_RESUME,14.157994270324707,1652,
linux_re_literal_suffix,1,5,pt (ignore),pt -e [A-Z]+_RESUME,14.102291822433472,1652,
linux_re_literal_suffix,1,5,pt (ignore),pt -e [A-Z]+_RESUME,14.103861093521118,1652,
linux_re_literal_suffix,1,5,sift (ignore),sift --binary-skip --exclude-files .* --exclude-files *.pdf -n --git [A-Z]+_RESUME,4.182392835617065,1652,
linux_re_literal_suffix,1,5,sift (ignore),sift --binary-skip --exclude-files .* --exclude-files *.pdf -n --git [A-Z]+_RESUME,4.190829277038574,1652,
linux_re_literal_suffix,1,5,sift (ignore),sift --binary-skip --exclude-files .* --exclude-files *.pdf -n --git [A-Z]+_RESUME,3.9770240783691406,1652,
linux_re_literal_suffix,1,5,sift (ignore),sift --binary-skip --exclude-files .* --exclude-files *.pdf -n --git [A-Z]+_RESUME,3.9978606700897217,1652,
linux_re_literal_suffix,1,5,sift (ignore),sift --binary-skip --exclude-files .* --exclude-files *.pdf -n --git [A-Z]+_RESUME,4.146454572677612,1652,
linux_re_literal_suffix,1,5,git grep (ignore),git grep -E -I -n [A-Z]+_RESUME,0.5080702304840088,1652,LC_ALL=C
linux_re_literal_suffix,1,5,git grep (ignore),git grep -E -I -n [A-Z]+_RESUME,0.5281260013580322,1652,LC_ALL=C
linux_re_literal_suffix,1,5,git grep (ignore),git grep -E -I -n [A-Z]+_RESUME,0.5350546836853027,1652,LC_ALL=C
linux_re_literal_suffix,1,5,git grep (ignore),git grep -E -I -n [A-Z]+_RESUME,0.5474245548248291,1652,LC_ALL=C
linux_re_literal_suffix,1,5,git grep (ignore),git grep -E -I -n [A-Z]+_RESUME,0.5256762504577637,1652,LC_ALL=C
linux_re_literal_suffix,1,5,rg (whitelist),rg -n --no-ignore -tall [A-Z]+_RESUME,0.07924222946166992,1630,
linux_re_literal_suffix,1,5,rg (whitelist),rg -n --no-ignore -tall [A-Z]+_RESUME,0.0767812728881836,1630,
linux_re_literal_suffix,1,5,rg (whitelist),rg -n --no-ignore -tall [A-Z]+_RESUME,0.07874488830566406,1630,
linux_re_literal_suffix,1,5,rg (whitelist),rg -n --no-ignore -tall [A-Z]+_RESUME,0.0804905891418457,1630,
linux_re_literal_suffix,1,5,rg (whitelist),rg -n --no-ignore -tall [A-Z]+_RESUME,0.07479119300842285,1630,
linux_re_literal_suffix,1,5,ucg (whitelist),ucg --nosmart-case [A-Z]+_RESUME,0.13643193244934082,1630,
linux_re_literal_suffix,1,5,ucg (whitelist),ucg --nosmart-case [A-Z]+_RESUME,0.13543128967285156,1630,
linux_re_literal_suffix,1,5,ucg (whitelist),ucg --nosmart-case [A-Z]+_RESUME,0.13312768936157227,1630,
linux_re_literal_suffix,1,5,ucg (whitelist),ucg --nosmart-case [A-Z]+_RESUME,0.13562273979187012,1630,
linux_re_literal_suffix,1,5,ucg (whitelist),ucg --nosmart-case [A-Z]+_RESUME,0.13236212730407715,1630,
linux_unicode_greek,1,5,rg,rg -n \p{Greek},0.17355775833129883,23,
linux_unicode_greek,1,5,rg,rg -n \p{Greek},0.1676032543182373,23,
linux_unicode_greek,1,5,rg,rg -n \p{Greek},0.1727275848388672,23,
linux_unicode_greek,1,5,rg,rg -n \p{Greek},0.17095375061035156,23,
linux_unicode_greek,1,5,rg,rg -n \p{Greek},0.17271947860717773,23,
linux_unicode_greek,1,5,pt,pt -e \p{Greek},14.14364218711853,23,
linux_unicode_greek,1,5,pt,pt -e \p{Greek},14.137334108352661,23,
linux_unicode_greek,1,5,pt,pt -e \p{Greek},14.083475351333618,23,
linux_unicode_greek,1,5,pt,pt -e \p{Greek},14.095231056213379,23,
linux_unicode_greek,1,5,pt,pt -e \p{Greek},14.151906490325928,23,
linux_unicode_greek,1,5,sift,sift --binary-skip --exclude-files .* --exclude-files *.pdf -n --git \p{Greek},2.8376963138580322,23,
linux_unicode_greek,1,5,sift,sift --binary-skip --exclude-files .* --exclude-files *.pdf -n --git \p{Greek},2.8271427154541016,23,
linux_unicode_greek,1,5,sift,sift --binary-skip --exclude-files .* --exclude-files *.pdf -n --git \p{Greek},2.8310961723327637,23,
linux_unicode_greek,1,5,sift,sift --binary-skip --exclude-files .* --exclude-files *.pdf -n --git \p{Greek},2.826141595840454,23,
linux_unicode_greek,1,5,sift,sift --binary-skip --exclude-files .* --exclude-files *.pdf -n --git \p{Greek},2.805818796157837,23,
linux_unicode_greek_casei,1,5,rg,rg -n -i \p{Greek},0.16843819618225098,103,
linux_unicode_greek_casei,1,5,rg,rg -n -i \p{Greek},0.1704998016357422,103,
linux_unicode_greek_casei,1,5,rg,rg -n -i \p{Greek},0.17055058479309082,103,
linux_unicode_greek_casei,1,5,rg,rg -n -i \p{Greek},0.17064881324768066,103,
linux_unicode_greek_casei,1,5,rg,rg -n -i \p{Greek},0.1699228286743164,103,
linux_unicode_greek_casei,1,5,pt,pt -i -e \p{Greek},14.164355993270874,23,
linux_unicode_greek_casei,1,5,pt,pt -i -e \p{Greek},14.099931478500366,23,
linux_unicode_greek_casei,1,5,pt,pt -i -e \p{Greek},14.155095338821411,23,
linux_unicode_greek_casei,1,5,pt,pt -i -e \p{Greek},14.109308004379272,23,
linux_unicode_greek_casei,1,5,pt,pt -i -e \p{Greek},14.072362422943115,23,
linux_unicode_greek_casei,1,5,sift,sift --binary-skip --exclude-files .* --exclude-files *.pdf -n -i --git \p{Greek},0.003945589065551758,,
linux_unicode_greek_casei,1,5,sift,sift --binary-skip --exclude-files .* --exclude-files *.pdf -n -i --git \p{Greek},0.004189729690551758,,
linux_unicode_greek_casei,1,5,sift,sift --binary-skip --exclude-files .* --exclude-files *.pdf -n -i --git \p{Greek},0.0034589767456054688,,
linux_unicode_greek_casei,1,5,sift,sift --binary-skip --exclude-files .* --exclude-files *.pdf -n -i --git \p{Greek},0.003614187240600586,,
linux_unicode_greek_casei,1,5,sift,sift --binary-skip --exclude-files .* --exclude-files *.pdf -n -i --git \p{Greek},0.003975629806518555,,
linux_unicode_word,1,5,rg (ignore),rg -n \wAh,0.09798526763916016,186,
linux_unicode_word,1,5,rg (ignore),rg -n \wAh,0.09575009346008301,186,
linux_unicode_word,1,5,rg (ignore),rg -n \wAh,0.10181760787963867,186,
linux_unicode_word,1,5,rg (ignore),rg -n \wAh,0.09650158882141113,186,
linux_unicode_word,1,5,rg (ignore),rg -n \wAh,0.09717488288879395,186,
linux_unicode_word,1,5,rg (ignore) (ASCII),rg -n (?-u)\wAh,0.09417867660522461,174,
linux_unicode_word,1,5,rg (ignore) (ASCII),rg -n (?-u)\wAh,0.09903812408447266,174,
linux_unicode_word,1,5,rg (ignore) (ASCII),rg -n (?-u)\wAh,0.09407877922058105,174,
linux_unicode_word,1,5,rg (ignore) (ASCII),rg -n (?-u)\wAh,0.09681963920593262,174,
linux_unicode_word,1,5,rg (ignore) (ASCII),rg -n (?-u)\wAh,0.09762454032897949,174,
linux_unicode_word,1,5,ag (ignore) (ASCII),ag -s \wAh,0.5779609680175781,174,
linux_unicode_word,1,5,ag (ignore) (ASCII),ag -s \wAh,0.635645866394043,174,
linux_unicode_word,1,5,ag (ignore) (ASCII),ag -s \wAh,0.6109263896942139,174,
linux_unicode_word,1,5,ag (ignore) (ASCII),ag -s \wAh,0.6260912418365479,174,
linux_unicode_word,1,5,ag (ignore) (ASCII),ag -s \wAh,0.6823546886444092,174,
linux_unicode_word,1,5,pt (ignore) (ASCII),pt -e \wAh,14.178487062454224,174,
linux_unicode_word,1,5,pt (ignore) (ASCII),pt -e \wAh,14.190000057220459,174,
linux_unicode_word,1,5,pt (ignore) (ASCII),pt -e \wAh,14.16363000869751,174,
linux_unicode_word,1,5,pt (ignore) (ASCII),pt -e \wAh,14.160430431365967,174,
linux_unicode_word,1,5,pt (ignore) (ASCII),pt -e \wAh,14.2189621925354,174,
linux_unicode_word,1,5,sift (ignore) (ASCII),sift --binary-skip --exclude-files .* --exclude-files *.pdf -n --git \wAh,4.17629337310791,174,
linux_unicode_word,1,5,sift (ignore) (ASCII),sift --binary-skip --exclude-files .* --exclude-files *.pdf -n --git \wAh,4.051238059997559,174,
linux_unicode_word,1,5,sift (ignore) (ASCII),sift --binary-skip --exclude-files .* --exclude-files *.pdf -n --git \wAh,4.323853015899658,174,
linux_unicode_word,1,5,sift (ignore) (ASCII),sift --binary-skip --exclude-files .* --exclude-files *.pdf -n --git \wAh,4.085661172866821,174,
linux_unicode_word,1,5,sift (ignore) (ASCII),sift --binary-skip --exclude-files .* --exclude-files *.pdf -n --git \wAh,4.036486625671387,174,
linux_unicode_word,1,5,git grep (ignore),git grep -E -I -n \wAh,4.620476961135864,186,LC_ALL=en_US.UTF-8
linux_unicode_word,1,5,git grep (ignore),git grep -E -I -n \wAh,4.536192417144775,186,LC_ALL=en_US.UTF-8
linux_unicode_word,1,5,git grep (ignore),git grep -E -I -n \wAh,4.510494232177734,186,LC_ALL=en_US.UTF-8
linux_unicode_word,1,5,git grep (ignore),git grep -E -I -n \wAh,6.001620769500732,186,LC_ALL=en_US.UTF-8
linux_unicode_word,1,5,git grep (ignore),git grep -E -I -n \wAh,4.602652311325073,186,LC_ALL=en_US.UTF-8
linux_unicode_word,1,5,git grep (ignore) (ASCII),git grep -E -I -n \wAh,1.3785994052886963,174,LC_ALL=C
linux_unicode_word,1,5,git grep (ignore) (ASCII),git grep -E -I -n \wAh,1.4163663387298584,174,LC_ALL=C
linux_unicode_word,1,5,git grep (ignore) (ASCII),git grep -E -I -n \wAh,1.402677297592163,174,LC_ALL=C
linux_unicode_word,1,5,git grep (ignore) (ASCII),git grep -E -I -n \wAh,1.3327512741088867,174,LC_ALL=C
linux_unicode_word,1,5,git grep (ignore) (ASCII),git grep -E -I -n \wAh,1.3501760959625244,174,LC_ALL=C
linux_unicode_word,1,5,rg (whitelist),rg -n --no-ignore -tall \wAh,0.07958698272705078,180,
linux_unicode_word,1,5,rg (whitelist),rg -n --no-ignore -tall \wAh,0.0798649787902832,180,
linux_unicode_word,1,5,rg (whitelist),rg -n --no-ignore -tall \wAh,0.08086204528808594,180,
linux_unicode_word,1,5,rg (whitelist),rg -n --no-ignore -tall \wAh,0.0814356803894043,180,
linux_unicode_word,1,5,rg (whitelist),rg -n --no-ignore -tall \wAh,0.08273720741271973,180,
linux_unicode_word,1,5,rg (whitelist) (ASCII),rg -n --no-ignore -tall (?-u)\wAh,0.08280825614929199,168,
linux_unicode_word,1,5,rg (whitelist) (ASCII),rg -n --no-ignore -tall (?-u)\wAh,0.08074021339416504,168,
linux_unicode_word,1,5,rg (whitelist) (ASCII),rg -n --no-ignore -tall (?-u)\wAh,0.0821676254272461,168,
linux_unicode_word,1,5,rg (whitelist) (ASCII),rg -n --no-ignore -tall (?-u)\wAh,0.07926368713378906,168,
linux_unicode_word,1,5,rg (whitelist) (ASCII),rg -n --no-ignore -tall (?-u)\wAh,0.08405280113220215,168,
linux_unicode_word,1,5,ucg (ASCII),ucg --nosmart-case \wAh,0.1545090675354004,168,
linux_unicode_word,1,5,ucg (ASCII),ucg --nosmart-case \wAh,0.1517190933227539,168,
linux_unicode_word,1,5,ucg (ASCII),ucg --nosmart-case \wAh,0.15704965591430664,168,
linux_unicode_word,1,5,ucg (ASCII),ucg --nosmart-case \wAh,0.15523767471313477,168,
linux_unicode_word,1,5,ucg (ASCII),ucg --nosmart-case \wAh,0.1582942008972168,168,
linux_word,1,5,rg (ignore),rg -n -w PM_RESUME,0.09102368354797363,6,
linux_word,1,5,rg (ignore),rg -n -w PM_RESUME,0.08986210823059082,6,
linux_word,1,5,rg (ignore),rg -n -w PM_RESUME,0.08989477157592773,6,
linux_word,1,5,rg (ignore),rg -n -w PM_RESUME,0.0895695686340332,6,
linux_word,1,5,rg (ignore),rg -n -w PM_RESUME,0.09547114372253418,6,
linux_word,1,5,ag (ignore),ag -s -w PM_RESUME,0.4948008060455322,6,
linux_word,1,5,ag (ignore),ag -s -w PM_RESUME,0.45710110664367676,6,
linux_word,1,5,ag (ignore),ag -s -w PM_RESUME,0.44803452491760254,6,
linux_word,1,5,ag (ignore),ag -s -w PM_RESUME,0.44779396057128906,6,
linux_word,1,5,ag (ignore),ag -s -w PM_RESUME,0.4563112258911133,6,
linux_word,1,5,pt (ignore),pt -w PM_RESUME,14.233235597610474,6,
linux_word,1,5,pt (ignore),pt -w PM_RESUME,14.277648687362671,6,
linux_word,1,5,pt (ignore),pt -w PM_RESUME,14.218127727508545,6,
linux_word,1,5,pt (ignore),pt -w PM_RESUME,14.171622037887573,6,
linux_word,1,5,pt (ignore),pt -w PM_RESUME,14.214240312576294,6,
linux_word,1,5,sift (ignore),sift --binary-skip --exclude-files .* --exclude-files *.pdf -n -w --git PM_RESUME,3.1536731719970703,6,
linux_word,1,5,sift (ignore),sift --binary-skip --exclude-files .* --exclude-files *.pdf -n -w --git PM_RESUME,3.2415099143981934,6,
linux_word,1,5,sift (ignore),sift --binary-skip --exclude-files .* --exclude-files *.pdf -n -w --git PM_RESUME,3.2526626586914062,6,
linux_word,1,5,sift (ignore),sift --binary-skip --exclude-files .* --exclude-files *.pdf -n -w --git PM_RESUME,3.2590816020965576,6,
linux_word,1,5,sift (ignore),sift --binary-skip --exclude-files .* --exclude-files *.pdf -n -w --git PM_RESUME,3.222473621368408,6,
linux_word,1,5,git grep (ignore),git grep -E -I -n -w PM_RESUME,0.16982412338256836,6,LC_ALL=C
linux_word,1,5,git grep (ignore),git grep -E -I -n -w PM_RESUME,0.16739583015441895,6,LC_ALL=C
linux_word,1,5,git grep (ignore),git grep -E -I -n -w PM_RESUME,0.16866540908813477,6,LC_ALL=C
linux_word,1,5,git grep (ignore),git grep -E -I -n -w PM_RESUME,0.18207120895385742,6,LC_ALL=C
linux_word,1,5,git grep (ignore),git grep -E -I -n -w PM_RESUME,0.17716264724731445,6,LC_ALL=C
linux_word,1,5,rg (whitelist),rg -n -w --no-ignore -tall PM_RESUME,0.07490420341491699,6,
linux_word,1,5,rg (whitelist),rg -n -w --no-ignore -tall PM_RESUME,0.07714152336120605,6,
linux_word,1,5,rg (whitelist),rg -n -w --no-ignore -tall PM_RESUME,0.07552146911621094,6,
linux_word,1,5,rg (whitelist),rg -n -w --no-ignore -tall PM_RESUME,0.07651710510253906,6,
linux_word,1,5,rg (whitelist),rg -n -w --no-ignore -tall PM_RESUME,0.0757131576538086,6,
linux_word,1,5,ucg (whitelist),ucg --nosmart-case -w PM_RESUME,0.1530015468597412,6,
linux_word,1,5,ucg (whitelist),ucg --nosmart-case -w PM_RESUME,0.15152239799499512,6,
linux_word,1,5,ucg (whitelist),ucg --nosmart-case -w PM_RESUME,0.1571195125579834,6,
linux_word,1,5,ucg (whitelist),ucg --nosmart-case -w PM_RESUME,0.15993595123291016,6,
linux_word,1,5,ucg (whitelist),ucg --nosmart-case -w PM_RESUME,0.15633797645568848,6,
subtitles_en_alternate,1,5,rg (lines),rg -n Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.33371877670288086,848,
subtitles_en_alternate,1,5,rg (lines),rg -n Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.3207988739013672,848,
subtitles_en_alternate,1,5,rg (lines),rg -n Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.3301675319671631,848,
subtitles_en_alternate,1,5,rg (lines),rg -n Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.29731154441833496,848,
subtitles_en_alternate,1,5,rg (lines),rg -n Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.2711911201477051,848,
subtitles_en_alternate,1,5,ag (lines),ag -s Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,2.186570405960083,848,
subtitles_en_alternate,1,5,ag (lines),ag -s Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,2.1659939289093018,848,
subtitles_en_alternate,1,5,ag (lines),ag -s Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,2.187847137451172,848,
subtitles_en_alternate,1,5,ag (lines),ag -s Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,2.3522064685821533,848,
subtitles_en_alternate,1,5,ag (lines),ag -s Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,2.316105842590332,848,
subtitles_en_alternate,1,5,ucg (lines),ucg --nosmart-case Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.1400718688964844,848,
subtitles_en_alternate,1,5,ucg (lines),ucg --nosmart-case Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.1492774486541748,848,
subtitles_en_alternate,1,5,ucg (lines),ucg --nosmart-case Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.1337254047393799,848,
subtitles_en_alternate,1,5,ucg (lines),ucg --nosmart-case Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.1037378311157227,848,
subtitles_en_alternate,1,5,ucg (lines),ucg --nosmart-case Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.1312851905822754,848,
subtitles_en_alternate,1,5,grep (lines),grep -E -an Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.8294000625610352,848,LC_ALL=C
subtitles_en_alternate,1,5,grep (lines),grep -E -an Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.808884620666504,848,LC_ALL=C
subtitles_en_alternate,1,5,grep (lines),grep -E -an Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.8134734630584717,848,LC_ALL=C
subtitles_en_alternate,1,5,grep (lines),grep -E -an Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.8405649662017822,848,LC_ALL=C
subtitles_en_alternate,1,5,grep (lines),grep -E -an Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.8500289916992188,848,LC_ALL=C
subtitles_en_alternate,1,5,rg,rg Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.21175312995910645,848,
subtitles_en_alternate,1,5,rg,rg Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.2118232250213623,848,
subtitles_en_alternate,1,5,rg,rg Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.21287035942077637,848,
subtitles_en_alternate,1,5,rg,rg Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.21167230606079102,848,
subtitles_en_alternate,1,5,rg,rg Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.28102636337280273,848,
subtitles_en_alternate,1,5,grep,grep -E -a Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.5029187202453613,848,LC_ALL=C
subtitles_en_alternate,1,5,grep,grep -E -a Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.49977445602417,848,LC_ALL=C
subtitles_en_alternate,1,5,grep,grep -E -a Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.508340835571289,848,LC_ALL=C
subtitles_en_alternate,1,5,grep,grep -E -a Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.5002548694610596,848,LC_ALL=C
subtitles_en_alternate,1,5,grep,grep -E -a Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.629526138305664,848,LC_ALL=C
subtitles_en_alternate_casei,1,5,ag (ASCII),ag -s -i Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,3.730497360229492,862,
subtitles_en_alternate_casei,1,5,ag (ASCII),ag -s -i Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,3.781018018722534,862,
subtitles_en_alternate_casei,1,5,ag (ASCII),ag -s -i Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,3.7858059406280518,862,
subtitles_en_alternate_casei,1,5,ag (ASCII),ag -s -i Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,3.7127914428710938,862,
subtitles_en_alternate_casei,1,5,ag (ASCII),ag -s -i Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,3.717308759689331,862,
subtitles_en_alternate_casei,1,5,ucg (ASCII),ucg -i Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,2.428208351135254,862,
subtitles_en_alternate_casei,1,5,ucg (ASCII),ucg -i Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,2.389420509338379,862,
subtitles_en_alternate_casei,1,5,ucg (ASCII),ucg -i Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,2.403301954269409,862,
subtitles_en_alternate_casei,1,5,ucg (ASCII),ucg -i Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,2.4691550731658936,862,
subtitles_en_alternate_casei,1,5,ucg (ASCII),ucg -i Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,2.4245004653930664,862,
subtitles_en_alternate_casei,1,5,grep (ASCII),grep -E -ani Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,2.978189706802368,862,LC_ALL=C
subtitles_en_alternate_casei,1,5,grep (ASCII),grep -E -ani Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,2.974303722381592,862,LC_ALL=C
subtitles_en_alternate_casei,1,5,grep (ASCII),grep -E -ani Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,2.982886552810669,862,LC_ALL=C
subtitles_en_alternate_casei,1,5,grep (ASCII),grep -E -ani Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,2.90018630027771,862,LC_ALL=C
subtitles_en_alternate_casei,1,5,grep (ASCII),grep -E -ani Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,3.0078439712524414,862,LC_ALL=C
subtitles_en_alternate_casei,1,5,rg,rg -n -i Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.9129142761230469,862,
subtitles_en_alternate_casei,1,5,rg,rg -n -i Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.9066660404205322,862,
subtitles_en_alternate_casei,1,5,rg,rg -n -i Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.946380615234375,862,
subtitles_en_alternate_casei,1,5,rg,rg -n -i Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.9672930240631104,862,
subtitles_en_alternate_casei,1,5,rg,rg -n -i Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,2.028451919555664,862,
subtitles_en_alternate_casei,1,5,grep,grep -E -ani Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,2.9427030086517334,862,LC_ALL=en_US.UTF-8
subtitles_en_alternate_casei,1,5,grep,grep -E -ani Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,2.938739061355591,862,LC_ALL=en_US.UTF-8
subtitles_en_alternate_casei,1,5,grep,grep -E -ani Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,2.921248435974121,862,LC_ALL=en_US.UTF-8
subtitles_en_alternate_casei,1,5,grep,grep -E -ani Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,2.9194068908691406,862,LC_ALL=en_US.UTF-8
subtitles_en_alternate_casei,1,5,grep,grep -E -ani Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,2.917184829711914,862,LC_ALL=en_US.UTF-8
subtitles_en_literal,1,5,rg,rg Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.12293672561645508,629,
subtitles_en_literal,1,5,rg,rg Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.1259000301361084,629,
subtitles_en_literal,1,5,rg,rg Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.12285709381103516,629,
subtitles_en_literal,1,5,rg,rg Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.12280964851379395,629,
subtitles_en_literal,1,5,rg,rg Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.1547396183013916,629,
subtitles_en_literal,1,5,rg (no mmap),rg --no-mmap Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.22011375427246094,629,
subtitles_en_literal,1,5,rg (no mmap),rg --no-mmap Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.23095202445983887,629,
subtitles_en_literal,1,5,rg (no mmap),rg --no-mmap Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.2577846050262451,629,
subtitles_en_literal,1,5,rg (no mmap),rg --no-mmap Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.2563819885253906,629,
subtitles_en_literal,1,5,rg (no mmap),rg --no-mmap Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.24869346618652344,629,
subtitles_en_literal,1,5,pt,pt -N Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.415337324142456,629,
subtitles_en_literal,1,5,pt,pt -N Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.4208543300628662,629,
subtitles_en_literal,1,5,pt,pt -N Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.416351079940796,629,
subtitles_en_literal,1,5,pt,pt -N Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.4270708560943604,629,
subtitles_en_literal,1,5,pt,pt -N Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.4243996143341064,629,
subtitles_en_literal,1,5,sift,sift Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.2245020866394043,629,
subtitles_en_literal,1,5,sift,sift Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.2382345199584961,629,
subtitles_en_literal,1,5,sift,sift Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.23533034324645996,629,
subtitles_en_literal,1,5,sift,sift Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.2577829360961914,629,
subtitles_en_literal,1,5,sift,sift Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.2599349021911621,629,
subtitles_en_literal,1,5,grep,grep -a Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.4733700752258301,629,LC_ALL=C
subtitles_en_literal,1,5,grep,grep -a Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.4598572254180908,629,LC_ALL=C
subtitles_en_literal,1,5,grep,grep -a Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.5303301811218262,629,LC_ALL=C
subtitles_en_literal,1,5,grep,grep -a Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.4775106906890869,629,LC_ALL=C
subtitles_en_literal,1,5,grep,grep -a Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.4881136417388916,629,LC_ALL=C
subtitles_en_literal,1,5,rg (lines),rg -n Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.20051789283752441,629,
subtitles_en_literal,1,5,rg (lines),rg -n Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.17326998710632324,629,
subtitles_en_literal,1,5,rg (lines),rg -n Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.20733428001403809,629,
subtitles_en_literal,1,5,rg (lines),rg -n Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.189713716506958,629,
subtitles_en_literal,1,5,rg (lines),rg -n Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.17817258834838867,629,
subtitles_en_literal,1,5,ag (lines),ag -s Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.5327835083007812,629,
subtitles_en_literal,1,5,ag (lines),ag -s Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.5411181449890137,629,
subtitles_en_literal,1,5,ag (lines),ag -s Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.600783109664917,629,
subtitles_en_literal,1,5,ag (lines),ag -s Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.5838911533355713,629,
subtitles_en_literal,1,5,ag (lines),ag -s Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.6051928997039795,629,
subtitles_en_literal,1,5,ucg (lines),ucg --nosmart-case Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.4090385437011719,629,
subtitles_en_literal,1,5,ucg (lines),ucg --nosmart-case Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.3816399574279785,629,
subtitles_en_literal,1,5,ucg (lines),ucg --nosmart-case Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.38033008575439453,629,
subtitles_en_literal,1,5,ucg (lines),ucg --nosmart-case Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.3731727600097656,629,
subtitles_en_literal,1,5,ucg (lines),ucg --nosmart-case Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.38796329498291016,629,
subtitles_en_literal,1,5,pt (lines),pt Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.4102630615234375,629,
subtitles_en_literal,1,5,pt (lines),pt Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.4137451648712158,629,
subtitles_en_literal,1,5,pt (lines),pt Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.4649333953857422,629,
subtitles_en_literal,1,5,pt (lines),pt Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.430387258529663,629,
subtitles_en_literal,1,5,pt (lines),pt Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.541991949081421,629,
subtitles_en_literal,1,5,sift (lines),sift -n Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.6231405735015869,629,
subtitles_en_literal,1,5,sift (lines),sift -n Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.5986526012420654,629,
subtitles_en_literal,1,5,sift (lines),sift -n Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.5821917057037354,629,
subtitles_en_literal,1,5,sift (lines),sift -n Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.6045489311218262,629,
subtitles_en_literal,1,5,sift (lines),sift -n Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.5986905097961426,629,
subtitles_en_literal,1,5,grep (lines),grep -an Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.8278565406799316,629,LC_ALL=C
subtitles_en_literal,1,5,grep (lines),grep -an Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.777052640914917,629,LC_ALL=C
subtitles_en_literal,1,5,grep (lines),grep -an Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.7619414329528809,629,LC_ALL=C
subtitles_en_literal,1,5,grep (lines),grep -an Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.8248744010925293,629,LC_ALL=C
subtitles_en_literal,1,5,grep (lines),grep -an Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.824932336807251,629,LC_ALL=C
subtitles_en_literal_casei,1,5,rg,rg -i Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.2718961238861084,642,
subtitles_en_literal_casei,1,5,rg,rg -i Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.27082157135009766,642,
subtitles_en_literal_casei,1,5,rg,rg -i Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.27086758613586426,642,
subtitles_en_literal_casei,1,5,rg,rg -i Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.274705171585083,642,
subtitles_en_literal_casei,1,5,rg,rg -i Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.3337059020996094,642,
subtitles_en_literal_casei,1,5,grep,grep -ai Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.9112112522125244,642,LC_ALL=en_US.UTF-8
subtitles_en_literal_casei,1,5,grep,grep -ai Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.907888650894165,642,LC_ALL=en_US.UTF-8
subtitles_en_literal_casei,1,5,grep,grep -ai Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.912668228149414,642,LC_ALL=en_US.UTF-8
subtitles_en_literal_casei,1,5,grep,grep -ai Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.9082865715026855,642,LC_ALL=en_US.UTF-8
subtitles_en_literal_casei,1,5,grep,grep -ai Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.9177796840667725,642,LC_ALL=en_US.UTF-8
subtitles_en_literal_casei,1,5,grep (ASCII),grep -E -ai Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.6020669937133789,642,LC_ALL=C
subtitles_en_literal_casei,1,5,grep (ASCII),grep -E -ai Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.568228006362915,642,LC_ALL=C
subtitles_en_literal_casei,1,5,grep (ASCII),grep -E -ai Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.5648214817047119,642,LC_ALL=C
subtitles_en_literal_casei,1,5,grep (ASCII),grep -E -ai Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.5568234920501709,642,LC_ALL=C
subtitles_en_literal_casei,1,5,grep (ASCII),grep -E -ai Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.5588953495025635,642,LC_ALL=C
subtitles_en_literal_casei,1,5,rg (lines),rg -n -i Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.3486766815185547,642,
subtitles_en_literal_casei,1,5,rg (lines),rg -n -i Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.34010815620422363,642,
subtitles_en_literal_casei,1,5,rg (lines),rg -n -i Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.33849263191223145,642,
subtitles_en_literal_casei,1,5,rg (lines),rg -n -i Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.3917088508605957,642,
subtitles_en_literal_casei,1,5,rg (lines),rg -n -i Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.39266490936279297,642,
subtitles_en_literal_casei,1,5,ag (lines) (ASCII),ag -i Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.5564041137695312,642,
subtitles_en_literal_casei,1,5,ag (lines) (ASCII),ag -i Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.5533506870269775,642,
subtitles_en_literal_casei,1,5,ag (lines) (ASCII),ag -i Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.6205368041992188,642,
subtitles_en_literal_casei,1,5,ag (lines) (ASCII),ag -i Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.5530028343200684,642,
subtitles_en_literal_casei,1,5,ag (lines) (ASCII),ag -i Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.6189889907836914,642,
subtitles_en_literal_casei,1,5,ucg (lines) (ASCII),ucg -i Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.3834850788116455,642,
subtitles_en_literal_casei,1,5,ucg (lines) (ASCII),ucg -i Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.41916346549987793,642,
subtitles_en_literal_casei,1,5,ucg (lines) (ASCII),ucg -i Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.3895289897918701,642,
subtitles_en_literal_casei,1,5,ucg (lines) (ASCII),ucg -i Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.4278140068054199,642,
subtitles_en_literal_casei,1,5,ucg (lines) (ASCII),ucg -i Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.4013493061065674,642,
subtitles_en_literal_word,1,5,rg (ASCII),rg -n (?-u:\b)Sherlock Holmes(?-u:\b) /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.17953085899353027,629,
subtitles_en_literal_word,1,5,rg (ASCII),rg -n (?-u:\b)Sherlock Holmes(?-u:\b) /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.17679834365844727,629,
subtitles_en_literal_word,1,5,rg (ASCII),rg -n (?-u:\b)Sherlock Holmes(?-u:\b) /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.17448186874389648,629,
subtitles_en_literal_word,1,5,rg (ASCII),rg -n (?-u:\b)Sherlock Holmes(?-u:\b) /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.21117281913757324,629,
subtitles_en_literal_word,1,5,rg (ASCII),rg -n (?-u:\b)Sherlock Holmes(?-u:\b) /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.1848156452178955,629,
subtitles_en_literal_word,1,5,ag (ASCII),ag -sw Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.5236153602600098,629,
subtitles_en_literal_word,1,5,ag (ASCII),ag -sw Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.52512526512146,629,
subtitles_en_literal_word,1,5,ag (ASCII),ag -sw Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.5218794345855713,629,
subtitles_en_literal_word,1,5,ag (ASCII),ag -sw Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.5384306907653809,629,
subtitles_en_literal_word,1,5,ag (ASCII),ag -sw Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.5150353908538818,629,
subtitles_en_literal_word,1,5,ucg (ASCII),ucg --nosmart-case Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.3757903575897217,629,
subtitles_en_literal_word,1,5,ucg (ASCII),ucg --nosmart-case Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.3744041919708252,629,
subtitles_en_literal_word,1,5,ucg (ASCII),ucg --nosmart-case Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.37261366844177246,629,
subtitles_en_literal_word,1,5,ucg (ASCII),ucg --nosmart-case Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.40795230865478516,629,
subtitles_en_literal_word,1,5,ucg (ASCII),ucg --nosmart-case Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.3868849277496338,629,
subtitles_en_literal_word,1,5,grep (ASCII),grep -anw Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.8265349864959717,629,LC_ALL=C
subtitles_en_literal_word,1,5,grep (ASCII),grep -anw Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.8123743534088135,629,LC_ALL=C
subtitles_en_literal_word,1,5,grep (ASCII),grep -anw Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.7669925689697266,629,LC_ALL=C
subtitles_en_literal_word,1,5,grep (ASCII),grep -anw Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.766636848449707,629,LC_ALL=C
subtitles_en_literal_word,1,5,grep (ASCII),grep -anw Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.7665839195251465,629,LC_ALL=C
subtitles_en_literal_word,1,5,rg,rg -nw Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.1879115104675293,629,
subtitles_en_literal_word,1,5,rg,rg -nw Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.18082356452941895,629,
subtitles_en_literal_word,1,5,rg,rg -nw Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.18497347831726074,629,
subtitles_en_literal_word,1,5,rg,rg -nw Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.1769394874572754,629,
subtitles_en_literal_word,1,5,rg,rg -nw Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.1917715072631836,629,
subtitles_en_literal_word,1,5,grep,grep -anw Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.8192996978759766,629,LC_ALL=en_US.UTF-8
subtitles_en_literal_word,1,5,grep,grep -anw Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.8193323612213135,629,LC_ALL=en_US.UTF-8
subtitles_en_literal_word,1,5,grep,grep -anw Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.7837738990783691,629,LC_ALL=en_US.UTF-8
subtitles_en_literal_word,1,5,grep,grep -anw Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.7639024257659912,629,LC_ALL=en_US.UTF-8
subtitles_en_literal_word,1,5,grep,grep -anw Sherlock Holmes /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.7634689807891846,629,LC_ALL=en_US.UTF-8
subtitles_en_no_literal,1,5,rg,rg -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.7922985553741455,13,
|
||||
subtitles_en_no_literal,1,5,rg,rg -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.7885758876800537,13,
|
||||
subtitles_en_no_literal,1,5,rg,rg -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.802325963973999,13,
|
||||
subtitles_en_no_literal,1,5,rg,rg -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.792595386505127,13,
|
||||
subtitles_en_no_literal,1,5,rg,rg -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.7909605503082275,13,
|
||||
subtitles_en_no_literal,1,5,rg (ASCII),rg -n (?-u)\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.5903098583221436,13,
|
||||
subtitles_en_no_literal,1,5,rg (ASCII),rg -n (?-u)\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.5982813835144043,13,
|
||||
subtitles_en_no_literal,1,5,rg (ASCII),rg -n (?-u)\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.5926671028137207,13,
|
||||
subtitles_en_no_literal,1,5,rg (ASCII),rg -n (?-u)\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.5976767539978027,13,
|
||||
subtitles_en_no_literal,1,5,rg (ASCII),rg -n (?-u)\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.593153953552246,13,
|
||||
subtitles_en_no_literal,1,5,ag (ASCII),ag -s \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,6.614634275436401,48,
|
||||
subtitles_en_no_literal,1,5,ag (ASCII),ag -s \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,6.574857473373413,48,
|
||||
subtitles_en_no_literal,1,5,ag (ASCII),ag -s \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,6.54079270362854,48,
|
||||
subtitles_en_no_literal,1,5,ag (ASCII),ag -s \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,6.600660800933838,48,
|
||||
subtitles_en_no_literal,1,5,ag (ASCII),ag -s \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,6.531627178192139,48,
|
||||
subtitles_en_no_literal,1,5,ucg (ASCII),ucg --nosmart-case \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,5.361133337020874,13,
|
||||
subtitles_en_no_literal,1,5,ucg (ASCII),ucg --nosmart-case \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,5.456786870956421,13,
|
||||
subtitles_en_no_literal,1,5,ucg (ASCII),ucg --nosmart-case \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,5.403071403503418,13,
|
||||
subtitles_en_no_literal,1,5,ucg (ASCII),ucg --nosmart-case \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,5.398236274719238,13,
|
||||
subtitles_en_no_literal,1,5,ucg (ASCII),ucg --nosmart-case \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,5.348573923110962,13,
|
||||
subtitles_en_no_literal,1,5,grep (ASCII),grep -E -an \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,3.5057969093322754,13,LC_ALL=C
|
||||
subtitles_en_no_literal,1,5,grep (ASCII),grep -E -an \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,3.4157862663269043,13,LC_ALL=C
|
||||
subtitles_en_no_literal,1,5,grep (ASCII),grep -E -an \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,3.471182346343994,13,LC_ALL=C
|
||||
subtitles_en_no_literal,1,5,grep (ASCII),grep -E -an \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,3.4590909481048584,13,LC_ALL=C
|
||||
subtitles_en_no_literal,1,5,grep (ASCII),grep -E -an \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,3.3759689331054688,13,LC_ALL=C
|
||||
subtitles_en_surrounding_words,1,5,rg,rg -n \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.18518710136413574,317,
|
||||
subtitles_en_surrounding_words,1,5,rg,rg -n \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.18791556358337402,317,
|
||||
subtitles_en_surrounding_words,1,5,rg,rg -n \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.18598675727844238,317,
|
||||
subtitles_en_surrounding_words,1,5,rg,rg -n \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.18552684783935547,317,
|
||||
subtitles_en_surrounding_words,1,5,rg,rg -n \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.19262075424194336,317,
|
||||
subtitles_en_surrounding_words,1,5,grep,grep -E -an \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.1321008205413818,317,LC_ALL=en_US.UTF-8
|
||||
subtitles_en_surrounding_words,1,5,grep,grep -E -an \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.0709969997406006,317,LC_ALL=en_US.UTF-8
|
||||
subtitles_en_surrounding_words,1,5,grep,grep -E -an \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.1117346286773682,317,LC_ALL=en_US.UTF-8
|
||||
subtitles_en_surrounding_words,1,5,grep,grep -E -an \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.0880234241485596,317,LC_ALL=en_US.UTF-8
|
||||
subtitles_en_surrounding_words,1,5,grep,grep -E -an \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.0745558738708496,317,LC_ALL=en_US.UTF-8
|
||||
subtitles_en_surrounding_words,1,5,rg (ASCII),rg -n (?-u)\w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.1827528476715088,317,
|
||||
subtitles_en_surrounding_words,1,5,rg (ASCII),rg -n (?-u)\w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.18874144554138184,317,
|
||||
subtitles_en_surrounding_words,1,5,rg (ASCII),rg -n (?-u)\w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.17983436584472656,317,
|
||||
subtitles_en_surrounding_words,1,5,rg (ASCII),rg -n (?-u)\w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.18831133842468262,317,
|
||||
subtitles_en_surrounding_words,1,5,rg (ASCII),rg -n (?-u)\w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,0.17810606956481934,317,
|
||||
subtitles_en_surrounding_words,1,5,ag (ASCII),ag -s \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,4.5957207679748535,323,
|
||||
subtitles_en_surrounding_words,1,5,ag (ASCII),ag -s \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,4.627211570739746,323,
|
||||
subtitles_en_surrounding_words,1,5,ag (ASCII),ag -s \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,4.554431200027466,323,
|
||||
subtitles_en_surrounding_words,1,5,ag (ASCII),ag -s \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,4.492656469345093,323,
|
||||
subtitles_en_surrounding_words,1,5,ag (ASCII),ag -s \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,4.443558216094971,323,
|
||||
subtitles_en_surrounding_words,1,5,ucg (ASCII),ucg --nosmart-case \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,3.522758722305298,317,
|
||||
subtitles_en_surrounding_words,1,5,ucg (ASCII),ucg --nosmart-case \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,3.502918004989624,317,
|
||||
subtitles_en_surrounding_words,1,5,ucg (ASCII),ucg --nosmart-case \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,3.6503307819366455,317,
|
||||
subtitles_en_surrounding_words,1,5,ucg (ASCII),ucg --nosmart-case \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,3.58940052986145,317,
|
||||
subtitles_en_surrounding_words,1,5,ucg (ASCII),ucg --nosmart-case \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,3.569624423980713,317,
|
||||
subtitles_en_surrounding_words,1,5,grep (ASCII),grep -E -an \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.0672054290771484,317,LC_ALL=C
|
||||
subtitles_en_surrounding_words,1,5,grep (ASCII),grep -E -an \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.0729331970214844,317,LC_ALL=C
|
||||
subtitles_en_surrounding_words,1,5,grep (ASCII),grep -E -an \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.052501916885376,317,LC_ALL=C
|
||||
subtitles_en_surrounding_words,1,5,grep (ASCII),grep -E -an \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.0711696147918701,317,LC_ALL=C
|
||||
subtitles_en_surrounding_words,1,5,grep (ASCII),grep -E -an \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.sample.en,1.0863316059112549,317,LC_ALL=C
|
||||
subtitles_ru_alternate,1,5,rg (lines),rg -n Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.0312588214874268,691,
|
||||
subtitles_ru_alternate,1,5,rg (lines),rg -n Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.063939094543457,691,
|
||||
subtitles_ru_alternate,1,5,rg (lines),rg -n Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.0000121593475342,691,
|
||||
subtitles_ru_alternate,1,5,rg (lines),rg -n Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.9842438697814941,691,
|
||||
subtitles_ru_alternate,1,5,rg (lines),rg -n Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.95733642578125,691,
|
||||
subtitles_ru_alternate,1,5,ag (lines),ag -s Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,3.7781903743743896,691,
|
||||
subtitles_ru_alternate,1,5,ag (lines),ag -s Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,3.861164093017578,691,
|
||||
subtitles_ru_alternate,1,5,ag (lines),ag -s Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,3.8268885612487793,691,
|
||||
subtitles_ru_alternate,1,5,ag (lines),ag -s Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,3.8621268272399902,691,
|
||||
subtitles_ru_alternate,1,5,ag (lines),ag -s Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,3.8216166496276855,691,
|
||||
subtitles_ru_alternate,1,5,ucg (lines),ucg --nosmart-case Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,2.0069098472595215,691,
|
||||
subtitles_ru_alternate,1,5,ucg (lines),ucg --nosmart-case Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,2.025178909301758,691,
|
||||
subtitles_ru_alternate,1,5,ucg (lines),ucg --nosmart-case Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,2.0631070137023926,691,
|
||||
subtitles_ru_alternate,1,5,ucg (lines),ucg --nosmart-case Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,2.0902633666992188,691,
|
||||
subtitles_ru_alternate,1,5,ucg (lines),ucg --nosmart-case Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,2.0272655487060547,691,
|
||||
subtitles_ru_alternate,1,5,grep (lines),grep -E -an Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,7.510146617889404,691,LC_ALL=C
|
||||
subtitles_ru_alternate,1,5,grep (lines),grep -E -an Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,7.541701793670654,691,LC_ALL=C
|
||||
subtitles_ru_alternate,1,5,grep (lines),grep -E -an Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,7.506088733673096,691,LC_ALL=C
|
||||
subtitles_ru_alternate,1,5,grep (lines),grep -E -an Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,7.51838755607605,691,LC_ALL=C
|
||||
subtitles_ru_alternate,1,5,grep (lines),grep -E -an Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,7.486810684204102,691,LC_ALL=C
|
||||
subtitles_ru_alternate,1,5,rg,rg Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.9679937362670898,691,
|
||||
subtitles_ru_alternate,1,5,rg,rg Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.9942011833190918,691,
|
||||
subtitles_ru_alternate,1,5,rg,rg Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.9233448505401611,691,
|
||||
subtitles_ru_alternate,1,5,rg,rg Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.9294781684875488,691,
|
||||
subtitles_ru_alternate,1,5,rg,rg Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.8729774951934814,691,
|
||||
subtitles_ru_alternate,1,5,grep,grep -E -a Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,7.100147485733032,691,LC_ALL=C
|
||||
subtitles_ru_alternate,1,5,grep,grep -E -a Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,7.075790166854858,691,LC_ALL=C
|
||||
subtitles_ru_alternate,1,5,grep,grep -E -a Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,7.069685220718384,691,LC_ALL=C
|
||||
subtitles_ru_alternate,1,5,grep,grep -E -a Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,7.0526063442230225,691,LC_ALL=C
|
||||
subtitles_ru_alternate,1,5,grep,grep -E -a Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,7.129194498062134,691,LC_ALL=C
|
||||
subtitles_ru_alternate_casei,1,5,ag (ASCII),ag -s -i Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,3.7894201278686523,691,
|
||||
subtitles_ru_alternate_casei,1,5,ag (ASCII),ag -s -i Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,3.7878782749176025,691,
|
||||
subtitles_ru_alternate_casei,1,5,ag (ASCII),ag -s -i Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,3.796328544616699,691,
|
||||
subtitles_ru_alternate_casei,1,5,ag (ASCII),ag -s -i Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,3.8249149322509766,691,
|
||||
subtitles_ru_alternate_casei,1,5,ag (ASCII),ag -s -i Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,3.7949724197387695,691,
|
||||
subtitles_ru_alternate_casei,1,5,ucg (ASCII),ucg -i Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,2.075739622116089,691,
|
||||
subtitles_ru_alternate_casei,1,5,ucg (ASCII),ucg -i Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,2.013590097427368,691,
|
||||
subtitles_ru_alternate_casei,1,5,ucg (ASCII),ucg -i Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,2.012375593185425,691,
|
||||
subtitles_ru_alternate_casei,1,5,ucg (ASCII),ucg -i Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,2.023118495941162,691,
|
||||
subtitles_ru_alternate_casei,1,5,ucg (ASCII),ucg -i Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,2.0641982555389404,691,
|
||||
subtitles_ru_alternate_casei,1,5,grep (ASCII),grep -E -ani Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,7.467320442199707,691,LC_ALL=C
|
||||
subtitles_ru_alternate_casei,1,5,grep (ASCII),grep -E -ani Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,7.486851692199707,691,LC_ALL=C
|
||||
subtitles_ru_alternate_casei,1,5,grep (ASCII),grep -E -ani Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,7.479818344116211,691,LC_ALL=C
|
||||
subtitles_ru_alternate_casei,1,5,grep (ASCII),grep -E -ani Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,7.516186475753784,691,LC_ALL=C
|
||||
subtitles_ru_alternate_casei,1,5,grep (ASCII),grep -E -ani Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,7.471773862838745,691,LC_ALL=C
|
||||
subtitles_ru_alternate_casei,1,5,rg,rg -n -i Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,11.026185274124146,735,
|
||||
subtitles_ru_alternate_casei,1,5,rg,rg -n -i Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,11.168465614318848,735,
|
||||
subtitles_ru_alternate_casei,1,5,rg,rg -n -i Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,11.039950370788574,735,
|
||||
subtitles_ru_alternate_casei,1,5,rg,rg -n -i Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,11.089850425720215,735,
|
||||
subtitles_ru_alternate_casei,1,5,rg,rg -n -i Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,11.112446546554565,735,
|
||||
subtitles_ru_alternate_casei,1,5,grep,grep -E -ani Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,6.822641849517822,735,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_alternate_casei,1,5,grep,grep -E -ani Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,6.808355331420898,735,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_alternate_casei,1,5,grep,grep -E -ani Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,6.80171275138855,735,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_alternate_casei,1,5,grep,grep -E -ani Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,6.794351577758789,735,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_alternate_casei,1,5,grep,grep -E -ani Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,6.844403266906738,735,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_literal,1,5,rg,rg Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.20681476593017578,583,
|
||||
subtitles_ru_literal,1,5,rg,rg Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.190568208694458,583,
|
||||
subtitles_ru_literal,1,5,rg,rg Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.18462657928466797,583,
|
||||
subtitles_ru_literal,1,5,rg,rg Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.1873643398284912,583,
|
||||
subtitles_ru_literal,1,5,rg,rg Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.20382428169250488,583,
|
||||
subtitles_ru_literal,1,5,rg (no mmap),rg --no-mmap Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.3085510730743408,583,
|
||||
subtitles_ru_literal,1,5,rg (no mmap),rg --no-mmap Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.318758487701416,583,
|
||||
subtitles_ru_literal,1,5,rg (no mmap),rg --no-mmap Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.3177149295806885,583,
|
||||
subtitles_ru_literal,1,5,rg (no mmap),rg --no-mmap Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.31236958503723145,583,
|
||||
subtitles_ru_literal,1,5,rg (no mmap),rg --no-mmap Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.31880998611450195,583,
|
||||
subtitles_ru_literal,1,5,pt,pt -N Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,5.152938365936279,583,
|
||||
subtitles_ru_literal,1,5,pt,pt -N Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,5.124867677688599,583,
|
||||
subtitles_ru_literal,1,5,pt,pt -N Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,5.132290363311768,583,
|
||||
subtitles_ru_literal,1,5,pt,pt -N Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,5.158328056335449,583,
|
||||
subtitles_ru_literal,1,5,pt,pt -N Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,5.1022467613220215,583,
|
||||
subtitles_ru_literal,1,5,sift,sift Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,5.807113409042358,583,
|
||||
subtitles_ru_literal,1,5,sift,sift Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,5.8178558349609375,583,
|
||||
subtitles_ru_literal,1,5,sift,sift Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,5.925220012664795,583,
|
||||
subtitles_ru_literal,1,5,sift,sift Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,5.861236333847046,583,
|
||||
subtitles_ru_literal,1,5,sift,sift Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,5.763278484344482,583,
|
||||
subtitles_ru_literal,1,5,grep,grep -a Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.704503059387207,583,LC_ALL=C
|
||||
subtitles_ru_literal,1,5,grep,grep -a Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.6887199878692627,583,LC_ALL=C
|
||||
subtitles_ru_literal,1,5,grep,grep -a Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.7092702388763428,583,LC_ALL=C
|
||||
subtitles_ru_literal,1,5,grep,grep -a Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.6964359283447266,583,LC_ALL=C
|
||||
subtitles_ru_literal,1,5,grep,grep -a Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.6928379535675049,583,LC_ALL=C
|
||||
subtitles_ru_literal,1,5,rg (lines),rg -n Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.2646975517272949,583,
|
||||
subtitles_ru_literal,1,5,rg (lines),rg -n Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.26806163787841797,583,
|
||||
subtitles_ru_literal,1,5,rg (lines),rg -n Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.2700214385986328,583,
|
||||
subtitles_ru_literal,1,5,rg (lines),rg -n Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.2669072151184082,583,
|
||||
subtitles_ru_literal,1,5,rg (lines),rg -n Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.2656106948852539,583,
|
||||
subtitles_ru_literal,1,5,ag (lines),ag -s Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,2.9972407817840576,583,
|
||||
subtitles_ru_literal,1,5,ag (lines),ag -s Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,2.906053066253662,583,
|
||||
subtitles_ru_literal,1,5,ag (lines),ag -s Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,2.864766836166382,583,
|
||||
subtitles_ru_literal,1,5,ag (lines),ag -s Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,2.7820546627044678,583,
|
||||
subtitles_ru_literal,1,5,ag (lines),ag -s Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,2.7599871158599854,583,
|
||||
subtitles_ru_literal,1,5,ucg (lines),ucg --nosmart-case Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,2.411653995513916,583,
|
||||
subtitles_ru_literal,1,5,ucg (lines),ucg --nosmart-case Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,2.394604206085205,583,
|
||||
subtitles_ru_literal,1,5,ucg (lines),ucg --nosmart-case Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,2.362853765487671,583,
|
||||
subtitles_ru_literal,1,5,ucg (lines),ucg --nosmart-case Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,2.4795477390289307,583,
|
||||
subtitles_ru_literal,1,5,ucg (lines),ucg --nosmart-case Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,2.4428844451904297,583,
|
||||
subtitles_ru_literal,1,5,pt (lines),pt Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,5.122563123703003,583,
|
||||
subtitles_ru_literal,1,5,pt (lines),pt Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,5.17008900642395,583,
|
||||
subtitles_ru_literal,1,5,pt (lines),pt Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,5.1965367794036865,583,
|
||||
subtitles_ru_literal,1,5,pt (lines),pt Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,5.152370929718018,583,
|
||||
subtitles_ru_literal,1,5,pt (lines),pt Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,5.106513738632202,583,
|
||||
subtitles_ru_literal,1,5,sift (lines),sift -n Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,6.408761978149414,583,
|
||||
subtitles_ru_literal,1,5,sift (lines),sift -n Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,6.423579454421997,583,
|
||||
subtitles_ru_literal,1,5,sift (lines),sift -n Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,6.2807464599609375,583,
|
||||
subtitles_ru_literal,1,5,sift (lines),sift -n Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,6.3771467208862305,583,
|
||||
subtitles_ru_literal,1,5,sift (lines),sift -n Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,6.378506422042847,583,
|
||||
subtitles_ru_literal,1,5,grep (lines),grep -an Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.121800422668457,583,LC_ALL=C
|
||||
subtitles_ru_literal,1,5,grep (lines),grep -an Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.1189923286437988,583,LC_ALL=C
|
||||
subtitles_ru_literal,1,5,grep (lines),grep -an Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.0678138732910156,583,LC_ALL=C
|
||||
subtitles_ru_literal,1,5,grep (lines),grep -an Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.0668041706085205,583,LC_ALL=C
|
||||
subtitles_ru_literal,1,5,grep (lines),grep -an Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.0713574886322021,583,LC_ALL=C
|
||||
subtitles_ru_literal_casei,1,5,rg,rg -i Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.9427816867828369,604,
|
||||
subtitles_ru_literal_casei,1,5,rg,rg -i Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.0397350788116455,604,
|
||||
subtitles_ru_literal_casei,1,5,rg,rg -i Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.9732518196105957,604,
|
||||
subtitles_ru_literal_casei,1,5,rg,rg -i Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.9387776851654053,604,
|
||||
subtitles_ru_literal_casei,1,5,rg,rg -i Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.9536802768707275,604,
|
||||
subtitles_ru_literal_casei,1,5,grep,grep -ai Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,6.338641405105591,604,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_literal_casei,1,5,grep,grep -ai Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,6.280565023422241,604,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_literal_casei,1,5,grep,grep -ai Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,6.241750240325928,604,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_literal_casei,1,5,grep,grep -ai Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,6.316105604171753,604,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_literal_casei,1,5,grep,grep -ai Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,6.307560205459595,604,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_literal_casei,1,5,grep (ASCII),grep -E -ai Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.7379302978515625,583,LC_ALL=C
|
||||
subtitles_ru_literal_casei,1,5,grep (ASCII),grep -E -ai Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.7226619720458984,583,LC_ALL=C
|
||||
subtitles_ru_literal_casei,1,5,grep (ASCII),grep -E -ai Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.683293342590332,583,LC_ALL=C
|
||||
subtitles_ru_literal_casei,1,5,grep (ASCII),grep -E -ai Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.714146614074707,583,LC_ALL=C
|
||||
subtitles_ru_literal_casei,1,5,grep (ASCII),grep -E -ai Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.7654330730438232,583,LC_ALL=C
|
||||
subtitles_ru_literal_casei,1,5,rg (lines),rg -n -i Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.0237820148468018,604,
|
||||
subtitles_ru_literal_casei,1,5,rg (lines),rg -n -i Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.0194151401519775,604,
|
||||
subtitles_ru_literal_casei,1,5,rg (lines),rg -n -i Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.0364336967468262,604,
|
||||
subtitles_ru_literal_casei,1,5,rg (lines),rg -n -i Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.035005807876587,604,
|
||||
subtitles_ru_literal_casei,1,5,rg (lines),rg -n -i Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.0438766479492188,604,
|
||||
subtitles_ru_literal_casei,1,5,ag (lines) (ASCII),ag -i Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.619025468826294,,
|
||||
subtitles_ru_literal_casei,1,5,ag (lines) (ASCII),ag -i Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.647244930267334,,
|
||||
subtitles_ru_literal_casei,1,5,ag (lines) (ASCII),ag -i Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.6785612106323242,,
|
||||
subtitles_ru_literal_casei,1,5,ag (lines) (ASCII),ag -i Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.6503715515136719,,
|
||||
subtitles_ru_literal_casei,1,5,ag (lines) (ASCII),ag -i Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.6314499378204346,,
|
||||
subtitles_ru_literal_casei,1,5,ucg (lines) (ASCII),ucg -i Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.8302316665649414,583,
|
||||
subtitles_ru_literal_casei,1,5,ucg (lines) (ASCII),ucg -i Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.7719593048095703,583,
|
||||
subtitles_ru_literal_casei,1,5,ucg (lines) (ASCII),ucg -i Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.7697594165802002,583,
|
||||
subtitles_ru_literal_casei,1,5,ucg (lines) (ASCII),ucg -i Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.7312629222869873,583,
|
||||
subtitles_ru_literal_casei,1,5,ucg (lines) (ASCII),ucg -i Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.767866849899292,583,
|
||||
subtitles_ru_literal_word,1,5,rg (ASCII),rg -n (?-u:\b)Шерлок Холмс(?-u:\b) /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.19411826133728027,,
|
||||
subtitles_ru_literal_word,1,5,rg (ASCII),rg -n (?-u:\b)Шерлок Холмс(?-u:\b) /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.18651676177978516,,
|
||||
subtitles_ru_literal_word,1,5,rg (ASCII),rg -n (?-u:\b)Шерлок Холмс(?-u:\b) /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.19614577293395996,,
|
||||
subtitles_ru_literal_word,1,5,rg (ASCII),rg -n (?-u:\b)Шерлок Холмс(?-u:\b) /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.18459081649780273,,
|
||||
subtitles_ru_literal_word,1,5,rg (ASCII),rg -n (?-u:\b)Шерлок Холмс(?-u:\b) /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.1797487735748291,,
|
||||
subtitles_ru_literal_word,1,5,ag (ASCII),ag -sw Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.6507105827331543,,
|
||||
subtitles_ru_literal_word,1,5,ag (ASCII),ag -sw Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.6480035781860352,,
|
||||
subtitles_ru_literal_word,1,5,ag (ASCII),ag -sw Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.7138750553131104,,
|
||||
subtitles_ru_literal_word,1,5,ag (ASCII),ag -sw Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.6521759033203125,,
|
||||
subtitles_ru_literal_word,1,5,ag (ASCII),ag -sw Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.6728894710540771,,
|
||||
subtitles_ru_literal_word,1,5,ucg (ASCII),ucg --nosmart-case Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,2.3646819591522217,583,
|
||||
subtitles_ru_literal_word,1,5,ucg (ASCII),ucg --nosmart-case Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,2.3836848735809326,583,
|
||||
subtitles_ru_literal_word,1,5,ucg (ASCII),ucg --nosmart-case Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,2.419490337371826,583,
|
||||
subtitles_ru_literal_word,1,5,ucg (ASCII),ucg --nosmart-case Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,2.363335609436035,583,
|
||||
subtitles_ru_literal_word,1,5,ucg (ASCII),ucg --nosmart-case Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,2.488351345062256,583,
|
||||
subtitles_ru_literal_word,1,5,grep (ASCII),grep -anw Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.171506643295288,583,LC_ALL=C
|
||||
subtitles_ru_literal_word,1,5,grep (ASCII),grep -anw Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.1602776050567627,583,LC_ALL=C
|
||||
subtitles_ru_literal_word,1,5,grep (ASCII),grep -anw Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.084787368774414,583,LC_ALL=C
|
||||
subtitles_ru_literal_word,1,5,grep (ASCII),grep -anw Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.0714166164398193,583,LC_ALL=C
|
||||
subtitles_ru_literal_word,1,5,grep (ASCII),grep -anw Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.083632469177246,583,LC_ALL=C
|
||||
subtitles_ru_literal_word,1,5,rg,rg -nw Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.2769143581390381,579,
|
||||
subtitles_ru_literal_word,1,5,rg,rg -nw Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.2694058418273926,579,
|
||||
subtitles_ru_literal_word,1,5,rg,rg -nw Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.26763367652893066,579,
|
||||
subtitles_ru_literal_word,1,5,rg,rg -nw Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.2671318054199219,579,
|
||||
subtitles_ru_literal_word,1,5,rg,rg -nw Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.2922348976135254,579,
|
||||
subtitles_ru_literal_word,1,5,grep,grep -anw Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.083528757095337,579,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_literal_word,1,5,grep,grep -anw Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.0857081413269043,579,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_literal_word,1,5,grep,grep -anw Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.07025146484375,579,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_literal_word,1,5,grep,grep -anw Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.071930170059204,579,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_literal_word,1,5,grep,grep -anw Шерлок Холмс /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.0709245204925537,579,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_no_literal,1,5,rg,rg -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,3.1552906036376953,41,
|
||||
subtitles_ru_no_literal,1,5,rg,rg -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,3.164951801300049,41,
|
||||
subtitles_ru_no_literal,1,5,rg,rg -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,3.175389289855957,41,
|
||||
subtitles_ru_no_literal,1,5,rg,rg -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,3.1861774921417236,41,
|
||||
subtitles_ru_no_literal,1,5,rg,rg -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,3.153625011444092,41,
|
||||
subtitles_ru_no_literal,1,5,rg (ASCII),rg -n (?-u)\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,2.7353317737579346,,
|
||||
subtitles_ru_no_literal,1,5,rg (ASCII),rg -n (?-u)\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,2.7592883110046387,,
|
||||
subtitles_ru_no_literal,1,5,rg (ASCII),rg -n (?-u)\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,2.7242491245269775,,
|
||||
subtitles_ru_no_literal,1,5,rg (ASCII),rg -n (?-u)\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,2.747089385986328,,
|
||||
subtitles_ru_no_literal,1,5,rg (ASCII),rg -n (?-u)\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,2.732586145401001,,
|
||||
subtitles_ru_no_literal,1,5,ag (ASCII),ag -s \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,2.0796375274658203,,
|
||||
subtitles_ru_no_literal,1,5,ag (ASCII),ag -s \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.9670393466949463,,
|
||||
subtitles_ru_no_literal,1,5,ag (ASCII),ag -s \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.9413447380065918,,
|
||||
subtitles_ru_no_literal,1,5,ag (ASCII),ag -s \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.916764497756958,,
|
||||
subtitles_ru_no_literal,1,5,ag (ASCII),ag -s \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.9110031127929688,,
|
||||
subtitles_ru_no_literal,1,5,ucg (ASCII),ucg --nosmart-case \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,2.0622072219848633,,
|
||||
subtitles_ru_no_literal,1,5,ucg (ASCII),ucg --nosmart-case \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,2.0975682735443115,,
|
||||
subtitles_ru_no_literal,1,5,ucg (ASCII),ucg --nosmart-case \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,2.0741493701934814,,
|
||||
subtitles_ru_no_literal,1,5,ucg (ASCII),ucg --nosmart-case \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,2.0423810482025146,,
|
||||
subtitles_ru_no_literal,1,5,ucg (ASCII),ucg --nosmart-case \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,2.000764846801758,,
|
||||
subtitles_ru_no_literal,1,5,grep (ASCII),grep -E -an \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.6251120567321777,,LC_ALL=C
|
||||
subtitles_ru_no_literal,1,5,grep (ASCII),grep -E -an \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.644089698791504,,LC_ALL=C
|
||||
subtitles_ru_no_literal,1,5,grep (ASCII),grep -E -an \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.6416165828704834,,LC_ALL=C
|
||||
subtitles_ru_no_literal,1,5,grep (ASCII),grep -E -an \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.6321892738342285,,LC_ALL=C
|
||||
subtitles_ru_no_literal,1,5,grep (ASCII),grep -E -an \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.6264762878417969,,LC_ALL=C
|
||||
subtitles_ru_surrounding_words,1,5,rg,rg -n \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.29879307746887207,278,
|
||||
subtitles_ru_surrounding_words,1,5,rg,rg -n \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.3226010799407959,278,
|
||||
subtitles_ru_surrounding_words,1,5,rg,rg -n \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.32187771797180176,278,
|
||||
subtitles_ru_surrounding_words,1,5,rg,rg -n \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.2825047969818115,278,
|
||||
subtitles_ru_surrounding_words,1,5,rg,rg -n \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,0.283217191696167,278,
|
||||
subtitles_ru_surrounding_words,1,5,grep,grep -E -an \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.3977878093719482,278,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_surrounding_words,1,5,grep,grep -E -an \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.4288139343261719,278,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_surrounding_words,1,5,grep,grep -E -an \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.4054889678955078,278,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_surrounding_words,1,5,grep,grep -E -an \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.4003441333770752,278,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_surrounding_words,1,5,grep,grep -E -an \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.5269148349761963,278,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_surrounding_words,1,5,ag (ASCII),ag -s \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.8912529945373535,,
|
||||
subtitles_ru_surrounding_words,1,5,ag (ASCII),ag -s \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.9221522808074951,,
|
||||
subtitles_ru_surrounding_words,1,5,ag (ASCII),ag -s \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.9416618347167969,,
|
||||
subtitles_ru_surrounding_words,1,5,ag (ASCII),ag -s \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.893650770187378,,
|
||||
subtitles_ru_surrounding_words,1,5,ag (ASCII),ag -s \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.8895554542541504,,
|
||||
subtitles_ru_surrounding_words,1,5,ucg (ASCII),ucg --nosmart-case \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,2.0110745429992676,,
|
||||
subtitles_ru_surrounding_words,1,5,ucg (ASCII),ucg --nosmart-case \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.9790067672729492,,
|
||||
subtitles_ru_surrounding_words,1,5,ucg (ASCII),ucg --nosmart-case \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,2.0426392555236816,,
|
||||
subtitles_ru_surrounding_words,1,5,ucg (ASCII),ucg --nosmart-case \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,2.121723175048828,,
|
||||
subtitles_ru_surrounding_words,1,5,ucg (ASCII),ucg --nosmart-case \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,2.1247596740722656,,
|
||||
subtitles_ru_surrounding_words,1,5,grep (ASCII),grep -E -an \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.3579976558685303,,LC_ALL=C
|
||||
subtitles_ru_surrounding_words,1,5,grep (ASCII),grep -E -an \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.382859468460083,,LC_ALL=C
|
||||
subtitles_ru_surrounding_words,1,5,grep (ASCII),grep -E -an \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.393401861190796,,LC_ALL=C
|
||||
subtitles_ru_surrounding_words,1,5,grep (ASCII),grep -E -an \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.474374532699585,,LC_ALL=C
|
||||
subtitles_ru_surrounding_words,1,5,grep (ASCII),grep -E -an \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/OpenSubtitles2016.raw.ru,1.3835601806640625,,LC_ALL=C
|
||||
|
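The raw rows above record one timed run per line; the summary file that follows condenses them into a mean time with a standard deviation over the recorded runs (for example, the five subtitles_en_literal_word runs of ucg near the top of this hunk average to roughly 0.384s with a sample standard deviation of about 0.015s, which is the "ucg (ASCII) 0.384 +/- 0.015" entry in the summary below). The standalone sketch below shows that aggregation. It is not part of the repository's benchsuite, and the field order benchmark,iteration,runs,label,command,seconds,lines,env is inferred from the rows themselves rather than taken from any documented format.

use std::collections::BTreeMap;
use std::io::{self, BufRead};

fn main() -> io::Result<()> {
    let stdin = io::stdin();
    // (benchmark, tool label) -> run times in seconds
    let mut samples: BTreeMap<(String, String), Vec<f64>> = BTreeMap::new();

    for line in stdin.lock().lines() {
        let line = line?;
        let fields: Vec<&str> = line.split(',').collect();
        // Data rows have at least 8 comma-separated fields (inferred order:
        // benchmark,iteration,runs,label,command,seconds,lines,env); anything
        // shorter, such as blank lines or the scraped diff gutters, is skipped.
        if fields.len() < 8 {
            continue;
        }
        let key = (fields[0].to_string(), fields[3].to_string());
        // The duration in seconds is the third field from the end, so a comma
        // inside the command field would not shift it.
        let secs: f64 = match fields[fields.len() - 3].parse() {
            Ok(s) => s,
            Err(_) => continue,
        };
        samples.entry(key).or_default().push(secs);
    }

    for ((benchmark, label), times) in &samples {
        let n = times.len() as f64;
        let mean = times.iter().sum::<f64>() / n;
        // Sample standard deviation, matching the "+/-" column in the summary.
        let var = if times.len() > 1 {
            times.iter().map(|t| (t - mean).powi(2)).sum::<f64>() / (n - 1.0)
        } else {
            0.0
        };
        println!("{} {} {:.3} +/- {:.3}", benchmark, label, mean, var.sqrt());
    }
    Ok(())
}

Feeding the raw rows above to this program on stdin reproduces the summary's mean +/- stddev figures up to rounding; the "(lines: N)" column in the summary comes from the seventh field, which this sketch ignores.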
235
benchsuite/runs/2018-01-08-archlinux-cheetah/summary
Normal file
@@ -0,0 +1,235 @@
|
||||
linux_alternates (pattern: ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT)
|
||||
-------------------------------------------------------------------------
|
||||
rg (ignore) 0.100 +/- 0.003 (lines: 68)
|
||||
ag (ignore) 0.501 +/- 0.033 (lines: 68)
|
||||
git grep (ignore) 0.267 +/- 0.004 (lines: 68)
|
||||
rg (whitelist)* 0.090 +/- 0.001 (lines: 68)*
|
||||
ucg (whitelist) 0.135 +/- 0.003 (lines: 68)
|
||||
|
||||
linux_alternates_casei (pattern: ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT)
|
||||
-------------------------------------------------------------------------------
|
||||
rg (ignore) 0.124 +/- 0.004 (lines: 160)
|
||||
ag (ignore) 0.564 +/- 0.041 (lines: 160)
|
||||
git grep (ignore) 0.928 +/- 0.033 (lines: 160)
|
||||
rg (whitelist)* 0.096 +/- 0.003 (lines: 160)*
|
||||
ucg (whitelist) 0.248 +/- 0.008 (lines: 160)
|
||||
|
||||
linux_literal (pattern: PM_RESUME)
|
||||
----------------------------------
|
||||
rg (ignore)* 0.082 +/- 0.001 (lines: 16)*
|
||||
rg (ignore) (mmap) 0.751 +/- 0.062 (lines: 16)
|
||||
ag (ignore) (mmap) 0.612 +/- 0.065 (lines: 16)
|
||||
pt (ignore) 0.195 +/- 0.020 (lines: 16)
|
||||
sift (ignore) 0.468 +/- 0.006 (lines: 16)
|
||||
git grep (ignore) 0.196 +/- 0.005 (lines: 16)
|
||||
rg (whitelist) 0.085 +/- 0.003 (lines: 16)
|
||||
ucg (whitelist) 0.159 +/- 0.002 (lines: 16)
|
||||
|
||||
linux_literal_casei (pattern: PM_RESUME)
|
||||
----------------------------------------
|
||||
rg (ignore) 0.105 +/- 0.003 (lines: 374)
|
||||
rg (ignore) (mmap) 0.799 +/- 0.012 (lines: 374)
|
||||
ag (ignore) (mmap) 0.469 +/- 0.030 (lines: 374)
|
||||
pt (ignore) 14.177 +/- 0.049 (lines: 374)
|
||||
sift (ignore) 0.460 +/- 0.006 (lines: 374)
|
||||
git grep (ignore) 0.198 +/- 0.006 (lines: 370)
|
||||
rg (whitelist)* 0.097 +/- 0.003 (lines: 370)*
|
||||
ucg (whitelist) 0.154 +/- 0.003 (lines: 370)
|
||||
|
||||
linux_literal_default (pattern: PM_RESUME)
|
||||
------------------------------------------
|
||||
rg* 0.089 +/- 0.002 (lines: 16)*
|
||||
ag 0.469 +/- 0.038 (lines: 16)
|
||||
ucg 0.154 +/- 0.001 (lines: 16)
|
||||
pt 0.237 +/- 0.040 (lines: 16)
|
||||
sift 0.126 +/- 0.003 (lines: 16)
|
||||
git grep 0.175 +/- 0.013 (lines: 16)
|
||||
|
||||
linux_no_literal (pattern: \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5})
|
||||
-----------------------------------------------------------------
|
||||
rg (ignore) 0.329 +/- 0.006 (lines: 490)
|
||||
rg (ignore) (ASCII) 0.172 +/- 0.002 (lines: 490)
|
||||
ag (ignore) (ASCII) 0.725 +/- 0.067 (lines: 766)
|
||||
pt (ignore) (ASCII) 12.478 +/- 0.097 (lines: 490)
|
||||
sift (ignore) (ASCII) 9.002 +/- 0.096 (lines: 490)
|
||||
git grep (ignore) 8.542 +/- 0.277 (lines: 490)
|
||||
git grep (ignore) (ASCII) 1.883 +/- 0.087 (lines: 490)
|
||||
rg (whitelist) 0.289 +/- 0.006 (lines: 458)
|
||||
rg (whitelist) (ASCII)* 0.160 +/- 0.001 (lines: 458)*
|
||||
ucg (whitelist) (ASCII) 0.474 +/- 0.020 (lines: 416)
|
||||
|
||||
linux_re_literal_suffix (pattern: [A-Z]+_RESUME)
|
||||
------------------------------------------------
|
||||
rg (ignore) 0.084 +/- 0.002 (lines: 1652)
|
||||
ag (ignore) 0.483 +/- 0.006 (lines: 1652)
|
||||
pt (ignore) 14.128 +/- 0.026 (lines: 1652)
|
||||
sift (ignore) 4.099 +/- 0.103 (lines: 1652)
|
||||
git grep (ignore) 0.529 +/- 0.014 (lines: 1652)
|
||||
rg (whitelist)* 0.078 +/- 0.002 (lines: 1630)*
|
||||
ucg (whitelist) 0.135 +/- 0.002 (lines: 1630)
|
||||
|
||||
linux_unicode_greek (pattern: \p{Greek})
|
||||
----------------------------------------
|
||||
rg* 0.172 +/- 0.002 (lines: 23)*
|
||||
pt 14.122 +/- 0.031 (lines: 23)
|
||||
sift 2.826 +/- 0.012 (lines: 23)
|
||||
|
||||
linux_unicode_greek_casei (pattern: \p{Greek})
|
||||
----------------------------------------------
|
||||
rg 0.170 +/- 0.001 (lines: 103)
|
||||
pt 14.120 +/- 0.039 (lines: 23)
|
||||
sift* 0.004 +/- 0.000 (lines: 0)*
|
||||
|
||||
linux_unicode_word (pattern: \wAh)
|
||||
----------------------------------
|
||||
rg (ignore) 0.098 +/- 0.002 (lines: 186)
|
||||
rg (ignore) (ASCII) 0.096 +/- 0.002 (lines: 174)
|
||||
ag (ignore) (ASCII) 0.627 +/- 0.038 (lines: 174)
|
||||
pt (ignore) (ASCII) 14.182 +/- 0.024 (lines: 174)
|
||||
sift (ignore) (ASCII) 4.135 +/- 0.119 (lines: 174)
|
||||
git grep (ignore) 4.854 +/- 0.643 (lines: 186)
|
||||
git grep (ignore) (ASCII) 1.376 +/- 0.035 (lines: 174)
|
||||
rg (whitelist) 0.081 +/- 0.001 (lines: 180)*
|
||||
rg (whitelist) (ASCII)* 0.082 +/- 0.002 (lines: 168)
|
||||
ucg (ASCII) 0.155 +/- 0.003 (lines: 168)
|
||||
|
||||
linux_word (pattern: PM_RESUME)
|
||||
-------------------------------
|
||||
rg (ignore) 0.091 +/- 0.002 (lines: 6)
|
||||
ag (ignore) 0.461 +/- 0.020 (lines: 6)
|
||||
pt (ignore) 14.223 +/- 0.038 (lines: 6)
|
||||
sift (ignore) 3.226 +/- 0.043 (lines: 6)
|
||||
git grep (ignore) 0.173 +/- 0.006 (lines: 6)
|
||||
rg (whitelist)* 0.076 +/- 0.001 (lines: 6)*
|
||||
ucg (whitelist) 0.156 +/- 0.003 (lines: 6)
|
||||
|
||||
subtitles_en_alternate (pattern: Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty)
|
||||
---------------------------------------------------------------------------------------------------------------
|
||||
rg (lines) 0.311 +/- 0.026 (lines: 848)
|
||||
ag (lines) 2.242 +/- 0.086 (lines: 848)
|
||||
ucg (lines) 1.132 +/- 0.017 (lines: 848)
|
||||
grep (lines) 1.828 +/- 0.017 (lines: 848)
|
||||
rg* 0.226 +/- 0.031 (lines: 848)*
|
||||
grep 1.528 +/- 0.057 (lines: 848)
|
||||
|
||||
subtitles_en_alternate_casei (pattern: Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty)
|
||||
---------------------------------------------------------------------------------------------------------------------
|
||||
ag (ASCII) 3.745 +/- 0.035 (lines: 862)
|
||||
ucg (ASCII) 2.423 +/- 0.030 (lines: 862)
|
||||
grep (ASCII) 2.969 +/- 0.040 (lines: 862)
|
||||
rg* 1.952 +/- 0.049 (lines: 862)*
|
||||
grep 2.928 +/- 0.012 (lines: 862)
|
||||
|
||||
subtitles_en_literal (pattern: Sherlock Holmes)
|
||||
-----------------------------------------------
|
||||
rg* 0.130 +/- 0.014 (lines: 629)*
|
||||
rg (no mmap) 0.243 +/- 0.017 (lines: 629)
|
||||
pt 1.421 +/- 0.005 (lines: 629)
|
||||
sift 0.243 +/- 0.015 (lines: 629)
|
||||
grep 0.486 +/- 0.027 (lines: 629)
|
||||
rg (lines) 0.190 +/- 0.014 (lines: 629)
|
||||
ag (lines) 1.573 +/- 0.034 (lines: 629)
|
||||
ucg (lines) 0.386 +/- 0.014 (lines: 629)
|
||||
pt (lines) 1.452 +/- 0.055 (lines: 629)
|
||||
sift (lines) 0.601 +/- 0.015 (lines: 629)
|
||||
grep (lines) 0.803 +/- 0.031 (lines: 629)
|
||||
|
||||
subtitles_en_literal_casei (pattern: Sherlock Holmes)
|
||||
-----------------------------------------------------
|
||||
rg* 0.284 +/- 0.028 (lines: 642)*
|
||||
grep 1.912 +/- 0.004 (lines: 642)
|
||||
grep (ASCII) 0.570 +/- 0.018 (lines: 642)
|
||||
rg (lines) 0.362 +/- 0.028 (lines: 642)
|
||||
ag (lines) (ASCII) 1.580 +/- 0.036 (lines: 642)
|
||||
ucg (lines) (ASCII) 0.404 +/- 0.019 (lines: 642)
|
||||
|
||||
subtitles_en_literal_word (pattern: Sherlock Holmes)
----------------------------------------------------
rg (ASCII)*           0.185 +/- 0.015 (lines: 629)
ag (ASCII)            1.525 +/- 0.009 (lines: 629)
ucg (ASCII)           0.384 +/- 0.015 (lines: 629)
grep (ASCII)          0.788 +/- 0.029 (lines: 629)
rg                    0.184 +/- 0.006 (lines: 629)*
grep                  0.790 +/- 0.028 (lines: 629)

subtitles_en_no_literal (pattern: \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5})
----------------------------------------------------------------------------------------
rg                    1.793 +/- 0.005 (lines: 13)
rg (ASCII)*           1.594 +/- 0.003 (lines: 13)*
ag (ASCII)            6.573 +/- 0.036 (lines: 48)
ucg (ASCII)           5.394 +/- 0.042 (lines: 13)
grep (ASCII)          3.446 +/- 0.050 (lines: 13)

subtitles_en_surrounding_words (pattern: \w+\s+Holmes\s+\w+)
------------------------------------------------------------
rg                    0.187 +/- 0.003 (lines: 317)
grep                  1.095 +/- 0.026 (lines: 317)
rg (ASCII)*           0.184 +/- 0.005 (lines: 317)*
ag (ASCII)            4.543 +/- 0.075 (lines: 323)
ucg (ASCII)           3.567 +/- 0.058 (lines: 317)
grep (ASCII)          1.070 +/- 0.012 (lines: 317)

subtitles_ru_alternate (pattern: Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти)
-----------------------------------------------------------------------------------------------------------
rg (lines)            1.007 +/- 0.041 (lines: 691)
ag (lines)            3.830 +/- 0.035 (lines: 691)
ucg (lines)           2.043 +/- 0.034 (lines: 691)
grep (lines)          7.513 +/- 0.020 (lines: 691)
rg*                   0.938 +/- 0.046 (lines: 691)*
grep                  7.085 +/- 0.030 (lines: 691)

subtitles_ru_alternate_casei (pattern: Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти)
-----------------------------------------------------------------------------------------------------------------
ag (ASCII)            3.799 +/- 0.015 (lines: 691)
ucg (ASCII)*          2.038 +/- 0.030 (lines: 691)*
grep (ASCII)          7.484 +/- 0.019 (lines: 691)
rg                    11.087 +/- 0.057 (lines: 735)
grep                  6.814 +/- 0.020 (lines: 735)

subtitles_ru_literal (pattern: Шерлок Холмс)
--------------------------------------------
rg*                   0.195 +/- 0.010 (lines: 583)*
rg (no mmap)          0.315 +/- 0.005 (lines: 583)
pt                    5.134 +/- 0.023 (lines: 583)
sift                  5.835 +/- 0.061 (lines: 583)
grep                  0.698 +/- 0.008 (lines: 583)
rg (lines)            0.267 +/- 0.002 (lines: 583)
ag (lines)            2.862 +/- 0.096 (lines: 583)
ucg (lines)           2.418 +/- 0.045 (lines: 583)
pt (lines)            5.150 +/- 0.036 (lines: 583)
sift (lines)          6.374 +/- 0.056 (lines: 583)
grep (lines)          1.089 +/- 0.028 (lines: 583)

subtitles_ru_literal_casei (pattern: Шерлок Холмс)
--------------------------------------------------
rg                    0.970 +/- 0.041 (lines: 604)
grep                  6.297 +/- 0.037 (lines: 604)
grep (ASCII)          0.725 +/- 0.030 (lines: 583)
rg (lines)            1.032 +/- 0.010 (lines: 604)
ag (lines) (ASCII)*   0.645 +/- 0.022 (lines: 0)*
ucg (lines) (ASCII)   0.774 +/- 0.036 (lines: 583)

subtitles_ru_literal_word (pattern: Шерлок Холмс)
-------------------------------------------------
rg (ASCII)*           0.188 +/- 0.007 (lines: 0)*
ag (ASCII)            0.668 +/- 0.028 (lines: 0)
ucg (ASCII)           2.404 +/- 0.052 (lines: 583)
grep (ASCII)          1.114 +/- 0.048 (lines: 583)
rg                    0.275 +/- 0.011 (lines: 579)
grep                  1.076 +/- 0.008 (lines: 579)

subtitles_ru_no_literal (pattern: \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5})
----------------------------------------------------------------------------------------
rg                    3.167 +/- 0.014 (lines: 41)
rg (ASCII)            2.740 +/- 0.014 (lines: 0)
ag (ASCII)            1.963 +/- 0.069 (lines: 0)
ucg (ASCII)           2.055 +/- 0.037 (lines: 0)
grep (ASCII)*         1.634 +/- 0.009 (lines: 0)*

subtitles_ru_surrounding_words (pattern: \w+\s+Холмс\s+\w+)
-----------------------------------------------------------
rg*                   0.302 +/- 0.020 (lines: 278)*
grep                  1.432 +/- 0.055 (lines: 278)
ag (ASCII)            1.908 +/- 0.023 (lines: 0)
ucg (ASCII)           2.056 +/- 0.066 (lines: 0)
grep (ASCII)          1.398 +/- 0.044 (lines: 0)
183  build.rs
@@ -1,27 +1,190 @@
|
||||
#[macro_use]
|
||||
extern crate clap;
|
||||
#[macro_use]
|
||||
extern crate lazy_static;
|
||||
|
||||
use std::env;
|
||||
use std::fs;
|
||||
use std::fs::{self, File};
|
||||
use std::io::{self, Read, Write};
|
||||
use std::path::Path;
|
||||
use std::process;
|
||||
|
||||
use clap::Shell;
|
||||
|
||||
use app::{RGArg, RGArgKind};
|
||||
|
||||
#[allow(dead_code)]
|
||||
#[path = "src/app.rs"]
|
||||
#[path = "crates/core/app.rs"]
|
||||
mod app;
|
||||
|
||||
fn main() {
|
||||
// OUT_DIR is set by Cargo and it's where any additional build artifacts
|
||||
// are written.
|
||||
let outdir = match env::var_os("OUT_DIR") {
|
||||
None => return,
|
||||
Some(outdir) => outdir,
|
||||
None => {
|
||||
eprintln!(
|
||||
"OUT_DIR environment variable not defined. \
|
||||
Please file a bug: \
|
||||
https://github.com/BurntSushi/ripgrep/issues/new"
|
||||
);
|
||||
process::exit(1);
|
||||
}
|
||||
};
|
||||
fs::create_dir_all(&outdir).unwrap();
|
||||
|
||||
let mut app = app::app_short();
|
||||
let stamp_path = Path::new(&outdir).join("ripgrep-stamp");
|
||||
if let Err(err) = File::create(&stamp_path) {
|
||||
panic!("failed to write {}: {}", stamp_path.display(), err);
|
||||
}
|
||||
if let Err(err) = generate_man_page(&outdir) {
|
||||
eprintln!("failed to generate man page: {}", err);
|
||||
}
|
||||
|
||||
// Use clap to build completion files.
|
||||
let mut app = app::app();
|
||||
app.gen_completions("rg", Shell::Bash, &outdir);
|
||||
app.gen_completions("rg", Shell::Fish, &outdir);
|
||||
app.gen_completions("rg", Shell::Zsh, &outdir);
|
||||
app.gen_completions("rg", Shell::PowerShell, &outdir);
|
||||
// Note that we do not use clap's support for zsh. Instead, zsh completions
|
||||
// are manually maintained in `complete/_rg`.
|
||||
|
||||
// Make the current git hash available to the build.
|
||||
if let Some(rev) = git_revision_hash() {
|
||||
println!("cargo:rustc-env=RIPGREP_BUILD_GIT_HASH={}", rev);
|
||||
}
|
||||
}
|
||||
|
||||
fn git_revision_hash() -> Option<String> {
|
||||
let result = process::Command::new("git")
|
||||
.args(&["rev-parse", "--short=10", "HEAD"])
|
||||
.output();
|
||||
result.ok().and_then(|output| {
|
||||
let v = String::from_utf8_lossy(&output.stdout).trim().to_string();
|
||||
if v.is_empty() {
|
||||
None
|
||||
} else {
|
||||
Some(v)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
fn generate_man_page<P: AsRef<Path>>(outdir: P) -> io::Result<()> {
|
||||
// If asciidoc isn't installed, then don't do anything.
|
||||
if let Err(err) = process::Command::new("a2x").output() {
|
||||
eprintln!("Could not run 'a2x' binary, skipping man page generation.");
|
||||
eprintln!("Error from running 'a2x': {}", err);
|
||||
return Ok(());
|
||||
}
|
||||
// 1. Read asciidoc template.
|
||||
// 2. Interpolate template with auto-generated docs.
|
||||
// 3. Save interpolation to disk.
|
||||
// 4. Use a2x (part of asciidoc) to convert to man page.
|
||||
let outdir = outdir.as_ref();
|
||||
let cwd = env::current_dir()?;
|
||||
let tpl_path = cwd.join("doc").join("rg.1.txt.tpl");
|
||||
let txt_path = outdir.join("rg.1.txt");
|
||||
|
||||
let mut tpl = String::new();
|
||||
File::open(&tpl_path)?.read_to_string(&mut tpl)?;
|
||||
tpl = tpl.replace("{OPTIONS}", &formatted_options()?);
|
||||
|
||||
let githash = git_revision_hash();
|
||||
let githash = githash.as_ref().map(|x| &**x);
|
||||
tpl = tpl.replace("{VERSION}", &app::long_version(githash, false));
|
||||
|
||||
File::create(&txt_path)?.write_all(tpl.as_bytes())?;
|
||||
let result = process::Command::new("a2x")
|
||||
.arg("--no-xmllint")
|
||||
.arg("--doctype")
|
||||
.arg("manpage")
|
||||
.arg("--format")
|
||||
.arg("manpage")
|
||||
.arg(&txt_path)
|
||||
.spawn()?
|
||||
.wait()?;
|
||||
if !result.success() {
|
||||
let msg = format!("'a2x' failed with exit code {:?}", result.code());
|
||||
return Err(ioerr(msg));
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn formatted_options() -> io::Result<String> {
|
||||
let mut args = app::all_args_and_flags();
|
||||
args.sort_by(|x1, x2| x1.name.cmp(&x2.name));
|
||||
|
||||
let mut formatted = vec![];
|
||||
for arg in args {
|
||||
if arg.hidden {
|
||||
continue;
|
||||
}
|
||||
// ripgrep only has two positional arguments, and probably will only
|
||||
// ever have two positional arguments, so we just hardcode them into
|
||||
// the template.
|
||||
if let app::RGArgKind::Positional { .. } = arg.kind {
|
||||
continue;
|
||||
}
|
||||
formatted.push(formatted_arg(&arg)?);
|
||||
}
|
||||
Ok(formatted.join("\n\n"))
|
||||
}
|
||||
|
||||
fn formatted_arg(arg: &RGArg) -> io::Result<String> {
|
||||
match arg.kind {
|
||||
RGArgKind::Positional { .. } => {
|
||||
panic!("unexpected positional argument")
|
||||
}
|
||||
RGArgKind::Switch { long, short, multiple } => {
|
||||
let mut out = vec![];
|
||||
|
||||
let mut header = format!("--{}", long);
|
||||
if let Some(short) = short {
|
||||
header = format!("-{}, {}", short, header);
|
||||
}
|
||||
if multiple {
|
||||
header = format!("*{}* ...::", header);
|
||||
} else {
|
||||
header = format!("*{}*::", header);
|
||||
}
|
||||
writeln!(out, "{}", header)?;
|
||||
writeln!(out, "{}", formatted_doc_txt(arg)?)?;
|
||||
|
||||
Ok(String::from_utf8(out).unwrap())
|
||||
}
|
||||
RGArgKind::Flag { long, short, value_name, multiple, .. } => {
|
||||
let mut out = vec![];
|
||||
|
||||
let mut header = format!("--{}", long);
|
||||
if let Some(short) = short {
|
||||
header = format!("-{}, {}", short, header);
|
||||
}
|
||||
if multiple {
|
||||
header = format!("*{}* _{}_ ...::", header, value_name);
|
||||
} else {
|
||||
header = format!("*{}* _{}_::", header, value_name);
|
||||
}
|
||||
writeln!(out, "{}", header)?;
|
||||
writeln!(out, "{}", formatted_doc_txt(arg)?)?;
|
||||
|
||||
Ok(String::from_utf8(out).unwrap())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn formatted_doc_txt(arg: &RGArg) -> io::Result<String> {
|
||||
let paragraphs: Vec<String> = arg
|
||||
.doc_long
|
||||
.replace("{", "{")
|
||||
.replace("}", r"}")
|
||||
.split("\n\n")
|
||||
.map(|s| s.to_string())
|
||||
.collect();
|
||||
if paragraphs.is_empty() {
|
||||
return Err(ioerr(format!("missing docs for --{}", arg.name)));
|
||||
}
|
||||
let first = format!(" {}", paragraphs[0].replace("\n", "\n "));
|
||||
if paragraphs.len() == 1 {
|
||||
return Ok(first);
|
||||
}
|
||||
Ok(format!("{}\n+\n{}", first, paragraphs[1..].join("\n+\n")))
|
||||
}
|
||||
|
||||
fn ioerr(msg: String) -> io::Error {
|
||||
io::Error::new(io::ErrorKind::Other, msg)
|
||||
}
|
||||
|
||||
@@ -1,36 +0,0 @@
|
||||
# `before_deploy` phase: here we package the build artifacts
|
||||
|
||||
set -ex
|
||||
|
||||
. $(dirname $0)/utils.sh
|
||||
|
||||
# Generate artifacts for release
|
||||
mk_artifacts() {
|
||||
RUSTFLAGS="-C target-feature=+ssse3" \
|
||||
cargo build --target $TARGET --release --features simd-accel
|
||||
}
|
||||
|
||||
mk_tarball() {
|
||||
# create a "staging" directory
|
||||
local td=$(mktempd)
|
||||
local out_dir=$(pwd)
|
||||
local name="${PROJECT_NAME}-${TRAVIS_TAG}-${TARGET}"
|
||||
mkdir "$td/$name"
|
||||
mkdir "$td/$name/complete"
|
||||
|
||||
cp target/$TARGET/release/rg "$td/$name/"
|
||||
cp {doc/rg.1,README.md,UNLICENSE,COPYING,LICENSE-MIT} "$td/$name/"
|
||||
cp target/$TARGET/release/build/ripgrep-*/out/{_rg,rg.bash-completion,rg.fish,_rg.ps1} "$td/$name/complete/"
|
||||
|
||||
pushd $td
|
||||
tar czf "$out_dir/$name.tar.gz" *
|
||||
popd
|
||||
rm -r $td
|
||||
}
|
||||
|
||||
main() {
|
||||
mk_artifacts
|
||||
mk_tarball
|
||||
}
|
||||
|
||||
main
|
||||
36  ci/build-deb  (Executable file)
@@ -0,0 +1,36 @@
#!/bin/bash

set -e
D="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"

# This script builds a binary dpkg for Debian based distros. It does not
# currently run in CI, and is instead run manually and the resulting dpkg is
# uploaded to GitHub via the web UI.
#
# Note that this requires 'cargo deb', which can be installed with
# 'cargo install cargo-deb'.
#
# This should be run from the root of the ripgrep repo.

if ! command -V cargo-deb > /dev/null 2>&1; then
    echo "cargo-deb command missing" >&2
    exit 1
fi

# 'cargo deb' does not seem to provide a way to specify an asset that is
# created at build time, such as ripgrep's man page. To work around this,
# we force a debug build, copy out the man page (and shell completions)
# produced from that build, put it into a predictable location and then build
# the deb, which knows where to look.

DEPLOY_DIR=deployment/deb
OUT_DIR="$("$D"/cargo-out-dir target/debug/)"
mkdir -p "$DEPLOY_DIR"
cargo build

# Copy man page and shell completions.
cp "$OUT_DIR"/{rg.1,rg.bash,rg.fish,_rg} "$DEPLOY_DIR/"

# Since we're distributing the dpkg, we don't know whether the user will have
# PCRE2 installed, so just do a static build.
PCRE2_SYS_STATIC=1 cargo deb
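For orientation, a minimal usage sketch of the script above (run from the repository root; the package file name shown is illustrative, and the assumption that cargo-deb writes its output under target/debian/ reflects that tool's default behaviour rather than anything stated in this script):

    $ cargo install cargo-deb        # one-time setup
    $ ./ci/build-deb
    $ ls target/debian/
    ripgrep_0.10.0_amd64.deb         # illustrative name; depends on crate version and target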
19  ci/cargo-out-dir  (Executable file)
@@ -0,0 +1,19 @@
#!/bin/bash

# Finds Cargo's `OUT_DIR` directory from the most recent build.
#
# This requires one parameter corresponding to the target directory
# to search for the build output.

if [ $# != 1 ]; then
    echo "Usage: $(basename "$0") <target-dir>" >&2
    exit 2
fi

# This works by finding the most recent stamp file, which is produced by
# every ripgrep build.
target_dir="$1"
find "$target_dir" -name ripgrep-stamp -print0 \
    | xargs -0 ls -t \
    | head -n1 \
    | xargs dirname
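A usage sketch for the helper above (the build hash in the printed path is illustrative; the ripgrep-stamp file it locates is the one created by build.rs in the diff earlier in this change):

    $ ./ci/cargo-out-dir target/debug/
    target/debug/build/ripgrep-1a2b3c4d5e6f/out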
24  ci/docker/README.md  (Normal file)
@@ -0,0 +1,24 @@
These are Docker images used for cross compilation in CI builds (or locally)
via the [Cross](https://github.com/rust-embedded/cross) tool.

The Cross tool actually provides its own Docker images, and all Docker images
in this directory are derived from one of them. We provide our own in order
to customize the environment. For example, we need to install some things like
`asciidoc` in order to generate man pages. We also install compression tools
like `xz` so that tests for the `-z/--search-zip` flag are run.

If you make a change to a Docker image, then you can re-build it. `cd` into the
directory containing the `Dockerfile` and run:

    $ cd x86_64-unknown-linux-musl
    $ ./build

At this point, subsequent uses of `cross` will now use your built image since
Docker prefers local images over remote images. In order to make these changes
stick, they need to be pushed to Docker Hub:

    $ docker push burntsushi/cross:x86_64-unknown-linux-musl

Of course, only I (BurntSushi) can push to that location. To make `cross` use
a different location, then edit `Cross.toml` in the root of this repo to use
a different image name for the desired target.
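As a follow-up sketch of how these images are consumed (assuming the Cross tool is installed, for example with `cargo install cross`, and that a locally built image from the steps above is present), a cross-compiled release build for the corresponding target looks like an ordinary cargo invocation with `cross` substituted for `cargo`:

    $ cargo install cross
    $ cross build --release --target x86_64-unknown-linux-musl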
4  ci/docker/arm-unknown-linux-gnueabihf/Dockerfile  (Normal file)
@@ -0,0 +1,4 @@
FROM rustembedded/cross:arm-unknown-linux-gnueabihf

COPY stage/ubuntu-install-packages /
RUN /ubuntu-install-packages

5  ci/docker/arm-unknown-linux-gnueabihf/build  (Executable file)
@@ -0,0 +1,5 @@
#!/bin/sh

mkdir -p stage
cp ../../ubuntu-install-packages ./stage/
docker build -t burntsushi/cross:arm-unknown-linux-gnueabihf .

4  ci/docker/i686-unknown-linux-gnu/Dockerfile  (Normal file)
@@ -0,0 +1,4 @@
FROM rustembedded/cross:i686-unknown-linux-gnu

COPY stage/ubuntu-install-packages /
RUN /ubuntu-install-packages

5  ci/docker/i686-unknown-linux-gnu/build  (Executable file)
@@ -0,0 +1,5 @@
#!/bin/sh

mkdir -p stage
cp ../../ubuntu-install-packages ./stage/
docker build -t burntsushi/cross:i686-unknown-linux-gnu .

4  ci/docker/mips64-unknown-linux-gnuabi64/Dockerfile  (Normal file)
@@ -0,0 +1,4 @@
FROM rustembedded/cross:mips64-unknown-linux-gnuabi64

COPY stage/ubuntu-install-packages /
RUN /ubuntu-install-packages

5  ci/docker/mips64-unknown-linux-gnuabi64/build  (Executable file)
@@ -0,0 +1,5 @@
#!/bin/sh

mkdir -p stage
cp ../../ubuntu-install-packages ./stage/
docker build -t burntsushi/cross:mips64-unknown-linux-gnuabi64 .

4  ci/docker/x86_64-unknown-linux-musl/Dockerfile  (Normal file)
@@ -0,0 +1,4 @@
FROM rustembedded/cross:x86_64-unknown-linux-musl

COPY stage/ubuntu-install-packages /
RUN /ubuntu-install-packages

5  ci/docker/x86_64-unknown-linux-musl/build  (Executable file)
@@ -0,0 +1,5 @@
#!/bin/sh

mkdir -p stage
cp ../../ubuntu-install-packages ./stage/
docker build -t burntsushi/cross:x86_64-unknown-linux-musl .
@@ -1,60 +0,0 @@
|
||||
# `install` phase: install stuff needed for the `script` phase
|
||||
|
||||
set -ex
|
||||
|
||||
. $(dirname $0)/utils.sh
|
||||
|
||||
install_c_toolchain() {
|
||||
case $TARGET in
|
||||
aarch64-unknown-linux-gnu)
|
||||
sudo apt-get install -y --no-install-recommends \
|
||||
gcc-aarch64-linux-gnu libc6-arm64-cross libc6-dev-arm64-cross
|
||||
;;
|
||||
*)
|
||||
# For other targets, this is handled by addons.apt.packages in .travis.yml
|
||||
;;
|
||||
esac
|
||||
}
|
||||
|
||||
install_rustup() {
|
||||
# uninstall the rust toolchain installed by travis, we are going to use rustup
|
||||
sh ~/rust/lib/rustlib/uninstall.sh
|
||||
|
||||
curl https://sh.rustup.rs -sSf | sh -s -- -y --default-toolchain=$TRAVIS_RUST_VERSION
|
||||
|
||||
rustc -V
|
||||
cargo -V
|
||||
}
|
||||
|
||||
install_standard_crates() {
|
||||
if [ $(host) != "$TARGET" ]; then
|
||||
rustup target add $TARGET
|
||||
fi
|
||||
}
|
||||
|
||||
configure_cargo() {
|
||||
local prefix=$(gcc_prefix)
|
||||
|
||||
if [ ! -z $prefix ]; then
|
||||
# information about the cross compiler
|
||||
${prefix}gcc -v
|
||||
|
||||
# tell cargo which linker to use for cross compilation
|
||||
mkdir -p .cargo
|
||||
cat >>.cargo/config <<EOF
|
||||
[target.$TARGET]
|
||||
linker = "${prefix}gcc"
|
||||
EOF
|
||||
fi
|
||||
}
|
||||
|
||||
main() {
|
||||
install_c_toolchain
|
||||
install_rustup
|
||||
install_standard_crates
|
||||
configure_cargo
|
||||
|
||||
# TODO if you need to install extra stuff add it here
|
||||
}
|
||||
|
||||
main
|
||||
3  ci/macos-install-packages  (Executable file)
@@ -0,0 +1,3 @@
#!/bin/sh

brew install asciidoc docbook-xsl
40  ci/script.sh
@@ -1,40 +0,0 @@
|
||||
# `script` phase: you usually build, test and generate docs in this phase
|
||||
|
||||
set -ex
|
||||
|
||||
. $(dirname $0)/utils.sh
|
||||
|
||||
# NOTE Workaround for rust-lang/rust#31907 - disable doc tests when cross compiling
|
||||
# This has been fixed in the nightly channel but it would take a while to reach the other channels
|
||||
disable_cross_doctests() {
|
||||
if [ $(host) != "$TARGET" ] && [ "$TRAVIS_RUST_VERSION" = "stable" ]; then
|
||||
if [ "$TRAVIS_OS_NAME" = "osx" ]; then
|
||||
brew install gnu-sed --default-names
|
||||
fi
|
||||
find src -name '*.rs' -type f | xargs sed -i -e 's:\(//.\s*```\):\1 ignore,:g'
|
||||
fi
|
||||
}
|
||||
|
||||
run_test_suite() {
|
||||
cargo clean --target $TARGET --verbose
|
||||
cargo build --target $TARGET --verbose
|
||||
cargo test --target $TARGET --verbose
|
||||
cargo build --target $TARGET --verbose --manifest-path grep/Cargo.toml
|
||||
cargo test --target $TARGET --verbose --manifest-path grep/Cargo.toml
|
||||
cargo build --target $TARGET --verbose --manifest-path globset/Cargo.toml
|
||||
cargo test --target $TARGET --verbose --manifest-path globset/Cargo.toml
|
||||
cargo build --target $TARGET --verbose --manifest-path ignore/Cargo.toml
|
||||
cargo test --target $TARGET --verbose --manifest-path ignore/Cargo.toml
|
||||
cargo build --target $TARGET --verbose --manifest-path termcolor/Cargo.toml
|
||||
cargo test --target $TARGET --verbose --manifest-path termcolor/Cargo.toml
|
||||
|
||||
# sanity check the file type
|
||||
file target/$TARGET/debug/rg
|
||||
}
|
||||
|
||||
main() {
|
||||
# disable_cross_doctests
|
||||
run_test_suite
|
||||
}
|
||||
|
||||
main
|
||||
0  ci/sha256.sh → ci/sha256-releases  (Normal file → Executable file)
97  ci/test-complete  (Executable file)
@@ -0,0 +1,97 @@
|
||||
#!/usr/bin/env zsh
|
||||
|
||||
emulate zsh -o extended_glob -o no_function_argzero -o no_unset
|
||||
|
||||
##
|
||||
# Compares options in `rg --help` output to options in zsh completion function
|
||||
|
||||
get_comp_args() {
|
||||
# Technically there are many options that the completion system sets that
|
||||
# our function may rely on, but we'll trust that we've got it mostly right
|
||||
setopt local_options unset
|
||||
|
||||
# Our completion function recognises a special variable which tells it to
|
||||
# dump the _arguments specs and then just return. But do this in a sub-shell
|
||||
# anyway to avoid any weirdness
|
||||
( _RG_COMPLETE_LIST_ARGS=1 source $1 )
|
||||
}
|
||||
|
||||
main() {
|
||||
local diff
|
||||
local rg="${0:a:h}/../${TARGET_DIR:-target}/release/rg"
|
||||
local _rg="${0:a:h}/../complete/_rg"
|
||||
local -a help_args comp_args
|
||||
|
||||
[[ -e $rg ]] || rg=${rg/%\/release\/rg/\/debug\/rg}
|
||||
|
||||
rg=${rg:a}
|
||||
_rg=${_rg:a}
|
||||
|
||||
[[ -e $rg ]] || {
|
||||
print -r >&2 "File not found: $rg"
|
||||
return 1
|
||||
}
|
||||
[[ -e $_rg ]] || {
|
||||
print -r >&2 "File not found: $_rg"
|
||||
return 1
|
||||
}
|
||||
|
||||
print -rl - 'Comparing options:' "-$rg" "+$_rg"
|
||||
|
||||
# 'Parse' options out of the `--help` output. To prevent false positives we
|
||||
# only look at lines where the first non-white-space character is `-`, or
|
||||
# where a long option starting with certain letters (see `_rg`) is found.
|
||||
# Occasionally we may have to handle some manually, however
|
||||
help_args=( ${(f)"$(
|
||||
$rg --help |
|
||||
$rg -i -- '^\s+--?[a-z0-9]|--[a-z]' |
|
||||
$rg -ior '$1' -- $'[\t /\"\'`.,](-[a-z0-9]|--[a-z0-9-]+)\\b' |
|
||||
$rg -v -- --print0 | # False positives
|
||||
sort -u
|
||||
)"} )
|
||||
|
||||
# 'Parse' options out of the completion function
|
||||
comp_args=( ${(f)"$( get_comp_args $_rg )"} )
|
||||
|
||||
# Note that we currently exclude hidden (!...) options; matching these
|
||||
# properly against the `--help` output could be irritating
|
||||
comp_args=( ${comp_args#\(*\)} ) # Strip excluded options
|
||||
comp_args=( ${comp_args#\*} ) # Strip repetition indicator
|
||||
comp_args=( ${comp_args%%-[:[]*} ) # Strip everything after -optname-
|
||||
comp_args=( ${comp_args%%[:+=[]*} ) # Strip everything after other optspecs
|
||||
comp_args=( ${comp_args##[^-]*} ) # Remove non-options
|
||||
comp_args=( ${(f)"$( print -rl - $comp_args | sort -u )"} )
|
||||
|
||||
(( $#help_args )) || {
|
||||
print -r >&2 'Failed to get help_args'
|
||||
return 1
|
||||
}
|
||||
(( $#comp_args )) || {
|
||||
print -r >&2 'Failed to get comp_args'
|
||||
return 1
|
||||
}
|
||||
|
||||
diff="$(
|
||||
if diff --help 2>&1 | grep -qF -- '--label'; then
|
||||
diff -U2 \
|
||||
--label '`rg --help`' \
|
||||
--label '`_rg`' \
|
||||
=( print -rl - $help_args ) =( print -rl - $comp_args )
|
||||
else
|
||||
diff -U2 \
|
||||
-L '`rg --help`' \
|
||||
-L '`_rg`' \
|
||||
=( print -rl - $help_args ) =( print -rl - $comp_args )
|
||||
fi
|
||||
)"
|
||||
|
||||
(( $#diff )) && {
|
||||
printf >&2 '%s\n' 'zsh completion options differ from `--help` options:'
|
||||
printf >&2 '%s\n' $diff
|
||||
return 1
|
||||
}
|
||||
printf 'OK\n'
|
||||
return 0
|
||||
}
|
||||
|
||||
main "$@"
|
||||
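The consistency check above is meant to be run after building ripgrep. A sketch of a typical invocation and the shape of its output, derived from the `print` and `printf` calls in the script itself (the paths shown are illustrative):

    $ cargo build --release
    $ ./ci/test-complete
    Comparing options:
    -/path/to/ripgrep/target/release/rg
    +/path/to/ripgrep/complete/_rg
    OK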
6  ci/ubuntu-install-packages  (Executable file)
@@ -0,0 +1,6 @@
#!/bin/sh

sudo apt-get update
sudo apt-get install -y --no-install-recommends \
    libxslt1-dev asciidoc docbook-xsl xsltproc libxml2-utils \
    zsh xz-utils liblz4-tool musl-tools
113  ci/utils.sh
@@ -1,5 +1,19 @@
|
||||
mktempd() {
|
||||
echo $(mktemp -d 2>/dev/null || mktemp -d -t tmp)
|
||||
#!/bin/bash
|
||||
|
||||
# Various utility functions used through CI.
|
||||
|
||||
# Finds Cargo's `OUT_DIR` directory from the most recent build.
|
||||
#
|
||||
# This requires one parameter corresponding to the target directory
|
||||
# to search for the build output.
|
||||
cargo_out_dir() {
|
||||
# This works by finding the most recent stamp file, which is produced by
|
||||
# every ripgrep build.
|
||||
target_dir="$1"
|
||||
find "$target_dir" -name ripgrep-stamp -print0 \
|
||||
| xargs -0 ls -t \
|
||||
| head -n1 \
|
||||
| xargs dirname
|
||||
}
|
||||
|
||||
host() {
|
||||
@@ -13,37 +27,12 @@ host() {
|
||||
esac
|
||||
}
|
||||
|
||||
gcc_prefix() {
|
||||
case "$TARGET" in
|
||||
aarch64-unknown-linux-gnu)
|
||||
echo aarch64-linux-gnu-
|
||||
;;
|
||||
arm*-gnueabihf)
|
||||
echo arm-linux-gnueabihf-
|
||||
;;
|
||||
*)
|
||||
return
|
||||
;;
|
||||
esac
|
||||
}
|
||||
|
||||
dobin() {
|
||||
[ -z $MAKE_DEB ] && die 'dobin: $MAKE_DEB not set'
|
||||
[ $# -lt 1 ] && die "dobin: at least one argument needed"
|
||||
|
||||
local f prefix=$(gcc_prefix)
|
||||
for f in "$@"; do
|
||||
install -m0755 $f $dtd/debian/usr/bin/
|
||||
${prefix}strip -s $dtd/debian/usr/bin/$(basename $f)
|
||||
done
|
||||
}
|
||||
|
||||
architecture() {
|
||||
case $1 in
|
||||
x86_64-unknown-linux-gnu|x86_64-unknown-linux-musl)
|
||||
case "$TARGET" in
|
||||
x86_64-*)
|
||||
echo amd64
|
||||
;;
|
||||
i686-unknown-linux-gnu|i686-unknown-linux-musl)
|
||||
i686-*|i586-*|i386-*)
|
||||
echo i386
|
||||
;;
|
||||
arm*-unknown-linux-gnueabihf)
|
||||
@@ -54,3 +43,67 @@ architecture() {
|
||||
;;
|
||||
esac
|
||||
}
|
||||
|
||||
gcc_prefix() {
|
||||
case "$(architecture)" in
|
||||
armhf)
|
||||
echo arm-linux-gnueabihf-
|
||||
;;
|
||||
*)
|
||||
return
|
||||
;;
|
||||
esac
|
||||
}
|
||||
|
||||
is_musl() {
|
||||
case "$TARGET" in
|
||||
*-musl) return 0 ;;
|
||||
*) return 1 ;;
|
||||
esac
|
||||
}
|
||||
|
||||
is_x86() {
|
||||
case "$(architecture)" in
|
||||
amd64|i386) return 0 ;;
|
||||
*) return 1 ;;
|
||||
esac
|
||||
}
|
||||
|
||||
is_x86_64() {
|
||||
case "$(architecture)" in
|
||||
amd64) return 0 ;;
|
||||
*) return 1 ;;
|
||||
esac
|
||||
}
|
||||
|
||||
is_arm() {
|
||||
case "$(architecture)" in
|
||||
armhf) return 0 ;;
|
||||
*) return 1 ;;
|
||||
esac
|
||||
}
|
||||
|
||||
is_linux() {
|
||||
case "$TRAVIS_OS_NAME" in
|
||||
linux) return 0 ;;
|
||||
*) return 1 ;;
|
||||
esac
|
||||
}
|
||||
|
||||
is_osx() {
|
||||
case "$TRAVIS_OS_NAME" in
|
||||
osx) return 0 ;;
|
||||
*) return 1 ;;
|
||||
esac
|
||||
}
|
||||
|
||||
builder() {
|
||||
if is_musl && is_x86_64; then
|
||||
# cargo install cross
|
||||
# To work around https://github.com/rust-embedded/cross/issues/357
|
||||
cargo install --git https://github.com/rust-embedded/cross --force
|
||||
echo "cross"
|
||||
else
|
||||
echo "cargo"
|
||||
fi
|
||||
}
|
||||
|
||||
8  compile
@@ -1,8 +0,0 @@
#!/bin/sh

# export RUSTFLAGS="-C target-feature=+ssse3"
# cargo build --release --features 'simd-accel'

export RUSTFLAGS="-C target-cpu=native"
cargo build --release --features 'simd-accel avx-accel'
# cargo build --release --features 'simd-accel avx-accel' --target x86_64-unknown-linux-musl
638  complete/_rg  (Normal file)
@@ -0,0 +1,638 @@
|
||||
#compdef rg
|
||||
|
||||
##
|
||||
# zsh completion function for ripgrep
|
||||
#
|
||||
# Run ci/test-complete after building to ensure that the options supported by
|
||||
# this function stay in synch with the `rg` binary.
|
||||
#
|
||||
# For convenience, a completion reference guide is included at the bottom of
|
||||
# this file.
|
||||
#
|
||||
# Originally based on code from the zsh-users project — see copyright notice
|
||||
# below.
|
||||
|
||||
_rg() {
|
||||
local curcontext=$curcontext no='!' descr ret=1
|
||||
local -a context line state state_descr args tmp suf
|
||||
local -A opt_args
|
||||
|
||||
# ripgrep has many options which negate the effect of a more common one — for
|
||||
# example, `--no-column` to negate `--column`, and `--messages` to negate
|
||||
# `--no-messages`. There are so many of these, and they're so infrequently
|
||||
# used, that some users will probably find it irritating if they're completed
|
||||
# indiscriminately, so let's not do that unless either the current prefix
|
||||
# matches one of those negation options or the user has the `complete-all`
|
||||
# style set. Note that this prefix check has to be updated manually to account
|
||||
# for all of the potential negation options listed below!
|
||||
if
|
||||
# We also want to list all of these options during testing
|
||||
[[ $_RG_COMPLETE_LIST_ARGS == (1|t*|y*) ]] ||
|
||||
# (--[imnp]* => --ignore*, --messages, --no-*, --pcre2-unicode)
|
||||
[[ $PREFIX$SUFFIX == --[imnp]* ]] ||
|
||||
zstyle -t ":complete:$curcontext:*" complete-all
|
||||
then
|
||||
no=
|
||||
fi
|
||||
|
||||
# We make heavy use of argument groups here to prevent the option specs from
|
||||
# growing unwieldy. These aren't supported in zsh <5.4, though, so we'll strip
|
||||
# them out below if necessary. This makes the exclusions inaccurate on those
|
||||
# older versions, but oh well — it's not that big a deal
|
||||
args=(
|
||||
+ '(exclusive)' # Misc. fully exclusive options
|
||||
'(: * -)'{-h,--help}'[display help information]'
|
||||
'(: * -)'{-V,--version}'[display version information]'
|
||||
'(: * -)'--pcre2-version'[print the version of PCRE2 used by ripgrep, if available]'
|
||||
|
||||
+ '(buffered)' # buffering options
|
||||
'--line-buffered[force line buffering]'
|
||||
$no"--no-line-buffered[don't force line buffering]"
|
||||
'--block-buffered[force block buffering]'
|
||||
$no"--no-block-buffered[don't force block buffering]"
|
||||
|
||||
+ '(case)' # Case-sensitivity options
|
||||
{-i,--ignore-case}'[search case-insensitively]'
|
||||
{-s,--case-sensitive}'[search case-sensitively]'
|
||||
{-S,--smart-case}'[search case-insensitively if pattern is all lowercase]'
|
||||
|
||||
+ '(context-a)' # Context (after) options
|
||||
'(context-c)'{-A+,--after-context=}'[specify lines to show after each match]:number of lines'
|
||||
|
||||
+ '(context-b)' # Context (before) options
|
||||
'(context-c)'{-B+,--before-context=}'[specify lines to show before each match]:number of lines'
|
||||
|
||||
+ '(context-c)' # Context (combined) options
|
||||
'(context-a context-b)'{-C+,--context=}'[specify lines to show before and after each match]:number of lines'
|
||||
|
||||
+ '(column)' # Column options
|
||||
'--column[show column numbers for matches]'
|
||||
$no"--no-column[don't show column numbers for matches]"
|
||||
|
||||
+ '(count)' # Counting options
|
||||
{-c,--count}'[only show count of matching lines for each file]'
|
||||
'--count-matches[only show count of individual matches for each file]'
|
||||
'--include-zero[include files with zero matches in summary]'
|
||||
|
||||
+ '(encoding)' # Encoding options
|
||||
{-E+,--encoding=}'[specify text encoding of files to search]: :_rg_encodings'
|
||||
$no'--no-encoding[use default text encoding]'
|
||||
|
||||
+ '(engine)' # Engine choice options
|
||||
'--engine=[select which regex engine to use]:when:((
|
||||
default\:"use default engine"
|
||||
pcre2\:"identical to --pcre2"
|
||||
auto\:"identical to --auto-hybrid-regex"
|
||||
))'
|
||||
|
||||
+ file # File-input options
|
||||
'(1)*'{-f+,--file=}'[specify file containing patterns to search for]: :_files'
|
||||
|
||||
+ '(file-match)' # Files with/without match options
|
||||
'(stats)'{-l,--files-with-matches}'[only show names of files with matches]'
|
||||
'(stats)--files-without-match[only show names of files without matches]'
|
||||
|
||||
+ '(file-name)' # File-name options
|
||||
{-H,--with-filename}'[show file name for matches]'
|
||||
{-I,--no-filename}"[don't show file name for matches]"
|
||||
|
||||
+ '(file-system)' # File system options
|
||||
"--one-file-system[don't descend into directories on other file systems]"
|
||||
$no'--no-one-file-system[descend into directories on other file systems]'
|
||||
|
||||
+ '(fixed)' # Fixed-string options
|
||||
{-F,--fixed-strings}'[treat pattern as literal string instead of regular expression]'
|
||||
$no"--no-fixed-strings[don't treat pattern as literal string]"
|
||||
|
||||
+ '(follow)' # Symlink-following options
|
||||
{-L,--follow}'[follow symlinks]'
|
||||
$no"--no-follow[don't follow symlinks]"
|
||||
|
||||
+ glob # File-glob options
|
||||
'*'{-g+,--glob=}'[include/exclude files matching specified glob]:glob'
|
||||
'*--iglob=[include/exclude files matching specified case-insensitive glob]:glob'
|
||||
|
||||
+ '(glob-case-insensitive)' # File-glob case sensitivity options
|
||||
'--glob-case-insensitive[treat -g/--glob patterns case insensitively]'
|
||||
$no'--no-glob-case-insensitive[treat -g/--glob patterns case sensitively]'
|
||||
|
||||
+ '(heading)' # Heading options
|
||||
'(pretty-vimgrep)--heading[show matches grouped by file name]'
|
||||
"(pretty-vimgrep)--no-heading[don't show matches grouped by file name]"
|
||||
|
||||
+ '(hidden)' # Hidden-file options
|
||||
'--hidden[search hidden files and directories]'
|
||||
$no"--no-hidden[don't search hidden files and directories]"
|
||||
|
||||
+ '(hybrid)' # hybrid regex options
|
||||
'--auto-hybrid-regex[dynamically use PCRE2 if necessary]'
|
||||
$no"--no-auto-hybrid-regex[don't dynamically use PCRE2 if necessary]"
|
||||
|
||||
+ '(ignore)' # Ignore-file options
|
||||
"(--no-ignore-global --no-ignore-parent --no-ignore-vcs --no-ignore-dot)--no-ignore[don't respect ignore files]"
|
||||
$no'(--ignore-global --ignore-parent --ignore-vcs --ignore-dot)--ignore[respect ignore files]'
|
||||
|
||||
+ '(ignore-file-case-insensitive)' # Ignore-file case sensitivity options
|
||||
'--ignore-file-case-insensitive[process ignore files case insensitively]'
|
||||
$no'--no-ignore-file-case-insensitive[process ignore files case sensitively]'
|
||||
|
||||
+ '(ignore-exclude)' # Local exclude (ignore)-file options
|
||||
"--no-ignore-exclude[don't respect local exclude (ignore) files]"
|
||||
$no'--ignore-exclude[respect local exclude (ignore) files]'
|
||||
|
||||
+ '(ignore-global)' # Global ignore-file options
|
||||
"--no-ignore-global[don't respect global ignore files]"
|
||||
$no'--ignore-global[respect global ignore files]'
|
||||
|
||||
+ '(ignore-parent)' # Parent ignore-file options
|
||||
"--no-ignore-parent[don't respect ignore files in parent directories]"
|
||||
$no'--ignore-parent[respect ignore files in parent directories]'
|
||||
|
||||
+ '(ignore-vcs)' # VCS ignore-file options
|
||||
"--no-ignore-vcs[don't respect version control ignore files]"
|
||||
$no'--ignore-vcs[respect version control ignore files]'
|
||||
|
||||
+ '(require-git)' # git specific settings
|
||||
"--no-require-git[don't require git repository to respect gitignore rules]"
|
||||
$no'--require-git[require git repository to respect gitignore rules]'
|
||||
|
||||
+ '(ignore-dot)' # .ignore options
|
||||
"--no-ignore-dot[don't respect .ignore files]"
|
||||
$no'--ignore-dot[respect .ignore files]'
|
||||
|
||||
+ '(ignore-files)' # custom global ignore file options
|
||||
"--no-ignore-files[don't respect --ignore-file flags]"
|
||||
$no'--ignore-files[respect --ignore-file files]'
|
||||
|
||||
+ '(json)' # JSON options
|
||||
'--json[output results in JSON Lines format]'
|
||||
$no"--no-json[don't output results in JSON Lines format]"
|
||||
|
||||
+ '(line-number)' # Line-number options
|
||||
{-n,--line-number}'[show line numbers for matches]'
|
||||
{-N,--no-line-number}"[don't show line numbers for matches]"
|
||||
|
||||
+ '(line-terminator)' # Line-terminator options
|
||||
'--crlf[use CRLF as line terminator]'
|
||||
$no"--no-crlf[don't use CRLF as line terminator]"
|
||||
'(text)--null-data[use NUL as line terminator]'
|
||||
|
||||
+ '(max-columns-preview)' # max column preview options
|
||||
'--max-columns-preview[show preview for long lines (with -M)]'
|
||||
$no"--no-max-columns-preview[don't show preview for long lines (with -M)]"
|
||||
|
||||
+ '(max-depth)' # Directory-depth options
|
||||
'--max-depth=[specify max number of directories to descend]:number of directories'
|
||||
'!--maxdepth=:number of directories'
|
||||
|
||||
+ '(messages)' # Error-message options
|
||||
'(--no-ignore-messages)--no-messages[suppress some error messages]'
|
||||
$no"--messages[don't suppress error messages affected by --no-messages]"
|
||||
|
||||
+ '(messages-ignore)' # Ignore-error message options
|
||||
"--no-ignore-messages[don't show ignore-file parse error messages]"
|
||||
$no'--ignore-messages[show ignore-file parse error messages]'
|
||||
|
||||
+ '(mmap)' # mmap options
|
||||
'--mmap[search using memory maps when possible]'
|
||||
"--no-mmap[don't search using memory maps]"
|
||||
|
||||
+ '(multiline)' # Multiline options
|
||||
{-U,--multiline}'[permit matching across multiple lines]'
|
||||
$no'(multiline-dotall)--no-multiline[restrict matches to at most one line each]'
|
||||
|
||||
+ '(multiline-dotall)' # Multiline DOTALL options
|
||||
'(--no-multiline)--multiline-dotall[allow "." to match newline (with -U)]'
|
||||
$no"(--no-multiline)--no-multiline-dotall[don't allow \".\" to match newline (with -U)]"
|
||||
|
||||
+ '(only)' # Only-match options
|
||||
{-o,--only-matching}'[show only matching part of each line]'
|
||||
|
||||
+ '(passthru)' # Pass-through options
|
||||
'(--vimgrep)--passthru[show both matching and non-matching lines]'
|
||||
'!(--vimgrep)--passthrough'
|
||||
|
||||
+ '(pcre2)' # PCRE2 options
|
||||
{-P,--pcre2}'[enable matching with PCRE2]'
|
||||
$no'(pcre2-unicode)--no-pcre2[disable matching with PCRE2]'
|
||||
|
||||
+ '(pcre2-unicode)' # PCRE2 Unicode options
|
||||
$no'(--no-pcre2 --no-pcre2-unicode)--pcre2-unicode[enable PCRE2 Unicode mode (with -P)]'
|
||||
'(--no-pcre2 --pcre2-unicode)--no-pcre2-unicode[disable PCRE2 Unicode mode (with -P)]'
|
||||
|
||||
+ '(pre)' # Preprocessing options
|
||||
'(-z --search-zip)--pre=[specify preprocessor utility]:preprocessor utility:_command_names -e'
|
||||
$no'--no-pre[disable preprocessor utility]'
|
||||
|
||||
+ pre-glob # Preprocessing glob options
|
||||
'*--pre-glob[include/exclude files for preprocessing with --pre]'
|
||||
|
||||
+ '(pretty-vimgrep)' # Pretty/vimgrep display options
|
||||
'(heading)'{-p,--pretty}'[alias for --color=always --heading -n]'
|
||||
'(heading passthru)--vimgrep[show results in vim-compatible format]'
|
||||
|
||||
+ regexp # Explicit pattern options
|
||||
'(1 file)*'{-e+,--regexp=}'[specify pattern]:pattern'
|
||||
|
||||
+ '(replace)' # Replacement options
|
||||
{-r+,--replace=}'[specify string used to replace matches]:replace string'
|
||||
|
||||
+ '(sort)' # File-sorting options
|
||||
'(threads)--sort=[sort results in ascending order (disables parallelism)]:sort method:((
|
||||
none\:"no sorting"
|
||||
path\:"sort by file path"
|
||||
modified\:"sort by last modified time"
|
||||
accessed\:"sort by last accessed time"
|
||||
created\:"sort by creation time"
|
||||
))'
|
||||
'(threads)--sortr=[sort results in descending order (disables parallelism)]:sort method:((
|
||||
none\:"no sorting"
|
||||
path\:"sort by file path"
|
||||
modified\:"sort by last modified time"
|
||||
accessed\:"sort by last accessed time"
|
||||
created\:"sort by creation time"
|
||||
))'
|
||||
'!(threads)--sort-files[sort results by file path (disables parallelism)]'
|
||||
|
||||
+ '(stats)' # Statistics options
|
||||
'(--files file-match)--stats[show search statistics]'
|
||||
$no"--no-stats[don't show search statistics]"
|
||||
|
||||
+ '(text)' # Binary-search options
|
||||
{-a,--text}'[search binary files as if they were text]'
|
||||
"--binary[search binary files, don't print binary data]"
|
||||
$no"--no-binary[don't search binary files]"
|
||||
$no"(--null-data)--no-text[don't search binary files as if they were text]"
|
||||
|
||||
+ '(threads)' # Thread-count options
|
||||
'(sort)'{-j+,--threads=}'[specify approximate number of threads to use]:number of threads'
|
||||
|
||||
+ '(trim)' # Trim options
|
||||
'--trim[trim any ASCII whitespace prefix from each line]'
|
||||
$no"--no-trim[don't trim ASCII whitespace prefix from each line]"
|
||||
|
||||
+ type # Type options
|
||||
'*'{-t+,--type=}'[only search files matching specified type]: :_rg_types'
|
||||
'*--type-add=[add new glob for specified file type]: :->typespec'
|
||||
'*--type-clear=[clear globs previously defined for specified file type]: :_rg_types'
|
||||
# This should actually be exclusive with everything but other type options
|
||||
'(: *)--type-list[show all supported file types and their associated globs]'
|
||||
'*'{-T+,--type-not=}"[don't search files matching specified file type]: :_rg_types"
|
||||
|
||||
+ '(word-line)' # Whole-word/line match options
|
||||
{-w,--word-regexp}'[only show matches surrounded by word boundaries]'
|
||||
{-x,--line-regexp}'[only show matches surrounded by line boundaries]'
|
||||
|
||||
+ '(unicode)' # Unicode options
|
||||
$no'--unicode[enable Unicode mode]'
|
||||
'--no-unicode[disable Unicode mode]'
|
||||
|
||||
+ '(zip)' # Compression options
|
||||
'(--pre)'{-z,--search-zip}'[search in compressed files]'
|
||||
$no"--no-search-zip[don't search in compressed files]"
|
||||
|
||||
+ misc # Other options — no need to separate these at the moment
|
||||
'(-b --byte-offset)'{-b,--byte-offset}'[show 0-based byte offset for each matching line]'
|
||||
'--color=[specify when to use colors in output]:when:((
|
||||
never\:"never use colors"
|
||||
auto\:"use colors or not based on stdout, TERM, etc."
|
||||
always\:"always use colors"
|
||||
ansi\:"always use ANSI colors (even on Windows)"
|
||||
))'
|
||||
'*--colors=[specify color and style settings]: :->colorspec'
|
||||
'--context-separator=[specify string used to separate non-continuous context lines in output]:separator'
|
||||
$no"--no-context-separator[don't print context separators]"
|
||||
'--debug[show debug messages]'
|
||||
'--trace[show more verbose debug messages]'
|
||||
'--dfa-size-limit=[specify upper size limit of generated DFA]:DFA size (bytes)'
|
||||
"(1 stats)--files[show each file that would be searched (but don't search)]"
|
||||
'*--ignore-file=[specify additional ignore file]:ignore file:_files'
|
||||
'(-v --invert-match)'{-v,--invert-match}'[invert matching]'
|
||||
'(-M --max-columns)'{-M+,--max-columns=}'[specify max length of lines to print]:number of bytes'
|
||||
'(-m --max-count)'{-m+,--max-count=}'[specify max number of matches per file]:number of matches'
|
||||
'--max-filesize=[specify size above which files should be ignored]:file size (bytes)'
|
||||
"--no-config[don't load configuration files]"
|
||||
'(-0 --null)'{-0,--null}'[print NUL byte after file names]'
|
||||
'--path-separator=[specify path separator to use when printing file names]:separator'
|
||||
'(-q --quiet)'{-q,--quiet}'[suppress normal output]'
|
||||
'--regex-size-limit=[specify upper size limit of compiled regex]:regex size (bytes)'
|
||||
'*'{-u,--unrestricted}'[reduce level of "smart" searching]'
|
||||
|
||||
+ operand # Operands
|
||||
'(--files --type-list file regexp)1: :_guard "^-*" pattern'
|
||||
'(--type-list)*: :_files'
|
||||
)
|
||||
|
||||
# This is used with test-complete to verify that there are no options
|
||||
# listed in the help output that aren't also defined here
|
||||
[[ $_RG_COMPLETE_LIST_ARGS == (1|t*|y*) ]] && {
|
||||
print -rl - $args
|
||||
return 0
|
||||
}
|
||||
|
||||
# Strip out argument groups where unsupported (see above)
|
||||
[[ $ZSH_VERSION == (4|5.<0-3>)(.*)# ]] &&
|
||||
args=( ${(@)args:#(#i)(+|[a-z0-9][a-z0-9_-]#|\([a-z0-9][a-z0-9_-]#\))} )
|
||||
|
||||
_arguments -C -s -S : $args && ret=0
|
||||
|
||||
case $state in
|
||||
colorspec)
|
||||
if [[ ${IPREFIX#--*=}$PREFIX == [^:]# ]]; then
|
||||
suf=( -qS: )
|
||||
tmp=(
|
||||
'column:specify coloring for column numbers'
|
||||
'line:specify coloring for line numbers'
|
||||
'match:specify coloring for match text'
|
||||
'path:specify coloring for file names'
|
||||
)
|
||||
descr='color/style type'
|
||||
elif [[ ${IPREFIX#--*=}$PREFIX == (column|line|match|path):[^:]# ]]; then
|
||||
suf=( -qS: )
|
||||
tmp=(
|
||||
'none:clear color/style for type'
|
||||
'bg:specify background color'
|
||||
'fg:specify foreground color'
|
||||
'style:specify text style'
|
||||
)
|
||||
descr='color/style attribute'
|
||||
elif [[ ${IPREFIX#--*=}$PREFIX == [^:]##:(bg|fg):[^:]# ]]; then
|
||||
tmp=( black blue green red cyan magenta yellow white )
|
||||
descr='color name or r,g,b'
|
||||
elif [[ ${IPREFIX#--*=}$PREFIX == [^:]##:style:[^:]# ]]; then
|
||||
tmp=( {,no}bold {,no}intense {,no}underline )
|
||||
descr='style name'
|
||||
else
|
||||
_message -e colorspec 'no more arguments'
|
||||
fi
|
||||
|
||||
(( $#tmp )) && {
|
||||
compset -P '*:'
|
||||
_describe -t colorspec $descr tmp $suf && ret=0
|
||||
}
|
||||
;;
|
||||
|
||||
typespec)
|
||||
if compset -P '[^:]##:include:'; then
|
||||
_sequence -s , _rg_types && ret=0
|
||||
# @todo This bit in particular could be better, but it's a little
|
||||
# complex, and attempting to solve it seems to run us up against a crash
|
||||
# bug — zsh # 40362
|
||||
elif compset -P '[^:]##:'; then
|
||||
_message 'glob or include directive' && ret=1
|
||||
elif [[ ! -prefix *:* ]]; then
|
||||
_rg_types -qS : && ret=0
|
||||
fi
|
||||
;;
|
||||
esac
|
||||
|
||||
return ret
|
||||
}
|
||||
|
||||
# Complete encodings
|
||||
_rg_encodings() {
|
||||
local -a expl
|
||||
local -aU _encodings
|
||||
|
||||
# This is impossible to read, but these encodings rarely if ever change, so it
|
||||
# probably doesn't matter. They are derived from the list given here:
|
||||
# https://encoding.spec.whatwg.org/#concept-encoding-get
|
||||
_encodings=(
|
||||
{{,us-}ascii,arabic,chinese,cyrillic,greek{,8},hebrew,korean}
|
||||
logical visual mac {,cs}macintosh x-mac-{cyrillic,roman,ukrainian}
|
||||
866 ibm{819,866} csibm866
|
||||
big5{,-hkscs} {cn-,cs}big5 x-x-big5
|
||||
cp{819,866,125{0..8}} x-cp125{0..8}
|
||||
csiso2022{jp,kr} csiso8859{6,8}{e,i}
|
||||
csisolatin{{1..6},9} csisolatin{arabic,cyrillic,greek,hebrew}
|
||||
ecma-{114,118} asmo-708 elot_928 sun_eu_greek
|
||||
euc-{jp,kr} x-euc-jp cseuckr cseucpkdfmtjapanese
|
||||
{,x-}gbk csiso58gb231280 gb18030 {,cs}gb2312 gb_2312{,-80} hz-gb-2312
|
||||
iso-2022-{cn,cn-ext,jp,kr}
|
||||
iso8859{,-}{{1..11},13,14,15}
|
||||
iso-8859-{{1..11},{6,8}-{e,i},13,14,15,16} iso_8859-{{1..9},15}
|
||||
iso_8859-{1,2,6,7}:1987 iso_8859-{3,4,5,8}:1988 iso_8859-9:1989
|
||||
iso-ir-{58,100,101,109,110,126,127,138,144,148,149,157}
|
||||
koi{,8,8-r,8-ru,8-u,8_r} cskoi8r
|
||||
ks_c_5601-{1987,1989} ksc{,_}5691 csksc56011987
|
||||
latin{1..6} l{{1..6},9}
|
||||
shift{-,_}jis csshiftjis {,x-}sjis ms_kanji ms932
|
||||
utf{,-}8 utf-16{,be,le} unicode-1-1-utf-8
|
||||
windows-{31j,874,949,125{0..8}} dos-874 tis-620 ansi_x3.4-1968
|
||||
x-user-defined auto none
|
||||
)
|
||||
|
||||
_wanted encodings expl encoding compadd -a "$@" - _encodings
|
||||
}
|
||||
|
||||
# Complete file types
|
||||
_rg_types() {
|
||||
local -a expl
|
||||
local -aU _types
|
||||
|
||||
_types=( ${(@)${(f)"$( _call_program types rg --type-list )"}%%:*} )
|
||||
|
||||
_wanted types expl 'file type' compadd -a "$@" - _types
|
||||
}
|
||||
|
||||
_rg "$@"
|
||||
|
||||
################################################################################
|
||||
# ZSH COMPLETION REFERENCE
|
||||
#
|
||||
# For the convenience of developers who aren't especially familiar with zsh
|
||||
# completion functions, a brief reference guide follows. This is in no way
|
||||
# comprehensive; it covers just enough of the basic structure, syntax, and
|
||||
# conventions to help someone make simple changes like adding new options. For
|
||||
# more complete documentation regarding zsh completion functions, please see the
|
||||
# following:
|
||||
#
|
||||
# * http://zsh.sourceforge.net/Doc/Release/Completion-System.html
|
||||
# * https://github.com/zsh-users/zsh/blob/master/Etc/completion-style-guide
|
||||
#
|
||||
# OVERVIEW
|
||||
#
|
||||
# Most zsh completion functions are defined in terms of `_arguments`, which is a
|
||||
# shell function that takes a series of argument specifications. The specs for
|
||||
# `rg` are stored in an array, which is common for more complex functions; the
|
||||
# elements of the array are passed to `_arguments` on invocation.
|
||||
#
|
||||
# ARGUMENT-SPECIFICATION SYNTAX
|
||||
#
|
||||
# The following is a contrived example of the argument specs for a simple tool:
|
||||
#
|
||||
# '(: * -)'{-h,--help}'[display help information]'
|
||||
# '(-q -v --quiet --verbose)'{-q,--quiet}'[decrease output verbosity]'
|
||||
# '!(-q -v --quiet --verbose)--silent'
|
||||
# '(-q -v --quiet --verbose)'{-v,--verbose}'[increase output verbosity]'
|
||||
# '--color=[specify when to use colors]:when:(always never auto)'
|
||||
# '*:example file:_files'
|
||||
#
|
||||
# Although there may appear to be six specs here, there are actually nine; we
|
||||
# use brace expansion to combine specs for options that go by multiple names,
|
||||
# like `-q` and `--quiet`. This is customary, and ties in with the fact that zsh
|
||||
# merges completion possibilities together when they have the same description.
|
||||
#
|
||||
# The first line defines the option `-h`/`--help`. With most tools, it isn't
|
||||
# useful to complete anything after `--help` because it effectively overrides
|
||||
# all others; the `(: * -)` at the beginning of the spec tells zsh not to
|
||||
# complete any other operands (`:` and `*`) or options (`-`) after this one has
|
||||
# been used. The `[...]` at the end associates a description with `-h`/`--help`;
|
||||
# as mentioned, zsh will see the identical descriptions and merge these options
|
||||
# together when offering completion possibilities.
|
||||
#
|
||||
# The next line defines `-q`/`--quiet`. Here we don't want to suppress further
|
||||
# completions entirely, but we don't want to offer `-q` if `--quiet` has been
|
||||
# given (since they do the same thing), nor do we want to offer `-v` (since it
|
||||
# doesn't make sense to be quiet and verbose at the same time). We don't need to
|
||||
# tell zsh not to offer `--quiet` a second time, since that's the default
|
||||
# behaviour, but since this line expands to two specs describing `-q` *and*
|
||||
# `--quiet` we do need to explicitly list all of them here.
|
||||
#
|
||||
# The next line defines a hidden option `--silent` — maybe it's a deprecated
|
||||
# synonym for `--quiet`. The leading `!` indicates that zsh shouldn't offer this
|
||||
# option during completion. The benefit of providing a spec for an option that
|
||||
# shouldn't be completed is that, if someone *does* use it, we can correctly
|
||||
# suppress completion of other options afterwards.
|
||||
#
|
||||
# The next line defines `-v`/`--verbose`; this works just like `-q`/`--quiet`.
|
||||
#
|
||||
# The next line defines `--color`. In this example, `--color` doesn't have a
|
||||
# corresponding short option, so we don't need to use brace expansion. Further,
|
||||
# there are no other options it's exclusive with (just itself), so we don't need
|
||||
# to define those at the beginning. However, it does take a mandatory argument.
|
||||
# The `=` at the end of `--color=` indicates that the argument may appear either
|
||||
# like `--color always` or like `--color=always`; this is how most GNU-style
|
||||
# command-line tools work. The corresponding short option would normally use `+`
|
||||
# — for example, `-c+` would allow either `-c always` or `-calways`. For this
|
||||
# option, the arguments are known ahead of time, so we can simply list them in
|
||||
# parentheses at the end (`when` is used as the description for the argument).
|
||||
#
|
||||
# The last line defines an operand (a non-option argument). In this example, the
|
||||
# operand can be used any number of times (the leading `*`), and it should be a
|
||||
# file path, so we tell zsh to call the `_files` function to complete it. The
|
||||
# `example file` in the middle is the description to use for this operand; we
|
||||
# could use a space instead to accept the default provided by `_files`.
|
||||
#
|
||||
# GROUPING ARGUMENT SPECIFICATIONS
|
||||
#
|
||||
# Newer versions of zsh support grouping argument specs together. All specs
|
||||
# following a `+` and then a group name are considered to be members of the
|
||||
# named group. Grouping is useful mostly for organisational purposes; it makes
|
||||
# the relationship between different options more obvious, and makes it easier
|
||||
# to specify exclusions.
|
||||
#
|
||||
# We could rewrite our example above using grouping as follows:
|
||||
#
|
||||
# '(: * -)'{-h,--help}'[display help information]'
|
||||
# '--color=[specify when to use colors]:when:(always never auto)'
|
||||
# '*:example file:_files'
|
||||
# + '(verbosity)'
|
||||
# {-q,--quiet}'[decrease output verbosity]'
|
||||
# '!--silent'
|
||||
# {-v,--verbose}'[increase output verbosity]'
|
||||
#
|
||||
# Here we take advantage of a useful feature of spec grouping — when the group
|
||||
# name is surrounded by parentheses, as in `(verbosity)`, it tells zsh that all
|
||||
# of the options in that group are exclusive with each other. As a result, we
|
||||
# don't need to manually list out the exclusions at the beginning of each
|
||||
# option.
|
||||
#
|
||||
# Groups can also be referred to by name in other argument specs; for example:
|
||||
#
|
||||
# '(xyz)--aaa' '*: :_files'
|
||||
# + xyz --xxx --yyy --zzz
|
||||
#
|
||||
# Here we use the group name `xyz` to tell zsh that `--xxx`, `--yyy`, and
|
||||
# `--zzz` are not to be completed after `--aaa`. This makes the exclusion list
|
||||
# much more compact and reusable.
|
||||
#
|
||||
# CONVENTIONS
|
||||
#
|
||||
# zsh completion functions generally adhere to the following conventions:
|
||||
#
|
||||
# * Use two spaces for indentation
|
||||
# * Combine specs for options with different names using brace expansion
|
||||
# * In combined specs, list the short option first (as in `{-a,--text}`)
|
||||
# * Use `+` or `=` as described above for options that take arguments
|
||||
# * Provide a description for all options, option-arguments, and operands
|
||||
# * Capitalise/punctuate argument descriptions as phrases, not complete
|
||||
# sentences — 'display help information', never 'Display help information.'
|
||||
# (but still capitalise acronyms and proper names)
|
||||
# * Write argument descriptions as verb phrases — 'display x', 'enable y',
|
||||
# 'use z'
|
||||
# * Word descriptions to make it clear when an option expects an argument;
|
||||
# usually this is done with the word 'specify', as in 'specify x' or
|
||||
# 'use specified x')
|
||||
# * Write argument descriptions as tersely as possible — for example, articles
|
||||
# like 'a' and 'the' should be omitted unless it would be confusing
|
||||
#
|
||||
# Other conventions currently used by this function:
|
||||
#
|
||||
# * Order argument specs alphabetically by group name, then option name
|
||||
# * Group options that are directly related, mutually exclusive, or frequently
|
||||
# referenced by other argument specs
|
||||
# * Use only characters in the set [a-z0-9_-] in group names
|
||||
# * Order exclusion lists as follows: short options, long options, groups
|
||||
# * Use American English in descriptions
|
||||
# * Use 'don't' in descriptions instead of 'do not'
|
||||
# * Word descriptions for related options as similarly as possible. For example,
|
||||
# `--foo[enable foo]` and `--no-foo[disable foo]`, or `--foo[use foo]` and
|
||||
# `--no-foo[don't use foo]`
|
||||
# * Word descriptions to make it clear when an option only makes sense with
|
||||
# another option, usually by adding '(with -x)' to the end
|
||||
# * Don't quote strings or variables unnecessarily. When quotes are required,
|
||||
# prefer single-quotes to double-quotes
|
||||
# * Prefix option specs with `$no` when the option serves only to negate the
|
||||
# behaviour of another option that must be provided explicitly by the user.
|
||||
# This prevents rarely used options from cluttering up the completion menu
|
||||
################################################################################
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Copyright (c) 2011 Github zsh-users - http://github.com/zsh-users
|
||||
# All rights reserved.
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without
|
||||
# modification, are permitted provided that the following conditions are met:
|
||||
# * Redistributions of source code must retain the above copyright
|
||||
# notice, this list of conditions and the following disclaimer.
|
||||
# * Redistributions in binary form must reproduce the above copyright
|
||||
# notice, this list of conditions and the following disclaimer in the
|
||||
# documentation and/or other materials provided with the distribution.
|
||||
# * Neither the name of the zsh-users nor the
|
||||
# names of its contributors may be used to endorse or promote products
|
||||
# derived from this software without specific prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
||||
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
# DISCLAIMED. IN NO EVENT SHALL ZSH-USERS BE LIABLE FOR ANY
|
||||
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
|
||||
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
|
||||
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
|
||||
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
# ------------------------------------------------------------------------------
|
||||
# Description
|
||||
# -----------
|
||||
#
|
||||
# Completion script for ripgrep
|
||||
#
|
||||
# ------------------------------------------------------------------------------
|
||||
# Authors
|
||||
# -------
|
||||
#
|
||||
# * arcizan <ghostrevery@gmail.com>
|
||||
# * MaskRay <i@maskray.me>
|
||||
#
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
# Local Variables:
|
||||
# mode: shell-script
|
||||
# coding: utf-8-unix
|
||||
# indent-tabs-mode: nil
|
||||
# sh-indentation: 2
|
||||
# sh-basic-offset: 2
|
||||
# End:
|
||||
# vim: ft=zsh sw=2 ts=2 et
|
||||
crates/cli/Cargo.toml (new file, 26 lines)
@@ -0,0 +1,26 @@
|
||||
[package]
|
||||
name = "grep-cli"
|
||||
version = "0.1.4" #:version
|
||||
authors = ["Andrew Gallant <jamslam@gmail.com>"]
|
||||
description = """
|
||||
Utilities for search oriented command line applications.
|
||||
"""
|
||||
documentation = "https://docs.rs/grep-cli"
|
||||
homepage = "https://github.com/BurntSushi/ripgrep/tree/master/crates/cli"
|
||||
repository = "https://github.com/BurntSushi/ripgrep/tree/master/crates/cli"
|
||||
readme = "README.md"
|
||||
keywords = ["regex", "grep", "cli", "utility", "util"]
|
||||
license = "Unlicense/MIT"
|
||||
|
||||
[dependencies]
|
||||
atty = "0.2.11"
|
||||
bstr = "0.2.0"
|
||||
globset = { version = "0.4.3", path = "../globset" }
|
||||
lazy_static = "1.1.0"
|
||||
log = "0.4.5"
|
||||
regex = "1.1"
|
||||
same-file = "1.0.4"
|
||||
termcolor = "1.0.4"
|
||||
|
||||
[target.'cfg(windows)'.dependencies.winapi-util]
|
||||
version = "0.1.1"
|
||||
crates/cli/README.md (new file, 38 lines)
@@ -0,0 +1,38 @@
|
||||
grep-cli
|
||||
--------
|
||||
A utility library that provides common routines desired in search oriented
command line applications. This includes, among other things, parsing hex
escapes and detecting whether stdin is readable. To the extent possible,
this crate strives for compatibility across Windows, macOS and Linux.
|
||||
|
||||
[](https://travis-ci.org/BurntSushi/ripgrep)
|
||||
[](https://ci.appveyor.com/project/BurntSushi/ripgrep)
|
||||
[](https://crates.io/crates/grep-cli)
|
||||
|
||||
Dual-licensed under MIT or the [UNLICENSE](http://unlicense.org).
|
||||
|
||||
|
||||
### Documentation
|
||||
|
||||
[https://docs.rs/grep-cli](https://docs.rs/grep-cli)
|
||||
|
||||
**NOTE:** You probably don't want to use this crate directly. Instead, you
|
||||
should prefer the facade defined in the
|
||||
[`grep`](https://docs.rs/grep)
|
||||
crate.
|
||||
|
||||
|
||||
### Usage
|
||||
|
||||
Add this to your `Cargo.toml`:
|
||||
|
||||
```toml
|
||||
[dependencies]
|
||||
grep-cli = "0.1"
|
||||
```
|
||||
|
||||
and this to your crate root:
|
||||
|
||||
```rust
|
||||
extern crate grep_cli;
|
||||
```
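
For a feel of the API, here is a small, hypothetical program (a sketch only; it
assumes `termcolor` is also added as a dependency, and the actual search work is
elided) that picks its input source and output buffering using this crate:

```rust
extern crate grep_cli;
extern crate termcolor;

use std::io::Write;

use termcolor::ColorChoice;

fn main() -> Result<(), Box<::std::error::Error>> {
    // Line buffered when stdout is a tty, block buffered otherwise.
    let mut out = grep_cli::stdout(ColorChoice::Auto);
    if grep_cli::is_readable_stdin() {
        writeln!(out, "would search stdin")?;
    } else {
        writeln!(out, "would search the current directory")?;
    }
    Ok(())
}
```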
|
||||
crates/cli/src/decompress.rs (new file, 376 lines)
@@ -0,0 +1,376 @@
|
||||
use std::ffi::{OsStr, OsString};
|
||||
use std::fs::File;
|
||||
use std::io;
|
||||
use std::path::Path;
|
||||
use std::process::Command;
|
||||
|
||||
use globset::{Glob, GlobSet, GlobSetBuilder};
|
||||
|
||||
use process::{CommandError, CommandReader, CommandReaderBuilder};
|
||||
|
||||
/// A builder for a matcher that determines which files get decompressed.
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct DecompressionMatcherBuilder {
|
||||
/// The commands for each matching glob.
|
||||
commands: Vec<DecompressionCommand>,
|
||||
/// Whether to include the default matching rules.
|
||||
defaults: bool,
|
||||
}
|
||||
|
||||
/// A representation of a single command for decompressing data
|
||||
/// out-of-process.
|
||||
#[derive(Clone, Debug)]
|
||||
struct DecompressionCommand {
|
||||
/// The glob that matches this command.
|
||||
glob: String,
|
||||
/// The command or binary name.
|
||||
bin: OsString,
|
||||
/// The arguments to invoke with the command.
|
||||
args: Vec<OsString>,
|
||||
}
|
||||
|
||||
impl Default for DecompressionMatcherBuilder {
|
||||
fn default() -> DecompressionMatcherBuilder {
|
||||
DecompressionMatcherBuilder::new()
|
||||
}
|
||||
}
|
||||
|
||||
impl DecompressionMatcherBuilder {
|
||||
/// Create a new builder for configuring a decompression matcher.
|
||||
pub fn new() -> DecompressionMatcherBuilder {
|
||||
DecompressionMatcherBuilder { commands: vec![], defaults: true }
|
||||
}
|
||||
|
||||
/// Build a matcher for determining how to decompress files.
|
||||
///
|
||||
/// If there was a problem compiling the matcher, then an error is
|
||||
/// returned.
|
||||
pub fn build(&self) -> Result<DecompressionMatcher, CommandError> {
|
||||
let defaults = if !self.defaults {
|
||||
vec![]
|
||||
} else {
|
||||
default_decompression_commands()
|
||||
};
|
||||
let mut glob_builder = GlobSetBuilder::new();
|
||||
let mut commands = vec![];
|
||||
for decomp_cmd in defaults.iter().chain(&self.commands) {
|
||||
let glob = Glob::new(&decomp_cmd.glob).map_err(|err| {
|
||||
CommandError::io(io::Error::new(io::ErrorKind::Other, err))
|
||||
})?;
|
||||
glob_builder.add(glob);
|
||||
commands.push(decomp_cmd.clone());
|
||||
}
|
||||
let globs = glob_builder.build().map_err(|err| {
|
||||
CommandError::io(io::Error::new(io::ErrorKind::Other, err))
|
||||
})?;
|
||||
Ok(DecompressionMatcher { globs, commands })
|
||||
}
|
||||
|
||||
/// When enabled, the default matching rules will be compiled into this
|
||||
/// matcher before any other associations. When disabled, only the
|
||||
/// rules explicitly given to this builder will be used.
|
||||
///
|
||||
/// This is enabled by default.
|
||||
pub fn defaults(&mut self, yes: bool) -> &mut DecompressionMatcherBuilder {
|
||||
self.defaults = yes;
|
||||
self
|
||||
}
|
||||
|
||||
/// Associates a glob with a command to decompress files matching the glob.
|
||||
///
|
||||
/// If multiple globs match the same file, then the most recently added
|
||||
/// glob takes precedence.
|
||||
///
|
||||
/// The syntax for the glob is documented in the
|
||||
/// [`globset` crate](https://docs.rs/globset/#syntax).
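///
/// # Example
///
/// A sketch of adding an association (the `lz4` program here mirrors one of
/// the built-in defaults and is only an illustration):
///
/// ```
/// use grep_cli::DecompressionMatcherBuilder;
///
/// # fn example() -> Result<(), Box<::std::error::Error>> {
/// let mut builder = DecompressionMatcherBuilder::new();
/// builder.associate("*.lz4", "lz4", &["-d", "-c"]);
/// let matcher = builder.build()?;
/// assert!(matcher.has_command("archive.lz4"));
/// # Ok(()) }
/// ```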
|
||||
pub fn associate<P, I, A>(
|
||||
&mut self,
|
||||
glob: &str,
|
||||
program: P,
|
||||
args: I,
|
||||
) -> &mut DecompressionMatcherBuilder
|
||||
where
|
||||
P: AsRef<OsStr>,
|
||||
I: IntoIterator<Item = A>,
|
||||
A: AsRef<OsStr>,
|
||||
{
|
||||
let glob = glob.to_string();
|
||||
let bin = program.as_ref().to_os_string();
|
||||
let args =
|
||||
args.into_iter().map(|a| a.as_ref().to_os_string()).collect();
|
||||
self.commands.push(DecompressionCommand { glob, bin, args });
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
/// A matcher for determining how to decompress files.
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct DecompressionMatcher {
|
||||
/// The set of globs to match. Each glob has a corresponding entry in
|
||||
/// `commands`. When a glob matches, the corresponding command should be
|
||||
/// used to perform out-of-process decompression.
|
||||
globs: GlobSet,
|
||||
/// The commands for each matching glob.
|
||||
commands: Vec<DecompressionCommand>,
|
||||
}
|
||||
|
||||
impl Default for DecompressionMatcher {
|
||||
fn default() -> DecompressionMatcher {
|
||||
DecompressionMatcher::new()
|
||||
}
|
||||
}
|
||||
|
||||
impl DecompressionMatcher {
|
||||
/// Create a new matcher with default rules.
|
||||
///
|
||||
/// To add more matching rules, build a matcher with
|
||||
/// [`DecompressionMatcherBuilder`](struct.DecompressionMatcherBuilder.html).
|
||||
pub fn new() -> DecompressionMatcher {
|
||||
DecompressionMatcherBuilder::new()
|
||||
.build()
|
||||
.expect("built-in matching rules should always compile")
|
||||
}
|
||||
|
||||
/// Return a pre-built command based on the given file path that can
|
||||
/// decompress its contents. If no such decompressor is known, then this
|
||||
/// returns `None`.
|
||||
///
|
||||
/// If there are multiple possible commands matching the given path, then
|
||||
/// the command added last takes precedence.
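///
/// # Example
///
/// A small sketch using only the built-in rules (`*.gz` is among the
/// defaults, while plain `*.txt` is not):
///
/// ```
/// use grep_cli::DecompressionMatcher;
///
/// let matcher = DecompressionMatcher::new();
/// assert!(matcher.command("foo.gz").is_some());
/// assert!(matcher.command("foo.txt").is_none());
/// ```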
|
||||
pub fn command<P: AsRef<Path>>(&self, path: P) -> Option<Command> {
|
||||
for i in self.globs.matches(path).into_iter().rev() {
|
||||
let decomp_cmd = &self.commands[i];
|
||||
let mut cmd = Command::new(&decomp_cmd.bin);
|
||||
cmd.args(&decomp_cmd.args);
|
||||
return Some(cmd);
|
||||
}
|
||||
None
|
||||
}
|
||||
|
||||
/// Returns true if and only if the given file path has at least one
|
||||
/// matching command to perform decompression on.
|
||||
pub fn has_command<P: AsRef<Path>>(&self, path: P) -> bool {
|
||||
self.globs.is_match(path)
|
||||
}
|
||||
}
|
||||
|
||||
/// Configures and builds a streaming reader for decompressing data.
|
||||
#[derive(Clone, Debug, Default)]
|
||||
pub struct DecompressionReaderBuilder {
|
||||
matcher: DecompressionMatcher,
|
||||
command_builder: CommandReaderBuilder,
|
||||
}
|
||||
|
||||
impl DecompressionReaderBuilder {
|
||||
/// Create a new builder with the default configuration.
|
||||
pub fn new() -> DecompressionReaderBuilder {
|
||||
DecompressionReaderBuilder::default()
|
||||
}
|
||||
|
||||
/// Build a new streaming reader for decompressing data.
|
||||
///
|
||||
/// If decompression is done out-of-process and if there was a problem
|
||||
/// spawning the process, then its error is logged at the debug level and a
|
||||
/// passthru reader is returned that does no decompression. This behavior
|
||||
/// typically occurs when the given file path matches a decompression
|
||||
/// command, but the program is running in an environment where the decompression
|
||||
/// command is not available.
|
||||
///
|
||||
/// If the given file path could not be matched with a decompression
|
||||
/// strategy, then a passthru reader is returned that does no
|
||||
/// decompression.
|
||||
pub fn build<P: AsRef<Path>>(
|
||||
&self,
|
||||
path: P,
|
||||
) -> Result<DecompressionReader, CommandError> {
|
||||
let path = path.as_ref();
|
||||
let mut cmd = match self.matcher.command(path) {
|
||||
None => return DecompressionReader::new_passthru(path),
|
||||
Some(cmd) => cmd,
|
||||
};
|
||||
cmd.arg(path);
|
||||
|
||||
match self.command_builder.build(&mut cmd) {
|
||||
Ok(cmd_reader) => Ok(DecompressionReader { rdr: Ok(cmd_reader) }),
|
||||
Err(err) => {
|
||||
debug!(
|
||||
"{}: error spawning command '{:?}': {} \
|
||||
(falling back to uncompressed reader)",
|
||||
path.display(),
|
||||
cmd,
|
||||
err,
|
||||
);
|
||||
DecompressionReader::new_passthru(path)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Set the matcher to use to look up the decompression command for each
|
||||
/// file path.
|
||||
///
|
||||
/// A set of sensible rules is enabled by default. Setting this will
|
||||
/// completely replace the current rules.
|
||||
pub fn matcher(
|
||||
&mut self,
|
||||
matcher: DecompressionMatcher,
|
||||
) -> &mut DecompressionReaderBuilder {
|
||||
self.matcher = matcher;
|
||||
self
|
||||
}
|
||||
|
||||
/// Get the underlying matcher currently used by this builder.
|
||||
pub fn get_matcher(&self) -> &DecompressionMatcher {
|
||||
&self.matcher
|
||||
}
|
||||
|
||||
/// When enabled, the reader will asynchronously read the contents of the
|
||||
/// command's stderr output. When disabled, stderr is only read after the
|
||||
/// stdout stream has been exhausted (or if the process quits with an error
|
||||
/// code).
|
||||
///
|
||||
/// Note that when enabled, this may require launching an additional
|
||||
/// thread in order to read stderr. This is done so that the process being
|
||||
/// executed is never blocked from writing to stdout or stderr. If this is
|
||||
/// disabled, then it is possible for the process to fill up the stderr
|
||||
/// buffer and deadlock.
|
||||
///
|
||||
/// This is enabled by default.
|
||||
pub fn async_stderr(
|
||||
&mut self,
|
||||
yes: bool,
|
||||
) -> &mut DecompressionReaderBuilder {
|
||||
self.command_builder.async_stderr(yes);
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
/// A streaming reader for decompressing the contents of a file.
|
||||
///
|
||||
/// The purpose of this reader is to provide a seamless way to decompress the
|
||||
/// contents of a file using existing tools in the current environment. This is
|
||||
/// meant to be an alternative to using decompression libraries in favor of the
|
||||
/// simplicity and portability of using external commands such as `gzip` and
|
||||
/// `xz`. This does impose the overhead of spawning a process, so other means
|
||||
/// for performing decompression should be sought if this overhead isn't
|
||||
/// acceptable.
|
||||
///
|
||||
/// A decompression reader comes with a default set of matching rules that are
|
||||
/// meant to associate file paths with the corresponding command to use to
|
||||
/// decompress them. For example, a glob like `*.gz` matches gzip compressed
|
||||
/// files with the command `gzip -d -c`. If a file path does not match any
|
||||
/// existing rules, or if it matches a rule whose command does not exist in the
|
||||
/// current environment, then the decompression reader passes through the
|
||||
/// contents of the underlying file without doing any decompression.
|
||||
///
|
||||
/// The default matching rules are probably good enough for most cases, and if
|
||||
/// they require revision, pull requests are welcome. In cases where they must
|
||||
/// be changed or extended, they can be customized through the use of
|
||||
/// [`DecompressionMatcherBuilder`](struct.DecompressionMatcherBuilder.html)
|
||||
/// and
|
||||
/// [`DecompressionReaderBuilder`](struct.DecompressionReaderBuilder.html).
|
||||
///
|
||||
/// By default, this reader will asynchronously read the process's stderr.
|
||||
/// This prevents subtle deadlocking bugs for noisy processes that write a lot
|
||||
/// to stderr. Currently, the entire contents of stderr are read onto the heap.
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
/// This example shows how to read the decompressed contents of a file without
|
||||
/// needing to explicitly choose the decompression command to run.
|
||||
///
|
||||
/// Note that if you need to decompress multiple files, it is better to use
|
||||
/// `DecompressionReaderBuilder`, which will amortize the cost of compiling the
|
||||
/// matcher.
|
||||
///
|
||||
/// ```no_run
|
||||
/// use std::io::Read;
|
||||
/// use std::process::Command;
|
||||
/// use grep_cli::DecompressionReader;
|
||||
///
|
||||
/// # fn example() -> Result<(), Box<::std::error::Error>> {
|
||||
/// let mut rdr = DecompressionReader::new("/usr/share/man/man1/ls.1.gz")?;
|
||||
/// let mut contents = vec![];
|
||||
/// rdr.read_to_end(&mut contents)?;
|
||||
/// # Ok(()) }
|
||||
/// ```
|
||||
#[derive(Debug)]
|
||||
pub struct DecompressionReader {
|
||||
rdr: Result<CommandReader, File>,
|
||||
}
|
||||
|
||||
impl DecompressionReader {
|
||||
/// Build a new streaming reader for decompressing data.
|
||||
///
|
||||
/// If decompression is done out-of-process and if there was a problem
|
||||
/// spawning the process, then its error is returned.
|
||||
///
|
||||
/// If the given file path could not be matched with a decompression
|
||||
/// strategy, then a passthru reader is returned that does no
|
||||
/// decompression.
|
||||
///
|
||||
/// This uses the default matching rules for determining how to decompress
|
||||
/// the given file. To change those matching rules, use
|
||||
/// [`DecompressionReaderBuilder`](struct.DecompressionReaderBuilder.html)
|
||||
/// and
|
||||
/// [`DecompressionMatcherBuilder`](struct.DecompressionMatcherBuilder.html).
|
||||
///
|
||||
/// When creating readers for many paths, it is better to use the builder
|
||||
/// since it will amortize the cost of constructing the matcher.
|
||||
pub fn new<P: AsRef<Path>>(
|
||||
path: P,
|
||||
) -> Result<DecompressionReader, CommandError> {
|
||||
DecompressionReaderBuilder::new().build(path)
|
||||
}
|
||||
|
||||
/// Creates a new "passthru" decompression reader that reads from the file
|
||||
/// corresponding to the given path without doing decompression and without
|
||||
/// executing another process.
|
||||
fn new_passthru(path: &Path) -> Result<DecompressionReader, CommandError> {
|
||||
let file = File::open(path)?;
|
||||
Ok(DecompressionReader { rdr: Err(file) })
|
||||
}
|
||||
}
|
||||
|
||||
impl io::Read for DecompressionReader {
|
||||
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
|
||||
match self.rdr {
|
||||
Ok(ref mut rdr) => rdr.read(buf),
|
||||
Err(ref mut rdr) => rdr.read(buf),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn default_decompression_commands() -> Vec<DecompressionCommand> {
|
||||
const ARGS_GZIP: &[&str] = &["gzip", "-d", "-c"];
|
||||
const ARGS_BZIP: &[&str] = &["bzip2", "-d", "-c"];
|
||||
const ARGS_XZ: &[&str] = &["xz", "-d", "-c"];
|
||||
const ARGS_LZ4: &[&str] = &["lz4", "-d", "-c"];
|
||||
const ARGS_LZMA: &[&str] = &["xz", "--format=lzma", "-d", "-c"];
|
||||
const ARGS_BROTLI: &[&str] = &["brotli", "-d", "-c"];
|
||||
const ARGS_ZSTD: &[&str] = &["zstd", "-q", "-d", "-c"];
|
||||
|
||||
fn cmd(glob: &str, args: &[&str]) -> DecompressionCommand {
|
||||
DecompressionCommand {
|
||||
glob: glob.to_string(),
|
||||
bin: OsStr::new(&args[0]).to_os_string(),
|
||||
args: args
|
||||
.iter()
|
||||
.skip(1)
|
||||
.map(|s| OsStr::new(s).to_os_string())
|
||||
.collect(),
|
||||
}
|
||||
}
|
||||
vec![
|
||||
cmd("*.gz", ARGS_GZIP),
|
||||
cmd("*.tgz", ARGS_GZIP),
|
||||
cmd("*.bz2", ARGS_BZIP),
|
||||
cmd("*.tbz2", ARGS_BZIP),
|
||||
cmd("*.xz", ARGS_XZ),
|
||||
cmd("*.txz", ARGS_XZ),
|
||||
cmd("*.lz4", ARGS_LZ4),
|
||||
cmd("*.lzma", ARGS_LZMA),
|
||||
cmd("*.br", ARGS_BROTLI),
|
||||
cmd("*.zst", ARGS_ZSTD),
|
||||
cmd("*.zstd", ARGS_ZSTD),
|
||||
]
|
||||
}
|
||||
crates/cli/src/escape.rs (new file, 272 lines)
@@ -0,0 +1,272 @@
|
||||
use std::ffi::OsStr;
|
||||
use std::str;
|
||||
|
||||
use bstr::{ByteSlice, ByteVec};
|
||||
|
||||
/// A single state in the state machine used by `unescape`.
|
||||
#[derive(Clone, Copy, Eq, PartialEq)]
|
||||
enum State {
|
||||
/// The state after seeing a `\`.
|
||||
Escape,
|
||||
/// The state after seeing a `\x`.
|
||||
HexFirst,
|
||||
/// The state after seeing a `\x[0-9A-Fa-f]`.
|
||||
HexSecond(char),
|
||||
/// Default state.
|
||||
Literal,
|
||||
}
|
||||
|
||||
/// Escapes arbitrary bytes into a human readable string.
|
||||
///
|
||||
/// This converts `\t`, `\r` and `\n` into their escaped forms. It also
|
||||
/// converts the non-printable subset of ASCII in addition to invalid UTF-8
|
||||
/// bytes to hexadecimal escape sequences. Everything else is left as is.
|
||||
///
|
||||
/// The dual of this routine is [`unescape`](fn.unescape.html).
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
/// This example shows how to convert a byte string that contains a `\n` and
|
||||
/// invalid UTF-8 bytes into a `String`.
|
||||
///
|
||||
/// Pay special attention to the use of raw strings. That is, `r"\n"` is
|
||||
/// equivalent to `"\\n"`.
|
||||
///
|
||||
/// ```
|
||||
/// use grep_cli::escape;
|
||||
///
|
||||
/// assert_eq!(r"foo\nbar\xFFbaz", escape(b"foo\nbar\xFFbaz"));
|
||||
/// ```
|
||||
pub fn escape(bytes: &[u8]) -> String {
|
||||
let mut escaped = String::new();
|
||||
for (s, e, ch) in bytes.char_indices() {
|
||||
if ch == '\u{FFFD}' {
|
||||
for b in bytes[s..e].bytes() {
|
||||
escape_byte(b, &mut escaped);
|
||||
}
|
||||
} else {
|
||||
escape_char(ch, &mut escaped);
|
||||
}
|
||||
}
|
||||
escaped
|
||||
}
|
||||
|
||||
/// Escapes an OS string into a human readable string.
|
||||
///
|
||||
/// This is like [`escape`](fn.escape.html), but accepts an OS string.
|
||||
pub fn escape_os(string: &OsStr) -> String {
|
||||
escape(Vec::from_os_str_lossy(string).as_bytes())
|
||||
}
|
||||
|
||||
/// Unescapes a string.
|
||||
///
|
||||
/// It supports a limited set of escape sequences:
|
||||
///
|
||||
/// * `\t`, `\r` and `\n` are mapped to their corresponding ASCII bytes.
|
||||
/// * `\xZZ` hexadecimal escapes are mapped to their byte.
|
||||
///
|
||||
/// Everything else is left as is, including non-hexadecimal escapes like
|
||||
/// `\xGG`.
|
||||
///
|
||||
/// This is useful when it is desirable for a command line argument to be
|
||||
/// capable of specifying arbitrary bytes or otherwise make it easier to
|
||||
/// specify non-printable characters.
|
||||
///
|
||||
/// The dual of this routine is [`escape`](fn.escape.html).
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
/// This example shows how to convert an escaped string (which is valid UTF-8)
|
||||
/// into a corresponding sequence of bytes. Each escape sequence is mapped to
|
||||
/// its bytes, which may include invalid UTF-8.
|
||||
///
|
||||
/// Pay special attention to the use of raw strings. That is, `r"\n"` is
|
||||
/// equivalent to `"\\n"`.
|
||||
///
|
||||
/// ```
|
||||
/// use grep_cli::unescape;
|
||||
///
|
||||
/// assert_eq!(&b"foo\nbar\xFFbaz"[..], &*unescape(r"foo\nbar\xFFbaz"));
|
||||
/// ```
|
||||
pub fn unescape(s: &str) -> Vec<u8> {
|
||||
use self::State::*;
|
||||
|
||||
let mut bytes = vec![];
|
||||
let mut state = Literal;
|
||||
for c in s.chars() {
|
||||
match state {
|
||||
Escape => match c {
|
||||
'\\' => {
|
||||
bytes.push(b'\\');
|
||||
state = Literal;
|
||||
}
|
||||
'n' => {
|
||||
bytes.push(b'\n');
|
||||
state = Literal;
|
||||
}
|
||||
'r' => {
|
||||
bytes.push(b'\r');
|
||||
state = Literal;
|
||||
}
|
||||
't' => {
|
||||
bytes.push(b'\t');
|
||||
state = Literal;
|
||||
}
|
||||
'x' => {
|
||||
state = HexFirst;
|
||||
}
|
||||
c => {
|
||||
bytes.extend(format!(r"\{}", c).into_bytes());
|
||||
state = Literal;
|
||||
}
|
||||
},
|
||||
HexFirst => match c {
|
||||
'0'..='9' | 'A'..='F' | 'a'..='f' => {
|
||||
state = HexSecond(c);
|
||||
}
|
||||
c => {
|
||||
bytes.extend(format!(r"\x{}", c).into_bytes());
|
||||
state = Literal;
|
||||
}
|
||||
},
|
||||
HexSecond(first) => match c {
|
||||
'0'..='9' | 'A'..='F' | 'a'..='f' => {
|
||||
let ordinal = format!("{}{}", first, c);
|
||||
let byte = u8::from_str_radix(&ordinal, 16).unwrap();
|
||||
bytes.push(byte);
|
||||
state = Literal;
|
||||
}
|
||||
c => {
|
||||
let original = format!(r"\x{}{}", first, c);
|
||||
bytes.extend(original.into_bytes());
|
||||
state = Literal;
|
||||
}
|
||||
},
|
||||
Literal => match c {
|
||||
'\\' => {
|
||||
state = Escape;
|
||||
}
|
||||
c => {
|
||||
bytes.extend(c.to_string().as_bytes());
|
||||
}
|
||||
},
|
||||
}
|
||||
}
|
||||
match state {
|
||||
Escape => bytes.push(b'\\'),
|
||||
HexFirst => bytes.extend(b"\\x"),
|
||||
HexSecond(c) => bytes.extend(format!("\\x{}", c).into_bytes()),
|
||||
Literal => {}
|
||||
}
|
||||
bytes
|
||||
}
|
||||
|
||||
/// Unescapes an OS string.
|
||||
///
|
||||
/// This is like [`unescape`](fn.unescape.html), but accepts an OS string.
|
||||
///
|
||||
/// Note that this first lossily decodes the given OS string as UTF-8. That
|
||||
/// is, an escaped string (the thing given) should be valid UTF-8.
|
||||
pub fn unescape_os(string: &OsStr) -> Vec<u8> {
|
||||
unescape(&string.to_string_lossy())
|
||||
}
|
||||
|
||||
/// Adds the given codepoint to the given string, escaping it if necessary.
|
||||
fn escape_char(cp: char, into: &mut String) {
|
||||
if cp.is_ascii() {
|
||||
escape_byte(cp as u8, into);
|
||||
} else {
|
||||
into.push(cp);
|
||||
}
|
||||
}
|
||||
|
||||
/// Adds the given byte to the given string, escaping it if necessary.
|
||||
fn escape_byte(byte: u8, into: &mut String) {
|
||||
match byte {
|
||||
0x21..=0x5B | 0x5D..=0x7D => into.push(byte as char),
|
||||
b'\n' => into.push_str(r"\n"),
|
||||
b'\r' => into.push_str(r"\r"),
|
||||
b'\t' => into.push_str(r"\t"),
|
||||
b'\\' => into.push_str(r"\\"),
|
||||
_ => into.push_str(&format!(r"\x{:02X}", byte)),
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::{escape, unescape};
|
||||
|
||||
fn b(bytes: &'static [u8]) -> Vec<u8> {
|
||||
bytes.to_vec()
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn empty() {
|
||||
assert_eq!(b(b""), unescape(r""));
|
||||
assert_eq!(r"", escape(b""));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn backslash() {
|
||||
assert_eq!(b(b"\\"), unescape(r"\\"));
|
||||
assert_eq!(r"\\", escape(b"\\"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn nul() {
|
||||
assert_eq!(b(b"\x00"), unescape(r"\x00"));
|
||||
assert_eq!(r"\x00", escape(b"\x00"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn nl() {
|
||||
assert_eq!(b(b"\n"), unescape(r"\n"));
|
||||
assert_eq!(r"\n", escape(b"\n"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn tab() {
|
||||
assert_eq!(b(b"\t"), unescape(r"\t"));
|
||||
assert_eq!(r"\t", escape(b"\t"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn carriage() {
|
||||
assert_eq!(b(b"\r"), unescape(r"\r"));
|
||||
assert_eq!(r"\r", escape(b"\r"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn nothing_simple() {
|
||||
assert_eq!(b(b"\\a"), unescape(r"\a"));
|
||||
assert_eq!(b(b"\\a"), unescape(r"\\a"));
|
||||
assert_eq!(r"\\a", escape(b"\\a"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn nothing_hex0() {
|
||||
assert_eq!(b(b"\\x"), unescape(r"\x"));
|
||||
assert_eq!(b(b"\\x"), unescape(r"\\x"));
|
||||
assert_eq!(r"\\x", escape(b"\\x"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn nothing_hex1() {
|
||||
assert_eq!(b(b"\\xz"), unescape(r"\xz"));
|
||||
assert_eq!(b(b"\\xz"), unescape(r"\\xz"));
|
||||
assert_eq!(r"\\xz", escape(b"\\xz"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn nothing_hex2() {
|
||||
assert_eq!(b(b"\\xzz"), unescape(r"\xzz"));
|
||||
assert_eq!(b(b"\\xzz"), unescape(r"\\xzz"));
|
||||
assert_eq!(r"\\xzz", escape(b"\\xzz"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn invalid_utf8() {
|
||||
assert_eq!(r"\xFF", escape(b"\xFF"));
|
||||
assert_eq!(r"a\xFFb", escape(b"a\xFFb"));
|
||||
}
|
||||
}
|
||||
crates/cli/src/human.rs (new file, 165 lines)
@@ -0,0 +1,165 @@
|
||||
use std::error;
|
||||
use std::fmt;
|
||||
use std::io;
|
||||
use std::num::ParseIntError;
|
||||
|
||||
use regex::Regex;
|
||||
|
||||
/// An error that occurs when parsing a human readable size description.
|
||||
///
|
||||
/// This error provides an end-user-friendly message describing why the
/// description couldn't be parsed and what the expected format is.
|
||||
#[derive(Clone, Debug, Eq, PartialEq)]
|
||||
pub struct ParseSizeError {
|
||||
original: String,
|
||||
kind: ParseSizeErrorKind,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, Eq, PartialEq)]
|
||||
enum ParseSizeErrorKind {
|
||||
InvalidFormat,
|
||||
InvalidInt(ParseIntError),
|
||||
Overflow,
|
||||
}
|
||||
|
||||
impl ParseSizeError {
|
||||
fn format(original: &str) -> ParseSizeError {
|
||||
ParseSizeError {
|
||||
original: original.to_string(),
|
||||
kind: ParseSizeErrorKind::InvalidFormat,
|
||||
}
|
||||
}
|
||||
|
||||
fn int(original: &str, err: ParseIntError) -> ParseSizeError {
|
||||
ParseSizeError {
|
||||
original: original.to_string(),
|
||||
kind: ParseSizeErrorKind::InvalidInt(err),
|
||||
}
|
||||
}
|
||||
|
||||
fn overflow(original: &str) -> ParseSizeError {
|
||||
ParseSizeError {
|
||||
original: original.to_string(),
|
||||
kind: ParseSizeErrorKind::Overflow,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl error::Error for ParseSizeError {
|
||||
fn description(&self) -> &str {
|
||||
"invalid size"
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Display for ParseSizeError {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
use self::ParseSizeErrorKind::*;
|
||||
|
||||
match self.kind {
|
||||
InvalidFormat => write!(
|
||||
f,
|
||||
"invalid format for size '{}', which should be a sequence \
|
||||
of digits followed by an optional 'K', 'M' or 'G' \
|
||||
suffix",
|
||||
self.original
|
||||
),
|
||||
InvalidInt(ref err) => write!(
|
||||
f,
|
||||
"invalid integer found in size '{}': {}",
|
||||
self.original, err
|
||||
),
|
||||
Overflow => write!(f, "size too big in '{}'", self.original),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<ParseSizeError> for io::Error {
|
||||
fn from(size_err: ParseSizeError) -> io::Error {
|
||||
io::Error::new(io::ErrorKind::Other, size_err)
|
||||
}
|
||||
}
|
||||
|
||||
/// Parse a human readable size like `2M` into a corresponding number of bytes.
|
||||
///
|
||||
/// Supported size suffixes are `K` (for kilobyte), `M` (for megabyte) and `G`
|
||||
/// (for gigabyte). If a size suffix is missing, then the size is interpreted
|
||||
/// as bytes. If the size is too big to fit into a `u64`, then this returns an
|
||||
/// error.
|
||||
///
|
||||
/// Additional suffixes may be added over time.
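///
/// # Example
///
/// A couple of values, mirroring the tests below:
///
/// ```
/// use grep_cli::parse_human_readable_size;
///
/// assert_eq!(123, parse_human_readable_size("123").unwrap());
/// assert_eq!(123 * (1 << 20), parse_human_readable_size("123M").unwrap());
/// ```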
|
||||
pub fn parse_human_readable_size(size: &str) -> Result<u64, ParseSizeError> {
|
||||
lazy_static! {
|
||||
// Normally I'd just parse something this simple by hand to avoid the
|
||||
// regex dep, but we bring regex in anyway for glob matching, so might
|
||||
// as well use it.
|
||||
static ref RE: Regex = Regex::new(r"^([0-9]+)([KMG])?$").unwrap();
|
||||
}
|
||||
|
||||
let caps = match RE.captures(size) {
|
||||
Some(caps) => caps,
|
||||
None => return Err(ParseSizeError::format(size)),
|
||||
};
|
||||
let value: u64 =
|
||||
caps[1].parse().map_err(|err| ParseSizeError::int(size, err))?;
|
||||
let suffix = match caps.get(2) {
|
||||
None => return Ok(value),
|
||||
Some(cap) => cap.as_str(),
|
||||
};
|
||||
let bytes = match suffix {
|
||||
"K" => value.checked_mul(1 << 10),
|
||||
"M" => value.checked_mul(1 << 20),
|
||||
"G" => value.checked_mul(1 << 30),
|
||||
// If the regex matched this group, it must be [KMG], so any other
// suffix is unreachable.
|
||||
_ => unreachable!(),
|
||||
};
|
||||
bytes.ok_or_else(|| ParseSizeError::overflow(size))
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn suffix_none() {
|
||||
let x = parse_human_readable_size("123").unwrap();
|
||||
assert_eq!(123, x);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn suffix_k() {
|
||||
let x = parse_human_readable_size("123K").unwrap();
|
||||
assert_eq!(123 * (1 << 10), x);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn suffix_m() {
|
||||
let x = parse_human_readable_size("123M").unwrap();
|
||||
assert_eq!(123 * (1 << 20), x);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn suffix_g() {
|
||||
let x = parse_human_readable_size("123G").unwrap();
|
||||
assert_eq!(123 * (1 << 30), x);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn invalid_empty() {
|
||||
assert!(parse_human_readable_size("").is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn invalid_non_digit() {
|
||||
assert!(parse_human_readable_size("a").is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn invalid_overflow() {
|
||||
assert!(parse_human_readable_size("9999999999999999G").is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn invalid_suffix() {
|
||||
assert!(parse_human_readable_size("123T").is_err());
|
||||
}
|
||||
}
|
||||
crates/cli/src/lib.rs (new file, 250 lines)
@@ -0,0 +1,250 @@
|
||||
/*!
|
||||
This crate provides common routines used in command line applications, with a
|
||||
focus on routines useful for search oriented applications. As a utility
|
||||
library, there is no central type or function. However, a key focus of this
|
||||
crate is to improve failure modes and provide user friendly error messages
|
||||
when things go wrong.
|
||||
|
||||
To the extent possible, everything in this crate works on Windows, macOS
|
||||
and Linux.
|
||||
|
||||
|
||||
# Standard I/O
|
||||
|
||||
The
|
||||
[`is_readable_stdin`](fn.is_readable_stdin.html),
|
||||
[`is_tty_stderr`](fn.is_tty_stderr.html),
|
||||
[`is_tty_stdin`](fn.is_tty_stdin.html)
|
||||
and
|
||||
[`is_tty_stdout`](fn.is_tty_stdout.html)
|
||||
routines query aspects of standard I/O. `is_readable_stdin` determines whether
|
||||
stdin can be usefully read from, while the `tty` methods determine whether a
|
||||
tty is attached to stdin/stdout/stderr.
|
||||
|
||||
`is_readable_stdin` is useful when writing an application that changes behavior
|
||||
based on whether the application was invoked with data on stdin. For example,
|
||||
`rg foo` might recursively search the current working directory for
|
||||
occurrences of `foo`, but `rg foo < file` might only search the contents of
|
||||
`file`.
|
||||
|
||||
The `tty` methods are useful for similar reasons. Namely, commands like `ls`
|
||||
will change their output depending on whether they are printing to a terminal
|
||||
or not. For example, `ls` shows a file on each line when stdout is redirected
|
||||
to a file or a pipe, but condenses the output to show possibly many files on
|
||||
each line when stdout is connected to a tty.
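
As a rough sketch (the file names are made up and the real work is elided),
an application might branch on these routines like so:

```
use grep_cli::{is_readable_stdin, is_tty_stdout};

fn main() {
    // Choose the input source: prefer data piped on stdin, otherwise fall
    // back to walking the current directory.
    if is_readable_stdin() {
        println!("searching stdin");
    } else {
        println!("searching the current directory");
    }

    // Mimic `ls`: condense output for a terminal, one item per line otherwise.
    let names = ["a.txt", "b.txt", "c.txt"];
    if is_tty_stdout() {
        println!("{}", names.join("  "));
    } else {
        for name in &names {
            println!("{}", name);
        }
    }
}
```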
|
||||
|
||||
|
||||
# Coloring and buffering
|
||||
|
||||
The
|
||||
[`stdout`](fn.stdout.html),
|
||||
[`stdout_buffered_block`](fn.stdout_buffered_block.html)
|
||||
and
|
||||
[`stdout_buffered_line`](fn.stdout_buffered_line.html)
|
||||
routines are alternative constructors for
|
||||
[`StandardStream`](struct.StandardStream.html).
|
||||
A `StandardStream` implements `termcolor::WriteColor`, which provides a way
|
||||
to emit colors to terminals. Its key use is the encapsulation of buffering
|
||||
style. Namely, `stdout` will return a line buffered `StandardStream` if and
|
||||
only if stdout is connected to a tty, and will otherwise return a block
|
||||
buffered `StandardStream`. Line buffering is important for use with a tty
|
||||
because it typically decreases the latency at which the end user sees output.
|
||||
Block buffering is used otherwise because it is faster, and redirecting stdout
|
||||
to a file typically doesn't benefit from the decreased latency that line
|
||||
buffering provides.
|
||||
|
||||
The `stdout_buffered_block` and `stdout_buffered_line` routines can be used to
|
||||
explicitly set the buffering strategy regardless of whether stdout is connected
|
||||
to a tty or not.
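
For example, a sketch that writes one red line through the automatically
chosen buffering mode (`termcolor` is assumed to be a direct dependency so
that `ColorChoice` and `ColorSpec` are nameable):

```no_run
extern crate grep_cli;
extern crate termcolor;

use std::io::Write;

use termcolor::{Color, ColorChoice, ColorSpec, WriteColor};

fn main() -> Result<(), Box<::std::error::Error>> {
    // Line buffered when stdout is a tty, block buffered otherwise.
    let mut wtr = grep_cli::stdout(ColorChoice::Auto);
    wtr.set_color(ColorSpec::new().set_fg(Some(Color::Red)))?;
    writeln!(wtr, "a match!")?;
    wtr.reset()?;
    Ok(())
}
```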
|
||||
|
||||
|
||||
# Escaping
|
||||
|
||||
The
|
||||
[`escape`](fn.escape.html),
|
||||
[`escape_os`](fn.escape_os.html),
|
||||
[`unescape`](fn.unescape.html)
|
||||
and
|
||||
[`unescape_os`](fn.unescape_os.html)
|
||||
routines provide a user friendly way of dealing with UTF-8 encoded strings that
|
||||
can express arbitrary bytes. For example, you might want to accept a string
|
||||
containing arbitrary bytes as a command line argument, but most interactive
|
||||
shells make such strings difficult to type. Instead, we can ask users to use
|
||||
escape sequences.
|
||||
|
||||
For example, `a\xFFz` is itself a valid UTF-8 string corresponding to the
|
||||
following bytes:
|
||||
|
||||
```ignore
|
||||
[b'a', b'\\', b'x', b'F', b'F', b'z']
|
||||
```
|
||||
|
||||
However, we can
|
||||
interpret `\xFF` as an escape sequence with the `unescape`/`unescape_os`
|
||||
routines, which will yield
|
||||
|
||||
```ignore
|
||||
[b'a', b'\xFF', b'z']
|
||||
```
|
||||
|
||||
instead. For example:
|
||||
|
||||
```
|
||||
use grep_cli::unescape;
|
||||
|
||||
// Note the use of a raw string!
|
||||
assert_eq!(vec![b'a', b'\xFF', b'z'], unescape(r"a\xFFz"));
|
||||
```
|
||||
|
||||
The `escape`/`escape_os` routines provide the reverse transformation, which
|
||||
makes it easy to show user friendly error messages involving arbitrary bytes.
|
||||
|
||||
|
||||
# Building patterns
|
||||
|
||||
Typically, regular expression patterns must be valid UTF-8. However, command
|
||||
line arguments aren't guaranteed to be valid UTF-8. Unfortunately, the
|
||||
standard library's UTF-8 conversion functions from `OsStr`s do not provide
|
||||
good error messages. However, the
|
||||
[`pattern_from_bytes`](fn.pattern_from_bytes.html)
|
||||
and
|
||||
[`pattern_from_os`](fn.pattern_from_os.html)
|
||||
routines do, including reporting exactly where the first invalid UTF-8 byte is seen.
|
||||
|
||||
Additionally, it can be useful to read patterns from a file while reporting
|
||||
good error messages that include line numbers. The
|
||||
[`patterns_from_path`](fn.patterns_from_path.html),
|
||||
[`patterns_from_reader`](fn.patterns_from_reader.html)
|
||||
and
|
||||
[`patterns_from_stdin`](fn.patterns_from_stdin.html)
|
||||
routines do just that. If any pattern is found that is invalid UTF-8, then the
|
||||
error includes the file path (if available) along with the line number and the
|
||||
byte offset at which the first invalid UTF-8 byte was observed.
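
For instance, a pattern containing an invalid UTF-8 byte is rejected with an
error that reports where the first invalid byte sits (this mirrors the crate's
own tests):

```
use grep_cli::pattern_from_bytes;

let err = pattern_from_bytes(b"abc\xFFxyz").unwrap_err();
assert_eq!(3, err.valid_up_to());
```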
|
||||
|
||||
|
||||
# Read process output
|
||||
|
||||
Sometimes a command line application needs to execute other processes and read
|
||||
its stdout in a streaming fashion. The
|
||||
[`CommandReader`](struct.CommandReader.html)
|
||||
provides this functionality with an explicit goal of improving failure modes.
|
||||
In particular, if the process exits with an error code, then stderr is read
|
||||
and converted into a normal Rust error to show to end users. This makes the
|
||||
underlying failure modes explicit and gives more information to end users for
|
||||
debugging the problem.
|
||||
|
||||
As a special case,
|
||||
[`DecompressionReader`](struct.DecompressionReader.html)
|
||||
provides a way to decompress arbitrary files by matching their file extensions
|
||||
up with corresponding decompression programs (such as `gzip` and `xz`). This
|
||||
is useful as a means of performing simplistic decompression in a portable
|
||||
manner without binding to specific compression libraries. This does come with
|
||||
some overhead though, so if you need to decompress lots of small files, this
|
||||
may not be an appropriate convenience to use.
|
||||
|
||||
Each reader has a corresponding builder for additional configuration, such as
|
||||
whether to read stderr asynchronously in order to avoid deadlock (which is
|
||||
enabled by default).
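
A sketch of reading another command's stdout (`ls` here is an arbitrary
stand-in) while surfacing its stderr as a Rust error on failure:

```no_run
use std::io::Read;
use std::process::Command;
use grep_cli::CommandReader;

# fn example() -> Result<(), Box<::std::error::Error>> {
let mut cmd = Command::new("ls");
// If `ls` exits with an error code, `read_to_end` returns its stderr
// wrapped in an `io::Error`.
let mut rdr = CommandReader::new(&mut cmd)?;
let mut output = vec![];
rdr.read_to_end(&mut output)?;
# Ok(()) }
```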
|
||||
|
||||
|
||||
# Miscellaneous parsing
|
||||
|
||||
The
|
||||
[`parse_human_readable_size`](fn.parse_human_readable_size.html)
|
||||
routine parses strings like `2M` and converts them to the corresponding number
|
||||
of bytes (`2 * 1<<20` in this case). If an invalid size is found, then a good
|
||||
error message is crafted that typically tells the user how to fix the problem.
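
For example (the `2M` value is the one mentioned above, and the overflow case
mirrors the unit tests):

```
use grep_cli::parse_human_readable_size;

assert_eq!(2 * (1 << 20), parse_human_readable_size("2M").unwrap());
assert!(parse_human_readable_size("9999999999999999G").is_err());
```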
|
||||
*/
|
||||
|
||||
#![deny(missing_docs)]
|
||||
|
||||
extern crate atty;
|
||||
extern crate bstr;
|
||||
extern crate globset;
|
||||
#[macro_use]
|
||||
extern crate lazy_static;
|
||||
#[macro_use]
|
||||
extern crate log;
|
||||
extern crate regex;
|
||||
extern crate same_file;
|
||||
extern crate termcolor;
|
||||
#[cfg(windows)]
|
||||
extern crate winapi_util;
|
||||
|
||||
mod decompress;
|
||||
mod escape;
|
||||
mod human;
|
||||
mod pattern;
|
||||
mod process;
|
||||
mod wtr;
|
||||
|
||||
pub use decompress::{
|
||||
DecompressionMatcher, DecompressionMatcherBuilder, DecompressionReader,
|
||||
DecompressionReaderBuilder,
|
||||
};
|
||||
pub use escape::{escape, escape_os, unescape, unescape_os};
|
||||
pub use human::{parse_human_readable_size, ParseSizeError};
|
||||
pub use pattern::{
|
||||
pattern_from_bytes, pattern_from_os, patterns_from_path,
|
||||
patterns_from_reader, patterns_from_stdin, InvalidPatternError,
|
||||
};
|
||||
pub use process::{CommandError, CommandReader, CommandReaderBuilder};
|
||||
pub use wtr::{
|
||||
stdout, stdout_buffered_block, stdout_buffered_line, StandardStream,
|
||||
};
|
||||
|
||||
/// Returns true if and only if stdin is believed to be readable.
|
||||
///
|
||||
/// When stdin is readable, command line programs may choose to behave
|
||||
/// differently than when stdin is not readable. For example, `command foo`
|
||||
/// might search the current directory for occurrences of `foo`, whereas
|
||||
/// `command foo < some-file` or `cat some-file | command foo` might instead
|
||||
/// only search stdin for occurrences of `foo`.
|
||||
pub fn is_readable_stdin() -> bool {
|
||||
#[cfg(unix)]
|
||||
fn imp() -> bool {
|
||||
use same_file::Handle;
|
||||
use std::os::unix::fs::FileTypeExt;
|
||||
|
||||
let ft = match Handle::stdin().and_then(|h| h.as_file().metadata()) {
|
||||
Err(_) => return false,
|
||||
Ok(md) => md.file_type(),
|
||||
};
|
||||
ft.is_file() || ft.is_fifo()
|
||||
}
|
||||
|
||||
#[cfg(windows)]
|
||||
fn imp() -> bool {
|
||||
use winapi_util as winutil;
|
||||
|
||||
winutil::file::typ(winutil::HandleRef::stdin())
|
||||
.map(|t| t.is_disk() || t.is_pipe())
|
||||
.unwrap_or(false)
|
||||
}
|
||||
|
||||
!is_tty_stdin() && imp()
|
||||
}
|
||||
|
||||
/// Returns true if and only if stdin is believed to be connected to a tty
|
||||
/// or a console.
|
||||
pub fn is_tty_stdin() -> bool {
|
||||
atty::is(atty::Stream::Stdin)
|
||||
}
|
||||
|
||||
/// Returns true if and only if stdout is believed to be connected to a tty
|
||||
/// or a console.
|
||||
///
|
||||
/// This is useful for when you want your command line program to produce
|
||||
/// different output depending on whether it's printing directly to a user's
|
||||
/// terminal or whether it's being redirected somewhere else. For example,
|
||||
/// implementations of `ls` will often show one item per line when stdout is
|
||||
/// redirected, but will condense the output when printing to a tty.
|
||||
pub fn is_tty_stdout() -> bool {
|
||||
atty::is(atty::Stream::Stdout)
|
||||
}
|
||||
|
||||
/// Returns true if and only if stderr is believed to be connected to a tty
|
||||
/// or a console.
|
||||
pub fn is_tty_stderr() -> bool {
|
||||
atty::is(atty::Stream::Stderr)
|
||||
}
|
||||
crates/cli/src/pattern.rs (new file, 195 lines)
@@ -0,0 +1,195 @@
|
||||
use std::error;
|
||||
use std::ffi::OsStr;
|
||||
use std::fmt;
|
||||
use std::fs::File;
|
||||
use std::io;
|
||||
use std::path::Path;
|
||||
use std::str;
|
||||
|
||||
use bstr::io::BufReadExt;
|
||||
|
||||
use escape::{escape, escape_os};
|
||||
|
||||
/// An error that occurs when a pattern could not be converted to valid UTF-8.
|
||||
///
|
||||
/// The purpose of this error is to give a more targeted failure mode for
|
||||
/// patterns written by end users that are not valid UTF-8.
|
||||
#[derive(Clone, Debug, Eq, PartialEq)]
|
||||
pub struct InvalidPatternError {
|
||||
original: String,
|
||||
valid_up_to: usize,
|
||||
}
|
||||
|
||||
impl InvalidPatternError {
|
||||
/// Returns the index in the given string up to which valid UTF-8 was
|
||||
/// verified.
|
||||
pub fn valid_up_to(&self) -> usize {
|
||||
self.valid_up_to
|
||||
}
|
||||
}
|
||||
|
||||
impl error::Error for InvalidPatternError {
|
||||
fn description(&self) -> &str {
|
||||
"invalid pattern"
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Display for InvalidPatternError {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
write!(
|
||||
f,
|
||||
"found invalid UTF-8 in pattern at byte offset {} \
|
||||
(use hex escape sequences to match arbitrary bytes \
|
||||
in a pattern, e.g., \\xFF): '{}'",
|
||||
self.valid_up_to, self.original,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<InvalidPatternError> for io::Error {
|
||||
fn from(paterr: InvalidPatternError) -> io::Error {
|
||||
io::Error::new(io::ErrorKind::Other, paterr)
|
||||
}
|
||||
}
|
||||
|
||||
/// Convert an OS string into a regular expression pattern.
|
||||
///
|
||||
/// This conversion fails if the given pattern is not valid UTF-8, in which
|
||||
/// case, a targeted error with more information about where the invalid UTF-8
|
||||
/// occurs is given. The error also suggests the use of hex escape sequences,
|
||||
/// which are supported by many regex engines.
|
||||
pub fn pattern_from_os(pattern: &OsStr) -> Result<&str, InvalidPatternError> {
|
||||
pattern.to_str().ok_or_else(|| {
|
||||
let valid_up_to = pattern
|
||||
.to_string_lossy()
|
||||
.find('\u{FFFD}')
|
||||
.expect("a Unicode replacement codepoint for invalid UTF-8");
|
||||
InvalidPatternError {
|
||||
original: escape_os(pattern),
|
||||
valid_up_to: valid_up_to,
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
/// Convert arbitrary bytes into a regular expression pattern.
|
||||
///
|
||||
/// This conversion fails if the given pattern is not valid UTF-8, in which
|
||||
/// case, a targeted error with more information about where the invalid UTF-8
|
||||
/// occurs is given. The error also suggests the use of hex escape sequences,
|
||||
/// which are supported by many regex engines.
|
||||
pub fn pattern_from_bytes(
|
||||
pattern: &[u8],
|
||||
) -> Result<&str, InvalidPatternError> {
|
||||
str::from_utf8(pattern).map_err(|err| InvalidPatternError {
|
||||
original: escape(pattern),
|
||||
valid_up_to: err.valid_up_to(),
|
||||
})
|
||||
}
|
||||
|
||||
/// Read patterns from a file path, one per line.
|
||||
///
|
||||
/// If there was a problem reading or if any of the patterns contain invalid
|
||||
/// UTF-8, then an error is returned. If there was a problem with a specific
|
||||
/// pattern, then the error message will include the line number and the file
|
||||
/// path.
|
||||
pub fn patterns_from_path<P: AsRef<Path>>(path: P) -> io::Result<Vec<String>> {
|
||||
let path = path.as_ref();
|
||||
let file = File::open(path).map_err(|err| {
|
||||
io::Error::new(
|
||||
io::ErrorKind::Other,
|
||||
format!("{}: {}", path.display(), err),
|
||||
)
|
||||
})?;
|
||||
patterns_from_reader(file).map_err(|err| {
|
||||
io::Error::new(
|
||||
io::ErrorKind::Other,
|
||||
format!("{}:{}", path.display(), err),
|
||||
)
|
||||
})
|
||||
}
|
||||
|
||||
/// Read patterns from stdin, one per line.
|
||||
///
|
||||
/// If there was a problem reading or if any of the patterns contain invalid
|
||||
/// UTF-8, then an error is returned. If there was a problem with a specific
|
||||
/// pattern, then the error message will include the line number and the fact
|
||||
/// that it came from stdin.
|
||||
pub fn patterns_from_stdin() -> io::Result<Vec<String>> {
|
||||
let stdin = io::stdin();
|
||||
let locked = stdin.lock();
|
||||
patterns_from_reader(locked).map_err(|err| {
|
||||
io::Error::new(io::ErrorKind::Other, format!("<stdin>:{}", err))
|
||||
})
|
||||
}
|
||||
|
||||
/// Read patterns from any reader, one per line.
|
||||
///
|
||||
/// If there was a problem reading or if any of the patterns contain invalid
|
||||
/// UTF-8, then an error is returned. If there was a problem with a specific
|
||||
/// pattern, then the error message will include the line number.
|
||||
///
|
||||
/// Note that this routine uses its own internal buffer, so callers should
/// avoid providing their own buffered reader if possible.
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
/// This shows how to parse patterns, one per line.
|
||||
///
|
||||
/// ```
|
||||
/// use grep_cli::patterns_from_reader;
|
||||
///
|
||||
/// # fn example() -> Result<(), Box<::std::error::Error>> {
|
||||
/// let patterns = "\
|
||||
/// foo
|
||||
/// bar\\s+foo
|
||||
/// [a-z]{3}
|
||||
/// ";
|
||||
///
|
||||
/// assert_eq!(patterns_from_reader(patterns.as_bytes())?, vec![
|
||||
/// r"foo",
|
||||
/// r"bar\s+foo",
|
||||
/// r"[a-z]{3}",
|
||||
/// ]);
|
||||
/// # Ok(()) }
|
||||
/// ```
|
||||
pub fn patterns_from_reader<R: io::Read>(rdr: R) -> io::Result<Vec<String>> {
|
||||
let mut patterns = vec![];
|
||||
let mut line_number = 0;
|
||||
io::BufReader::new(rdr).for_byte_line(|line| {
|
||||
line_number += 1;
|
||||
match pattern_from_bytes(line) {
|
||||
Ok(pattern) => {
|
||||
patterns.push(pattern.to_string());
|
||||
Ok(true)
|
||||
}
|
||||
Err(err) => Err(io::Error::new(
|
||||
io::ErrorKind::Other,
|
||||
format!("{}: {}", line_number, err),
|
||||
)),
|
||||
}
|
||||
})?;
|
||||
Ok(patterns)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn bytes() {
|
||||
let pat = b"abc\xFFxyz";
|
||||
let err = pattern_from_bytes(pat).unwrap_err();
|
||||
assert_eq!(3, err.valid_up_to());
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[cfg(unix)]
|
||||
fn os() {
|
||||
use std::ffi::OsStr;
|
||||
use std::os::unix::ffi::OsStrExt;
|
||||
|
||||
let pat = OsStr::from_bytes(b"abc\xFFxyz");
|
||||
let err = pattern_from_os(pat).unwrap_err();
|
||||
assert_eq!(3, err.valid_up_to());
|
||||
}
|
||||
}
|
||||
crates/cli/src/process.rs (new file, 270 lines)
@@ -0,0 +1,270 @@
|
||||
use std::error;
|
||||
use std::fmt;
|
||||
use std::io::{self, Read};
|
||||
use std::iter;
|
||||
use std::process;
|
||||
use std::thread::{self, JoinHandle};
|
||||
|
||||
/// An error that can occur while running a command and reading its output.
|
||||
///
|
||||
/// This error can be seamlessly converted to an `io::Error` via a `From`
|
||||
/// implementation.
|
||||
#[derive(Debug)]
|
||||
pub struct CommandError {
|
||||
kind: CommandErrorKind,
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
enum CommandErrorKind {
|
||||
Io(io::Error),
|
||||
Stderr(Vec<u8>),
|
||||
}
|
||||
|
||||
impl CommandError {
|
||||
/// Create an error from an I/O error.
|
||||
pub(crate) fn io(ioerr: io::Error) -> CommandError {
|
||||
CommandError { kind: CommandErrorKind::Io(ioerr) }
|
||||
}
|
||||
|
||||
/// Create an error from the contents of stderr (which may be empty).
|
||||
pub(crate) fn stderr(bytes: Vec<u8>) -> CommandError {
|
||||
CommandError { kind: CommandErrorKind::Stderr(bytes) }
|
||||
}
|
||||
}
|
||||
|
||||
impl error::Error for CommandError {
|
||||
fn description(&self) -> &str {
|
||||
"command error"
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Display for CommandError {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
match self.kind {
|
||||
CommandErrorKind::Io(ref e) => e.fmt(f),
|
||||
CommandErrorKind::Stderr(ref bytes) => {
|
||||
let msg = String::from_utf8_lossy(bytes);
|
||||
if msg.trim().is_empty() {
|
||||
write!(f, "<stderr is empty>")
|
||||
} else {
|
||||
let div = iter::repeat('-').take(79).collect::<String>();
|
||||
write!(
|
||||
f,
|
||||
"\n{div}\n{msg}\n{div}",
|
||||
div = div,
|
||||
msg = msg.trim()
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<io::Error> for CommandError {
|
||||
fn from(ioerr: io::Error) -> CommandError {
|
||||
CommandError { kind: CommandErrorKind::Io(ioerr) }
|
||||
}
|
||||
}
|
||||
|
||||
impl From<CommandError> for io::Error {
|
||||
fn from(cmderr: CommandError) -> io::Error {
|
||||
match cmderr.kind {
|
||||
CommandErrorKind::Io(ioerr) => ioerr,
|
||||
CommandErrorKind::Stderr(_) => {
|
||||
io::Error::new(io::ErrorKind::Other, cmderr)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Configures and builds a streaming reader for process output.
|
||||
#[derive(Clone, Debug, Default)]
|
||||
pub struct CommandReaderBuilder {
|
||||
async_stderr: bool,
|
||||
}
|
||||
|
||||
impl CommandReaderBuilder {
|
||||
/// Create a new builder with the default configuration.
|
||||
pub fn new() -> CommandReaderBuilder {
|
||||
CommandReaderBuilder::default()
|
||||
}
|
||||
|
||||
/// Build a new streaming reader for the given command's output.
|
||||
///
|
||||
/// The caller should set everything that's required on the given command
|
||||
/// before building a reader, such as its arguments, environment and
|
||||
/// current working directory. Settings such as the stdout and stderr (but
|
||||
/// not stdin) pipes will be overridden so that they can be controlled by
|
||||
/// the reader.
|
||||
///
|
||||
/// If there was a problem spawning the given command, then its error is
|
||||
/// returned.
|
||||
pub fn build(
|
||||
&self,
|
||||
command: &mut process::Command,
|
||||
) -> Result<CommandReader, CommandError> {
|
||||
let mut child = command
|
||||
.stdout(process::Stdio::piped())
|
||||
.stderr(process::Stdio::piped())
|
||||
.spawn()?;
|
||||
let stdout = child.stdout.take().unwrap();
|
||||
let stderr = if self.async_stderr {
|
||||
StderrReader::async(child.stderr.take().unwrap())
|
||||
} else {
|
||||
StderrReader::sync(child.stderr.take().unwrap())
|
||||
};
|
||||
Ok(CommandReader {
|
||||
child: child,
|
||||
stdout: stdout,
|
||||
stderr: stderr,
|
||||
done: false,
|
||||
})
|
||||
}
|
||||
|
||||
/// When enabled, the reader will asynchronously read the contents of the
|
||||
/// command's stderr output. When disabled, stderr is only read after the
|
||||
/// stdout stream has been exhausted (or if the process quits with an error
|
||||
/// code).
|
||||
///
|
||||
/// Note that when enabled, this may require launching an additional
|
||||
/// thread in order to read stderr. This is done so that the process being
|
||||
/// executed is never blocked from writing to stdout or stderr. If this is
|
||||
/// disabled, then it is possible for the process to fill up the stderr
|
||||
/// buffer and deadlock.
|
||||
///
|
||||
/// This is enabled by default.
|
||||
pub fn async_stderr(&mut self, yes: bool) -> &mut CommandReaderBuilder {
|
||||
self.async_stderr = yes;
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
/// A streaming reader for a command's output.
|
||||
///
|
||||
/// The purpose of this reader is to provide an easy way to execute processes
|
||||
/// whose stdout is read in a streaming way while also making the process's
|
||||
/// stderr available when the process fails with an exit code. This makes it
|
||||
/// possible to execute processes while surfacing the underlying failure mode
|
||||
/// in the case of an error.
|
||||
///
|
||||
/// Moreover, by default, this reader will asynchronously read the processes'
|
||||
/// stderr. This prevents subtle deadlocking bugs for noisy processes that
|
||||
/// write a lot to stderr. Currently, the entire contents of stderr is read
|
||||
/// on to the heap.
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
/// This example shows how to invoke `gzip` to decompress the contents of a
|
||||
/// file. If the `gzip` command reports a failing exit status, then its stderr
|
||||
/// is returned as an error.
|
||||
///
|
||||
/// ```no_run
|
||||
/// use std::io::Read;
|
||||
/// use std::process::Command;
|
||||
/// use grep_cli::CommandReader;
|
||||
///
|
||||
/// # fn example() -> Result<(), Box<::std::error::Error>> {
|
||||
/// let mut cmd = Command::new("gzip");
|
||||
/// cmd.arg("-d").arg("-c").arg("/usr/share/man/man1/ls.1.gz");
|
||||
///
|
||||
/// let mut rdr = CommandReader::new(&mut cmd)?;
|
||||
/// let mut contents = vec![];
|
||||
/// rdr.read_to_end(&mut contents)?;
|
||||
/// # Ok(()) }
|
||||
/// ```
|
||||
#[derive(Debug)]
|
||||
pub struct CommandReader {
|
||||
child: process::Child,
|
||||
stdout: process::ChildStdout,
|
||||
stderr: StderrReader,
|
||||
done: bool,
|
||||
}
|
||||
|
||||
impl CommandReader {
|
||||
/// Create a new streaming reader for the given command using the default
|
||||
/// configuration.
|
||||
///
|
||||
/// The caller should set everything that's required on the given command
|
||||
/// before building a reader, such as its arguments, environment and
|
||||
/// current working directory. Settings such as the stdout and stderr (but
|
||||
/// not stdin) pipes will be overridden so that they can be controlled by
|
||||
/// the reader.
|
||||
///
|
||||
/// If there was a problem spawning the given command, then its error is
|
||||
/// returned.
|
||||
///
|
||||
/// If the caller requires additional configuration for the reader
|
||||
/// returned, then use
|
||||
/// [`CommandReaderBuilder`](struct.CommandReaderBuilder.html).
|
||||
pub fn new(
|
||||
cmd: &mut process::Command,
|
||||
) -> Result<CommandReader, CommandError> {
|
||||
CommandReaderBuilder::new().build(cmd)
|
||||
}
|
||||
}
|
||||
|
||||
impl io::Read for CommandReader {
|
||||
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
|
||||
if self.done {
|
||||
return Ok(0);
|
||||
}
|
||||
let nread = self.stdout.read(buf)?;
|
||||
if nread == 0 {
|
||||
self.done = true;
|
||||
// Reap the child now that we're done reading. If the command
|
||||
// failed, report stderr as an error.
|
||||
if !self.child.wait()?.success() {
|
||||
return Err(io::Error::from(self.stderr.read_to_end()));
|
||||
}
|
||||
}
|
||||
Ok(nread)
|
||||
}
|
||||
}
|
||||
|
||||
/// A reader that encapsulates the asynchronous or synchronous reading of
|
||||
/// stderr.
|
||||
#[derive(Debug)]
|
||||
enum StderrReader {
|
||||
Async(Option<JoinHandle<CommandError>>),
|
||||
Sync(process::ChildStderr),
|
||||
}
|
||||
|
||||
impl StderrReader {
|
||||
/// Create a reader for stderr that reads contents asynchronously.
|
||||
fn async(mut stderr: process::ChildStderr) -> StderrReader {
|
||||
let handle =
|
||||
thread::spawn(move || stderr_to_command_error(&mut stderr));
|
||||
StderrReader::Async(Some(handle))
|
||||
}
|
||||
|
||||
/// Create a reader for stderr that reads contents synchronously.
|
||||
fn sync(stderr: process::ChildStderr) -> StderrReader {
|
||||
StderrReader::Sync(stderr)
|
||||
}
|
||||
|
||||
/// Consumes all of stderr onto the heap and returns it as an error.
|
||||
///
|
||||
/// If there was a problem reading stderr itself, then this returns an I/O
|
||||
/// command error.
|
||||
fn read_to_end(&mut self) -> CommandError {
|
||||
match *self {
|
||||
StderrReader::Async(ref mut handle) => {
|
||||
let handle = handle
|
||||
.take()
|
||||
.expect("read_to_end cannot be called more than once");
|
||||
handle.join().expect("stderr reading thread does not panic")
|
||||
}
|
||||
StderrReader::Sync(ref mut stderr) => {
|
||||
stderr_to_command_error(stderr)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn stderr_to_command_error(stderr: &mut process::ChildStderr) -> CommandError {
|
||||
let mut bytes = vec![];
|
||||
match stderr.read_to_end(&mut bytes) {
|
||||
Ok(_) => CommandError::stderr(bytes),
|
||||
Err(err) => CommandError::io(err),
|
||||
}
|
||||
}
|
||||
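As a usage sketch (not part of the diff): the builder API referenced above exposes `async_stderr`, so a caller that wants stderr drained on a helper thread could do something like the following. The `xz` invocation and file path are purely illustrative.

```rust
use std::io::Read;
use std::process::Command;

use grep_cli::CommandReaderBuilder;

fn example() -> Result<(), Box<dyn std::error::Error>> {
    // Hypothetical command; any process that writes to stdout works.
    let mut cmd = Command::new("xz");
    cmd.arg("-d").arg("-c").arg("/path/to/some/file.xz");

    // Drain stderr on a background thread so a chatty command cannot block
    // on a full stderr pipe while we read stdout.
    let mut rdr =
        CommandReaderBuilder::new().async_stderr(true).build(&mut cmd)?;
    let mut contents = vec![];
    rdr.read_to_end(&mut contents)?;
    Ok(())
}
```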
133  crates/cli/src/wtr.rs  Normal file
@@ -0,0 +1,133 @@
use std::io;

use termcolor;

use is_tty_stdout;

/// A writer that supports coloring with either line or block buffering.
pub struct StandardStream(StandardStreamKind);

/// Returns a possibly buffered writer to stdout for the given color choice.
///
/// The writer returned is either line buffered or block buffered. The decision
/// between these two is made automatically based on whether a tty is attached
/// to stdout or not. If a tty is attached, then line buffering is used.
/// Otherwise, block buffering is used. In general, block buffering is more
/// efficient, but may increase the time it takes for the end user to see the
/// first bits of output.
///
/// If you need more fine grained control over the buffering mode, then use one
/// of `stdout_buffered_line` or `stdout_buffered_block`.
///
/// The color choice given is passed along to the underlying writer. To
/// completely disable colors in all cases, use `ColorChoice::Never`.
pub fn stdout(color_choice: termcolor::ColorChoice) -> StandardStream {
    if is_tty_stdout() {
        stdout_buffered_line(color_choice)
    } else {
        stdout_buffered_block(color_choice)
    }
}

/// Returns a line buffered writer to stdout for the given color choice.
///
/// This writer is useful when printing results directly to a tty such that
/// users see output as soon as it's written. The downside of this approach
/// is that it can be slower, especially when there is a lot of output.
///
/// You might consider using
/// [`stdout`](fn.stdout.html)
/// instead, which chooses the buffering strategy automatically based on
/// whether stdout is connected to a tty.
pub fn stdout_buffered_line(
    color_choice: termcolor::ColorChoice,
) -> StandardStream {
    let out = termcolor::StandardStream::stdout(color_choice);
    StandardStream(StandardStreamKind::LineBuffered(out))
}

/// Returns a block buffered writer to stdout for the given color choice.
///
/// This writer is useful when printing results to a file since it amortizes
/// the cost of writing data. The downside of this approach is that it can
/// increase the latency of display output when writing to a tty.
///
/// You might consider using
/// [`stdout`](fn.stdout.html)
/// instead, which chooses the buffering strategy automatically based on
/// whether stdout is connected to a tty.
pub fn stdout_buffered_block(
    color_choice: termcolor::ColorChoice,
) -> StandardStream {
    let out = termcolor::BufferedStandardStream::stdout(color_choice);
    StandardStream(StandardStreamKind::BlockBuffered(out))
}

enum StandardStreamKind {
    LineBuffered(termcolor::StandardStream),
    BlockBuffered(termcolor::BufferedStandardStream),
}

impl io::Write for StandardStream {
    #[inline]
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        use self::StandardStreamKind::*;

        match self.0 {
            LineBuffered(ref mut w) => w.write(buf),
            BlockBuffered(ref mut w) => w.write(buf),
        }
    }

    #[inline]
    fn flush(&mut self) -> io::Result<()> {
        use self::StandardStreamKind::*;

        match self.0 {
            LineBuffered(ref mut w) => w.flush(),
            BlockBuffered(ref mut w) => w.flush(),
        }
    }
}

impl termcolor::WriteColor for StandardStream {
    #[inline]
    fn supports_color(&self) -> bool {
        use self::StandardStreamKind::*;

        match self.0 {
            LineBuffered(ref w) => w.supports_color(),
            BlockBuffered(ref w) => w.supports_color(),
        }
    }

    #[inline]
    fn set_color(&mut self, spec: &termcolor::ColorSpec) -> io::Result<()> {
        use self::StandardStreamKind::*;

        match self.0 {
            LineBuffered(ref mut w) => w.set_color(spec),
            BlockBuffered(ref mut w) => w.set_color(spec),
        }
    }

    #[inline]
    fn reset(&mut self) -> io::Result<()> {
        use self::StandardStreamKind::*;

        match self.0 {
            LineBuffered(ref mut w) => w.reset(),
            BlockBuffered(ref mut w) => w.reset(),
        }
    }

    #[inline]
    fn is_synchronous(&self) -> bool {
        use self::StandardStreamKind::*;

        match self.0 {
            LineBuffered(ref w) => w.is_synchronous(),
            BlockBuffered(ref w) => w.is_synchronous(),
        }
    }
}
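A brief usage sketch of the writer above (assuming only the `stdout` function and trait impls shown in this file, re-exported by `grep_cli`): pick the writer once, then treat it as any other `termcolor::WriteColor` writer.

```rust
use std::io::Write;

use termcolor::{Color, ColorChoice, ColorSpec, WriteColor};

fn example() -> std::io::Result<()> {
    // Line buffered on a tty, block buffered otherwise.
    let mut wtr = grep_cli::stdout(ColorChoice::Auto);
    wtr.set_color(ColorSpec::new().set_fg(Some(Color::Red)))?;
    writeln!(wtr, "a match")?;
    wtr.reset()?;
    wtr.flush()
}
```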
15  crates/core/README.md  Normal file
@@ -0,0 +1,15 @@
ripgrep core
------------
This is the core ripgrep crate. In particular, `main.rs` is where the `main`
function lives.

Most of ripgrep core consists of two things:

* The definition of the CLI interface, including docs for every flag.
* Glue code that brings the `grep-matcher`, `grep-regex`, `grep-searcher` and
  `grep-printer` crates together to actually execute the search.

Currently, there are no plans to make ripgrep core available as an independent
library. However, much of the heavy lifting of ripgrep is done via its
constituent crates, which can be reused independently of ripgrep.
Unfortunately, there is no guide or tutorial to teach folks how to do this yet.
3044  crates/core/app.rs  Normal file (file diff suppressed because it is too large)
1854  crates/core/args.rs  Normal file (file diff suppressed because it is too large)
169  crates/core/config.rs  Normal file
@@ -0,0 +1,169 @@
// This module provides routines for reading ripgrep config "rc" files. The
// primary output of these routines is a sequence of arguments, where each
// argument corresponds precisely to one shell argument.

use std::env;
use std::error::Error;
use std::ffi::OsString;
use std::fs::File;
use std::io;
use std::path::{Path, PathBuf};

use bstr::{io::BufReadExt, ByteSlice};
use log;

use crate::Result;

/// Return a sequence of arguments derived from ripgrep rc configuration files.
pub fn args() -> Vec<OsString> {
    let config_path = match env::var_os("RIPGREP_CONFIG_PATH") {
        None => return vec![],
        Some(config_path) => {
            if config_path.is_empty() {
                return vec![];
            }
            PathBuf::from(config_path)
        }
    };
    let (args, errs) = match parse(&config_path) {
        Ok((args, errs)) => (args, errs),
        Err(err) => {
            message!("{}", err);
            return vec![];
        }
    };
    if !errs.is_empty() {
        for err in errs {
            message!("{}:{}", config_path.display(), err);
        }
    }
    log::debug!(
        "{}: arguments loaded from config file: {:?}",
        config_path.display(),
        args
    );
    args
}

/// Parse a single ripgrep rc file from the given path.
///
/// On success, this returns a set of shell arguments, in order, that should
/// be prepended to the arguments given to ripgrep at the command line.
///
/// If the file could not be read, then an error is returned. If there was
/// a problem parsing one or more lines in the file, then errors are returned
/// for each line in addition to successfully parsed arguments.
fn parse<P: AsRef<Path>>(
    path: P,
) -> Result<(Vec<OsString>, Vec<Box<dyn Error>>)> {
    let path = path.as_ref();
    match File::open(&path) {
        Ok(file) => parse_reader(file),
        Err(err) => Err(From::from(format!("{}: {}", path.display(), err))),
    }
}

/// Parse a single ripgrep rc file from the given reader.
///
/// Callers should not provide a buffered reader, as this routine will use its
/// own buffer internally.
///
/// On success, this returns a set of shell arguments, in order, that should
/// be prepended to the arguments given to ripgrep at the command line.
///
/// If the reader could not be read, then an error is returned. If there was a
/// problem parsing one or more lines, then errors are returned for each line
/// in addition to successfully parsed arguments.
fn parse_reader<R: io::Read>(
    rdr: R,
) -> Result<(Vec<OsString>, Vec<Box<dyn Error>>)> {
    let bufrdr = io::BufReader::new(rdr);
    let (mut args, mut errs) = (vec![], vec![]);
    let mut line_number = 0;
    bufrdr.for_byte_line_with_terminator(|line| {
        line_number += 1;

        let line = line.trim();
        if line.is_empty() || line[0] == b'#' {
            return Ok(true);
        }
        match line.to_os_str() {
            Ok(osstr) => {
                args.push(osstr.to_os_string());
            }
            Err(err) => {
                errs.push(format!("{}: {}", line_number, err).into());
            }
        }
        Ok(true)
    })?;
    Ok((args, errs))
}

#[cfg(test)]
mod tests {
    use super::parse_reader;
    use std::ffi::OsString;

    #[test]
    fn basic() {
        let (args, errs) = parse_reader(
            &b"\
# Test
--context=0
--smart-case
-u


# --bar
--foo
"[..],
        )
        .unwrap();
        assert!(errs.is_empty());
        let args: Vec<String> =
            args.into_iter().map(|s| s.into_string().unwrap()).collect();
        assert_eq!(args, vec!["--context=0", "--smart-case", "-u", "--foo",]);
    }

    // We test that we can handle invalid UTF-8 on Unix-like systems.
    #[test]
    #[cfg(unix)]
    fn error() {
        use std::os::unix::ffi::OsStringExt;

        let (args, errs) = parse_reader(
            &b"\
quux
foo\xFFbar
baz
"[..],
        )
        .unwrap();
        assert!(errs.is_empty());
        assert_eq!(
            args,
            vec![
                OsString::from("quux"),
                OsString::from_vec(b"foo\xFFbar".to_vec()),
                OsString::from("baz"),
            ]
        );
    }

    // ... but test that invalid UTF-8 fails on Windows.
    #[test]
    #[cfg(not(unix))]
    fn error() {
        let (args, errs) = parse_reader(
            &b"\
quux
foo\xFFbar
baz
"[..],
        )
        .unwrap();
        assert_eq!(errs.len(), 1);
        assert_eq!(args, vec![OsString::from("quux"), OsString::from("baz"),]);
    }
}
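To make the "prepended" contract above concrete, here is a small sketch (hypothetical glue, not code from this changeset) of how config-file arguments would be spliced in front of the CLI arguments so that explicit command-line flags can override config defaults:

```rust
use std::env;
use std::ffi::OsString;

// Hypothetical: build the final argv as <binary> <config args> <cli args>,
// so later (CLI) flags win over earlier (config) ones where that applies.
fn full_args(config_args: Vec<OsString>) -> Vec<OsString> {
    let mut argv: Vec<OsString> = vec![];
    argv.extend(env::args_os().take(1)); // the binary name
    argv.extend(config_args); // from RIPGREP_CONFIG_PATH, possibly empty
    argv.extend(env::args_os().skip(1)); // arguments typed by the user
    argv
}
```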
68  crates/core/logger.rs  Normal file
@@ -0,0 +1,68 @@
// This module defines a super simple logger that works with the `log` crate.
// We don't need anything fancy; just basic log levels and the ability to
// print to stderr. We therefore avoid bringing in extra dependencies just
// for this functionality.

use log::{self, Log};

/// The simplest possible logger that logs to stderr.
///
/// This logger does no filtering. Instead, it relies on the `log` crate's
/// filtering via its global max_level setting.
#[derive(Debug)]
pub struct Logger(());

const LOGGER: &'static Logger = &Logger(());

impl Logger {
    /// Create a new logger that logs to stderr and initialize it as the
    /// global logger. If there was a problem setting the logger, then an
    /// error is returned.
    pub fn init() -> Result<(), log::SetLoggerError> {
        log::set_logger(LOGGER)
    }
}

impl Log for Logger {
    fn enabled(&self, _: &log::Metadata) -> bool {
        // We set the log level via log::set_max_level, so we don't need to
        // implement filtering here.
        true
    }

    fn log(&self, record: &log::Record) {
        match (record.file(), record.line()) {
            (Some(file), Some(line)) => {
                eprintln!(
                    "{}|{}|{}:{}: {}",
                    record.level(),
                    record.target(),
                    file,
                    line,
                    record.args()
                );
            }
            (Some(file), None) => {
                eprintln!(
                    "{}|{}|{}: {}",
                    record.level(),
                    record.target(),
                    file,
                    record.args()
                );
            }
            _ => {
                eprintln!(
                    "{}|{}: {}",
                    record.level(),
                    record.target(),
                    record.args()
                );
            }
        }
    }

    fn flush(&self) {
        // We use eprintln! which is flushed on every call.
    }
}
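A minimal sketch of wiring this logger up from inside the ripgrep core crate (the `crate::logger` path and the chosen levels are assumptions for illustration; `log::set_max_level` is the standard `log` crate call the comment above refers to):

```rust
use log::LevelFilter;

fn init_logging(debug: bool) -> Result<(), log::SetLoggerError> {
    // Install the stderr logger defined in this module (path assumed).
    crate::logger::Logger::init()?;
    // Then decide how chatty it should be; filtering happens here, not in
    // the logger itself.
    log::set_max_level(if debug {
        LevelFilter::Debug
    } else {
        LevelFilter::Warn
    });
    Ok(())
}
```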
325  crates/core/main.rs  Normal file
@@ -0,0 +1,325 @@
use std::error;
use std::io::{self, Write};
use std::process;
use std::sync::Mutex;
use std::time::Instant;

use ignore::WalkState;

use args::Args;
use subject::Subject;

#[macro_use]
mod messages;

mod app;
mod args;
mod config;
mod logger;
mod path_printer;
mod search;
mod subject;

// Since Rust no longer uses jemalloc by default, ripgrep will, by default,
// use the system allocator. On Linux, this would normally be glibc's
// allocator, which is pretty good. In particular, ripgrep does not have a
// particularly allocation heavy workload, so there really isn't much
// difference (for ripgrep's purposes) between glibc's allocator and jemalloc.
//
// However, when ripgrep is built with musl, this means ripgrep will use musl's
// allocator, which appears to be substantially worse. (musl's goal is not to
// have the fastest version of everything. Its goal is to be small and amenable
// to static compilation.) Even though ripgrep isn't particularly allocation
// heavy, musl's allocator appears to slow down ripgrep quite a bit. Therefore,
// when building with musl, we use jemalloc.
//
// We don't unconditionally use jemalloc because it can be nice to use the
// system's default allocator by default. Moreover, jemalloc seems to increase
// compilation times by a bit.
//
// Moreover, we only do this on 64-bit systems since jemalloc doesn't support
// i686.
#[cfg(all(target_env = "musl", target_pointer_width = "64"))]
#[global_allocator]
static ALLOC: jemallocator::Jemalloc = jemallocator::Jemalloc;

type Result<T> = ::std::result::Result<T, Box<dyn error::Error>>;

fn main() {
    if let Err(err) = Args::parse().and_then(try_main) {
        eprintln!("{}", err);
        process::exit(2);
    }
}

fn try_main(args: Args) -> Result<()> {
    use args::Command::*;

    let matched = match args.command()? {
        Search => search(&args),
        SearchParallel => search_parallel(&args),
        SearchNever => Ok(false),
        Files => files(&args),
        FilesParallel => files_parallel(&args),
        Types => types(&args),
        PCRE2Version => pcre2_version(&args),
    }?;
    if matched && (args.quiet() || !messages::errored()) {
        process::exit(0)
    } else if messages::errored() {
        process::exit(2)
    } else {
        process::exit(1)
    }
}

/// The top-level entry point for single-threaded search. This recursively
/// steps through the file list (current directory by default) and searches
/// each file sequentially.
fn search(args: &Args) -> Result<bool> {
    let started_at = Instant::now();
    let quit_after_match = args.quit_after_match()?;
    let subject_builder = args.subject_builder();
    let mut stats = args.stats()?;
    let mut searcher = args.search_worker(args.stdout())?;
    let mut matched = false;

    for result in args.walker()? {
        let subject = match subject_builder.build_from_result(result) {
            Some(subject) => subject,
            None => continue,
        };
        let search_result = match searcher.search(&subject) {
            Ok(search_result) => search_result,
            Err(err) => {
                // A broken pipe means graceful termination.
                if err.kind() == io::ErrorKind::BrokenPipe {
                    break;
                }
                err_message!("{}: {}", subject.path().display(), err);
                continue;
            }
        };
        matched = matched || search_result.has_match();
        if let Some(ref mut stats) = stats {
            *stats += search_result.stats().unwrap();
        }
        if matched && quit_after_match {
            break;
        }
    }
    if let Some(ref stats) = stats {
        let elapsed = Instant::now().duration_since(started_at);
        // We don't care if we couldn't print this successfully.
        let _ = searcher.print_stats(elapsed, stats);
    }
    Ok(matched)
}

/// The top-level entry point for multi-threaded search. The parallelism is
/// itself achieved by the recursive directory traversal. All we need to do is
/// feed it a worker for performing a search on each file.
fn search_parallel(args: &Args) -> Result<bool> {
    use std::sync::atomic::AtomicBool;
    use std::sync::atomic::Ordering::SeqCst;

    let quit_after_match = args.quit_after_match()?;
    let started_at = Instant::now();
    let subject_builder = args.subject_builder();
    let bufwtr = args.buffer_writer()?;
    let stats = args.stats()?.map(Mutex::new);
    let matched = AtomicBool::new(false);
    let mut searcher_err = None;
    args.walker_parallel()?.run(|| {
        let bufwtr = &bufwtr;
        let stats = &stats;
        let matched = &matched;
        let subject_builder = &subject_builder;
        let mut searcher = match args.search_worker(bufwtr.buffer()) {
            Ok(searcher) => searcher,
            Err(err) => {
                searcher_err = Some(err);
                return Box::new(move |_| WalkState::Quit);
            }
        };

        Box::new(move |result| {
            let subject = match subject_builder.build_from_result(result) {
                Some(subject) => subject,
                None => return WalkState::Continue,
            };
            searcher.printer().get_mut().clear();
            let search_result = match searcher.search(&subject) {
                Ok(search_result) => search_result,
                Err(err) => {
                    err_message!("{}: {}", subject.path().display(), err);
                    return WalkState::Continue;
                }
            };
            if search_result.has_match() {
                matched.store(true, SeqCst);
            }
            if let Some(ref locked_stats) = *stats {
                let mut stats = locked_stats.lock().unwrap();
                *stats += search_result.stats().unwrap();
            }
            if let Err(err) = bufwtr.print(searcher.printer().get_mut()) {
                // A broken pipe means graceful termination.
                if err.kind() == io::ErrorKind::BrokenPipe {
                    return WalkState::Quit;
                }
                // Otherwise, we continue on our merry way.
                err_message!("{}: {}", subject.path().display(), err);
            }
            if matched.load(SeqCst) && quit_after_match {
                WalkState::Quit
            } else {
                WalkState::Continue
            }
        })
    });
    if let Some(err) = searcher_err.take() {
        return Err(err);
    }
    if let Some(ref locked_stats) = stats {
        let elapsed = Instant::now().duration_since(started_at);
        let stats = locked_stats.lock().unwrap();
        let mut searcher = args.search_worker(args.stdout())?;
        // We don't care if we couldn't print this successfully.
        let _ = searcher.print_stats(elapsed, &stats);
    }
    Ok(matched.load(SeqCst))
}

/// The top-level entry point for listing files without searching them. This
/// recursively steps through the file list (current directory by default) and
/// prints each path sequentially using a single thread.
fn files(args: &Args) -> Result<bool> {
    let quit_after_match = args.quit_after_match()?;
    let subject_builder = args.subject_builder();
    let mut matched = false;
    let mut path_printer = args.path_printer(args.stdout())?;
    for result in args.walker()? {
        let subject = match subject_builder.build_from_result(result) {
            Some(subject) => subject,
            None => continue,
        };
        matched = true;
        if quit_after_match {
            break;
        }
        if let Err(err) = path_printer.write_path(subject.path()) {
            // A broken pipe means graceful termination.
            if err.kind() == io::ErrorKind::BrokenPipe {
                break;
            }
            // Otherwise, we have some other error that's preventing us from
            // writing to stdout, so we should bubble it up.
            return Err(err.into());
        }
    }
    Ok(matched)
}

/// The top-level entry point for listing files without searching them. This
/// recursively steps through the file list (current directory by default) and
/// prints each path sequentially using multiple threads.
fn files_parallel(args: &Args) -> Result<bool> {
    use std::sync::atomic::AtomicBool;
    use std::sync::atomic::Ordering::SeqCst;
    use std::sync::mpsc;
    use std::thread;

    let quit_after_match = args.quit_after_match()?;
    let subject_builder = args.subject_builder();
    let mut path_printer = args.path_printer(args.stdout())?;
    let matched = AtomicBool::new(false);
    let (tx, rx) = mpsc::channel::<Subject>();

    let print_thread = thread::spawn(move || -> io::Result<()> {
        for subject in rx.iter() {
            path_printer.write_path(subject.path())?;
        }
        Ok(())
    });
    args.walker_parallel()?.run(|| {
        let subject_builder = &subject_builder;
        let matched = &matched;
        let tx = tx.clone();

        Box::new(move |result| {
            let subject = match subject_builder.build_from_result(result) {
                Some(subject) => subject,
                None => return WalkState::Continue,
            };
            matched.store(true, SeqCst);
            if quit_after_match {
                WalkState::Quit
            } else {
                match tx.send(subject) {
                    Ok(_) => WalkState::Continue,
                    Err(_) => WalkState::Quit,
                }
            }
        })
    });
    drop(tx);
    if let Err(err) = print_thread.join().unwrap() {
        // A broken pipe means graceful termination, so fall through.
        // Otherwise, something bad happened while writing to stdout, so bubble
        // it up.
        if err.kind() != io::ErrorKind::BrokenPipe {
            return Err(err.into());
        }
    }
    Ok(matched.load(SeqCst))
}

/// The top-level entry point for --type-list.
fn types(args: &Args) -> Result<bool> {
    let mut count = 0;
    let mut stdout = args.stdout();
    for def in args.type_defs()? {
        count += 1;
        stdout.write_all(def.name().as_bytes())?;
        stdout.write_all(b": ")?;

        let mut first = true;
        for glob in def.globs() {
            if !first {
                stdout.write_all(b", ")?;
            }
            stdout.write_all(glob.as_bytes())?;
            first = false;
        }
        stdout.write_all(b"\n")?;
    }
    Ok(count > 0)
}

/// The top-level entry point for --pcre2-version.
fn pcre2_version(args: &Args) -> Result<bool> {
    #[cfg(feature = "pcre2")]
    fn imp(args: &Args) -> Result<bool> {
        use grep::pcre2;

        let mut stdout = args.stdout();

        let (major, minor) = pcre2::version();
        writeln!(stdout, "PCRE2 {}.{} is available", major, minor)?;

        if cfg!(target_pointer_width = "64") && pcre2::is_jit_available() {
            writeln!(stdout, "JIT is available")?;
        }
        Ok(true)
    }

    #[cfg(not(feature = "pcre2"))]
    fn imp(args: &Args) -> Result<bool> {
        let mut stdout = args.stdout();
        writeln!(stdout, "PCRE2 is not available in this build of ripgrep.")?;
        Ok(false)
    }

    imp(args)
}
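The parallel entry points above lean on the `ignore` crate's callback-per-thread pattern: `run` takes a factory closure that builds one visitor closure per worker thread. A stripped-down sketch of that pattern in isolation (no searching, just counting files; only the public `ignore` API is assumed):

```rust
use std::sync::atomic::{AtomicU64, Ordering};

use ignore::{WalkBuilder, WalkState};

fn count_files(dir: &str) -> u64 {
    let count = AtomicU64::new(0);
    WalkBuilder::new(dir).build_parallel().run(|| {
        // This outer closure runs once per worker thread; the closure it
        // returns runs once per directory entry seen by that thread.
        let count = &count;
        Box::new(move |result| {
            if let Ok(entry) = result {
                if entry.file_type().map_or(false, |ft| ft.is_file()) {
                    count.fetch_add(1, Ordering::SeqCst);
                }
            }
            WalkState::Continue
        })
    });
    count.load(Ordering::SeqCst)
}
```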
74  crates/core/messages.rs  Normal file
@@ -0,0 +1,74 @@
use std::sync::atomic::{AtomicBool, Ordering};

static MESSAGES: AtomicBool = AtomicBool::new(false);
static IGNORE_MESSAGES: AtomicBool = AtomicBool::new(false);
static ERRORED: AtomicBool = AtomicBool::new(false);

/// Emit a non-fatal error message, unless messages were disabled.
#[macro_export]
macro_rules! message {
    ($($tt:tt)*) => {
        if crate::messages::messages() {
            eprintln!($($tt)*);
        }
    }
}

/// Like message, but sets ripgrep's "errored" flag, which controls the exit
/// status.
#[macro_export]
macro_rules! err_message {
    ($($tt:tt)*) => {
        crate::messages::set_errored();
        message!($($tt)*);
    }
}

/// Emit a non-fatal ignore-related error message (like a parse error), unless
/// ignore-messages were disabled.
#[macro_export]
macro_rules! ignore_message {
    ($($tt:tt)*) => {
        if crate::messages::messages() && crate::messages::ignore_messages() {
            eprintln!($($tt)*);
        }
    }
}

/// Returns true if and only if messages should be shown.
pub fn messages() -> bool {
    MESSAGES.load(Ordering::SeqCst)
}

/// Set whether messages should be shown or not.
///
/// By default, they are not shown.
pub fn set_messages(yes: bool) {
    MESSAGES.store(yes, Ordering::SeqCst)
}

/// Returns true if and only if "ignore" related messages should be shown.
pub fn ignore_messages() -> bool {
    IGNORE_MESSAGES.load(Ordering::SeqCst)
}

/// Set whether "ignore" related messages should be shown or not.
///
/// By default, they are not shown.
///
/// Note that this is overridden if `messages` is disabled. Namely, if
/// `messages` is disabled, then "ignore" messages are never shown, regardless
/// of this setting.
pub fn set_ignore_messages(yes: bool) {
    IGNORE_MESSAGES.store(yes, Ordering::SeqCst)
}

/// Returns true if and only if ripgrep came across a non-fatal error.
pub fn errored() -> bool {
    ERRORED.load(Ordering::SeqCst)
}

/// Indicate that ripgrep has come across a non-fatal error.
pub fn set_errored() {
    ERRORED.store(true, Ordering::SeqCst);
}
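A short sketch of how these pieces fit together inside the crate (the function names and the decision to enable both kinds of messages are illustrative; in ripgrep itself the flags `--no-messages` and `--no-ignore-messages` feed these setters):

```rust
use std::path::Path;

// Enable diagnostics once at startup, after flag parsing.
fn enable_diagnostics() {
    crate::messages::set_messages(true);
    crate::messages::set_ignore_messages(true);
}

// Report a non-fatal problem: prints to stderr (if enabled) and flips the
// ERRORED flag, which `try_main` later turns into exit code 2.
fn report_read_error(path: &Path, err: &std::io::Error) {
    err_message!("{}: {}", path.display(), err);
}
```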
98  crates/core/path_printer.rs  Normal file
@@ -0,0 +1,98 @@
use std::io;
use std::path::Path;

use grep::printer::{ColorSpecs, PrinterPath};
use termcolor::WriteColor;

/// A configuration for describing how paths should be written.
#[derive(Clone, Debug)]
struct Config {
    colors: ColorSpecs,
    separator: Option<u8>,
    terminator: u8,
}

impl Default for Config {
    fn default() -> Config {
        Config {
            colors: ColorSpecs::default(),
            separator: None,
            terminator: b'\n',
        }
    }
}

/// A builder for constructing a path printer.
#[derive(Clone, Debug)]
pub struct PathPrinterBuilder {
    config: Config,
}

impl PathPrinterBuilder {
    /// Return a new path printer builder with a default configuration.
    pub fn new() -> PathPrinterBuilder {
        PathPrinterBuilder { config: Config::default() }
    }

    /// Create a new path printer with the current configuration that writes
    /// paths to the given writer.
    pub fn build<W: WriteColor>(&self, wtr: W) -> PathPrinter<W> {
        PathPrinter { config: self.config.clone(), wtr }
    }

    /// Set the color specification for this printer.
    ///
    /// Currently, only the `path` component of the given specification is
    /// used.
    pub fn color_specs(
        &mut self,
        specs: ColorSpecs,
    ) -> &mut PathPrinterBuilder {
        self.config.colors = specs;
        self
    }

    /// A path separator.
    ///
    /// When provided, the path's default separator will be replaced with
    /// the given separator.
    ///
    /// This is not set by default, in which case the system's default path
    /// separator is used.
    pub fn separator(&mut self, sep: Option<u8>) -> &mut PathPrinterBuilder {
        self.config.separator = sep;
        self
    }

    /// A path terminator.
    ///
    /// When printing a path, it will be terminated by the given byte.
    ///
    /// This is set to `\n` by default.
    pub fn terminator(&mut self, terminator: u8) -> &mut PathPrinterBuilder {
        self.config.terminator = terminator;
        self
    }
}

/// A printer for emitting paths to a writer, with optional color support.
#[derive(Debug)]
pub struct PathPrinter<W> {
    config: Config,
    wtr: W,
}

impl<W: WriteColor> PathPrinter<W> {
    /// Write the given path to the underlying writer.
    pub fn write_path(&mut self, path: &Path) -> io::Result<()> {
        let ppath = PrinterPath::with_separator(path, self.config.separator);
        if !self.wtr.supports_color() {
            self.wtr.write_all(ppath.as_bytes())?;
        } else {
            self.wtr.set_color(self.config.colors.path())?;
            self.wtr.write_all(ppath.as_bytes())?;
            self.wtr.reset()?;
        }
        self.wtr.write_all(&[self.config.terminator])
    }
}
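A small usage sketch of the builder above (the `NoColor` wrapper is just one convenient `WriteColor` implementation from `termcolor`; any other writer would do, and the separator/terminator choices here are illustrative):

```rust
use std::path::Path;

use grep::printer::ColorSpecs;
use termcolor::NoColor;

fn example() -> std::io::Result<()> {
    let mut builder = PathPrinterBuilder::new();
    builder
        .color_specs(ColorSpecs::default())
        .separator(Some(b'/'))
        .terminator(b'\0'); // e.g. what a --null style flag would ask for
    // NoColor<Vec<u8>> collects output into memory and reports no color
    // support, so write_path takes the plain (uncolored) branch.
    let mut printer = builder.build(NoColor::new(vec![]));
    printer.write_path(Path::new("src/main.rs"))
}
```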
534  crates/core/search.rs  Normal file
@@ -0,0 +1,534 @@
use std::fs::File;
use std::io;
use std::path::{Path, PathBuf};
use std::process::{Command, Stdio};
use std::time::Duration;

use grep::cli;
use grep::matcher::Matcher;
#[cfg(feature = "pcre2")]
use grep::pcre2::RegexMatcher as PCRE2RegexMatcher;
use grep::printer::{Standard, Stats, Summary, JSON};
use grep::regex::RegexMatcher as RustRegexMatcher;
use grep::searcher::{BinaryDetection, Searcher};
use ignore::overrides::Override;
use serde_json as json;
use serde_json::json;
use termcolor::WriteColor;

use crate::subject::Subject;

/// The configuration for the search worker. Among a few other things, the
/// configuration primarily controls the way we show search results to users
/// at a very high level.
#[derive(Clone, Debug)]
struct Config {
    json_stats: bool,
    preprocessor: Option<PathBuf>,
    preprocessor_globs: Override,
    search_zip: bool,
    binary_implicit: BinaryDetection,
    binary_explicit: BinaryDetection,
}

impl Default for Config {
    fn default() -> Config {
        Config {
            json_stats: false,
            preprocessor: None,
            preprocessor_globs: Override::empty(),
            search_zip: false,
            binary_implicit: BinaryDetection::none(),
            binary_explicit: BinaryDetection::none(),
        }
    }
}

/// A builder for configuring and constructing a search worker.
#[derive(Clone, Debug)]
pub struct SearchWorkerBuilder {
    config: Config,
    command_builder: cli::CommandReaderBuilder,
    decomp_builder: cli::DecompressionReaderBuilder,
}

impl Default for SearchWorkerBuilder {
    fn default() -> SearchWorkerBuilder {
        SearchWorkerBuilder::new()
    }
}

impl SearchWorkerBuilder {
    /// Create a new builder for configuring and constructing a search worker.
    pub fn new() -> SearchWorkerBuilder {
        let mut cmd_builder = cli::CommandReaderBuilder::new();
        cmd_builder.async_stderr(true);

        let mut decomp_builder = cli::DecompressionReaderBuilder::new();
        decomp_builder.async_stderr(true);

        SearchWorkerBuilder {
            config: Config::default(),
            command_builder: cmd_builder,
            decomp_builder,
        }
    }

    /// Create a new search worker using the given searcher, matcher and
    /// printer.
    pub fn build<W: WriteColor>(
        &self,
        matcher: PatternMatcher,
        searcher: Searcher,
        printer: Printer<W>,
    ) -> SearchWorker<W> {
        let config = self.config.clone();
        let command_builder = self.command_builder.clone();
        let decomp_builder = self.decomp_builder.clone();
        SearchWorker {
            config,
            command_builder,
            decomp_builder,
            matcher,
            searcher,
            printer,
        }
    }

    /// Forcefully use JSON to emit statistics, even if the underlying printer
    /// is not the JSON printer.
    ///
    /// This is useful for implementing flag combinations like
    /// `--json --quiet`, which uses the summary printer for implementing
    /// `--quiet` but still wants to emit summary statistics, which should
    /// be JSON formatted because of the `--json` flag.
    pub fn json_stats(&mut self, yes: bool) -> &mut SearchWorkerBuilder {
        self.config.json_stats = yes;
        self
    }

    /// Set the path to a preprocessor command.
    ///
    /// When this is set, instead of searching files directly, the given
    /// command will be run with the file path as the first argument, and the
    /// output of that command will be searched instead.
    pub fn preprocessor(
        &mut self,
        cmd: Option<PathBuf>,
    ) -> &mut SearchWorkerBuilder {
        self.config.preprocessor = cmd;
        self
    }

    /// Set the globs for determining which files should be run through the
    /// preprocessor. By default, with no globs and a preprocessor specified,
    /// every file is run through the preprocessor.
    pub fn preprocessor_globs(
        &mut self,
        globs: Override,
    ) -> &mut SearchWorkerBuilder {
        self.config.preprocessor_globs = globs;
        self
    }

    /// Enable the decompression and searching of common compressed files.
    ///
    /// When enabled, if a particular file path is recognized as a compressed
    /// file, then it is decompressed before searching.
    ///
    /// Note that if a preprocessor command is set, then it overrides this
    /// setting.
    pub fn search_zip(&mut self, yes: bool) -> &mut SearchWorkerBuilder {
        self.config.search_zip = yes;
        self
    }

    /// Set the binary detection that should be used when searching files
    /// found via a recursive directory search.
    ///
    /// Generally, this binary detection may be `BinaryDetection::quit` if
    /// we want to skip binary files completely.
    ///
    /// By default, no binary detection is performed.
    pub fn binary_detection_implicit(
        &mut self,
        detection: BinaryDetection,
    ) -> &mut SearchWorkerBuilder {
        self.config.binary_implicit = detection;
        self
    }

    /// Set the binary detection that should be used when searching files
    /// explicitly supplied by an end user.
    ///
    /// Generally, this binary detection should NOT be `BinaryDetection::quit`,
    /// since we never want to automatically filter files supplied by the end
    /// user.
    ///
    /// By default, no binary detection is performed.
    pub fn binary_detection_explicit(
        &mut self,
        detection: BinaryDetection,
    ) -> &mut SearchWorkerBuilder {
        self.config.binary_explicit = detection;
        self
    }
}

/// The result of executing a search.
///
/// Generally speaking, the "result" of a search is sent to a printer, which
/// writes results to an underlying writer such as stdout or a file. However,
/// every search also has some aggregate statistics or metadata that may be
/// useful to higher level routines.
#[derive(Clone, Debug, Default)]
pub struct SearchResult {
    has_match: bool,
    stats: Option<Stats>,
}

impl SearchResult {
    /// Whether the search found a match or not.
    pub fn has_match(&self) -> bool {
        self.has_match
    }

    /// Return aggregate search statistics for a single search, if available.
    ///
    /// It can be expensive to compute statistics, so these are only present
    /// if explicitly enabled in the printer provided by the caller.
    pub fn stats(&self) -> Option<&Stats> {
        self.stats.as_ref()
    }
}

/// The pattern matcher used by a search worker.
#[derive(Clone, Debug)]
pub enum PatternMatcher {
    RustRegex(RustRegexMatcher),
    #[cfg(feature = "pcre2")]
    PCRE2(PCRE2RegexMatcher),
}

/// The printer used by a search worker.
///
/// The `W` type parameter refers to the type of the underlying writer.
#[derive(Debug)]
pub enum Printer<W> {
    /// Use the standard printer, which supports the classic grep-like format.
    Standard(Standard<W>),
    /// Use the summary printer, which supports aggregate displays of search
    /// results.
    Summary(Summary<W>),
    /// A JSON printer, which emits results in the JSON Lines format.
    JSON(JSON<W>),
}

impl<W: WriteColor> Printer<W> {
    fn print_stats(
        &mut self,
        total_duration: Duration,
        stats: &Stats,
    ) -> io::Result<()> {
        match *self {
            Printer::JSON(_) => self.print_stats_json(total_duration, stats),
            Printer::Standard(_) | Printer::Summary(_) => {
                self.print_stats_human(total_duration, stats)
            }
        }
    }

    fn print_stats_human(
        &mut self,
        total_duration: Duration,
        stats: &Stats,
    ) -> io::Result<()> {
        write!(
            self.get_mut(),
            "
{matches} matches
{lines} matched lines
{searches_with_match} files contained matches
{searches} files searched
{bytes_printed} bytes printed
{bytes_searched} bytes searched
{search_time:0.6} seconds spent searching
{process_time:0.6} seconds
",
            matches = stats.matches(),
            lines = stats.matched_lines(),
            searches_with_match = stats.searches_with_match(),
            searches = stats.searches(),
            bytes_printed = stats.bytes_printed(),
            bytes_searched = stats.bytes_searched(),
            search_time = fractional_seconds(stats.elapsed()),
            process_time = fractional_seconds(total_duration)
        )
    }

    fn print_stats_json(
        &mut self,
        total_duration: Duration,
        stats: &Stats,
    ) -> io::Result<()> {
        // We specifically match the format laid out by the JSON printer in
        // the grep-printer crate. We simply "extend" it with the 'summary'
        // message type.
        let fractional = fractional_seconds(total_duration);
        json::to_writer(
            self.get_mut(),
            &json!({
                "type": "summary",
                "data": {
                    "stats": stats,
                    "elapsed_total": {
                        "secs": total_duration.as_secs(),
                        "nanos": total_duration.subsec_nanos(),
                        "human": format!("{:0.6}s", fractional),
                    },
                }
            }),
        )?;
        write!(self.get_mut(), "\n")
    }

    /// Return a mutable reference to the underlying printer's writer.
    pub fn get_mut(&mut self) -> &mut W {
        match *self {
            Printer::Standard(ref mut p) => p.get_mut(),
            Printer::Summary(ref mut p) => p.get_mut(),
            Printer::JSON(ref mut p) => p.get_mut(),
        }
    }
}

/// A worker for executing searches.
///
/// It is intended for a single worker to execute many searches, and is
/// generally intended to be used from a single thread. When searching using
/// multiple threads, it is better to create a new worker for each thread.
#[derive(Debug)]
pub struct SearchWorker<W> {
    config: Config,
    command_builder: cli::CommandReaderBuilder,
    decomp_builder: cli::DecompressionReaderBuilder,
    matcher: PatternMatcher,
    searcher: Searcher,
    printer: Printer<W>,
}

impl<W: WriteColor> SearchWorker<W> {
    /// Execute a search over the given subject.
    pub fn search(&mut self, subject: &Subject) -> io::Result<SearchResult> {
        let bin = if subject.is_explicit() {
            self.config.binary_explicit.clone()
        } else {
            self.config.binary_implicit.clone()
        };
        self.searcher.set_binary_detection(bin);

        let path = subject.path();
        if subject.is_stdin() {
            self.search_reader(path, io::stdin().lock())
        } else if self.should_preprocess(path) {
            self.search_preprocessor(path)
        } else if self.should_decompress(path) {
            self.search_decompress(path)
        } else {
            self.search_path(path)
        }
    }

    /// Return a mutable reference to the underlying printer.
    pub fn printer(&mut self) -> &mut Printer<W> {
        &mut self.printer
    }

    /// Print the given statistics to the underlying writer in a way that is
    /// consistent with this searcher's printer's format.
    ///
    /// While `Stats` contains a duration itself, this only corresponds to the
    /// time spent searching, whereas `total_duration` should roughly
    /// approximate the lifespan of the ripgrep process itself.
    pub fn print_stats(
        &mut self,
        total_duration: Duration,
        stats: &Stats,
    ) -> io::Result<()> {
        if self.config.json_stats {
            self.printer().print_stats_json(total_duration, stats)
        } else {
            self.printer().print_stats(total_duration, stats)
        }
    }

    /// Returns true if and only if the given file path should be
    /// decompressed before searching.
    fn should_decompress(&self, path: &Path) -> bool {
        if !self.config.search_zip {
            return false;
        }
        self.decomp_builder.get_matcher().has_command(path)
    }

    /// Returns true if and only if the given file path should be run through
    /// the preprocessor.
    fn should_preprocess(&self, path: &Path) -> bool {
        if !self.config.preprocessor.is_some() {
            return false;
        }
        if self.config.preprocessor_globs.is_empty() {
            return true;
        }
        !self.config.preprocessor_globs.matched(path, false).is_ignore()
    }

    /// Search the given file path by first asking the preprocessor for the
    /// data to search instead of opening the path directly.
    fn search_preprocessor(
        &mut self,
        path: &Path,
    ) -> io::Result<SearchResult> {
        let bin = self.config.preprocessor.as_ref().unwrap();
        let mut cmd = Command::new(bin);
        cmd.arg(path).stdin(Stdio::from(File::open(path)?));

        let rdr = self.command_builder.build(&mut cmd).map_err(|err| {
            io::Error::new(
                io::ErrorKind::Other,
                format!(
                    "preprocessor command could not start: '{:?}': {}",
                    cmd, err,
                ),
            )
        })?;
        self.search_reader(path, rdr).map_err(|err| {
            io::Error::new(
                io::ErrorKind::Other,
                format!("preprocessor command failed: '{:?}': {}", cmd, err),
            )
        })
    }

    /// Attempt to decompress the data at the given file path and search the
    /// result. If the given file path isn't recognized as a compressed file,
    /// then search it without doing any decompression.
    fn search_decompress(&mut self, path: &Path) -> io::Result<SearchResult> {
        let rdr = self.decomp_builder.build(path)?;
        self.search_reader(path, rdr)
    }

    /// Search the contents of the given file path.
    fn search_path(&mut self, path: &Path) -> io::Result<SearchResult> {
        use self::PatternMatcher::*;

        let (searcher, printer) = (&mut self.searcher, &mut self.printer);
        match self.matcher {
            RustRegex(ref m) => search_path(m, searcher, printer, path),
            #[cfg(feature = "pcre2")]
            PCRE2(ref m) => search_path(m, searcher, printer, path),
        }
    }

    /// Executes a search on the given reader, which may or may not correspond
    /// directly to the contents of the given file path. Instead, the reader
    /// may actually cause something else to be searched (for example, when
    /// a preprocessor is set or when decompression is enabled). In those
    /// cases, the file path is used for visual purposes only.
    ///
    /// Generally speaking, this method should only be used when there is no
    /// other choice. Searching via `search_path` provides more opportunities
    /// for optimizations (such as memory maps).
    fn search_reader<R: io::Read>(
        &mut self,
        path: &Path,
        rdr: R,
    ) -> io::Result<SearchResult> {
        use self::PatternMatcher::*;

        let (searcher, printer) = (&mut self.searcher, &mut self.printer);
        match self.matcher {
            RustRegex(ref m) => search_reader(m, searcher, printer, path, rdr),
            #[cfg(feature = "pcre2")]
            PCRE2(ref m) => search_reader(m, searcher, printer, path, rdr),
        }
    }
}

/// Search the contents of the given file path using the given matcher,
/// searcher and printer.
fn search_path<M: Matcher, W: WriteColor>(
    matcher: M,
    searcher: &mut Searcher,
    printer: &mut Printer<W>,
    path: &Path,
) -> io::Result<SearchResult> {
    match *printer {
        Printer::Standard(ref mut p) => {
            let mut sink = p.sink_with_path(&matcher, path);
            searcher.search_path(&matcher, path, &mut sink)?;
            Ok(SearchResult {
                has_match: sink.has_match(),
                stats: sink.stats().map(|s| s.clone()),
            })
        }
        Printer::Summary(ref mut p) => {
            let mut sink = p.sink_with_path(&matcher, path);
            searcher.search_path(&matcher, path, &mut sink)?;
            Ok(SearchResult {
                has_match: sink.has_match(),
                stats: sink.stats().map(|s| s.clone()),
            })
        }
        Printer::JSON(ref mut p) => {
            let mut sink = p.sink_with_path(&matcher, path);
            searcher.search_path(&matcher, path, &mut sink)?;
            Ok(SearchResult {
                has_match: sink.has_match(),
                stats: Some(sink.stats().clone()),
            })
        }
    }
}

/// Search the contents of the given reader using the given matcher, searcher
/// and printer.
fn search_reader<M: Matcher, R: io::Read, W: WriteColor>(
    matcher: M,
    searcher: &mut Searcher,
    printer: &mut Printer<W>,
    path: &Path,
    rdr: R,
) -> io::Result<SearchResult> {
    match *printer {
        Printer::Standard(ref mut p) => {
            let mut sink = p.sink_with_path(&matcher, path);
            searcher.search_reader(&matcher, rdr, &mut sink)?;
            Ok(SearchResult {
                has_match: sink.has_match(),
                stats: sink.stats().map(|s| s.clone()),
            })
        }
        Printer::Summary(ref mut p) => {
            let mut sink = p.sink_with_path(&matcher, path);
            searcher.search_reader(&matcher, rdr, &mut sink)?;
            Ok(SearchResult {
                has_match: sink.has_match(),
                stats: sink.stats().map(|s| s.clone()),
            })
        }
        Printer::JSON(ref mut p) => {
            let mut sink = p.sink_with_path(&matcher, path);
            searcher.search_reader(&matcher, rdr, &mut sink)?;
            Ok(SearchResult {
                has_match: sink.has_match(),
                stats: Some(sink.stats().clone()),
            })
        }
    }
}

/// Return the given duration as fractional seconds.
fn fractional_seconds(duration: Duration) -> f64 {
    (duration.as_secs() as f64) + (duration.subsec_nanos() as f64 * 1e-9)
}
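A configuration-only sketch of the builder above. The matcher, searcher and printer construction is elided because it comes from the `grep` crates, and the concrete choices here (the preprocessor path, the use of `BinaryDetection::quit`/`convert`) are illustrative assumptions rather than ripgrep's exact defaults:

```rust
use std::path::PathBuf;

use grep::searcher::BinaryDetection;
use ignore::overrides::Override;

fn configure(builder: &mut SearchWorkerBuilder) {
    builder
        // Skip binary files discovered by directory traversal...
        .binary_detection_implicit(BinaryDetection::quit(b'\x00'))
        // ...but never silently skip files the user named explicitly.
        .binary_detection_explicit(BinaryDetection::convert(b'\x00'))
        // Decompress recognized formats such as .gz before searching.
        .search_zip(true)
        // Illustrative: run every file through a pdftotext-style helper.
        .preprocessor(Some(PathBuf::from("/usr/bin/preprocess")))
        .preprocessor_globs(Override::empty());
}
```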
160  crates/core/subject.rs  Normal file
@@ -0,0 +1,160 @@
use std::path::Path;

use ignore::{self, DirEntry};
use log;

/// A configuration for describing how subjects should be built.
#[derive(Clone, Debug)]
struct Config {
    strip_dot_prefix: bool,
}

impl Default for Config {
    fn default() -> Config {
        Config { strip_dot_prefix: false }
    }
}

/// A builder for constructing things to search over.
#[derive(Clone, Debug)]
pub struct SubjectBuilder {
    config: Config,
}

impl SubjectBuilder {
    /// Return a new subject builder with a default configuration.
    pub fn new() -> SubjectBuilder {
        SubjectBuilder { config: Config::default() }
    }

    /// Create a new subject from a possibly missing directory entry.
    ///
    /// If the directory entry isn't present, then the corresponding error is
    /// logged if messages have been configured. Otherwise, if the subject is
    /// deemed searchable, then it is returned.
    pub fn build_from_result(
        &self,
        result: Result<DirEntry, ignore::Error>,
    ) -> Option<Subject> {
        match result {
            Ok(dent) => self.build(dent),
            Err(err) => {
                err_message!("{}", err);
                None
            }
        }
    }

    /// Create a new subject using this builder's configuration.
    ///
    /// If a subject could not be created or should otherwise not be searched,
    /// then this returns `None` after emitting any relevant log messages.
    pub fn build(&self, dent: DirEntry) -> Option<Subject> {
        let subj =
            Subject { dent, strip_dot_prefix: self.config.strip_dot_prefix };
        if let Some(ignore_err) = subj.dent.error() {
            ignore_message!("{}", ignore_err);
        }
        // If this entry was explicitly provided by an end user, then we always
        // want to search it.
        if subj.is_explicit() {
            return Some(subj);
        }
        // At this point, we only want to search something if it's explicitly a
        // file. This omits symlinks. (If ripgrep was configured to follow
        // symlinks, then they have already been followed by the directory
        // traversal.)
        if subj.is_file() {
            return Some(subj);
        }
        // We got nothin. Emit a debug message, but only if this isn't a
        // directory. Otherwise, emitting messages for directories is just
        // noisy.
        if !subj.is_dir() {
            log::debug!(
                "ignoring {}: failed to pass subject filter: \
                 file type: {:?}, metadata: {:?}",
                subj.dent.path().display(),
                subj.dent.file_type(),
                subj.dent.metadata()
            );
        }
        None
    }

    /// When enabled, if the subject's file path starts with `./` then it is
    /// stripped.
    ///
    /// This is useful when implicitly searching the current working directory.
    pub fn strip_dot_prefix(&mut self, yes: bool) -> &mut SubjectBuilder {
        self.config.strip_dot_prefix = yes;
        self
    }
}

/// A subject is a thing we want to search. Generally, a subject is either a
/// file or stdin.
#[derive(Clone, Debug)]
pub struct Subject {
    dent: DirEntry,
    strip_dot_prefix: bool,
}

impl Subject {
    /// Return the file path corresponding to this subject.
    ///
    /// If this subject corresponds to stdin, then a special `<stdin>` path
    /// is returned instead.
    pub fn path(&self) -> &Path {
        if self.strip_dot_prefix && self.dent.path().starts_with("./") {
            self.dent.path().strip_prefix("./").unwrap()
        } else {
            self.dent.path()
        }
    }

    /// Returns true if and only if this entry corresponds to stdin.
    pub fn is_stdin(&self) -> bool {
        self.dent.is_stdin()
    }

    /// Returns true if and only if this entry corresponds to a subject to
    /// search that was explicitly supplied by an end user.
    ///
    /// Generally, this corresponds to either stdin or an explicit file path
    /// argument. e.g., in `rg foo some-file ./some-dir/`, `some-file` is
    /// an explicit subject, but, e.g., `./some-dir/some-other-file` is not.
    ///
    /// However, note that ripgrep does not see through shell globbing. e.g.,
    /// in `rg foo ./some-dir/*`, `./some-dir/some-other-file` will be treated
    /// as an explicit subject.
    pub fn is_explicit(&self) -> bool {
        // stdin is obvious. When an entry has a depth of 0, that means it
        // was explicitly provided to our directory iterator, which means it
        // was in turn explicitly provided by the end user. The !is_dir check
        // means that we want to search files even if they're symlinks, again,
        // because they were explicitly provided. (And we never want to try
        // to search a directory.)
        self.is_stdin() || (self.dent.depth() == 0 && !self.is_dir())
    }

    /// Returns true if and only if this subject points to a directory after
    /// following symbolic links.
    fn is_dir(&self) -> bool {
        let ft = match self.dent.file_type() {
            None => return false,
            Some(ft) => ft,
        };
        if ft.is_dir() {
            return true;
        }
        // If this is a symlink, then we want to follow it to determine
        // whether it's a directory or not.
        self.dent.path_is_symlink() && self.dent.path().is_dir()
    }

    /// Returns true if and only if this subject points to a file.
    fn is_file(&self) -> bool {
        self.dent.file_type().map_or(false, |ft| ft.is_file())
    }
}
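A short sketch tying the builder above to the `ignore` walker it is fed from (single-threaded variant; in ripgrep itself the walker comes from `Args::walker`, so the direct `WalkBuilder` use here is an assumption for illustration):

```rust
use ignore::WalkBuilder;

fn list_subjects(dir: &str) {
    let mut builder = SubjectBuilder::new();
    // Drop the leading "./" that an implicit current-directory search adds.
    builder.strip_dot_prefix(true);

    for result in WalkBuilder::new(dir).build() {
        // Errors are reported via err_message! inside build_from_result;
        // anything unsearchable simply yields None.
        if let Some(subject) = builder.build_from_result(result) {
            println!("{}", subject.path().display());
        }
    }
}
```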
@@ -1,6 +1,6 @@
 [package]
 name = "globset"
-version = "0.1.4" #:version
+version = "0.4.5" #:version
 authors = ["Andrew Gallant <jamslam@gmail.com>"]
 description = """
 Cross platform single glob and glob set matching. Glob set matching is the
@@ -8,8 +8,8 @@ process of matching one or more glob patterns against a single candidate path
 simultaneously, and returning all of the globs that matched.
 """
 documentation = "https://docs.rs/globset"
-homepage = "https://github.com/BurntSushi/ripgrep/tree/master/globset"
-repository = "https://github.com/BurntSushi/ripgrep/tree/master/globset"
+homepage = "https://github.com/BurntSushi/ripgrep/tree/master/crates/globset"
+repository = "https://github.com/BurntSushi/ripgrep/tree/master/crates/globset"
 readme = "README.md"
 keywords = ["regex", "glob", "multiple", "set", "pattern"]
 license = "Unlicense/MIT"
@@ -19,14 +19,18 @@ name = "globset"
 bench = false

 [dependencies]
-aho-corasick = "0.6.0"
-fnv = "1.0"
-log = "0.3"
-memchr = "1"
-regex = "0.2.1"
+aho-corasick = "0.7.3"
+bstr = { version = "0.2.0", default-features = false, features = ["std"] }
+fnv = "1.0.6"
+log = "0.4.5"
+regex = "1.1.5"
+serde = { version = "1.0.104", optional = true }

 [dev-dependencies]
-glob = "0.2"
+glob = "0.3.0"
+lazy_static = "1"
+serde_json = "1.0.45"

 [features]
-simd-accel = ["regex/simd-accel"]
+simd-accel = []
+serde1 = ["serde"]
@@ -4,7 +4,7 @@ Cross platform single glob and glob set matching. Glob set matching is the
 process of matching one or more glob patterns against a single candidate path
 simultaneously, and returning all of the globs that matched.

-[![Linux build status](https://img.shields.io/travis/BurntSushi/ripgrep/master.svg)](https://travis-ci.org/BurntSushi/ripgrep)
+[![Linux build status](https://img.shields.io/travis/BurntSushi/ripgrep/master.svg)](https://travis-ci.org/BurntSushi/ripgrep)
 [![Windows build status](https://img.shields.io/appveyor/ci/BurntSushi/ripgrep/master.svg)](https://ci.appveyor.com/project/BurntSushi/ripgrep)
 [![](https://img.shields.io/crates/v/globset.svg)](https://crates.io/crates/globset)

@@ -20,7 +20,7 @@ Add this to your `Cargo.toml`:

 ```toml
 [dependencies]
-globset = "0.1"
+globset = "0.3"
 ```

 and this to your crate root:
@@ -29,6 +29,10 @@ and this to your crate root:
 extern crate globset;
 ```

+### Features
+
+* `serde1`: Enables implementing Serde traits on the `Glob` type.
+
 ### Example: one glob

 This example shows how to match a single glob against a single file path.
@@ -36,7 +40,7 @@ This example shows how to match a single glob against a single file path.
 ```rust
 use globset::Glob;

-let glob = try!(Glob::new("*.rs")).compile_matcher();
+let glob = Glob::new("*.rs")?.compile_matcher();

 assert!(glob.is_match("foo.rs"));
 assert!(glob.is_match("foo/bar.rs"));
@@ -51,8 +55,8 @@ semantics. In this example, we prevent wildcards from matching path separators.
 ```rust
 use globset::GlobBuilder;

-let glob = try!(GlobBuilder::new("*.rs")
-    .literal_separator(true).build()).compile_matcher();
+let glob = GlobBuilder::new("*.rs")
+    .literal_separator(true).build()?.compile_matcher();

 assert!(glob.is_match("foo.rs"));
 assert!(!glob.is_match("foo/bar.rs")); // no longer matches
@@ -69,10 +73,10 @@ use globset::{Glob, GlobSetBuilder};
 let mut builder = GlobSetBuilder::new();
 // A GlobBuilder can be used to configure each glob's match semantics
 // independently.
-builder.add(try!(Glob::new("*.rs")));
-builder.add(try!(Glob::new("src/lib.rs")));
-builder.add(try!(Glob::new("src/**/foo.rs")));
-let set = try!(builder.build());
+builder.add(Glob::new("*.rs")?);
+builder.add(Glob::new("src/lib.rs")?);
+builder.add(Glob::new("src/**/foo.rs")?);
+let set = builder.build()?;

 assert_eq!(set.matches("src/bar/baz/foo.rs"), vec![0, 2]);
 ```
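As a concrete illustration of the comment in the glob-set example above, each glob added to a `GlobSetBuilder` can carry its own options via `GlobBuilder`. A small sketch; the patterns and paths are only illustrative:

```rust
use globset::{GlobBuilder, GlobSetBuilder};

fn main() -> Result<(), globset::Error> {
    let mut builder = GlobSetBuilder::new();
    // This glob is case insensitive...
    builder.add(GlobBuilder::new("*.md").case_insensitive(true).build()?);
    // ...while this one keeps `*` from crossing path separators.
    builder.add(GlobBuilder::new("*.rs").literal_separator(true).build()?);
    let set = builder.build()?;

    assert_eq!(set.matches("README.MD"), vec![0]);
    assert!(set.matches("src/lib.rs").is_empty()); // `*` does not match `/`
    Ok(())
}
```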
@@ -6,14 +6,9 @@ tool itself, see the benchsuite directory.
|
||||
|
||||
extern crate glob;
|
||||
extern crate globset;
|
||||
#[macro_use]
|
||||
extern crate lazy_static;
|
||||
extern crate regex;
|
||||
extern crate test;
|
||||
|
||||
use std::ffi::OsStr;
|
||||
use std::path::Path;
|
||||
|
||||
use globset::{Candidate, Glob, GlobMatcher, GlobSet, GlobSetBuilder};
|
||||
|
||||
const EXT: &'static str = "some/a/bigger/path/to/the/crazy/needle.txt";
|
||||
@@ -1,15 +1,14 @@
|
||||
use std::ffi::{OsStr, OsString};
|
||||
use std::fmt;
|
||||
use std::hash;
|
||||
use std::iter;
|
||||
use std::ops::{Deref, DerefMut};
|
||||
use std::path::{Path, is_separator};
|
||||
use std::path::{is_separator, Path};
|
||||
use std::str;
|
||||
|
||||
use regex;
|
||||
use regex::bytes::Regex;
|
||||
|
||||
use {Candidate, Error, new_regex};
|
||||
use {new_regex, Candidate, Error, ErrorKind};
|
||||
|
||||
/// Describes a matching strategy for a particular pattern.
|
||||
///
|
||||
@@ -28,7 +27,7 @@ pub enum MatchStrategy {
|
||||
BasenameLiteral(String),
|
||||
/// A pattern matches if and only if the file path's extension matches this
|
||||
/// literal string.
|
||||
Extension(OsString),
|
||||
Extension(String),
|
||||
/// A pattern matches if and only if this prefix literal is a prefix of the
|
||||
/// candidate file path.
|
||||
Prefix(String),
|
||||
@@ -47,7 +46,7 @@ pub enum MatchStrategy {
|
||||
/// extension. Note that this is a necessary but NOT sufficient criterion.
|
||||
/// Namely, if the extension matches, then a full regex search is still
|
||||
/// required.
|
||||
RequiredExtension(OsString),
|
||||
RequiredExtension(String),
|
||||
/// A regex needs to be used for matching.
|
||||
Regex,
|
||||
}
|
||||
@@ -86,16 +85,16 @@ pub struct Glob {
|
||||
}
|
||||
|
||||
impl PartialEq for Glob {
|
||||
fn eq(&self, other: &Glob) -> bool {
|
||||
self.glob == other.glob && self.opts == other.opts
|
||||
}
|
||||
fn eq(&self, other: &Glob) -> bool {
|
||||
self.glob == other.glob && self.opts == other.opts
|
||||
}
|
||||
}
|
||||
|
||||
impl hash::Hash for Glob {
|
||||
fn hash<H: hash::Hasher>(&self, state: &mut H) {
|
||||
self.glob.hash(state);
|
||||
self.opts.hash(state);
|
||||
}
|
||||
fn hash<H: hash::Hasher>(&self, state: &mut H) {
|
||||
self.glob.hash(state);
|
||||
self.opts.hash(state);
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Display for Glob {
|
||||
@@ -104,6 +103,14 @@ impl fmt::Display for Glob {
|
||||
}
|
||||
}
|
||||
|
||||
impl str::FromStr for Glob {
|
||||
type Err = Error;
|
||||
|
||||
fn from_str(glob: &str) -> Result<Self, Self::Err> {
|
||||
Self::new(glob)
|
||||
}
|
||||
}
|
||||
|
||||
/// A matcher for a single pattern.
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct GlobMatcher {
|
||||
@@ -123,6 +130,11 @@ impl GlobMatcher {
|
||||
pub fn is_match_candidate(&self, path: &Candidate) -> bool {
|
||||
self.re.is_match(&path.path)
|
||||
}
|
||||
|
||||
/// Returns the `Glob` used to compile this matcher.
|
||||
pub fn glob(&self) -> &Glob {
|
||||
&self.pat
|
||||
}
|
||||
}
|
||||
|
||||
/// A strategic matcher for a single pattern.
|
||||
@@ -154,7 +166,7 @@ impl GlobStrategic {
|
||||
lit.as_bytes() == &*candidate.basename
|
||||
}
|
||||
MatchStrategy::Extension(ref ext) => {
|
||||
candidate.ext == ext
|
||||
ext.as_bytes() == &*candidate.ext
|
||||
}
|
||||
MatchStrategy::Prefix(ref pre) => {
|
||||
starts_with(pre.as_bytes(), byte_path)
|
||||
@@ -166,7 +178,8 @@ impl GlobStrategic {
|
||||
ends_with(suffix.as_bytes(), byte_path)
|
||||
}
|
||||
MatchStrategy::RequiredExtension(ref ext) => {
|
||||
candidate.ext == ext && self.re.is_match(byte_path)
|
||||
let ext = ext.as_bytes();
|
||||
&*candidate.ext == ext && self.re.is_match(byte_path)
|
||||
}
|
||||
MatchStrategy::Regex => self.re.is_match(byte_path),
|
||||
}
|
||||
@@ -187,13 +200,26 @@ pub struct GlobBuilder<'a> {
|
||||
opts: GlobOptions,
|
||||
}
|
||||
|
||||
#[derive(Clone, Copy, Debug, Default, Eq, Hash, PartialEq)]
|
||||
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
|
||||
struct GlobOptions {
|
||||
/// Whether to match case insensitively.
|
||||
case_insensitive: bool,
|
||||
/// Whether to require a literal separator to match a separator in a file
|
||||
/// path. e.g., when enabled, `*` won't match `/`.
|
||||
literal_separator: bool,
|
||||
/// Whether or not to use `\` to escape special characters.
|
||||
/// e.g., when enabled, `\*` will match a literal `*`.
|
||||
backslash_escape: bool,
|
||||
}
|
||||
|
||||
impl GlobOptions {
|
||||
fn default() -> GlobOptions {
|
||||
GlobOptions {
|
||||
case_insensitive: false,
|
||||
literal_separator: false,
|
||||
backslash_escape: !is_separator('\\'),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, Default, Eq, PartialEq)]
|
||||
@@ -201,11 +227,15 @@ struct Tokens(Vec<Token>);
|
||||
|
||||
impl Deref for Tokens {
|
||||
type Target = Vec<Token>;
|
||||
fn deref(&self) -> &Vec<Token> { &self.0 }
|
||||
fn deref(&self) -> &Vec<Token> {
|
||||
&self.0
|
||||
}
|
||||
}
|
||||
|
||||
impl DerefMut for Tokens {
|
||||
fn deref_mut(&mut self) -> &mut Vec<Token> { &mut self.0 }
|
||||
fn deref_mut(&mut self) -> &mut Vec<Token> {
|
||||
&mut self.0
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, Eq, PartialEq)]
|
||||
@@ -216,10 +246,7 @@ enum Token {
|
||||
RecursivePrefix,
|
||||
RecursiveSuffix,
|
||||
RecursiveZeroOrMore,
|
||||
Class {
|
||||
negated: bool,
|
||||
ranges: Vec<(char, char)>,
|
||||
},
|
||||
Class { negated: bool, ranges: Vec<(char, char)> },
|
||||
Alternates(Vec<Tokens>),
|
||||
}
|
||||
|
||||
@@ -231,12 +258,9 @@ impl Glob {
|
||||
|
||||
/// Returns a matcher for this pattern.
|
||||
pub fn compile_matcher(&self) -> GlobMatcher {
|
||||
let re = new_regex(&self.re)
|
||||
.expect("regex compilation shouldn't fail");
|
||||
GlobMatcher {
|
||||
pat: self.clone(),
|
||||
re: re,
|
||||
}
|
||||
let re =
|
||||
new_regex(&self.re).expect("regex compilation shouldn't fail");
|
||||
GlobMatcher { pat: self.clone(), re: re }
|
||||
}
|
||||
|
||||
/// Returns a strategic matcher.
|
||||
@@ -247,13 +271,9 @@ impl Glob {
|
||||
#[cfg(test)]
|
||||
fn compile_strategic_matcher(&self) -> GlobStrategic {
|
||||
let strategy = MatchStrategy::new(self);
|
||||
let re = new_regex(&self.re)
|
||||
.expect("regex compilation shouldn't fail");
|
||||
GlobStrategic {
|
||||
strategy: strategy,
|
||||
pat: self.clone(),
|
||||
re: re,
|
||||
}
|
||||
let re =
|
||||
new_regex(&self.re).expect("regex compilation shouldn't fail");
|
||||
GlobStrategic { strategy: strategy, pat: self.clone(), re: re }
|
||||
}
|
||||
|
||||
/// Returns the original glob pattern used to build this pattern.
|
||||
@@ -262,6 +282,19 @@ impl Glob {
|
||||
}
|
||||
|
||||
/// Returns the regular expression string for this glob.
|
||||
///
|
||||
/// Note that regular expressions for globs are intended to be matched on
|
||||
/// arbitrary bytes (`&[u8]`) instead of Unicode strings (`&str`). In
|
||||
/// particular, globs are frequently used on file paths, where there is no
|
||||
/// general guarantee that file paths are themselves valid UTF-8. As a
|
||||
/// result, callers will need to ensure that they are using a regex API
|
||||
/// that can match on arbitrary bytes. For example, the
|
||||
/// [`regex`](https://crates.io/regex)
|
||||
/// crate's
|
||||
/// [`Regex`](https://docs.rs/regex/*/regex/struct.Regex.html)
|
||||
/// API is not suitable for this since it matches on `&str`, but its
|
||||
/// [`bytes::Regex`](https://docs.rs/regex/*/regex/bytes/struct.Regex.html)
|
||||
/// API is suitable for this.
|
||||
pub fn regex(&self) -> &str {
|
||||
&self.re
|
||||
}
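A short sketch of the pairing described in the doc comment above: feed the glob's regex string into `regex::bytes::Regex` so matching happens on raw path bytes. The pattern and paths here are illustrative:

```rust
use globset::GlobBuilder;
use regex::bytes::Regex;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let glob = GlobBuilder::new("*.rs").literal_separator(true).build()?;
    // `Glob::regex` returns a pattern meant for byte-oriented matching,
    // so pair it with `regex::bytes::Regex` rather than `regex::Regex`.
    let re = Regex::new(glob.regex())?;
    assert!(re.is_match(b"foo.rs"));
    assert!(!re.is_match(b"foo/bar.rs"));
    Ok(())
}
```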
|
||||
@@ -295,7 +328,7 @@ impl Glob {
|
||||
/// std::path::Path::extension returns. Namely, this extension includes
|
||||
/// the '.'. Also, paths like `.rs` are considered to have an extension
|
||||
/// of `.rs`.
|
||||
fn ext(&self) -> Option<OsString> {
|
||||
fn ext(&self) -> Option<String> {
|
||||
if self.opts.case_insensitive {
|
||||
return None;
|
||||
}
|
||||
@@ -319,11 +352,11 @@ impl Glob {
|
||||
Some(&Token::Literal('.')) => {}
|
||||
_ => return None,
|
||||
}
|
||||
let mut lit = OsStr::new(".").to_os_string();
|
||||
let mut lit = ".".to_string();
|
||||
for t in self.tokens[start + 2..].iter() {
|
||||
match *t {
|
||||
Token::Literal('.') | Token::Literal('/') => return None,
|
||||
Token::Literal(c) => lit.push(c.to_string()),
|
||||
Token::Literal(c) => lit.push(c),
|
||||
_ => return None,
|
||||
}
|
||||
}
|
||||
@@ -337,7 +370,7 @@ impl Glob {
|
||||
/// This is like `ext`, but returns an extension even if it isn't sufficient
|
||||
/// to imply a match. Namely, if an extension is returned, then it is
|
||||
/// necessary but not sufficient for a match.
|
||||
fn required_ext(&self) -> Option<OsString> {
|
||||
fn required_ext(&self) -> Option<String> {
|
||||
if self.opts.case_insensitive {
|
||||
return None;
|
||||
}
|
||||
@@ -360,7 +393,7 @@ impl Glob {
|
||||
None
|
||||
} else {
|
||||
ext.reverse();
|
||||
Some(OsString::from(ext.into_iter().collect::<String>()))
|
||||
Some(ext.into_iter().collect())
|
||||
}
|
||||
}
|
||||
|
||||
@@ -498,7 +531,7 @@ impl Glob {
|
||||
| Token::RecursiveZeroOrMore => {
|
||||
return None;
|
||||
}
|
||||
Token::Class{..} | Token::Alternates(..) => {
|
||||
Token::Class { .. } | Token::Alternates(..) => {
|
||||
// We *could* be a little smarter here, but either one
|
||||
// of these is going to prevent our literal optimizations
|
||||
// anyway, so give up.
|
||||
@@ -509,7 +542,7 @@ impl Glob {
|
||||
Some(&self.tokens[start..])
|
||||
}
|
||||
|
||||
/// Returns the pattern as a literal if and only if the pattern exclusiely
|
||||
/// Returns the pattern as a literal if and only if the pattern exclusively
|
||||
/// matches the basename of a file path *and* is a literal.
|
||||
///
|
||||
/// The basic format of these patterns is `**/{literal}`, where `{literal}`
|
||||
@@ -535,25 +568,30 @@ impl<'a> GlobBuilder<'a> {
|
||||
///
|
||||
/// The pattern is not compiled until `build` is called.
|
||||
pub fn new(glob: &'a str) -> GlobBuilder<'a> {
|
||||
GlobBuilder {
|
||||
glob: glob,
|
||||
opts: GlobOptions::default(),
|
||||
}
|
||||
GlobBuilder { glob: glob, opts: GlobOptions::default() }
|
||||
}
|
||||
|
||||
/// Parses and builds the pattern.
|
||||
pub fn build(&self) -> Result<Glob, Error> {
|
||||
let mut p = Parser {
|
||||
glob: &self.glob,
|
||||
stack: vec![Tokens::default()],
|
||||
chars: self.glob.chars().peekable(),
|
||||
prev: None,
|
||||
cur: None,
|
||||
opts: &self.opts,
|
||||
};
|
||||
try!(p.parse());
|
||||
p.parse()?;
|
||||
if p.stack.is_empty() {
|
||||
Err(Error::UnopenedAlternates)
|
||||
Err(Error {
|
||||
glob: Some(self.glob.to_string()),
|
||||
kind: ErrorKind::UnopenedAlternates,
|
||||
})
|
||||
} else if p.stack.len() > 1 {
|
||||
Err(Error::UnclosedAlternates)
|
||||
Err(Error {
|
||||
glob: Some(self.glob.to_string()),
|
||||
kind: ErrorKind::UnclosedAlternates,
|
||||
})
|
||||
} else {
|
||||
let tokens = p.stack.pop().unwrap();
|
||||
Ok(Glob {
|
||||
@@ -578,6 +616,19 @@ impl<'a> GlobBuilder<'a> {
|
||||
self.opts.literal_separator = yes;
|
||||
self
|
||||
}
|
||||
|
||||
/// When enabled, a back slash (`\`) may be used to escape
|
||||
/// special characters in a glob pattern. Additionally, this will
|
||||
/// prevent `\` from being interpreted as a path separator on all
|
||||
/// platforms.
|
||||
///
|
||||
/// This is enabled by default on platforms where `\` is not a
|
||||
/// path separator and disabled by default on platforms where `\`
|
||||
/// is a path separator.
|
||||
pub fn backslash_escape(&mut self, yes: bool) -> &mut GlobBuilder<'a> {
|
||||
self.opts.backslash_escape = yes;
|
||||
self
|
||||
}
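To make the platform-dependent default described above concrete, the sketch below opts into backslash escaping explicitly; the pattern and paths are illustrative:

```rust
use globset::GlobBuilder;

fn main() -> Result<(), globset::Error> {
    // Force backslash escaping on, regardless of the platform default.
    let glob = GlobBuilder::new(r"foo\*.rs")
        .backslash_escape(true)
        .build()?
        .compile_matcher();

    assert!(glob.is_match("foo*.rs")); // `\*` matches a literal `*`
    assert!(!glob.is_match("foobar.rs")); // ...and no longer acts as a wildcard
    Ok(())
}
```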
|
||||
}
|
||||
|
||||
impl Tokens {
|
||||
@@ -698,30 +749,30 @@ fn bytes_to_escaped_literal(bs: &[u8]) -> String {
|
||||
}
|
||||
|
||||
struct Parser<'a> {
|
||||
glob: &'a str,
|
||||
stack: Vec<Tokens>,
|
||||
chars: iter::Peekable<str::Chars<'a>>,
|
||||
prev: Option<char>,
|
||||
cur: Option<char>,
|
||||
opts: &'a GlobOptions,
|
||||
}
|
||||
|
||||
impl<'a> Parser<'a> {
|
||||
fn error(&self, kind: ErrorKind) -> Error {
|
||||
Error { glob: Some(self.glob.to_string()), kind: kind }
|
||||
}
|
||||
|
||||
fn parse(&mut self) -> Result<(), Error> {
|
||||
while let Some(c) = self.bump() {
|
||||
match c {
|
||||
'?' => try!(self.push_token(Token::Any)),
|
||||
'*' => try!(self.parse_star()),
|
||||
'[' => try!(self.parse_class()),
|
||||
'{' => try!(self.push_alternate()),
|
||||
'}' => try!(self.pop_alternate()),
|
||||
',' => try!(self.parse_comma()),
|
||||
c => {
|
||||
if is_separator(c) {
|
||||
// Normalize all patterns to use / as a separator.
|
||||
try!(self.push_token(Token::Literal('/')))
|
||||
} else {
|
||||
try!(self.push_token(Token::Literal(c)))
|
||||
}
|
||||
}
|
||||
'?' => self.push_token(Token::Any)?,
|
||||
'*' => self.parse_star()?,
|
||||
'[' => self.parse_class()?,
|
||||
'{' => self.push_alternate()?,
|
||||
'}' => self.pop_alternate()?,
|
||||
',' => self.parse_comma()?,
|
||||
'\\' => self.parse_backslash()?,
|
||||
c => self.push_token(Token::Literal(c))?,
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
@@ -729,7 +780,7 @@ impl<'a> Parser<'a> {
|
||||
|
||||
fn push_alternate(&mut self) -> Result<(), Error> {
|
||||
if self.stack.len() > 1 {
|
||||
return Err(Error::NestedAlternates);
|
||||
return Err(self.error(ErrorKind::NestedAlternates));
|
||||
}
|
||||
Ok(self.stack.push(Tokens::default()))
|
||||
}
|
||||
@@ -743,22 +794,22 @@ impl<'a> Parser<'a> {
|
||||
}
|
||||
|
||||
fn push_token(&mut self, tok: Token) -> Result<(), Error> {
|
||||
match self.stack.last_mut() {
|
||||
None => Err(Error::UnopenedAlternates),
|
||||
Some(ref mut pat) => Ok(pat.push(tok)),
|
||||
if let Some(ref mut pat) = self.stack.last_mut() {
|
||||
return Ok(pat.push(tok));
|
||||
}
|
||||
Err(self.error(ErrorKind::UnopenedAlternates))
|
||||
}
|
||||
|
||||
fn pop_token(&mut self) -> Result<Token, Error> {
|
||||
match self.stack.last_mut() {
|
||||
None => Err(Error::UnopenedAlternates),
|
||||
Some(ref mut pat) => Ok(pat.pop().unwrap()),
|
||||
if let Some(ref mut pat) = self.stack.last_mut() {
|
||||
return Ok(pat.pop().unwrap());
|
||||
}
|
||||
Err(self.error(ErrorKind::UnopenedAlternates))
|
||||
}
|
||||
|
||||
fn have_tokens(&self) -> Result<bool, Error> {
|
||||
match self.stack.last() {
|
||||
None => Err(Error::UnopenedAlternates),
|
||||
None => Err(self.error(ErrorKind::UnopenedAlternates)),
|
||||
Some(ref pat) => Ok(!pat.is_empty()),
|
||||
}
|
||||
}
|
||||
@@ -774,62 +825,106 @@ impl<'a> Parser<'a> {
|
||||
}
|
||||
}
|
||||
|
||||
fn parse_backslash(&mut self) -> Result<(), Error> {
|
||||
if self.opts.backslash_escape {
|
||||
match self.bump() {
|
||||
None => Err(self.error(ErrorKind::DanglingEscape)),
|
||||
Some(c) => self.push_token(Token::Literal(c)),
|
||||
}
|
||||
} else if is_separator('\\') {
|
||||
// Normalize all patterns to use / as a separator.
|
||||
self.push_token(Token::Literal('/'))
|
||||
} else {
|
||||
self.push_token(Token::Literal('\\'))
|
||||
}
|
||||
}
|
||||
|
||||
fn parse_star(&mut self) -> Result<(), Error> {
|
||||
let prev = self.prev;
|
||||
if self.chars.peek() != Some(&'*') {
|
||||
try!(self.push_token(Token::ZeroOrMore));
|
||||
if self.peek() != Some('*') {
|
||||
self.push_token(Token::ZeroOrMore)?;
|
||||
return Ok(());
|
||||
}
|
||||
assert!(self.bump() == Some('*'));
|
||||
if !try!(self.have_tokens()) {
|
||||
try!(self.push_token(Token::RecursivePrefix));
|
||||
let next = self.bump();
|
||||
if !next.map(is_separator).unwrap_or(true) {
|
||||
return Err(Error::InvalidRecursive);
|
||||
if !self.have_tokens()? {
|
||||
if !self.peek().map_or(true, is_separator) {
|
||||
self.push_token(Token::ZeroOrMore)?;
|
||||
self.push_token(Token::ZeroOrMore)?;
|
||||
} else {
|
||||
self.push_token(Token::RecursivePrefix)?;
|
||||
assert!(self.bump().map_or(true, is_separator));
|
||||
}
|
||||
return Ok(());
|
||||
}
|
||||
try!(self.pop_token());
|
||||
|
||||
if !prev.map(is_separator).unwrap_or(false) {
|
||||
if self.stack.len() <= 1
|
||||
|| (prev != Some(',') && prev != Some('{')) {
|
||||
return Err(Error::InvalidRecursive);
|
||||
|| (prev != Some(',') && prev != Some('{'))
|
||||
{
|
||||
self.push_token(Token::ZeroOrMore)?;
|
||||
self.push_token(Token::ZeroOrMore)?;
|
||||
return Ok(());
|
||||
}
|
||||
}
|
||||
match self.chars.peek() {
|
||||
let is_suffix = match self.peek() {
|
||||
None => {
|
||||
assert!(self.bump().is_none());
|
||||
self.push_token(Token::RecursiveSuffix)
|
||||
true
|
||||
}
|
||||
Some(&',') | Some(&'}') if self.stack.len() >= 2 => {
|
||||
self.push_token(Token::RecursiveSuffix)
|
||||
}
|
||||
Some(&c) if is_separator(c) => {
|
||||
Some(',') | Some('}') if self.stack.len() >= 2 => true,
|
||||
Some(c) if is_separator(c) => {
|
||||
assert!(self.bump().map(is_separator).unwrap_or(false));
|
||||
self.push_token(Token::RecursiveZeroOrMore)
|
||||
false
|
||||
}
|
||||
_ => {
|
||||
self.push_token(Token::ZeroOrMore)?;
|
||||
self.push_token(Token::ZeroOrMore)?;
|
||||
return Ok(());
|
||||
}
|
||||
};
|
||||
match self.pop_token()? {
|
||||
Token::RecursivePrefix => {
|
||||
self.push_token(Token::RecursivePrefix)?;
|
||||
}
|
||||
Token::RecursiveSuffix => {
|
||||
self.push_token(Token::RecursiveSuffix)?;
|
||||
}
|
||||
_ => {
|
||||
if is_suffix {
|
||||
self.push_token(Token::RecursiveSuffix)?;
|
||||
} else {
|
||||
self.push_token(Token::RecursiveZeroOrMore)?;
|
||||
}
|
||||
}
|
||||
_ => Err(Error::InvalidRecursive),
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn parse_class(&mut self) -> Result<(), Error> {
|
||||
fn add_to_last_range(
|
||||
glob: &str,
|
||||
r: &mut (char, char),
|
||||
add: char,
|
||||
) -> Result<(), Error> {
|
||||
r.1 = add;
|
||||
if r.1 < r.0 {
|
||||
Err(Error::InvalidRange(r.0, r.1))
|
||||
Err(Error {
|
||||
glob: Some(glob.to_string()),
|
||||
kind: ErrorKind::InvalidRange(r.0, r.1),
|
||||
})
|
||||
} else {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
let mut negated = false;
|
||||
let mut ranges = vec![];
|
||||
if self.chars.peek() == Some(&'!') {
|
||||
assert!(self.bump() == Some('!'));
|
||||
negated = true;
|
||||
}
|
||||
let negated = match self.chars.peek() {
|
||||
Some(&'!') | Some(&'^') => {
|
||||
let bump = self.bump();
|
||||
assert!(bump == Some('!') || bump == Some('^'));
|
||||
true
|
||||
}
|
||||
_ => false,
|
||||
};
|
||||
let mut first = true;
|
||||
let mut in_range = false;
|
||||
loop {
|
||||
@@ -837,7 +932,7 @@ impl<'a> Parser<'a> {
|
||||
Some(c) => c,
|
||||
// The only way to successfully break this loop is to observe
|
||||
// a ']'.
|
||||
None => return Err(Error::UnclosedClass),
|
||||
None => return Err(self.error(ErrorKind::UnclosedClass)),
|
||||
};
|
||||
match c {
|
||||
']' => {
|
||||
@@ -854,7 +949,7 @@ impl<'a> Parser<'a> {
|
||||
// invariant: in_range is only set when there is
|
||||
// already at least one character seen.
|
||||
let r = ranges.last_mut().unwrap();
|
||||
try!(add_to_last_range(r, '-'));
|
||||
add_to_last_range(&self.glob, r, '-')?;
|
||||
in_range = false;
|
||||
} else {
|
||||
assert!(!ranges.is_empty());
|
||||
@@ -865,7 +960,11 @@ impl<'a> Parser<'a> {
|
||||
if in_range {
|
||||
// invariant: in_range is only set when there is
|
||||
// already at least one character seen.
|
||||
try!(add_to_last_range(ranges.last_mut().unwrap(), c));
|
||||
add_to_last_range(
|
||||
&self.glob,
|
||||
ranges.last_mut().unwrap(),
|
||||
c,
|
||||
)?;
|
||||
} else {
|
||||
ranges.push((c, c));
|
||||
}
|
||||
@@ -879,10 +978,7 @@ impl<'a> Parser<'a> {
|
||||
// it as a literal.
|
||||
ranges.push(('-', '-'));
|
||||
}
|
||||
self.push_token(Token::Class {
|
||||
negated: negated,
|
||||
ranges: ranges,
|
||||
})
|
||||
self.push_token(Token::Class { negated: negated, ranges: ranges })
|
||||
}
|
||||
|
||||
fn bump(&mut self) -> Option<char> {
|
||||
@@ -890,6 +986,10 @@ impl<'a> Parser<'a> {
|
||||
self.cur = self.chars.next();
|
||||
self.cur
|
||||
}
|
||||
|
||||
fn peek(&mut self) -> Option<char> {
|
||||
self.chars.peek().map(|&ch| ch)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
@@ -907,16 +1007,15 @@ fn ends_with(needle: &[u8], haystack: &[u8]) -> bool {
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::ffi::{OsStr, OsString};
|
||||
|
||||
use {GlobSetBuilder, Error};
|
||||
use super::{Glob, GlobBuilder, Token};
|
||||
use super::Token::*;
|
||||
use super::{Glob, GlobBuilder, Token};
|
||||
use {ErrorKind, GlobSetBuilder};
|
||||
|
||||
#[derive(Clone, Copy, Debug, Default)]
|
||||
struct Options {
|
||||
casei: bool,
|
||||
litsep: bool,
|
||||
casei: Option<bool>,
|
||||
litsep: Option<bool>,
|
||||
bsesc: Option<bool>,
|
||||
}
|
||||
|
||||
macro_rules! syntax {
|
||||
@@ -926,7 +1025,7 @@ mod tests {
|
||||
let pat = Glob::new($pat).unwrap();
|
||||
assert_eq!($tokens, pat.tokens.0);
|
||||
}
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
macro_rules! syntaxerr {
|
||||
@@ -934,9 +1033,9 @@ mod tests {
|
||||
#[test]
|
||||
fn $name() {
|
||||
let err = Glob::new($pat).unwrap_err();
|
||||
assert_eq!($err, err);
|
||||
assert_eq!(&$err, err.kind());
|
||||
}
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
macro_rules! toregex {
|
||||
@@ -946,11 +1045,17 @@ mod tests {
|
||||
($name:ident, $pat:expr, $re:expr, $options:expr) => {
|
||||
#[test]
|
||||
fn $name() {
|
||||
let pat = GlobBuilder::new($pat)
|
||||
.case_insensitive($options.casei)
|
||||
.literal_separator($options.litsep)
|
||||
.build()
|
||||
.unwrap();
|
||||
let mut builder = GlobBuilder::new($pat);
|
||||
if let Some(casei) = $options.casei {
|
||||
builder.case_insensitive(casei);
|
||||
}
|
||||
if let Some(litsep) = $options.litsep {
|
||||
builder.literal_separator(litsep);
|
||||
}
|
||||
if let Some(bsesc) = $options.bsesc {
|
||||
builder.backslash_escape(bsesc);
|
||||
}
|
||||
let pat = builder.build().unwrap();
|
||||
assert_eq!(format!("(?-u){}", $re), pat.regex());
|
||||
}
|
||||
};
|
||||
@@ -963,11 +1068,17 @@ mod tests {
|
||||
($name:ident, $pat:expr, $path:expr, $options:expr) => {
|
||||
#[test]
|
||||
fn $name() {
|
||||
let pat = GlobBuilder::new($pat)
|
||||
.case_insensitive($options.casei)
|
||||
.literal_separator($options.litsep)
|
||||
.build()
|
||||
.unwrap();
|
||||
let mut builder = GlobBuilder::new($pat);
|
||||
if let Some(casei) = $options.casei {
|
||||
builder.case_insensitive(casei);
|
||||
}
|
||||
if let Some(litsep) = $options.litsep {
|
||||
builder.literal_separator(litsep);
|
||||
}
|
||||
if let Some(bsesc) = $options.bsesc {
|
||||
builder.backslash_escape(bsesc);
|
||||
}
|
||||
let pat = builder.build().unwrap();
|
||||
let matcher = pat.compile_matcher();
|
||||
let strategic = pat.compile_strategic_matcher();
|
||||
let set = GlobSetBuilder::new().add(pat).build().unwrap();
|
||||
@@ -985,11 +1096,17 @@ mod tests {
|
||||
($name:ident, $pat:expr, $path:expr, $options:expr) => {
|
||||
#[test]
|
||||
fn $name() {
|
||||
let pat = GlobBuilder::new($pat)
|
||||
.case_insensitive($options.casei)
|
||||
.literal_separator($options.litsep)
|
||||
.build()
|
||||
.unwrap();
|
||||
let mut builder = GlobBuilder::new($pat);
|
||||
if let Some(casei) = $options.casei {
|
||||
builder.case_insensitive(casei);
|
||||
}
|
||||
if let Some(litsep) = $options.litsep {
|
||||
builder.literal_separator(litsep);
|
||||
}
|
||||
if let Some(bsesc) = $options.bsesc {
|
||||
builder.backslash_escape(bsesc);
|
||||
}
|
||||
let pat = builder.build().unwrap();
|
||||
let matcher = pat.compile_matcher();
|
||||
let strategic = pat.compile_strategic_matcher();
|
||||
let set = GlobSetBuilder::new().add(pat).build().unwrap();
|
||||
@@ -1000,8 +1117,9 @@ mod tests {
|
||||
};
|
||||
}
|
||||
|
||||
fn s(string: &str) -> String { string.to_string() }
|
||||
fn os(string: &str) -> OsString { OsStr::new(string).to_os_string() }
|
||||
fn s(string: &str) -> String {
|
||||
string.to_string()
|
||||
}
|
||||
|
||||
fn class(s: char, e: char) -> Token {
|
||||
Class { negated: false, ranges: vec![(s, e)] }
|
||||
@@ -1025,16 +1143,20 @@ mod tests {
|
||||
syntax!(any2, "a?b", vec![Literal('a'), Any, Literal('b')]);
|
||||
syntax!(seq1, "*", vec![ZeroOrMore]);
|
||||
syntax!(seq2, "a*b", vec![Literal('a'), ZeroOrMore, Literal('b')]);
|
||||
syntax!(seq3, "*a*b*", vec![
|
||||
ZeroOrMore, Literal('a'), ZeroOrMore, Literal('b'), ZeroOrMore,
|
||||
]);
|
||||
syntax!(
|
||||
seq3,
|
||||
"*a*b*",
|
||||
vec![ZeroOrMore, Literal('a'), ZeroOrMore, Literal('b'), ZeroOrMore,]
|
||||
);
|
||||
syntax!(rseq1, "**", vec![RecursivePrefix]);
|
||||
syntax!(rseq2, "**/", vec![RecursivePrefix]);
|
||||
syntax!(rseq3, "/**", vec![RecursiveSuffix]);
|
||||
syntax!(rseq4, "/**/", vec![RecursiveZeroOrMore]);
|
||||
syntax!(rseq5, "a/**/b", vec![
|
||||
Literal('a'), RecursiveZeroOrMore, Literal('b'),
|
||||
]);
|
||||
syntax!(
|
||||
rseq5,
|
||||
"a/**/b",
|
||||
vec![Literal('a'), RecursiveZeroOrMore, Literal('b'),]
|
||||
);
|
||||
syntax!(cls1, "[a]", vec![class('a', 'a')]);
|
||||
syntax!(cls2, "[!a]", vec![classn('a', 'a')]);
|
||||
syntax!(cls3, "[a-z]", vec![class('a', 'z')]);
|
||||
@@ -1046,9 +1168,11 @@ mod tests {
|
||||
syntax!(cls9, "[a-]", vec![rclass(&[('a', 'a'), ('-', '-')])]);
|
||||
syntax!(cls10, "[-a-z]", vec![rclass(&[('-', '-'), ('a', 'z')])]);
|
||||
syntax!(cls11, "[a-z-]", vec![rclass(&[('a', 'z'), ('-', '-')])]);
|
||||
syntax!(cls12, "[-a-z-]", vec![
|
||||
rclass(&[('-', '-'), ('a', 'z'), ('-', '-')]),
|
||||
]);
|
||||
syntax!(
|
||||
cls12,
|
||||
"[-a-z-]",
|
||||
vec![rclass(&[('-', '-'), ('a', 'z'), ('-', '-')]),]
|
||||
);
|
||||
syntax!(cls13, "[]-z]", vec![class(']', 'z')]);
|
||||
syntax!(cls14, "[--z]", vec![class('-', 'z')]);
|
||||
syntax!(cls15, "[ --]", vec![class(' ', '-')]);
|
||||
@@ -1056,29 +1180,24 @@ mod tests {
|
||||
syntax!(cls17, "[a-z0-9]", vec![rclass(&[('a', 'z'), ('0', '9')])]);
|
||||
syntax!(cls18, "[!0-9a-z]", vec![rclassn(&[('0', '9'), ('a', 'z')])]);
|
||||
syntax!(cls19, "[!a-z0-9]", vec![rclassn(&[('a', 'z'), ('0', '9')])]);
|
||||
syntax!(cls20, "[^a]", vec![classn('a', 'a')]);
|
||||
syntax!(cls21, "[^a-z]", vec![classn('a', 'z')]);
|
||||
|
||||
syntaxerr!(err_rseq1, "a**", Error::InvalidRecursive);
|
||||
syntaxerr!(err_rseq2, "**a", Error::InvalidRecursive);
|
||||
syntaxerr!(err_rseq3, "a**b", Error::InvalidRecursive);
|
||||
syntaxerr!(err_rseq4, "***", Error::InvalidRecursive);
|
||||
syntaxerr!(err_rseq5, "/a**", Error::InvalidRecursive);
|
||||
syntaxerr!(err_rseq6, "/**a", Error::InvalidRecursive);
|
||||
syntaxerr!(err_rseq7, "/a**b", Error::InvalidRecursive);
|
||||
syntaxerr!(err_unclosed1, "[", Error::UnclosedClass);
|
||||
syntaxerr!(err_unclosed2, "[]", Error::UnclosedClass);
|
||||
syntaxerr!(err_unclosed3, "[!", Error::UnclosedClass);
|
||||
syntaxerr!(err_unclosed4, "[!]", Error::UnclosedClass);
|
||||
syntaxerr!(err_range1, "[z-a]", Error::InvalidRange('z', 'a'));
|
||||
syntaxerr!(err_range2, "[z--]", Error::InvalidRange('z', '-'));
|
||||
syntaxerr!(err_unclosed1, "[", ErrorKind::UnclosedClass);
|
||||
syntaxerr!(err_unclosed2, "[]", ErrorKind::UnclosedClass);
|
||||
syntaxerr!(err_unclosed3, "[!", ErrorKind::UnclosedClass);
|
||||
syntaxerr!(err_unclosed4, "[!]", ErrorKind::UnclosedClass);
|
||||
syntaxerr!(err_range1, "[z-a]", ErrorKind::InvalidRange('z', 'a'));
|
||||
syntaxerr!(err_range2, "[z--]", ErrorKind::InvalidRange('z', '-'));
|
||||
|
||||
const CASEI: Options = Options {
|
||||
casei: true,
|
||||
litsep: false,
|
||||
};
|
||||
const SLASHLIT: Options = Options {
|
||||
casei: false,
|
||||
litsep: true,
|
||||
};
|
||||
const CASEI: Options =
|
||||
Options { casei: Some(true), litsep: None, bsesc: None };
|
||||
const SLASHLIT: Options =
|
||||
Options { casei: None, litsep: Some(true), bsesc: None };
|
||||
const NOBSESC: Options =
|
||||
Options { casei: None, litsep: None, bsesc: Some(false) };
|
||||
const BSESC: Options =
|
||||
Options { casei: None, litsep: None, bsesc: Some(true) };
|
||||
|
||||
toregex!(re_casei, "a", "(?i)^a$", &CASEI);
|
||||
|
||||
@@ -1095,8 +1214,30 @@ mod tests {
|
||||
toregex!(re8, "[*]", r"^[\*]$");
|
||||
toregex!(re9, "[+]", r"^[\+]$");
|
||||
toregex!(re10, "+", r"^\+$");
|
||||
toregex!(re11, "**", r"^.*$");
|
||||
toregex!(re12, "☃", r"^\xe2\x98\x83$");
|
||||
toregex!(re11, "☃", r"^\xe2\x98\x83$");
|
||||
toregex!(re12, "**", r"^.*$");
|
||||
toregex!(re13, "**/", r"^.*$");
|
||||
toregex!(re14, "**/*", r"^(?:/?|.*/).*$");
|
||||
toregex!(re15, "**/**", r"^.*$");
|
||||
toregex!(re16, "**/**/*", r"^(?:/?|.*/).*$");
|
||||
toregex!(re17, "**/**/**", r"^.*$");
|
||||
toregex!(re18, "**/**/**/*", r"^(?:/?|.*/).*$");
|
||||
toregex!(re19, "a/**", r"^a(?:/?|/.*)$");
|
||||
toregex!(re20, "a/**/**", r"^a(?:/?|/.*)$");
|
||||
toregex!(re21, "a/**/**/**", r"^a(?:/?|/.*)$");
|
||||
toregex!(re22, "a/**/b", r"^a(?:/|/.*/)b$");
|
||||
toregex!(re23, "a/**/**/b", r"^a(?:/|/.*/)b$");
|
||||
toregex!(re24, "a/**/**/**/b", r"^a(?:/|/.*/)b$");
|
||||
toregex!(re25, "**/b", r"^(?:/?|.*/)b$");
|
||||
toregex!(re26, "**/**/b", r"^(?:/?|.*/)b$");
|
||||
toregex!(re27, "**/**/**/b", r"^(?:/?|.*/)b$");
|
||||
toregex!(re28, "a**", r"^a.*.*$");
|
||||
toregex!(re29, "**a", r"^.*.*a$");
|
||||
toregex!(re30, "a**b", r"^a.*.*b$");
|
||||
toregex!(re31, "***", r"^.*.*.*$");
|
||||
toregex!(re32, "/a**", r"^/a.*.*$");
|
||||
toregex!(re33, "/**a", r"^/.*.*a$");
|
||||
toregex!(re34, "/a**b", r"^/a.*.*b$");
|
||||
|
||||
matches!(match1, "a", "a");
|
||||
matches!(match2, "a*b", "a_b");
|
||||
@@ -1133,6 +1274,7 @@ mod tests {
|
||||
matches!(matchrec22, ".*/**", ".abc/abc");
|
||||
matches!(matchrec23, "foo/**", "foo");
|
||||
matches!(matchrec24, "**/foo/bar", "foo/bar");
|
||||
matches!(matchrec25, "some/*/needle.txt", "some/one/needle.txt");
|
||||
|
||||
matches!(matchrange1, "a[0-9]b", "a0b");
|
||||
matches!(matchrange2, "a[0-9]b", "a9b");
|
||||
@@ -1145,6 +1287,7 @@ mod tests {
|
||||
matches!(matchrange9, "[-a-c]", "b");
|
||||
matches!(matchrange10, "[a-c-]", "b");
|
||||
matches!(matchrange11, "[-]", "-");
|
||||
matches!(matchrange12, "a[^0-9]b", "a_b");
|
||||
|
||||
matches!(matchpat1, "*hello.txt", "hello.txt");
|
||||
matches!(matchpat2, "*hello.txt", "gareth_says_hello.txt");
|
||||
@@ -1152,8 +1295,11 @@ mod tests {
|
||||
matches!(matchpat4, "*hello.txt", "some\\path\\to\\hello.txt");
|
||||
matches!(matchpat5, "*hello.txt", "/an/absolute/path/to/hello.txt");
|
||||
matches!(matchpat6, "*some/path/to/hello.txt", "some/path/to/hello.txt");
|
||||
matches!(matchpat7, "*some/path/to/hello.txt",
|
||||
"a/bigger/some/path/to/hello.txt");
|
||||
matches!(
|
||||
matchpat7,
|
||||
"*some/path/to/hello.txt",
|
||||
"a/bigger/some/path/to/hello.txt"
|
||||
);
|
||||
|
||||
matches!(matchescape, "_[[]_[]]_[?]_[*]_!_", "_[_]_?_*_!_");
|
||||
|
||||
@@ -1188,6 +1334,17 @@ mod tests {
|
||||
#[cfg(not(unix))]
|
||||
matches!(matchslash5, "abc\\def", "abc/def", SLASHLIT);
|
||||
|
||||
matches!(matchbackslash1, "\\[", "[", BSESC);
|
||||
matches!(matchbackslash2, "\\?", "?", BSESC);
|
||||
matches!(matchbackslash3, "\\*", "*", BSESC);
|
||||
matches!(matchbackslash4, "\\[a-z]", "\\a", NOBSESC);
|
||||
matches!(matchbackslash5, "\\?", "\\a", NOBSESC);
|
||||
matches!(matchbackslash6, "\\*", "\\\\", NOBSESC);
|
||||
#[cfg(unix)]
|
||||
matches!(matchbackslash7, "\\a", "a");
|
||||
#[cfg(not(unix))]
|
||||
matches!(matchbackslash8, "\\a", "/a");
|
||||
|
||||
nmatches!(matchnot1, "a*b*c", "abcd");
|
||||
nmatches!(matchnot2, "abc*abc*abc", "abcabcabcabcabcabcabca");
|
||||
nmatches!(matchnot3, "some/**/needle.txt", "some/other/notthis.txt");
|
||||
@@ -1205,30 +1362,63 @@ mod tests {
|
||||
nmatches!(matchnot15, "[!-]", "-");
|
||||
nmatches!(matchnot16, "*hello.txt", "hello.txt-and-then-some");
|
||||
nmatches!(matchnot17, "*hello.txt", "goodbye.txt");
|
||||
nmatches!(matchnot18, "*some/path/to/hello.txt",
|
||||
"some/path/to/hello.txt-and-then-some");
|
||||
nmatches!(matchnot19, "*some/path/to/hello.txt",
|
||||
"some/other/path/to/hello.txt");
|
||||
nmatches!(
|
||||
matchnot18,
|
||||
"*some/path/to/hello.txt",
|
||||
"some/path/to/hello.txt-and-then-some"
|
||||
);
|
||||
nmatches!(
|
||||
matchnot19,
|
||||
"*some/path/to/hello.txt",
|
||||
"some/other/path/to/hello.txt"
|
||||
);
|
||||
nmatches!(matchnot20, "a", "foo/a");
|
||||
nmatches!(matchnot21, "./foo", "foo");
|
||||
nmatches!(matchnot22, "**/foo", "foofoo");
|
||||
nmatches!(matchnot23, "**/foo/bar", "foofoo/bar");
|
||||
nmatches!(matchnot24, "/*.c", "mozilla-sha1/sha1.c");
|
||||
nmatches!(matchnot25, "*.c", "mozilla-sha1/sha1.c", SLASHLIT);
|
||||
nmatches!(matchnot26, "**/m4/ltoptions.m4",
|
||||
"csharp/src/packages/repositories.config", SLASHLIT);
|
||||
nmatches!(
|
||||
matchnot26,
|
||||
"**/m4/ltoptions.m4",
|
||||
"csharp/src/packages/repositories.config",
|
||||
SLASHLIT
|
||||
);
|
||||
nmatches!(matchnot27, "a[^0-9]b", "a0b");
|
||||
nmatches!(matchnot28, "a[^0-9]b", "a9b");
|
||||
nmatches!(matchnot29, "[^-]", "-");
|
||||
nmatches!(matchnot30, "some/*/needle.txt", "some/needle.txt");
|
||||
nmatches!(
|
||||
matchrec31,
|
||||
"some/*/needle.txt",
|
||||
"some/one/two/needle.txt",
|
||||
SLASHLIT
|
||||
);
|
||||
nmatches!(
|
||||
matchrec32,
|
||||
"some/*/needle.txt",
|
||||
"some/one/two/three/needle.txt",
|
||||
SLASHLIT
|
||||
);
|
||||
|
||||
macro_rules! extract {
|
||||
($which:ident, $name:ident, $pat:expr, $expect:expr) => {
|
||||
extract!($which, $name, $pat, $expect, Options::default());
|
||||
};
|
||||
($which:ident, $name:ident, $pat:expr, $expect:expr, $opts:expr) => {
|
||||
($which:ident, $name:ident, $pat:expr, $expect:expr, $options:expr) => {
|
||||
#[test]
|
||||
fn $name() {
|
||||
let pat = GlobBuilder::new($pat)
|
||||
.case_insensitive($opts.casei)
|
||||
.literal_separator($opts.litsep)
|
||||
.build().unwrap();
|
||||
let mut builder = GlobBuilder::new($pat);
|
||||
if let Some(casei) = $options.casei {
|
||||
builder.case_insensitive(casei);
|
||||
}
|
||||
if let Some(litsep) = $options.litsep {
|
||||
builder.literal_separator(litsep);
|
||||
}
|
||||
if let Some(bsesc) = $options.bsesc {
|
||||
builder.backslash_escape(bsesc);
|
||||
}
|
||||
let pat = builder.build().unwrap();
|
||||
assert_eq!($expect, pat.$which());
|
||||
}
|
||||
};
|
||||
@@ -1271,33 +1461,41 @@ mod tests {
|
||||
literal!(extract_lit7, "foo/bar", Some(s("foo/bar")));
|
||||
literal!(extract_lit8, "**/foo/bar", None);
|
||||
|
||||
basetokens!(extract_basetoks1, "**/foo", Some(&*vec![
|
||||
Literal('f'), Literal('o'), Literal('o'),
|
||||
]));
|
||||
basetokens!(
|
||||
extract_basetoks1,
|
||||
"**/foo",
|
||||
Some(&*vec![Literal('f'), Literal('o'), Literal('o'),])
|
||||
);
|
||||
basetokens!(extract_basetoks2, "**/foo", None, CASEI);
|
||||
basetokens!(extract_basetoks3, "**/foo", Some(&*vec![
|
||||
Literal('f'), Literal('o'), Literal('o'),
|
||||
]), SLASHLIT);
|
||||
basetokens!(
|
||||
extract_basetoks3,
|
||||
"**/foo",
|
||||
Some(&*vec![Literal('f'), Literal('o'), Literal('o'),]),
|
||||
SLASHLIT
|
||||
);
|
||||
basetokens!(extract_basetoks4, "*foo", None, SLASHLIT);
|
||||
basetokens!(extract_basetoks5, "*foo", None);
|
||||
basetokens!(extract_basetoks6, "**/fo*o", None);
|
||||
basetokens!(extract_basetoks7, "**/fo*o", Some(&*vec![
|
||||
Literal('f'), Literal('o'), ZeroOrMore, Literal('o'),
|
||||
]), SLASHLIT);
|
||||
basetokens!(
|
||||
extract_basetoks7,
|
||||
"**/fo*o",
|
||||
Some(&*vec![Literal('f'), Literal('o'), ZeroOrMore, Literal('o'),]),
|
||||
SLASHLIT
|
||||
);
|
||||
|
||||
ext!(extract_ext1, "**/*.rs", Some(os(".rs")));
|
||||
ext!(extract_ext1, "**/*.rs", Some(s(".rs")));
|
||||
ext!(extract_ext2, "**/*.rs.bak", None);
|
||||
ext!(extract_ext3, "*.rs", Some(os(".rs")));
|
||||
ext!(extract_ext3, "*.rs", Some(s(".rs")));
|
||||
ext!(extract_ext4, "a*.rs", None);
|
||||
ext!(extract_ext5, "/*.c", None);
|
||||
ext!(extract_ext6, "*.c", None, SLASHLIT);
|
||||
ext!(extract_ext7, "*.c", Some(os(".c")));
|
||||
ext!(extract_ext7, "*.c", Some(s(".c")));
|
||||
|
||||
required_ext!(extract_req_ext1, "*.rs", Some(os(".rs")));
|
||||
required_ext!(extract_req_ext2, "/foo/bar/*.rs", Some(os(".rs")));
|
||||
required_ext!(extract_req_ext3, "/foo/bar/*.rs", Some(os(".rs")));
|
||||
required_ext!(extract_req_ext4, "/foo/bar/.rs", Some(os(".rs")));
|
||||
required_ext!(extract_req_ext5, ".rs", Some(os(".rs")));
|
||||
required_ext!(extract_req_ext1, "*.rs", Some(s(".rs")));
|
||||
required_ext!(extract_req_ext2, "/foo/bar/*.rs", Some(s(".rs")));
|
||||
required_ext!(extract_req_ext3, "/foo/bar/*.rs", Some(s(".rs")));
|
||||
required_ext!(extract_req_ext4, "/foo/bar/.rs", Some(s(".rs")));
|
||||
required_ext!(extract_req_ext5, ".rs", Some(s(".rs")));
|
||||
required_ext!(extract_req_ext6, "./rs", None);
|
||||
required_ext!(extract_req_ext7, "foo", None);
|
||||
required_ext!(extract_req_ext8, ".foo/", None);
|
||||
@@ -22,7 +22,7 @@ This example shows how to match a single glob against a single file path.
|
||||
# fn example() -> Result<(), globset::Error> {
|
||||
use globset::Glob;
|
||||
|
||||
let glob = try!(Glob::new("*.rs")).compile_matcher();
|
||||
let glob = Glob::new("*.rs")?.compile_matcher();
|
||||
|
||||
assert!(glob.is_match("foo.rs"));
|
||||
assert!(glob.is_match("foo/bar.rs"));
|
||||
@@ -39,8 +39,8 @@ semantics. In this example, we prevent wildcards from matching path separators.
|
||||
# fn example() -> Result<(), globset::Error> {
|
||||
use globset::GlobBuilder;
|
||||
|
||||
let glob = try!(GlobBuilder::new("*.rs")
|
||||
.literal_separator(true).build()).compile_matcher();
|
||||
let glob = GlobBuilder::new("*.rs")
|
||||
.literal_separator(true).build()?.compile_matcher();
|
||||
|
||||
assert!(glob.is_match("foo.rs"));
|
||||
assert!(!glob.is_match("foo/bar.rs")); // no longer matches
|
||||
@@ -59,10 +59,10 @@ use globset::{Glob, GlobSetBuilder};
|
||||
let mut builder = GlobSetBuilder::new();
|
||||
// A GlobBuilder can be used to configure each glob's match semantics
|
||||
// independently.
|
||||
builder.add(try!(Glob::new("*.rs")));
|
||||
builder.add(try!(Glob::new("src/lib.rs")));
|
||||
builder.add(try!(Glob::new("src/**/foo.rs")));
|
||||
let set = try!(builder.build());
|
||||
builder.add(Glob::new("*.rs")?);
|
||||
builder.add(Glob::new("src/lib.rs")?);
|
||||
builder.add(Glob::new("src/**/foo.rs")?);
|
||||
let set = builder.build()?;
|
||||
|
||||
assert_eq!(set.matches("src/bar/baz/foo.rs"), vec![0, 2]);
|
||||
# Ok(()) } example().unwrap();
|
||||
@@ -91,6 +91,11 @@ Standard Unix-style glob syntax is supported:
|
||||
`[!ab]` to match any character except for `a` and `b`.
|
||||
* Metacharacters such as `*` and `?` can be escaped with character class
|
||||
notation. e.g., `[*]` matches `*`.
|
||||
* When backslash escapes are enabled, a backslash (`\`) will escape all meta
|
||||
characters in a glob. If it precedes a non-meta character, then the backslash is
|
||||
ignored. A `\\` will match a literal `\\`. Note that this mode is only
|
||||
enabled on Unix platforms by default, but can be enabled on any platform
|
||||
via the `backslash_escape` setting on `Glob`.
|
||||
|
||||
A `GlobBuilder` can be used to prevent wildcards from matching path separators,
|
||||
or to enable case insensitive matching.
|
||||
@@ -99,38 +104,56 @@ or to enable case insensitive matching.
|
||||
#![deny(missing_docs)]
|
||||
|
||||
extern crate aho_corasick;
|
||||
extern crate bstr;
|
||||
extern crate fnv;
|
||||
#[macro_use]
|
||||
extern crate log;
|
||||
extern crate memchr;
|
||||
extern crate regex;
|
||||
|
||||
#[cfg(feature = "serde1")]
|
||||
extern crate serde;
|
||||
|
||||
use std::borrow::Cow;
|
||||
use std::collections::{BTreeMap, HashMap};
|
||||
use std::error::Error as StdError;
|
||||
use std::ffi::{OsStr, OsString};
|
||||
use std::fmt;
|
||||
use std::hash;
|
||||
use std::path::Path;
|
||||
use std::str;
|
||||
|
||||
use aho_corasick::{Automaton, AcAutomaton, FullAcAutomaton};
|
||||
use aho_corasick::AhoCorasick;
|
||||
use bstr::{ByteSlice, ByteVec, B};
|
||||
use regex::bytes::{Regex, RegexBuilder, RegexSet};
|
||||
|
||||
use pathutil::{
|
||||
file_name, file_name_ext, normalize_path, os_str_bytes, path_bytes,
|
||||
};
|
||||
use glob::MatchStrategy;
|
||||
pub use glob::{Glob, GlobBuilder, GlobMatcher};
|
||||
use pathutil::{file_name, file_name_ext, normalize_path};
|
||||
|
||||
mod glob;
|
||||
mod pathutil;
|
||||
|
||||
#[cfg(feature = "serde1")]
|
||||
mod serde_impl;
|
||||
|
||||
/// Represents an error that can occur when parsing a glob pattern.
|
||||
#[derive(Clone, Debug, Eq, PartialEq)]
|
||||
pub enum Error {
|
||||
/// Occurs when a use of `**` is invalid. Namely, `**` can only appear
|
||||
/// adjacent to a path separator, or the beginning/end of a glob.
|
||||
pub struct Error {
|
||||
/// The original glob provided by the caller.
|
||||
glob: Option<String>,
|
||||
/// The kind of error.
|
||||
kind: ErrorKind,
|
||||
}
|
||||
|
||||
/// The kind of error that can occur when parsing a glob pattern.
|
||||
#[derive(Clone, Debug, Eq, PartialEq)]
|
||||
pub enum ErrorKind {
|
||||
/// **DEPRECATED**.
|
||||
///
|
||||
/// This error used to occur for consistency with git's glob specification,
|
||||
/// but the specification now accepts all uses of `**`. When `**` does not
|
||||
/// appear adjacent to a path separator or at the beginning/end of a glob,
|
||||
/// it is now treated as two consecutive `*` patterns. As such, this error
|
||||
/// is no longer used.
|
||||
InvalidRecursive,
|
||||
/// Occurs when a character class (e.g., `[abc]`) is not closed.
|
||||
UnclosedClass,
|
||||
@@ -145,52 +168,90 @@ pub enum Error {
|
||||
/// Occurs when an alternating group is nested inside another alternating
|
||||
/// group, e.g., `{{a,b},{c,d}}`.
|
||||
NestedAlternates,
|
||||
/// Occurs when an unescaped '\' is found at the end of a glob.
|
||||
DanglingEscape,
|
||||
/// An error associated with parsing or compiling a regex.
|
||||
Regex(String),
|
||||
/// Hints that destructuring should not be exhaustive.
|
||||
///
|
||||
/// This enum may grow additional variants, so this makes sure clients
|
||||
/// don't count on exhaustive matching. (Otherwise, adding a new variant
|
||||
/// could break existing code.)
|
||||
#[doc(hidden)]
|
||||
__Nonexhaustive,
|
||||
}
|
||||
|
||||
impl StdError for Error {
|
||||
fn description(&self) -> &str {
|
||||
self.kind.description()
|
||||
}
|
||||
}
|
||||
|
||||
impl Error {
|
||||
/// Return the glob that caused this error, if one exists.
|
||||
pub fn glob(&self) -> Option<&str> {
|
||||
self.glob.as_ref().map(|s| &**s)
|
||||
}
|
||||
|
||||
/// Return the kind of this error.
|
||||
pub fn kind(&self) -> &ErrorKind {
|
||||
&self.kind
|
||||
}
|
||||
}
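A brief sketch of how a caller can use the new accessors; `[abc` is just one example of a glob that fails to parse:

```rust
use globset::{ErrorKind, Glob};

fn main() {
    // "[abc" is missing its closing ']', so parsing fails.
    let err = Glob::new("[abc").unwrap_err();
    // The restructured error carries both the offending glob and a kind.
    assert_eq!(err.glob(), Some("[abc"));
    match err.kind() {
        ErrorKind::UnclosedClass => {}
        kind => panic!("unexpected error kind: {:?}", kind),
    }
    println!("{}", err);
}
```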
|
||||
|
||||
impl ErrorKind {
|
||||
fn description(&self) -> &str {
|
||||
match *self {
|
||||
Error::InvalidRecursive => {
|
||||
ErrorKind::InvalidRecursive => {
|
||||
"invalid use of **; must be one path component"
|
||||
}
|
||||
Error::UnclosedClass => {
|
||||
ErrorKind::UnclosedClass => {
|
||||
"unclosed character class; missing ']'"
|
||||
}
|
||||
Error::InvalidRange(_, _) => {
|
||||
"invalid character range"
|
||||
}
|
||||
Error::UnopenedAlternates => {
|
||||
ErrorKind::InvalidRange(_, _) => "invalid character range",
|
||||
ErrorKind::UnopenedAlternates => {
|
||||
"unopened alternate group; missing '{' \
|
||||
(maybe escape '}' with '[}]'?)"
|
||||
}
|
||||
Error::UnclosedAlternates => {
|
||||
ErrorKind::UnclosedAlternates => {
|
||||
"unclosed alternate group; missing '}' \
|
||||
(maybe escape '{' with '[{]'?)"
|
||||
}
|
||||
Error::NestedAlternates => {
|
||||
ErrorKind::NestedAlternates => {
|
||||
"nested alternate groups are not allowed"
|
||||
}
|
||||
Error::Regex(ref err) => err,
|
||||
ErrorKind::DanglingEscape => "dangling '\\'",
|
||||
ErrorKind::Regex(ref err) => err,
|
||||
ErrorKind::__Nonexhaustive => unreachable!(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Display for Error {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
match *self {
|
||||
Error::InvalidRecursive
|
||||
| Error::UnclosedClass
|
||||
| Error::UnopenedAlternates
|
||||
| Error::UnclosedAlternates
|
||||
| Error::NestedAlternates
|
||||
| Error::Regex(_) => {
|
||||
write!(f, "{}", self.description())
|
||||
match self.glob {
|
||||
None => self.kind.fmt(f),
|
||||
Some(ref glob) => {
|
||||
write!(f, "error parsing glob '{}': {}", glob, self.kind)
|
||||
}
|
||||
Error::InvalidRange(s, e) => {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Display for ErrorKind {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
match *self {
|
||||
ErrorKind::InvalidRecursive
|
||||
| ErrorKind::UnclosedClass
|
||||
| ErrorKind::UnopenedAlternates
|
||||
| ErrorKind::UnclosedAlternates
|
||||
| ErrorKind::NestedAlternates
|
||||
| ErrorKind::DanglingEscape
|
||||
| ErrorKind::Regex(_) => write!(f, "{}", self.description()),
|
||||
ErrorKind::InvalidRange(s, e) => {
|
||||
write!(f, "invalid range; '{}' > '{}'", s, e)
|
||||
}
|
||||
ErrorKind::__Nonexhaustive => unreachable!(),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -201,12 +262,21 @@ fn new_regex(pat: &str) -> Result<Regex, Error> {
|
||||
.size_limit(10 * (1 << 20))
|
||||
.dfa_size_limit(10 * (1 << 20))
|
||||
.build()
|
||||
.map_err(|err| Error::Regex(err.to_string()))
|
||||
.map_err(|err| Error {
|
||||
glob: Some(pat.to_string()),
|
||||
kind: ErrorKind::Regex(err.to_string()),
|
||||
})
|
||||
}
|
||||
|
||||
fn new_regex_set<I, S>(pats: I) -> Result<RegexSet, Error>
|
||||
where S: AsRef<str>, I: IntoIterator<Item=S> {
|
||||
RegexSet::new(pats).map_err(|err| Error::Regex(err.to_string()))
|
||||
where
|
||||
S: AsRef<str>,
|
||||
I: IntoIterator<Item = S>,
|
||||
{
|
||||
RegexSet::new(pats).map_err(|err| Error {
|
||||
glob: None,
|
||||
kind: ErrorKind::Regex(err.to_string()),
|
||||
})
|
||||
}
|
||||
|
||||
type Fnv = hash::BuildHasherDefault<fnv::FnvHasher>;
|
||||
@@ -220,12 +290,20 @@ pub struct GlobSet {
|
||||
}
|
||||
|
||||
impl GlobSet {
|
||||
/// Create an empty `GlobSet`. An empty set matches nothing.
|
||||
#[inline]
|
||||
pub fn empty() -> GlobSet {
|
||||
GlobSet { len: 0, strats: vec![] }
|
||||
}
|
||||
|
||||
/// Returns true if this set is empty, and therefore matches nothing.
|
||||
#[inline]
|
||||
pub fn is_empty(&self) -> bool {
|
||||
self.len == 0
|
||||
}
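A tiny usage sketch of the new constructor; `GlobSet::is_match` is part of the existing public API:

```rust
use globset::GlobSet;

fn main() {
    // An empty set is a convenient "match nothing" default.
    let set = GlobSet::empty();
    assert!(set.is_empty());
    assert_eq!(set.len(), 0);
    assert!(!set.is_match("any/path/at/all.txt"));
}
```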
|
||||
|
||||
/// Returns the number of globs in this set.
|
||||
#[inline]
|
||||
pub fn len(&self) -> usize {
|
||||
self.len
|
||||
}
|
||||
@@ -350,11 +428,17 @@ impl GlobSet {
|
||||
}
|
||||
}
|
||||
}
|
||||
debug!("built glob set; {} literals, {} basenames, {} extensions, \
|
||||
debug!(
|
||||
"built glob set; {} literals, {} basenames, {} extensions, \
|
||||
{} prefixes, {} suffixes, {} required extensions, {} regexes",
|
||||
lits.0.len(), base_lits.0.len(), exts.0.len(),
|
||||
prefixes.literals.len(), suffixes.literals.len(),
|
||||
required_exts.0.len(), regexes.literals.len());
|
||||
lits.0.len(),
|
||||
base_lits.0.len(),
|
||||
exts.0.len(),
|
||||
prefixes.literals.len(),
|
||||
suffixes.literals.len(),
|
||||
required_exts.0.len(),
|
||||
regexes.literals.len()
|
||||
);
|
||||
Ok(GlobSet {
|
||||
len: pats.len(),
|
||||
strats: vec![
|
||||
@@ -364,8 +448,9 @@ impl GlobSet {
|
||||
GlobSetMatchStrategy::Suffix(suffixes.suffix()),
|
||||
GlobSetMatchStrategy::Prefix(prefixes.prefix()),
|
||||
GlobSetMatchStrategy::RequiredExtension(
|
||||
try!(required_exts.build())),
|
||||
GlobSetMatchStrategy::Regex(try!(regexes.regex_set())),
|
||||
required_exts.build()?,
|
||||
),
|
||||
GlobSetMatchStrategy::Regex(regexes.regex_set()?),
|
||||
],
|
||||
})
|
||||
}
|
||||
@@ -373,6 +458,7 @@ impl GlobSet {
|
||||
|
||||
/// GlobSetBuilder builds a group of patterns that can be used to
|
||||
/// simultaneously match a file path.
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct GlobSetBuilder {
|
||||
pats: Vec<Glob>,
|
||||
}
|
||||
@@ -393,7 +479,6 @@ impl GlobSetBuilder {
|
||||
}
|
||||
|
||||
/// Add a new pattern to this set.
|
||||
#[allow(dead_code)]
|
||||
pub fn add(&mut self, pat: Glob) -> &mut GlobSetBuilder {
|
||||
self.pats.push(pat);
|
||||
self
|
||||
@@ -410,19 +495,16 @@ impl GlobSetBuilder {
|
||||
pub struct Candidate<'a> {
|
||||
path: Cow<'a, [u8]>,
|
||||
basename: Cow<'a, [u8]>,
|
||||
ext: &'a OsStr,
|
||||
ext: Cow<'a, [u8]>,
|
||||
}
|
||||
|
||||
impl<'a> Candidate<'a> {
|
||||
/// Create a new candidate for matching from the given path.
|
||||
pub fn new<P: AsRef<Path> + ?Sized>(path: &'a P) -> Candidate<'a> {
|
||||
let path = path.as_ref();
|
||||
let basename = file_name(path).unwrap_or(OsStr::new(""));
|
||||
Candidate {
|
||||
path: normalize_path(path_bytes(path)),
|
||||
basename: os_str_bytes(basename),
|
||||
ext: file_name_ext(basename).unwrap_or(OsStr::new("")),
|
||||
}
|
||||
let path = normalize_path(Vec::from_path_lossy(path.as_ref()));
|
||||
let basename = file_name(&path).unwrap_or(Cow::Borrowed(B("")));
|
||||
let ext = file_name_ext(&basename).unwrap_or(Cow::Borrowed(B("")));
|
||||
Candidate { path: path, basename: basename, ext: ext }
|
||||
}
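A small sketch of why `Candidate` is exposed: the caller pays the path-normalization cost once and can hand the same candidate to any number of matchers, here via `is_match_candidate` from earlier in this diff. The pattern and path are illustrative:

```rust
use globset::{Candidate, Glob};

fn main() -> Result<(), globset::Error> {
    let matcher = Glob::new("*.rs")?.compile_matcher();
    // Building the Candidate once lets the normalized path, basename, and
    // extension be reused across matchers instead of recomputed per call.
    let candidate = Candidate::new("src/lib.rs");
    assert!(matcher.is_match_candidate(&candidate));
    Ok(())
}
```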
|
||||
|
||||
fn path_prefix(&self, max: usize) -> &[u8] {
|
||||
@@ -494,12 +576,12 @@ impl LiteralStrategy {
|
||||
}
|
||||
|
||||
fn is_match(&self, candidate: &Candidate) -> bool {
|
||||
self.0.contains_key(&*candidate.path)
|
||||
self.0.contains_key(candidate.path.as_bytes())
|
||||
}
|
||||
|
||||
#[inline(never)]
|
||||
fn matches_into(&self, candidate: &Candidate, matches: &mut Vec<usize>) {
|
||||
if let Some(hits) = self.0.get(&*candidate.path) {
|
||||
if let Some(hits) = self.0.get(candidate.path.as_bytes()) {
|
||||
matches.extend(hits);
|
||||
}
|
||||
}
|
||||
@@ -521,7 +603,7 @@ impl BasenameLiteralStrategy {
|
||||
if candidate.basename.is_empty() {
|
||||
return false;
|
||||
}
|
||||
self.0.contains_key(&*candidate.basename)
|
||||
self.0.contains_key(candidate.basename.as_bytes())
|
||||
}
|
||||
|
||||
#[inline(never)]
|
||||
@@ -529,29 +611,29 @@ impl BasenameLiteralStrategy {
|
||||
if candidate.basename.is_empty() {
|
||||
return;
|
||||
}
|
||||
if let Some(hits) = self.0.get(&*candidate.basename) {
|
||||
if let Some(hits) = self.0.get(candidate.basename.as_bytes()) {
|
||||
matches.extend(hits);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
struct ExtensionStrategy(HashMap<OsString, Vec<usize>, Fnv>);
|
||||
struct ExtensionStrategy(HashMap<Vec<u8>, Vec<usize>, Fnv>);

impl ExtensionStrategy {
    fn new() -> ExtensionStrategy {
        ExtensionStrategy(HashMap::with_hasher(Fnv::default()))
    }

-   fn add(&mut self, global_index: usize, ext: OsString) {
-       self.0.entry(ext).or_insert(vec![]).push(global_index);
+   fn add(&mut self, global_index: usize, ext: String) {
+       self.0.entry(ext.into_bytes()).or_insert(vec![]).push(global_index);
    }

    fn is_match(&self, candidate: &Candidate) -> bool {
        if candidate.ext.is_empty() {
            return false;
        }
-       self.0.contains_key(candidate.ext)
+       self.0.contains_key(candidate.ext.as_bytes())
    }

    #[inline(never)]
@@ -559,7 +641,7 @@ impl ExtensionStrategy {
        if candidate.ext.is_empty() {
            return;
        }
-       if let Some(hits) = self.0.get(candidate.ext) {
+       if let Some(hits) = self.0.get(candidate.ext.as_bytes()) {
            matches.extend(hits);
        }
    }
@@ -567,7 +649,7 @@ impl ExtensionStrategy {

#[derive(Clone, Debug)]
struct PrefixStrategy {
-   matcher: FullAcAutomaton<Vec<u8>>,
+   matcher: AhoCorasick,
    map: Vec<usize>,
    longest: usize,
}
@@ -575,8 +657,8 @@ struct PrefixStrategy {
impl PrefixStrategy {
    fn is_match(&self, candidate: &Candidate) -> bool {
        let path = candidate.path_prefix(self.longest);
-       for m in self.matcher.find_overlapping(path) {
-           if m.start == 0 {
+       for m in self.matcher.find_overlapping_iter(path) {
+           if m.start() == 0 {
                return true;
            }
        }
@@ -585,9 +667,9 @@ impl PrefixStrategy {

    fn matches_into(&self, candidate: &Candidate, matches: &mut Vec<usize>) {
        let path = candidate.path_prefix(self.longest);
-       for m in self.matcher.find_overlapping(path) {
-           if m.start == 0 {
-               matches.push(self.map[m.pati]);
+       for m in self.matcher.find_overlapping_iter(path) {
+           if m.start() == 0 {
+               matches.push(self.map[m.pattern()]);
            }
        }
    }
@@ -595,7 +677,7 @@ impl PrefixStrategy {

#[derive(Clone, Debug)]
struct SuffixStrategy {
-   matcher: FullAcAutomaton<Vec<u8>>,
+   matcher: AhoCorasick,
    map: Vec<usize>,
    longest: usize,
}
@@ -603,8 +685,8 @@ struct SuffixStrategy {
impl SuffixStrategy {
    fn is_match(&self, candidate: &Candidate) -> bool {
        let path = candidate.path_suffix(self.longest);
-       for m in self.matcher.find_overlapping(path) {
-           if m.end == path.len() {
+       for m in self.matcher.find_overlapping_iter(path) {
+           if m.end() == path.len() {
                return true;
            }
        }
@@ -613,27 +695,27 @@ impl SuffixStrategy {

    fn matches_into(&self, candidate: &Candidate, matches: &mut Vec<usize>) {
        let path = candidate.path_suffix(self.longest);
-       for m in self.matcher.find_overlapping(path) {
-           if m.end == path.len() {
-               matches.push(self.map[m.pati]);
+       for m in self.matcher.find_overlapping_iter(path) {
+           if m.end() == path.len() {
+               matches.push(self.map[m.pattern()]);
            }
        }
    }
}

#[derive(Clone, Debug)]
-struct RequiredExtensionStrategy(HashMap<OsString, Vec<(usize, Regex)>, Fnv>);
+struct RequiredExtensionStrategy(HashMap<Vec<u8>, Vec<(usize, Regex)>, Fnv>);

impl RequiredExtensionStrategy {
    fn is_match(&self, candidate: &Candidate) -> bool {
        if candidate.ext.is_empty() {
            return false;
        }
-       match self.0.get(candidate.ext) {
+       match self.0.get(candidate.ext.as_bytes()) {
            None => false,
            Some(regexes) => {
                for &(_, ref re) in regexes {
-                   if re.is_match(&*candidate.path) {
+                   if re.is_match(candidate.path.as_bytes()) {
                        return true;
                    }
                }
@@ -647,9 +729,9 @@ impl RequiredExtensionStrategy {
        if candidate.ext.is_empty() {
            return;
        }
-       if let Some(regexes) = self.0.get(candidate.ext) {
+       if let Some(regexes) = self.0.get(candidate.ext.as_bytes()) {
            for &(global_index, ref re) in regexes {
-               if re.is_match(&*candidate.path) {
+               if re.is_match(candidate.path.as_bytes()) {
                    matches.push(global_index);
                }
            }
@@ -665,11 +747,11 @@ struct RegexSetStrategy {

impl RegexSetStrategy {
    fn is_match(&self, candidate: &Candidate) -> bool {
-       self.matcher.is_match(&*candidate.path)
+       self.matcher.is_match(candidate.path.as_bytes())
    }

    fn matches_into(&self, candidate: &Candidate, matches: &mut Vec<usize>) {
-       for i in self.matcher.matches(&*candidate.path) {
+       for i in self.matcher.matches(candidate.path.as_bytes()) {
            matches.push(self.map[i]);
        }
    }
@@ -684,11 +766,7 @@ struct MultiStrategyBuilder {

impl MultiStrategyBuilder {
    fn new() -> MultiStrategyBuilder {
-       MultiStrategyBuilder {
-           literals: vec![],
-           map: vec![],
-           longest: 0,
-       }
+       MultiStrategyBuilder { literals: vec![], map: vec![], longest: 0 }
    }

    fn add(&mut self, global_index: usize, literal: String) {
@@ -700,18 +778,16 @@ impl MultiStrategyBuilder {
    }

    fn prefix(self) -> PrefixStrategy {
-       let it = self.literals.into_iter().map(|s| s.into_bytes());
        PrefixStrategy {
-           matcher: AcAutomaton::new(it).into_full(),
+           matcher: AhoCorasick::new_auto_configured(&self.literals),
            map: self.map,
            longest: self.longest,
        }
    }

    fn suffix(self) -> SuffixStrategy {
-       let it = self.literals.into_iter().map(|s| s.into_bytes());
        SuffixStrategy {
-           matcher: AcAutomaton::new(it).into_full(),
+           matcher: AhoCorasick::new_auto_configured(&self.literals),
            map: self.map,
            longest: self.longest,
        }
@@ -719,7 +795,7 @@ impl MultiStrategyBuilder {

    fn regex_set(self) -> Result<RegexSetStrategy, Error> {
        Ok(RegexSetStrategy {
-           matcher: try!(new_regex_set(self.literals)),
+           matcher: new_regex_set(self.literals)?,
            map: self.map,
        })
    }
@@ -727,7 +803,7 @@ impl MultiStrategyBuilder {

#[derive(Clone, Debug)]
struct RequiredExtensionStrategyBuilder(
-   HashMap<OsString, Vec<(usize, String)>>,
+   HashMap<Vec<u8>, Vec<(usize, String)>>,
);

impl RequiredExtensionStrategyBuilder {
@@ -735,8 +811,11 @@ impl RequiredExtensionStrategyBuilder {
        RequiredExtensionStrategyBuilder(HashMap::new())
    }

-   fn add(&mut self, global_index: usize, ext: OsString, regex: String) {
-       self.0.entry(ext).or_insert(vec![]).push((global_index, regex));
+   fn add(&mut self, global_index: usize, ext: String, regex: String) {
+       self.0
+           .entry(ext.into_bytes())
+           .or_insert(vec![])
+           .push((global_index, regex));
    }

    fn build(self) -> Result<RequiredExtensionStrategy, Error> {
@@ -744,7 +823,7 @@ impl RequiredExtensionStrategyBuilder {
        for (ext, regexes) in self.0.into_iter() {
            exts.insert(ext.clone(), vec![]);
            for (global_index, regex) in regexes {
-               let compiled = try!(new_regex(&regex));
+               let compiled = new_regex(&regex)?;
                exts.get_mut(&ext).unwrap().push((global_index, compiled));
            }
        }
129 crates/globset/src/pathutil.rs (new file)
@@ -0,0 +1,129 @@
use std::borrow::Cow;

use bstr::{ByteSlice, ByteVec};

/// The final component of the path, if it is a normal file.
///
/// If the path terminates in ., .., or consists solely of a root or prefix,
/// file_name will return None.
pub fn file_name<'a>(path: &Cow<'a, [u8]>) -> Option<Cow<'a, [u8]>> {
    if path.is_empty() {
        return None;
    } else if path.last_byte() == Some(b'.') {
        return None;
    }
    let last_slash = path.rfind_byte(b'/').map(|i| i + 1).unwrap_or(0);
    Some(match *path {
        Cow::Borrowed(path) => Cow::Borrowed(&path[last_slash..]),
        Cow::Owned(ref path) => {
            let mut path = path.clone();
            path.drain_bytes(..last_slash);
            Cow::Owned(path)
        }
    })
}

/// Return a file extension given a path's file name.
///
/// Note that this does NOT match the semantics of std::path::Path::extension.
/// Namely, the extension includes the `.` and matching is otherwise more
/// liberal. Specifically, the extension is:
///
/// * None, if the file name given is empty;
/// * None, if there is no embedded `.`;
/// * Otherwise, the portion of the file name starting with the final `.`.
///
/// e.g., A file name of `.rs` has an extension `.rs`.
///
/// N.B. This is done to make certain glob match optimizations easier. Namely,
/// a pattern like `*.rs` is obviously trying to match files with a `rs`
/// extension, but it also matches files like `.rs`, which doesn't have an
/// extension according to std::path::Path::extension.
pub fn file_name_ext<'a>(name: &Cow<'a, [u8]>) -> Option<Cow<'a, [u8]>> {
    if name.is_empty() {
        return None;
    }
    let last_dot_at = match name.rfind_byte(b'.') {
        None => return None,
        Some(i) => i,
    };
    Some(match *name {
        Cow::Borrowed(name) => Cow::Borrowed(&name[last_dot_at..]),
        Cow::Owned(ref name) => {
            let mut name = name.clone();
            name.drain_bytes(..last_dot_at);
            Cow::Owned(name)
        }
    })
}

/// Normalizes a path to use `/` as a separator everywhere, even on platforms
/// that recognize other characters as separators.
#[cfg(unix)]
pub fn normalize_path(path: Cow<[u8]>) -> Cow<[u8]> {
    // UNIX only uses /, so we're good.
    path
}

/// Normalizes a path to use `/` as a separator everywhere, even on platforms
/// that recognize other characters as separators.
#[cfg(not(unix))]
pub fn normalize_path(mut path: Cow<[u8]>) -> Cow<[u8]> {
    use std::path::is_separator;

    for i in 0..path.len() {
        if path[i] == b'/' || !is_separator(path[i] as char) {
            continue;
        }
        path.to_mut()[i] = b'/';
    }
    path
}

#[cfg(test)]
mod tests {
    use std::borrow::Cow;

    use bstr::{ByteVec, B};

    use super::{file_name_ext, normalize_path};

    macro_rules! ext {
        ($name:ident, $file_name:expr, $ext:expr) => {
            #[test]
            fn $name() {
                let bs = Vec::from($file_name);
                let got = file_name_ext(&Cow::Owned(bs));
                assert_eq!($ext.map(|s| Cow::Borrowed(B(s))), got);
            }
        };
    }

    ext!(ext1, "foo.rs", Some(".rs"));
    ext!(ext2, ".rs", Some(".rs"));
    ext!(ext3, "..rs", Some(".rs"));
    ext!(ext4, "", None::<&str>);
    ext!(ext5, "foo", None::<&str>);

    macro_rules! normalize {
        ($name:ident, $path:expr, $expected:expr) => {
            #[test]
            fn $name() {
                let bs = Vec::from_slice($path);
                let got = normalize_path(Cow::Owned(bs));
                assert_eq!($expected.to_vec(), got.into_owned());
            }
        };
    }

    normalize!(normal1, b"foo", b"foo");
    normalize!(normal2, b"foo/bar", b"foo/bar");
    #[cfg(unix)]
    normalize!(normal3, b"foo\\bar", b"foo\\bar");
    #[cfg(not(unix))]
    normalize!(normal3, b"foo\\bar", b"foo/bar");
    #[cfg(unix)]
    normalize!(normal4, b"foo\\bar/baz", b"foo\\bar/baz");
    #[cfg(not(unix))]
    normalize!(normal4, b"foo\\bar/baz", b"foo/bar/baz");
}
38 crates/globset/src/serde_impl.rs (new file)
@@ -0,0 +1,38 @@
use serde::de::Error;
use serde::{Deserialize, Deserializer, Serialize, Serializer};

use Glob;

impl Serialize for Glob {
    fn serialize<S: Serializer>(
        &self,
        serializer: S,
    ) -> Result<S::Ok, S::Error> {
        serializer.serialize_str(self.glob())
    }
}

impl<'de> Deserialize<'de> for Glob {
    fn deserialize<D: Deserializer<'de>>(
        deserializer: D,
    ) -> Result<Self, D::Error> {
        let glob = <&str as Deserialize>::deserialize(deserializer)?;
        Glob::new(glob).map_err(D::Error::custom)
    }
}

#[cfg(test)]
mod tests {
    use Glob;

    #[test]
    fn glob_json_works() {
        let test_glob = Glob::new("src/**/*.rs").unwrap();

        let ser = serde_json::to_string(&test_glob).unwrap();
        assert_eq!(ser, "\"src/**/*.rs\"");

        let de: Glob = serde_json::from_str(&ser).unwrap();
        assert_eq!(test_glob, de);
    }
}
32 crates/grep/Cargo.toml (new file)
@@ -0,0 +1,32 @@
[package]
name = "grep"
version = "0.2.5" #:version
authors = ["Andrew Gallant <jamslam@gmail.com>"]
description = """
Fast line oriented regex searching as a library.
"""
documentation = "http://burntsushi.net/rustdoc/grep/"
homepage = "https://github.com/BurntSushi/ripgrep/tree/master/crates/grep"
repository = "https://github.com/BurntSushi/ripgrep/tree/master/crates/grep"
readme = "README.md"
keywords = ["regex", "grep", "egrep", "search", "pattern"]
license = "Unlicense/MIT"

[dependencies]
grep-cli = { version = "0.1.4", path = "../cli" }
grep-matcher = { version = "0.1.4", path = "../matcher" }
grep-pcre2 = { version = "0.1.4", path = "../pcre2", optional = true }
grep-printer = { version = "0.1.4", path = "../printer" }
grep-regex = { version = "0.1.6", path = "../regex" }
grep-searcher = { version = "0.1.7", path = "../searcher" }

[dev-dependencies]
termcolor = "1.0.4"
walkdir = "2.2.7"

[features]
simd-accel = ["grep-searcher/simd-accel"]
pcre2 = ["grep-pcre2"]

# This feature is DEPRECATED. Runtime dispatch is used for SIMD now.
avx-accel = []
41 crates/grep/README.md (new file)
@@ -0,0 +1,41 @@
grep
----
ripgrep, as a library.

[](https://travis-ci.org/BurntSushi/ripgrep)
[](https://ci.appveyor.com/project/BurntSushi/ripgrep)
[](https://crates.io/crates/grep)

Dual-licensed under MIT or the [UNLICENSE](http://unlicense.org).


### Documentation

[https://docs.rs/grep](https://docs.rs/grep)

NOTE: This crate isn't ready for wide use yet. Ambitious individuals can
probably piece together the parts, but there is no high level documentation
describing how all of the pieces fit together.


### Usage

Add this to your `Cargo.toml`:

```toml
[dependencies]
grep = "0.2"
```

and this to your crate root:

```rust
extern crate grep;
```


### Features

This crate provides a `pcre2` feature (disabled by default) which, when
enabled, re-exports the `grep-pcre2` crate as an alternative `Matcher`
implementation to the standard `grep-regex` implementation.
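As a hedged illustration of that feature paragraph (the `pcre2` feature name comes from the `Cargo.toml` above, the version requirement is the one quoted in the Usage section, and the exact pin is up to the consumer), a downstream crate would opt in roughly like this:

```toml
[dependencies]
# Enable the optional PCRE2 matcher, re-exported by the facade as `grep::pcre2`.
grep = { version = "0.2", features = ["pcre2"] }
```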
72 crates/grep/examples/simplegrep.rs (new file)
@@ -0,0 +1,72 @@
extern crate grep;
extern crate termcolor;
extern crate walkdir;

use std::env;
use std::error::Error;
use std::ffi::OsString;
use std::process;

use grep::cli;
use grep::printer::{ColorSpecs, StandardBuilder};
use grep::regex::RegexMatcher;
use grep::searcher::{BinaryDetection, SearcherBuilder};
use termcolor::ColorChoice;
use walkdir::WalkDir;

fn main() {
    if let Err(err) = try_main() {
        eprintln!("{}", err);
        process::exit(1);
    }
}

fn try_main() -> Result<(), Box<dyn Error>> {
    let mut args: Vec<OsString> = env::args_os().collect();
    if args.len() < 2 {
        return Err("Usage: simplegrep <pattern> [<path> ...]".into());
    }
    if args.len() == 2 {
        args.push(OsString::from("./"));
    }
    search(cli::pattern_from_os(&args[1])?, &args[2..])
}

fn search(pattern: &str, paths: &[OsString]) -> Result<(), Box<dyn Error>> {
    let matcher = RegexMatcher::new_line_matcher(&pattern)?;
    let mut searcher = SearcherBuilder::new()
        .binary_detection(BinaryDetection::quit(b'\x00'))
        .line_number(false)
        .build();
    let mut printer = StandardBuilder::new()
        .color_specs(ColorSpecs::default_with_color())
        .build(cli::stdout(if cli::is_tty_stdout() {
            ColorChoice::Auto
        } else {
            ColorChoice::Never
        }));

    for path in paths {
        for result in WalkDir::new(path) {
            let dent = match result {
                Ok(dent) => dent,
                Err(err) => {
                    eprintln!("{}", err);
                    continue;
                }
            };
            if !dent.file_type().is_file() {
                continue;
            }
            let result = searcher.search_path(
                &matcher,
                dent.path(),
                printer.sink_with_path(&matcher, dent.path()),
            );
            if let Err(err) = result {
                eprintln!("{}: {}", dent.path().display(), err);
            }
        }
    }
    Ok(())
}
23 crates/grep/src/lib.rs (new file)
@@ -0,0 +1,23 @@
/*!
ripgrep, as a library.

This library is intended to provide a high level facade to the crates that
make up ripgrep's core searching routines. However, there is no high level
documentation available yet guiding users on how to fit all of the pieces
together.

Every public API item in the constituent crates is documented, but examples
are sparse.

A cookbook and a guide are planned.
*/

#![deny(missing_docs)]

pub extern crate grep_cli as cli;
pub extern crate grep_matcher as matcher;
#[cfg(feature = "pcre2")]
pub extern crate grep_pcre2 as pcre2;
pub extern crate grep_printer as printer;
pub extern crate grep_regex as regex;
pub extern crate grep_searcher as searcher;
@@ -1,14 +1,14 @@
[package]
name = "ignore"
-version = "0.1.8" #:version
+version = "0.4.12" #:version
authors = ["Andrew Gallant <jamslam@gmail.com>"]
description = """
A fast library for efficiently matching ignore files such as `.gitignore`
against file paths.
"""
documentation = "https://docs.rs/ignore"
-homepage = "https://github.com/BurntSushi/ripgrep/tree/master/ignore"
-repository = "https://github.com/BurntSushi/ripgrep/tree/master/ignore"
+homepage = "https://github.com/BurntSushi/ripgrep/tree/master/crates/ignore"
+repository = "https://github.com/BurntSushi/ripgrep/tree/master/crates/ignore"
readme = "README.md"
keywords = ["glob", "ignore", "gitignore", "pattern", "file"]
license = "Unlicense/MIT"
@@ -18,20 +18,19 @@ name = "ignore"
bench = false

[dependencies]
-crossbeam = "0.2"
-globset = { version = "0.1.4", path = "../globset" }
-lazy_static = "0.2"
-log = "0.3"
-memchr = "1"
-regex = "0.2.1"
-thread_local = "0.3.2"
-walkdir = "1.0.7"
+crossbeam-channel = "0.4.0"
+crossbeam-utils = "0.7.0"
+globset = { version = "0.4.3", path = "../globset" }
+lazy_static = "1.1"
+log = "0.4.5"
+memchr = "2.1"
+regex = "1.1"
+same-file = "1.0.4"
+thread_local = "1"
+walkdir = "2.2.7"

[dev-dependencies]
-tempdir = "0.3.5"
+[target.'cfg(windows)'.dependencies.winapi-util]
+version = "0.1.2"

[features]
simd-accel = ["globset/simd-accel"]

[profile.release]
debug = true
@@ -4,7 +4,7 @@ The ignore crate provides a fast recursive directory iterator that respects
various filters such as globs, file types and `.gitignore` files. This crate
also provides lower level direct access to gitignore and file type matchers.

-[](https://travis-ci.org/BurntSushi/ripgrep)
+[](https://travis-ci.org/BurntSushi/ripgrep)
[](https://ci.appveyor.com/project/BurntSushi/ripgrep)
[](https://crates.io/crates/ignore)

@@ -20,7 +20,7 @@ Add this to your `Cargo.toml`:

```toml
[dependencies]
-ignore = "0.1"
+ignore = "0.4"
```

and this to your crate root:
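For orientation, here is a minimal sketch of the `WalkBuilder` API that the example program below exercises; it assumes only what that example shows (recursive iteration that respects `.gitignore` and the crate's other default filters), and the error handling is deliberately simple:

```rust
extern crate ignore;

use ignore::WalkBuilder;

fn main() {
    // Walk the current directory; entries matched by .gitignore, .ignore and
    // related filters are skipped. Each item is a Result<DirEntry, Error>.
    for result in WalkBuilder::new("./").build() {
        match result {
            Ok(entry) => println!("{}", entry.path().display()),
            Err(err) => eprintln!("error: {}", err),
        }
    }
}
```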
@@ -1,17 +1,12 @@
#![allow(dead_code, unused_imports, unused_mut, unused_variables)]

-extern crate crossbeam;
+extern crate crossbeam_channel as channel;
extern crate ignore;
extern crate walkdir;

use std::env;
use std::io::{self, Write};
use std::path::Path;
use std::sync::Arc;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::thread;

-use crossbeam::sync::MsQueue;
use ignore::WalkBuilder;
use walkdir::WalkDir;

@@ -19,7 +14,7 @@ fn main() {
    let mut path = env::args().nth(1).unwrap();
    let mut parallel = false;
    let mut simple = false;
-   let queue: Arc<MsQueue<Option<DirEntry>>> = Arc::new(MsQueue::new());
+   let (tx, rx) = channel::bounded::<DirEntry>(100);
    if path == "parallel" {
        path = env::args().nth(2).unwrap();
        parallel = true;
@@ -28,10 +23,9 @@ fn main() {
        simple = true;
    }

-   let stdout_queue = queue.clone();
    let stdout_thread = thread::spawn(move || {
        let mut stdout = io::BufWriter::new(io::stdout());
-       while let Some(dent) = stdout_queue.pop() {
+       for dent in rx {
            write_path(&mut stdout, dent.path());
        }
    });
@@ -39,28 +33,26 @@ fn main() {
    if parallel {
        let walker = WalkBuilder::new(path).threads(6).build_parallel();
        walker.run(|| {
-           let queue = queue.clone();
+           let tx = tx.clone();
            Box::new(move |result| {
                use ignore::WalkState::*;

-               queue.push(Some(DirEntry::Y(result.unwrap())));
+               tx.send(DirEntry::Y(result.unwrap())).unwrap();
                Continue
            })
        });
    } else if simple {
        let mut stdout = io::BufWriter::new(io::stdout());
        let walker = WalkDir::new(path);
        for result in walker {
-           queue.push(Some(DirEntry::X(result.unwrap())));
+           tx.send(DirEntry::X(result.unwrap())).unwrap();
        }
    } else {
        let mut stdout = io::BufWriter::new(io::stdout());
        let walker = WalkBuilder::new(path).build();
        for result in walker {
-           queue.push(Some(DirEntry::Y(result.unwrap())));
+           tx.send(DirEntry::Y(result.unwrap())).unwrap();
        }
    }
-   queue.push(None);
+   drop(tx);
    stdout_thread.join().unwrap();
}
246 crates/ignore/src/default_types.rs (new file)
@@ -0,0 +1,246 @@
|
||||
/// This list represents the default file types that ripgrep ships with. In
|
||||
/// general, any file format is fair game, although it should generally be
|
||||
/// limited to reasonably popular open formats. For other cases, you can add
|
||||
/// types to each invocation of ripgrep with the '--type-add' flag.
|
||||
///
|
||||
/// If you would like to add or improve this list, please file a PR:
|
||||
/// https://github.com/BurntSushi/ripgrep
|
||||
///
|
||||
/// Please try to keep this list sorted lexicographically and wrapped to 79
|
||||
/// columns (inclusive).
|
||||
#[rustfmt::skip]
|
||||
pub const DEFAULT_TYPES: &[(&str, &[&str])] = &[
|
||||
("agda", &["*.agda", "*.lagda"]),
|
||||
("aidl", &["*.aidl"]),
|
||||
("amake", &["*.mk", "*.bp"]),
|
||||
("asciidoc", &["*.adoc", "*.asc", "*.asciidoc"]),
|
||||
("asm", &["*.asm", "*.s", "*.S"]),
|
||||
("asp", &[
|
||||
"*.aspx", "*.aspx.cs", "*.aspx.cs", "*.ascx", "*.ascx.cs", "*.ascx.vb",
|
||||
]),
|
||||
("ats", &["*.ats", "*.dats", "*.sats", "*.hats"]),
|
||||
("avro", &["*.avdl", "*.avpr", "*.avsc"]),
|
||||
("awk", &["*.awk"]),
|
||||
("bazel", &["*.bzl", "WORKSPACE", "BUILD", "BUILD.bazel"]),
|
||||
("bitbake", &["*.bb", "*.bbappend", "*.bbclass", "*.conf", "*.inc"]),
|
||||
("brotli", &["*.br"]),
|
||||
("buildstream", &["*.bst"]),
|
||||
("bzip2", &["*.bz2", "*.tbz2"]),
|
||||
("c", &["*.[chH]", "*.[chH].in", "*.cats"]),
|
||||
("cabal", &["*.cabal"]),
|
||||
("cbor", &["*.cbor"]),
|
||||
("ceylon", &["*.ceylon"]),
|
||||
("clojure", &["*.clj", "*.cljc", "*.cljs", "*.cljx"]),
|
||||
("cmake", &["*.cmake", "CMakeLists.txt"]),
|
||||
("coffeescript", &["*.coffee"]),
|
||||
("config", &["*.cfg", "*.conf", "*.config", "*.ini"]),
|
||||
("coq", &["*.v"]),
|
||||
("cpp", &[
|
||||
"*.[ChH]", "*.cc", "*.[ch]pp", "*.[ch]xx", "*.hh", "*.inl",
|
||||
"*.[ChH].in", "*.cc.in", "*.[ch]pp.in", "*.[ch]xx.in", "*.hh.in",
|
||||
]),
|
||||
("creole", &["*.creole"]),
|
||||
("crystal", &["Projectfile", "*.cr"]),
|
||||
("cs", &["*.cs"]),
|
||||
("csharp", &["*.cs"]),
|
||||
("cshtml", &["*.cshtml"]),
|
||||
("css", &["*.css", "*.scss"]),
|
||||
("csv", &["*.csv"]),
|
||||
("cython", &["*.pyx", "*.pxi", "*.pxd"]),
|
||||
("d", &["*.d"]),
|
||||
("dart", &["*.dart"]),
|
||||
("dhall", &["*.dhall"]),
|
||||
("diff", &["*.patch", "*.diff"]),
|
||||
("docker", &["*Dockerfile*"]),
|
||||
("edn", &["*.edn"]),
|
||||
("elisp", &["*.el"]),
|
||||
("elixir", &["*.ex", "*.eex", "*.exs"]),
|
||||
("elm", &["*.elm"]),
|
||||
("erb", &["*.erb"]),
|
||||
("erlang", &["*.erl", "*.hrl"]),
|
||||
("fidl", &["*.fidl"]),
|
||||
("fish", &["*.fish"]),
|
||||
("fortran", &[
|
||||
"*.f", "*.F", "*.f77", "*.F77", "*.pfo",
|
||||
"*.f90", "*.F90", "*.f95", "*.F95",
|
||||
]),
|
||||
("fsharp", &["*.fs", "*.fsx", "*.fsi"]),
|
||||
("gap", &["*.g", "*.gap", "*.gi", "*.gd", "*.tst"]),
|
||||
("gn", &["*.gn", "*.gni"]),
|
||||
("go", &["*.go"]),
|
||||
("gradle", &["*.gradle"]),
|
||||
("groovy", &["*.groovy", "*.gradle"]),
|
||||
("gzip", &["*.gz", "*.tgz"]),
|
||||
("h", &["*.h", "*.hpp"]),
|
||||
("haml", &["*.haml"]),
|
||||
("haskell", &["*.hs", "*.lhs", "*.cpphs", "*.c2hs", "*.hsc"]),
|
||||
("hbs", &["*.hbs"]),
|
||||
("hs", &["*.hs", "*.lhs"]),
|
||||
("html", &["*.htm", "*.html", "*.ejs"]),
|
||||
("idris", &["*.idr", "*.lidr"]),
|
||||
("java", &["*.java", "*.jsp", "*.jspx", "*.properties"]),
|
||||
("jinja", &["*.j2", "*.jinja", "*.jinja2"]),
|
||||
("jl", &["*.jl"]),
|
||||
("js", &["*.js", "*.jsx", "*.vue"]),
|
||||
("json", &["*.json", "composer.lock"]),
|
||||
("jsonl", &["*.jsonl"]),
|
||||
("julia", &["*.jl"]),
|
||||
("jupyter", &["*.ipynb", "*.jpynb"]),
|
||||
("k", &["*.k"]),
|
||||
("kotlin", &["*.kt", "*.kts"]),
|
||||
("less", &["*.less"]),
|
||||
("license", &[
|
||||
// General
|
||||
"COPYING", "COPYING[.-]*",
|
||||
"COPYRIGHT", "COPYRIGHT[.-]*",
|
||||
"EULA", "EULA[.-]*",
|
||||
"licen[cs]e", "licen[cs]e.*",
|
||||
"LICEN[CS]E", "LICEN[CS]E[.-]*", "*[.-]LICEN[CS]E*",
|
||||
"NOTICE", "NOTICE[.-]*",
|
||||
"PATENTS", "PATENTS[.-]*",
|
||||
"UNLICEN[CS]E", "UNLICEN[CS]E[.-]*",
|
||||
// GPL (gpl.txt, etc.)
|
||||
"agpl[.-]*",
|
||||
"gpl[.-]*",
|
||||
"lgpl[.-]*",
|
||||
// Other license-specific (APACHE-2.0.txt, etc.)
|
||||
"AGPL-*[0-9]*",
|
||||
"APACHE-*[0-9]*",
|
||||
"BSD-*[0-9]*",
|
||||
"CC-BY-*",
|
||||
"GFDL-*[0-9]*",
|
||||
"GNU-*[0-9]*",
|
||||
"GPL-*[0-9]*",
|
||||
"LGPL-*[0-9]*",
|
||||
"MIT-*[0-9]*",
|
||||
"MPL-*[0-9]*",
|
||||
"OFL-*[0-9]*",
|
||||
]),
|
||||
("lisp", &["*.el", "*.jl", "*.lisp", "*.lsp", "*.sc", "*.scm"]),
|
||||
("lock", &["*.lock", "package-lock.json"]),
|
||||
("log", &["*.log"]),
|
||||
("lua", &["*.lua"]),
|
||||
("lz4", &["*.lz4"]),
|
||||
("lzma", &["*.lzma"]),
|
||||
("m4", &["*.ac", "*.m4"]),
|
||||
("make", &[
|
||||
"[Gg][Nn][Uu]makefile", "[Mm]akefile",
|
||||
"[Gg][Nn][Uu]makefile.am", "[Mm]akefile.am",
|
||||
"[Gg][Nn][Uu]makefile.in", "[Mm]akefile.in",
|
||||
"*.mk", "*.mak"
|
||||
]),
|
||||
("mako", &["*.mako", "*.mao"]),
|
||||
("man", &["*.[0-9lnpx]", "*.[0-9][cEFMmpSx]"]),
|
||||
("markdown", &["*.markdown", "*.md", "*.mdown", "*.mkdn"]),
|
||||
("matlab", &["*.m"]),
|
||||
("md", &["*.markdown", "*.md", "*.mdown", "*.mkdn"]),
|
||||
("mk", &["mkfile"]),
|
||||
("ml", &["*.ml"]),
|
||||
("msbuild", &[
|
||||
"*.csproj", "*.fsproj", "*.vcxproj", "*.proj", "*.props", "*.targets",
|
||||
]),
|
||||
("nim", &["*.nim", "*.nimf", "*.nimble", "*.nims"]),
|
||||
("nix", &["*.nix"]),
|
||||
("objc", &["*.h", "*.m"]),
|
||||
("objcpp", &["*.h", "*.mm"]),
|
||||
("ocaml", &["*.ml", "*.mli", "*.mll", "*.mly"]),
|
||||
("org", &["*.org", "*.org_archive"]),
|
||||
("pascal", &["*.pas", "*.dpr", "*.lpr", "*.pp", "*.inc"]),
|
||||
("pdf", &["*.pdf"]),
|
||||
("perl", &["*.perl", "*.pl", "*.PL", "*.plh", "*.plx", "*.pm", "*.t"]),
|
||||
("php", &["*.php", "*.php3", "*.php4", "*.php5", "*.phtml"]),
|
||||
("pod", &["*.pod"]),
|
||||
("postscript", &["*.eps", "*.ps"]),
|
||||
("protobuf", &["*.proto"]),
|
||||
("ps", &["*.cdxml", "*.ps1", "*.ps1xml", "*.psd1", "*.psm1"]),
|
||||
("puppet", &["*.erb", "*.pp", "*.rb"]),
|
||||
("purs", &["*.purs"]),
|
||||
("py", &["*.py"]),
|
||||
("qmake", &["*.pro", "*.pri", "*.prf"]),
|
||||
("qml", &["*.qml"]),
|
||||
("r", &["*.R", "*.r", "*.Rmd", "*.Rnw"]),
|
||||
("rdoc", &["*.rdoc"]),
|
||||
("readme", &["README*", "*README"]),
|
||||
("robot", &["*.robot"]),
|
||||
("rst", &["*.rst"]),
|
||||
("ruby", &["Gemfile", "*.gemspec", ".irbrc", "Rakefile", "*.rb"]),
|
||||
("rust", &["*.rs"]),
|
||||
("sass", &["*.sass", "*.scss"]),
|
||||
("scala", &["*.scala", "*.sbt"]),
|
||||
("sh", &[
|
||||
// Portable/misc. init files
|
||||
".login", ".logout", ".profile", "profile",
|
||||
// bash-specific init files
|
||||
".bash_login", "bash_login",
|
||||
".bash_logout", "bash_logout",
|
||||
".bash_profile", "bash_profile",
|
||||
".bashrc", "bashrc", "*.bashrc",
|
||||
// csh-specific init files
|
||||
".cshrc", "*.cshrc",
|
||||
// ksh-specific init files
|
||||
".kshrc", "*.kshrc",
|
||||
// tcsh-specific init files
|
||||
".tcshrc",
|
||||
// zsh-specific init files
|
||||
".zshenv", "zshenv",
|
||||
".zlogin", "zlogin",
|
||||
".zlogout", "zlogout",
|
||||
".zprofile", "zprofile",
|
||||
".zshrc", "zshrc",
|
||||
// Extensions
|
||||
"*.bash", "*.csh", "*.ksh", "*.sh", "*.tcsh", "*.zsh",
|
||||
]),
|
||||
("slim", &["*.skim", "*.slim", "*.slime"]),
|
||||
("smarty", &["*.tpl"]),
|
||||
("sml", &["*.sml", "*.sig"]),
|
||||
("soy", &["*.soy"]),
|
||||
("spark", &["*.spark"]),
|
||||
("spec", &["*.spec"]),
|
||||
("sql", &["*.sql", "*.psql"]),
|
||||
("stylus", &["*.styl"]),
|
||||
("sv", &["*.v", "*.vg", "*.sv", "*.svh", "*.h"]),
|
||||
("svg", &["*.svg"]),
|
||||
("swift", &["*.swift"]),
|
||||
("swig", &["*.def", "*.i"]),
|
||||
("systemd", &[
|
||||
"*.automount", "*.conf", "*.device", "*.link", "*.mount", "*.path",
|
||||
"*.scope", "*.service", "*.slice", "*.socket", "*.swap", "*.target",
|
||||
"*.timer",
|
||||
]),
|
||||
("taskpaper", &["*.taskpaper"]),
|
||||
("tcl", &["*.tcl"]),
|
||||
("tex", &["*.tex", "*.ltx", "*.cls", "*.sty", "*.bib", "*.dtx", "*.ins"]),
|
||||
("textile", &["*.textile"]),
|
||||
("tf", &["*.tf"]),
|
||||
("thrift", &["*.thrift"]),
|
||||
("toml", &["*.toml", "Cargo.lock"]),
|
||||
("ts", &["*.ts", "*.tsx"]),
|
||||
("twig", &["*.twig"]),
|
||||
("txt", &["*.txt"]),
|
||||
("typoscript", &["*.typoscript", "*.ts"]),
|
||||
("vala", &["*.vala"]),
|
||||
("vb", &["*.vb"]),
|
||||
("verilog", &["*.v", "*.vh", "*.sv", "*.svh"]),
|
||||
("vhdl", &["*.vhd", "*.vhdl"]),
|
||||
("vim", &["*.vim"]),
|
||||
("vimscript", &["*.vim"]),
|
||||
("webidl", &["*.idl", "*.webidl", "*.widl"]),
|
||||
("wiki", &["*.mediawiki", "*.wiki"]),
|
||||
("xml", &[
|
||||
"*.xml", "*.xml.dist", "*.dtd", "*.xsl", "*.xslt", "*.xsd", "*.xjb",
|
||||
"*.rng", "*.sch", "*.xhtml",
|
||||
]),
|
||||
("xz", &["*.xz", "*.txz"]),
|
||||
("yacc", &["*.y"]),
|
||||
("yaml", &["*.yaml", "*.yml"]),
|
||||
("zig", &["*.zig"]),
|
||||
("zsh", &[
|
||||
".zshenv", "zshenv",
|
||||
".zlogin", "zlogin",
|
||||
".zlogout", "zlogout",
|
||||
".zprofile", "zprofile",
|
||||
".zshrc", "zshrc",
|
||||
"*.zsh",
|
||||
]),
|
||||
("zstd", &["*.zst", "*.zstd"]),
|
||||
];
|
||||
@@ -14,14 +14,17 @@
|
||||
// well.
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::ffi::OsString;
|
||||
use std::ffi::{OsStr, OsString};
|
||||
use std::fs::{File, FileType};
|
||||
use std::io::{self, BufRead};
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::sync::{Arc, RwLock};
|
||||
|
||||
use gitignore::{self, Gitignore, GitignoreBuilder};
|
||||
use pathutil::{is_hidden, strip_prefix};
|
||||
use overrides::{self, Override};
|
||||
use pathutil::{is_hidden, strip_prefix};
|
||||
use types::{self, Types};
|
||||
use walk::DirEntry;
|
||||
use {Error, Match, PartialErrorBuilder};
|
||||
|
||||
/// IgnoreMatch represents information about where a match came from when using
|
||||
@@ -65,19 +68,19 @@ struct IgnoreOptions {
|
||||
hidden: bool,
|
||||
/// Whether to read .ignore files.
|
||||
ignore: bool,
|
||||
/// Whether to respect any ignore files in parent directories.
|
||||
parents: bool,
|
||||
/// Whether to read git's global gitignore file.
|
||||
git_global: bool,
|
||||
/// Whether to read .gitignore files.
|
||||
git_ignore: bool,
|
||||
/// Whether to read .git/info/exclude files.
|
||||
git_exclude: bool,
|
||||
}
|
||||
|
||||
impl IgnoreOptions {
|
||||
/// Returns true if at least one type of ignore rules should be matched.
|
||||
fn has_any_ignore_options(&self) -> bool {
|
||||
self.ignore || self.git_global || self.git_ignore || self.git_exclude
|
||||
}
|
||||
/// Whether to ignore files case insensitively
|
||||
ignore_case_insensitive: bool,
|
||||
/// Whether a git repository must be present in order to apply any
|
||||
/// git-related ignore rules.
|
||||
require_git: bool,
|
||||
}
|
||||
|
||||
/// Ignore is a matcher useful for recursively walking one or more directories.
|
||||
@@ -109,8 +112,12 @@ struct IgnoreInner {
|
||||
/// The absolute base path of this matcher. Populated only if parent
|
||||
/// directories are added.
|
||||
absolute_base: Option<Arc<PathBuf>>,
|
||||
/// Explicit ignore matchers specified by the caller.
|
||||
/// Explicit global ignore matchers specified by the caller.
|
||||
explicit_ignores: Arc<Vec<Gitignore>>,
|
||||
/// Ignore files used in addition to `.ignore`
|
||||
custom_ignore_filenames: Arc<Vec<OsString>>,
|
||||
/// The matcher for custom ignore files
|
||||
custom_ignore_matcher: Gitignore,
|
||||
/// The matcher for .ignore files.
|
||||
ignore_matcher: Gitignore,
|
||||
/// A global gitignore matcher, usually from $XDG_CONFIG_HOME/git/ignore.
|
||||
@@ -127,7 +134,6 @@ struct IgnoreInner {
|
||||
|
||||
impl Ignore {
|
||||
/// Return the directory path of this matcher.
|
||||
#[allow(dead_code)]
|
||||
pub fn path(&self) -> &Path {
|
||||
&self.0.dir
|
||||
}
|
||||
@@ -155,6 +161,15 @@ impl Ignore {
|
||||
&self,
|
||||
path: P,
|
||||
) -> (Ignore, Option<Error>) {
|
||||
if !self.0.opts.parents
|
||||
&& !self.0.opts.git_ignore
|
||||
&& !self.0.opts.git_exclude
|
||||
&& !self.0.opts.git_global
|
||||
{
|
||||
// If we never need info from parent directories, then don't do
|
||||
// anything.
|
||||
return (self.clone(), None);
|
||||
}
|
||||
if !self.is_root() {
|
||||
panic!("Ignore::add_parents called on non-root matcher");
|
||||
}
|
||||
@@ -187,6 +202,11 @@ impl Ignore {
|
||||
errs.maybe_push(err);
|
||||
igtmp.is_absolute_parent = true;
|
||||
igtmp.absolute_base = Some(absolute_base.clone());
|
||||
igtmp.has_git = if self.0.opts.git_ignore {
|
||||
parent.join(".git").exists()
|
||||
} else {
|
||||
false
|
||||
};
|
||||
ig = Ignore(Arc::new(igtmp));
|
||||
compiled.insert(parent.as_os_str().to_os_string(), ig.clone());
|
||||
}
|
||||
@@ -211,33 +231,70 @@ impl Ignore {
|
||||
|
||||
/// Like add_child, but takes a full path and returns an IgnoreInner.
|
||||
fn add_child_path(&self, dir: &Path) -> (IgnoreInner, Option<Error>) {
|
||||
static IG_NAMES: &'static [&'static str] = &[".rgignore", ".ignore"];
|
||||
let git_type = if self.0.opts.git_ignore || self.0.opts.git_exclude {
|
||||
dir.join(".git").metadata().ok().map(|md| md.file_type())
|
||||
} else {
|
||||
None
|
||||
};
|
||||
let has_git = git_type.map(|_| true).unwrap_or(false);
|
||||
|
||||
let mut errs = PartialErrorBuilder::default();
|
||||
let ig_matcher =
|
||||
if !self.0.opts.ignore {
|
||||
Gitignore::empty()
|
||||
} else {
|
||||
let (m, err) = create_gitignore(&dir, IG_NAMES);
|
||||
errs.maybe_push(err);
|
||||
m
|
||||
};
|
||||
let gi_matcher =
|
||||
if !self.0.opts.git_ignore {
|
||||
Gitignore::empty()
|
||||
} else {
|
||||
let (m, err) = create_gitignore(&dir, &[".gitignore"]);
|
||||
errs.maybe_push(err);
|
||||
m
|
||||
};
|
||||
let gi_exclude_matcher =
|
||||
if !self.0.opts.git_exclude {
|
||||
Gitignore::empty()
|
||||
} else {
|
||||
let (m, err) = create_gitignore(&dir, &[".git/info/exclude"]);
|
||||
errs.maybe_push(err);
|
||||
m
|
||||
};
|
||||
let custom_ig_matcher = if self.0.custom_ignore_filenames.is_empty() {
|
||||
Gitignore::empty()
|
||||
} else {
|
||||
let (m, err) = create_gitignore(
|
||||
&dir,
|
||||
&dir,
|
||||
&self.0.custom_ignore_filenames,
|
||||
self.0.opts.ignore_case_insensitive,
|
||||
);
|
||||
errs.maybe_push(err);
|
||||
m
|
||||
};
|
||||
let ig_matcher = if !self.0.opts.ignore {
|
||||
Gitignore::empty()
|
||||
} else {
|
||||
let (m, err) = create_gitignore(
|
||||
&dir,
|
||||
&dir,
|
||||
&[".ignore"],
|
||||
self.0.opts.ignore_case_insensitive,
|
||||
);
|
||||
errs.maybe_push(err);
|
||||
m
|
||||
};
|
||||
let gi_matcher = if !self.0.opts.git_ignore {
|
||||
Gitignore::empty()
|
||||
} else {
|
||||
let (m, err) = create_gitignore(
|
||||
&dir,
|
||||
&dir,
|
||||
&[".gitignore"],
|
||||
self.0.opts.ignore_case_insensitive,
|
||||
);
|
||||
errs.maybe_push(err);
|
||||
m
|
||||
};
|
||||
let gi_exclude_matcher = if !self.0.opts.git_exclude {
|
||||
Gitignore::empty()
|
||||
} else {
|
||||
match resolve_git_commondir(dir, git_type) {
|
||||
Ok(git_dir) => {
|
||||
let (m, err) = create_gitignore(
|
||||
&dir,
|
||||
&git_dir,
|
||||
&["info/exclude"],
|
||||
self.0.opts.ignore_case_insensitive,
|
||||
);
|
||||
errs.maybe_push(err);
|
||||
m
|
||||
}
|
||||
Err(err) => {
|
||||
errs.maybe_push(err);
|
||||
Gitignore::empty()
|
||||
}
|
||||
}
|
||||
};
|
||||
let ig = IgnoreInner {
|
||||
compiled: self.0.compiled.clone(),
|
||||
dir: dir.to_path_buf(),
|
||||
@@ -247,21 +304,50 @@ impl Ignore {
|
||||
is_absolute_parent: false,
|
||||
absolute_base: self.0.absolute_base.clone(),
|
||||
explicit_ignores: self.0.explicit_ignores.clone(),
|
||||
custom_ignore_filenames: self.0.custom_ignore_filenames.clone(),
|
||||
custom_ignore_matcher: custom_ig_matcher,
|
||||
ignore_matcher: ig_matcher,
|
||||
git_global_matcher: self.0.git_global_matcher.clone(),
|
||||
git_ignore_matcher: gi_matcher,
|
||||
git_exclude_matcher: gi_exclude_matcher,
|
||||
has_git: dir.join(".git").is_dir(),
|
||||
has_git: has_git,
|
||||
opts: self.0.opts,
|
||||
};
|
||||
(ig, errs.into_error_option())
|
||||
}
|
||||
|
||||
/// Returns true if at least one type of ignore rule should be matched.
|
||||
fn has_any_ignore_rules(&self) -> bool {
|
||||
let opts = self.0.opts;
|
||||
let has_custom_ignore_files =
|
||||
!self.0.custom_ignore_filenames.is_empty();
|
||||
let has_explicit_ignores = !self.0.explicit_ignores.is_empty();
|
||||
|
||||
opts.ignore
|
||||
|| opts.git_global
|
||||
|| opts.git_ignore
|
||||
|| opts.git_exclude
|
||||
|| has_custom_ignore_files
|
||||
|| has_explicit_ignores
|
||||
}
|
||||
|
||||
/// Like `matched`, but works with a directory entry instead.
|
||||
pub fn matched_dir_entry<'a>(
|
||||
&'a self,
|
||||
dent: &DirEntry,
|
||||
) -> Match<IgnoreMatch<'a>> {
|
||||
let m = self.matched(dent.path(), dent.is_dir());
|
||||
if m.is_none() && self.0.opts.hidden && is_hidden(dent) {
|
||||
return Match::Ignore(IgnoreMatch::hidden());
|
||||
}
|
||||
m
|
||||
}
|
||||
|
||||
/// Returns a match indicating whether the given file path should be
|
||||
/// ignored or not.
|
||||
///
|
||||
/// The match contains information about its origin.
|
||||
pub fn matched<'a, P: AsRef<Path>>(
|
||||
fn matched<'a, P: AsRef<Path>>(
|
||||
&'a self,
|
||||
path: P,
|
||||
is_dir: bool,
|
||||
@@ -277,15 +363,17 @@ impl Ignore {
|
||||
// return that result immediately. Overrides have the highest
|
||||
// precedence.
|
||||
if !self.0.overrides.is_empty() {
|
||||
let mat =
|
||||
self.0.overrides.matched(path, is_dir)
|
||||
.map(IgnoreMatch::overrides);
|
||||
let mat = self
|
||||
.0
|
||||
.overrides
|
||||
.matched(path, is_dir)
|
||||
.map(IgnoreMatch::overrides);
|
||||
if !mat.is_none() {
|
||||
return mat;
|
||||
}
|
||||
}
|
||||
let mut whitelisted = Match::None;
|
||||
if self.0.opts.has_any_ignore_options() {
|
||||
if self.has_any_ignore_rules() {
|
||||
let mat = self.matched_ignore(path, is_dir);
|
||||
if mat.is_ignore() {
|
||||
return mat;
|
||||
@@ -302,9 +390,6 @@ impl Ignore {
|
||||
whitelisted = mat;
|
||||
}
|
||||
}
|
||||
if whitelisted.is_none() && self.0.opts.hidden && is_hidden(path) {
|
||||
return Match::Ignore(IgnoreMatch::hidden());
|
||||
}
|
||||
whitelisted
|
||||
}
|
||||
|
||||
@@ -315,46 +400,75 @@ impl Ignore {
|
||||
path: &Path,
|
||||
is_dir: bool,
|
||||
) -> Match<IgnoreMatch<'a>> {
|
||||
let (mut m_ignore, mut m_gi, mut m_gi_exclude, mut m_explicit) =
|
||||
(Match::None, Match::None, Match::None, Match::None);
|
||||
let (
|
||||
mut m_custom_ignore,
|
||||
mut m_ignore,
|
||||
mut m_gi,
|
||||
mut m_gi_exclude,
|
||||
mut m_explicit,
|
||||
) = (Match::None, Match::None, Match::None, Match::None, Match::None);
|
||||
let any_git =
|
||||
!self.0.opts.require_git || self.parents().any(|ig| ig.0.has_git);
|
||||
let mut saw_git = false;
|
||||
for ig in self.parents().take_while(|ig| !ig.0.is_absolute_parent) {
|
||||
if m_custom_ignore.is_none() {
|
||||
m_custom_ignore =
|
||||
ig.0.custom_ignore_matcher
|
||||
.matched(path, is_dir)
|
||||
.map(IgnoreMatch::gitignore);
|
||||
}
|
||||
if m_ignore.is_none() {
|
||||
m_ignore =
|
||||
ig.0.ignore_matcher.matched(path, is_dir)
|
||||
.map(IgnoreMatch::gitignore);
|
||||
ig.0.ignore_matcher
|
||||
.matched(path, is_dir)
|
||||
.map(IgnoreMatch::gitignore);
|
||||
}
|
||||
if !saw_git && m_gi.is_none() {
|
||||
if any_git && !saw_git && m_gi.is_none() {
|
||||
m_gi =
|
||||
ig.0.git_ignore_matcher.matched(path, is_dir)
|
||||
.map(IgnoreMatch::gitignore);
|
||||
ig.0.git_ignore_matcher
|
||||
.matched(path, is_dir)
|
||||
.map(IgnoreMatch::gitignore);
|
||||
}
|
||||
if !saw_git && m_gi_exclude.is_none() {
|
||||
if any_git && !saw_git && m_gi_exclude.is_none() {
|
||||
m_gi_exclude =
|
||||
ig.0.git_exclude_matcher.matched(path, is_dir)
|
||||
.map(IgnoreMatch::gitignore);
|
||||
ig.0.git_exclude_matcher
|
||||
.matched(path, is_dir)
|
||||
.map(IgnoreMatch::gitignore);
|
||||
}
|
||||
saw_git = saw_git || ig.0.has_git;
|
||||
}
|
||||
if let Some(abs_parent_path) = self.absolute_base() {
|
||||
let path = abs_parent_path.join(path);
|
||||
for ig in self.parents().skip_while(|ig|!ig.0.is_absolute_parent) {
|
||||
if m_ignore.is_none() {
|
||||
m_ignore =
|
||||
ig.0.ignore_matcher.matched(&path, is_dir)
|
||||
.map(IgnoreMatch::gitignore);
|
||||
if self.0.opts.parents {
|
||||
if let Some(abs_parent_path) = self.absolute_base() {
|
||||
let path = abs_parent_path.join(path);
|
||||
for ig in
|
||||
self.parents().skip_while(|ig| !ig.0.is_absolute_parent)
|
||||
{
|
||||
if m_custom_ignore.is_none() {
|
||||
m_custom_ignore =
|
||||
ig.0.custom_ignore_matcher
|
||||
.matched(&path, is_dir)
|
||||
.map(IgnoreMatch::gitignore);
|
||||
}
|
||||
if m_ignore.is_none() {
|
||||
m_ignore =
|
||||
ig.0.ignore_matcher
|
||||
.matched(&path, is_dir)
|
||||
.map(IgnoreMatch::gitignore);
|
||||
}
|
||||
if any_git && !saw_git && m_gi.is_none() {
|
||||
m_gi =
|
||||
ig.0.git_ignore_matcher
|
||||
.matched(&path, is_dir)
|
||||
.map(IgnoreMatch::gitignore);
|
||||
}
|
||||
if any_git && !saw_git && m_gi_exclude.is_none() {
|
||||
m_gi_exclude =
|
||||
ig.0.git_exclude_matcher
|
||||
.matched(&path, is_dir)
|
||||
.map(IgnoreMatch::gitignore);
|
||||
}
|
||||
saw_git = saw_git || ig.0.has_git;
|
||||
}
|
||||
if !saw_git && m_gi.is_none() {
|
||||
m_gi =
|
||||
ig.0.git_ignore_matcher.matched(&path, is_dir)
|
||||
.map(IgnoreMatch::gitignore);
|
||||
}
|
||||
if !saw_git && m_gi_exclude.is_none() {
|
||||
m_gi_exclude =
|
||||
ig.0.git_exclude_matcher.matched(&path, is_dir)
|
||||
.map(IgnoreMatch::gitignore);
|
||||
}
|
||||
saw_git = saw_git || ig.0.has_git;
|
||||
}
|
||||
}
|
||||
for gi in self.0.explicit_ignores.iter().rev() {
|
||||
@@ -363,10 +477,21 @@ impl Ignore {
|
||||
}
|
||||
m_explicit = gi.matched(&path, is_dir).map(IgnoreMatch::gitignore);
|
||||
}
|
||||
let m_global = self.0.git_global_matcher.matched(&path, is_dir)
|
||||
.map(IgnoreMatch::gitignore);
|
||||
let m_global = if any_git {
|
||||
self.0
|
||||
.git_global_matcher
|
||||
.matched(&path, is_dir)
|
||||
.map(IgnoreMatch::gitignore)
|
||||
} else {
|
||||
Match::None
|
||||
};
|
||||
|
||||
m_ignore.or(m_gi).or(m_gi_exclude).or(m_global).or(m_explicit)
|
||||
m_custom_ignore
|
||||
.or(m_ignore)
|
||||
.or(m_gi)
|
||||
.or(m_gi_exclude)
|
||||
.or(m_global)
|
||||
.or(m_explicit)
|
||||
}
|
||||
|
||||
/// Returns an iterator over parent ignore matchers, including this one.
|
||||
@@ -409,8 +534,10 @@ pub struct IgnoreBuilder {
|
||||
overrides: Arc<Override>,
|
||||
/// A type matcher (default is empty).
|
||||
types: Arc<Types>,
|
||||
/// Explicit ignore matchers.
|
||||
/// Explicit global ignore matchers.
|
||||
explicit_ignores: Vec<Gitignore>,
|
||||
/// Ignore files in addition to .ignore.
|
||||
custom_ignore_filenames: Vec<OsString>,
|
||||
/// Ignore config.
|
||||
opts: IgnoreOptions,
|
||||
}
|
||||
@@ -426,12 +553,16 @@ impl IgnoreBuilder {
|
||||
overrides: Arc::new(Override::empty()),
|
||||
types: Arc::new(Types::empty()),
|
||||
explicit_ignores: vec![],
|
||||
custom_ignore_filenames: vec![],
|
||||
opts: IgnoreOptions {
|
||||
hidden: true,
|
||||
ignore: true,
|
||||
parents: true,
|
||||
git_global: true,
|
||||
git_ignore: true,
|
||||
git_exclude: true,
|
||||
ignore_case_insensitive: false,
|
||||
require_git: true,
|
||||
},
|
||||
}
|
||||
}
|
||||
@@ -441,16 +572,20 @@ impl IgnoreBuilder {
|
||||
/// The matcher returned won't match anything until ignore rules from
|
||||
/// directories are added to it.
|
||||
pub fn build(&self) -> Ignore {
|
||||
let git_global_matcher =
|
||||
if !self.opts.git_global {
|
||||
Gitignore::empty()
|
||||
} else {
|
||||
let (gi, err) = Gitignore::global();
|
||||
if let Some(err) = err {
|
||||
debug!("{}", err);
|
||||
}
|
||||
gi
|
||||
};
|
||||
let git_global_matcher = if !self.opts.git_global {
|
||||
Gitignore::empty()
|
||||
} else {
|
||||
let mut builder = GitignoreBuilder::new("");
|
||||
builder
|
||||
.case_insensitive(self.opts.ignore_case_insensitive)
|
||||
.unwrap();
|
||||
let (gi, err) = builder.build_global();
|
||||
if let Some(err) = err {
|
||||
debug!("{}", err);
|
||||
}
|
||||
gi
|
||||
};
|
||||
|
||||
Ignore(Arc::new(IgnoreInner {
|
||||
compiled: Arc::new(RwLock::new(HashMap::new())),
|
||||
dir: self.dir.clone(),
|
||||
@@ -460,6 +595,10 @@ impl IgnoreBuilder {
|
||||
is_absolute_parent: true,
|
||||
absolute_base: None,
|
||||
explicit_ignores: Arc::new(self.explicit_ignores.clone()),
|
||||
custom_ignore_filenames: Arc::new(
|
||||
self.custom_ignore_filenames.clone(),
|
||||
),
|
||||
custom_ignore_matcher: Gitignore::empty(),
|
||||
ignore_matcher: Gitignore::empty(),
|
||||
git_global_matcher: Arc::new(git_global_matcher),
|
||||
git_ignore_matcher: Gitignore::empty(),
|
||||
@@ -495,6 +634,20 @@ impl IgnoreBuilder {
|
||||
self
|
||||
}
|
||||
|
||||
/// Add a custom ignore file name
|
||||
///
|
||||
/// These ignore files have higher precedence than all other ignore files.
|
||||
///
|
||||
/// When specifying multiple names, earlier names have lower precedence than
|
||||
/// later names.
|
||||
pub fn add_custom_ignore_filename<S: AsRef<OsStr>>(
|
||||
&mut self,
|
||||
file_name: S,
|
||||
) -> &mut IgnoreBuilder {
|
||||
self.custom_ignore_filenames.push(file_name.as_ref().to_os_string());
|
||||
self
|
||||
}
|
||||
|
||||
/// Enables ignoring hidden files.
|
||||
///
|
||||
/// This is enabled by default.
|
||||
@@ -514,6 +667,17 @@ impl IgnoreBuilder {
|
||||
self
|
||||
}
|
||||
|
||||
/// Enables reading ignore files from parent directories.
|
||||
///
|
||||
/// If this is enabled, then .gitignore files in parent directories of each
|
||||
/// file path given are respected. Otherwise, they are ignored.
|
||||
///
|
||||
/// This is enabled by default.
|
||||
pub fn parents(&mut self, yes: bool) -> &mut IgnoreBuilder {
|
||||
self.opts.parents = yes;
|
||||
self
|
||||
}
|
||||
|
||||
/// Add a global gitignore matcher.
|
||||
///
|
||||
/// Its precedence is lower than both normal `.gitignore` files and
|
||||
@@ -548,23 +712,63 @@ impl IgnoreBuilder {
|
||||
self.opts.git_exclude = yes;
|
||||
self
|
||||
}
|
||||
|
||||
/// Whether a git repository is required to apply git-related ignore
|
||||
/// rules (global rules, .gitignore and local exclude rules).
|
||||
///
|
||||
/// When disabled, git-related ignore rules are applied even when searching
|
||||
/// outside a git repository.
|
||||
pub fn require_git(&mut self, yes: bool) -> &mut IgnoreBuilder {
|
||||
self.opts.require_git = yes;
|
||||
self
|
||||
}
|
||||
|
||||
/// Process ignore files case insensitively
|
||||
///
|
||||
/// This is disabled by default.
|
||||
pub fn ignore_case_insensitive(
|
||||
&mut self,
|
||||
yes: bool,
|
||||
) -> &mut IgnoreBuilder {
|
||||
self.opts.ignore_case_insensitive = yes;
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
/// Creates a new gitignore matcher for the directory given.
|
||||
///
|
||||
/// Ignore globs are extracted from each of the file names in `dir` in the
|
||||
/// order given (earlier names have lower precedence than later names).
|
||||
/// The matcher is meant to match files below `dir`.
|
||||
/// Ignore globs are extracted from each of the file names relative to
|
||||
/// `dir_for_ignorefile` in the order given (earlier names have lower
|
||||
/// precedence than later names).
|
||||
///
|
||||
/// I/O errors are ignored.
|
||||
pub fn create_gitignore(
|
||||
pub fn create_gitignore<T: AsRef<OsStr>>(
|
||||
dir: &Path,
|
||||
names: &[&str],
|
||||
dir_for_ignorefile: &Path,
|
||||
names: &[T],
|
||||
case_insensitive: bool,
|
||||
) -> (Gitignore, Option<Error>) {
|
||||
let mut builder = GitignoreBuilder::new(dir);
|
||||
let mut errs = PartialErrorBuilder::default();
|
||||
builder.case_insensitive(case_insensitive).unwrap();
|
||||
for name in names {
|
||||
let gipath = dir.join(name);
|
||||
errs.maybe_push_ignore_io(builder.add(gipath));
|
||||
let gipath = dir_for_ignorefile.join(name.as_ref());
|
||||
// This check is not necessary, but is added for performance. Namely,
|
||||
// a simple stat call checking for existence can often be just a bit
|
||||
// quicker than actually trying to open a file. Since the number of
|
||||
// directories without ignore files likely greatly exceeds the number
|
||||
// with ignore files, this check generally makes sense.
|
||||
//
|
||||
// However, until demonstrated otherwise, we speculatively do not do
|
||||
// this on Windows since Windows is notorious for having slow file
|
||||
// system operations. Namely, it's not clear whether this analysis
|
||||
// makes sense on Windows.
|
||||
//
|
||||
// For more details: https://github.com/BurntSushi/ripgrep/pull/1381
|
||||
if cfg!(windows) || gipath.exists() {
|
||||
errs.maybe_push_ignore_io(builder.add(gipath));
|
||||
}
|
||||
}
|
||||
let gi = match builder.build() {
|
||||
Ok(gi) => gi,
|
||||
@@ -576,16 +780,71 @@ pub fn create_gitignore(
|
||||
(gi, errs.into_error_option())
|
||||
}
|
||||
|
||||
/// Find the GIT_COMMON_DIR for the given git worktree.
|
||||
///
|
||||
/// This is the directory that may contain a private ignore file
|
||||
/// "info/exclude". Unlike git, this function does *not* read environment
|
||||
/// variables GIT_DIR and GIT_COMMON_DIR, because it is not clear how to use
|
||||
/// them when multiple repositories are searched.
|
||||
///
|
||||
/// Some I/O errors are ignored.
|
||||
fn resolve_git_commondir(
|
||||
dir: &Path,
|
||||
git_type: Option<FileType>,
|
||||
) -> Result<PathBuf, Option<Error>> {
|
||||
let git_dir_path = || dir.join(".git");
|
||||
let git_dir = git_dir_path();
|
||||
if !git_type.map_or(false, |ft| ft.is_file()) {
|
||||
return Ok(git_dir);
|
||||
}
|
||||
let file = match File::open(git_dir) {
|
||||
Ok(file) => io::BufReader::new(file),
|
||||
Err(err) => {
|
||||
return Err(Some(Error::Io(err).with_path(git_dir_path())));
|
||||
}
|
||||
};
|
||||
let dot_git_line = match file.lines().next() {
|
||||
Some(Ok(line)) => line,
|
||||
Some(Err(err)) => {
|
||||
return Err(Some(Error::Io(err).with_path(git_dir_path())));
|
||||
}
|
||||
None => return Err(None),
|
||||
};
|
||||
if !dot_git_line.starts_with("gitdir: ") {
|
||||
return Err(None);
|
||||
}
|
||||
let real_git_dir = PathBuf::from(&dot_git_line["gitdir: ".len()..]);
|
||||
let git_commondir_file = || real_git_dir.join("commondir");
|
||||
let file = match File::open(git_commondir_file()) {
|
||||
Ok(file) => io::BufReader::new(file),
|
||||
Err(err) => {
|
||||
return Err(Some(Error::Io(err).with_path(git_commondir_file())));
|
||||
}
|
||||
};
|
||||
let commondir_line = match file.lines().next() {
|
||||
Some(Ok(line)) => line,
|
||||
Some(Err(err)) => {
|
||||
return Err(Some(Error::Io(err).with_path(git_commondir_file())));
|
||||
}
|
||||
None => return Err(None),
|
||||
};
|
||||
let commondir_abs = if commondir_line.starts_with(".") {
|
||||
real_git_dir.join(commondir_line) // relative commondir
|
||||
} else {
|
||||
PathBuf::from(commondir_line)
|
||||
};
|
||||
Ok(commondir_abs)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::fs::{self, File};
|
||||
use std::io::Write;
|
||||
use std::io::{self, Write};
|
||||
use std::path::Path;
|
||||
|
||||
use tempdir::TempDir;
|
||||
|
||||
use dir::IgnoreBuilder;
|
||||
use gitignore::Gitignore;
|
||||
use tests::TempDir;
|
||||
use Error;
|
||||
|
||||
fn wfile<P: AsRef<Path>>(path: P, contents: &str) {
|
||||
@@ -604,15 +863,19 @@ mod tests {
|
||||
}
|
||||
}
|
||||
|
||||
fn tmpdir() -> TempDir {
|
||||
TempDir::new().unwrap()
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn explicit_ignore() {
|
||||
let td = TempDir::new("ignore-test-").unwrap();
|
||||
let td = tmpdir();
|
||||
wfile(td.path().join("not-an-ignore"), "foo\n!bar");
|
||||
|
||||
let (gi, err) = Gitignore::new(td.path().join("not-an-ignore"));
|
||||
assert!(err.is_none());
|
||||
let (ig, err) = IgnoreBuilder::new()
|
||||
.add_ignore(gi).build().add_child(td.path());
|
||||
let (ig, err) =
|
||||
IgnoreBuilder::new().add_ignore(gi).build().add_child(td.path());
|
||||
assert!(err.is_none());
|
||||
assert!(ig.matched("foo", false).is_ignore());
|
||||
assert!(ig.matched("bar", false).is_whitelist());
|
||||
@@ -621,7 +884,7 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn git_exclude() {
|
||||
let td = TempDir::new("ignore-test-").unwrap();
|
||||
let td = tmpdir();
|
||||
mkdirp(td.path().join(".git/info"));
|
||||
wfile(td.path().join(".git/info/exclude"), "foo\n!bar");
|
||||
|
||||
@@ -634,7 +897,8 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn gitignore() {
|
||||
let td = TempDir::new("ignore-test-").unwrap();
|
||||
let td = tmpdir();
|
||||
mkdirp(td.path().join(".git"));
|
||||
wfile(td.path().join(".gitignore"), "foo\n!bar");
|
||||
|
||||
let (ig, err) = IgnoreBuilder::new().build().add_child(td.path());
|
||||
@@ -644,9 +908,36 @@ mod tests {
|
||||
assert!(ig.matched("baz", false).is_none());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn gitignore_no_git() {
|
||||
let td = tmpdir();
|
||||
wfile(td.path().join(".gitignore"), "foo\n!bar");
|
||||
|
||||
let (ig, err) = IgnoreBuilder::new().build().add_child(td.path());
|
||||
assert!(err.is_none());
|
||||
assert!(ig.matched("foo", false).is_none());
|
||||
assert!(ig.matched("bar", false).is_none());
|
||||
assert!(ig.matched("baz", false).is_none());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn gitignore_allowed_no_git() {
|
||||
let td = tmpdir();
|
||||
wfile(td.path().join(".gitignore"), "foo\n!bar");
|
||||
|
||||
let (ig, err) = IgnoreBuilder::new()
|
||||
.require_git(false)
|
||||
.build()
|
||||
.add_child(td.path());
|
||||
assert!(err.is_none());
|
||||
assert!(ig.matched("foo", false).is_ignore());
|
||||
assert!(ig.matched("bar", false).is_whitelist());
|
||||
assert!(ig.matched("baz", false).is_none());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn ignore() {
|
||||
let td = TempDir::new("ignore-test-").unwrap();
|
||||
let td = tmpdir();
|
||||
wfile(td.path().join(".ignore"), "foo\n!bar");
|
||||
|
||||
let (ig, err) = IgnoreBuilder::new().build().add_child(td.path());
|
||||
@@ -656,10 +947,60 @@ mod tests {
|
||||
assert!(ig.matched("baz", false).is_none());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn custom_ignore() {
|
||||
let td = tmpdir();
|
||||
let custom_ignore = ".customignore";
|
||||
wfile(td.path().join(custom_ignore), "foo\n!bar");
|
||||
|
||||
let (ig, err) = IgnoreBuilder::new()
|
||||
.add_custom_ignore_filename(custom_ignore)
|
||||
.build()
|
||||
.add_child(td.path());
|
||||
assert!(err.is_none());
|
||||
assert!(ig.matched("foo", false).is_ignore());
|
||||
assert!(ig.matched("bar", false).is_whitelist());
|
||||
assert!(ig.matched("baz", false).is_none());
|
||||
}
|
||||
|
||||
// Tests that a custom ignore file will override an .ignore.
|
||||
#[test]
|
||||
fn custom_ignore_over_ignore() {
|
||||
let td = tmpdir();
|
||||
let custom_ignore = ".customignore";
|
||||
wfile(td.path().join(".ignore"), "foo");
|
||||
wfile(td.path().join(custom_ignore), "!foo");
|
||||
|
||||
let (ig, err) = IgnoreBuilder::new()
|
||||
.add_custom_ignore_filename(custom_ignore)
|
||||
.build()
|
||||
.add_child(td.path());
|
||||
assert!(err.is_none());
|
||||
assert!(ig.matched("foo", false).is_whitelist());
|
||||
}
|
||||
|
||||
// Tests that earlier custom ignore files have lower precedence than later.
|
||||
#[test]
|
||||
fn custom_ignore_precedence() {
|
||||
let td = tmpdir();
|
||||
let custom_ignore1 = ".customignore1";
|
||||
let custom_ignore2 = ".customignore2";
|
||||
wfile(td.path().join(custom_ignore1), "foo");
|
||||
wfile(td.path().join(custom_ignore2), "!foo");
|
||||
|
||||
let (ig, err) = IgnoreBuilder::new()
|
||||
.add_custom_ignore_filename(custom_ignore1)
|
||||
.add_custom_ignore_filename(custom_ignore2)
|
||||
.build()
|
||||
.add_child(td.path());
|
||||
assert!(err.is_none());
|
||||
assert!(ig.matched("foo", false).is_whitelist());
|
||||
}
|
||||
|
||||
// Tests that an .ignore will override a .gitignore.
|
||||
#[test]
|
||||
fn ignore_over_gitignore() {
|
||||
let td = TempDir::new("ignore-test-").unwrap();
|
||||
let td = tmpdir();
|
||||
wfile(td.path().join(".gitignore"), "foo");
|
||||
wfile(td.path().join(".ignore"), "!foo");
|
||||
|
||||
@@ -671,7 +1012,7 @@ mod tests {
|
||||
// Tests that exclude has lower precedent than both .ignore and .gitignore.
|
||||
#[test]
|
||||
fn exclude_lowest() {
|
||||
let td = TempDir::new("ignore-test-").unwrap();
|
||||
let td = tmpdir();
|
||||
wfile(td.path().join(".gitignore"), "!foo");
|
||||
wfile(td.path().join(".ignore"), "!bar");
|
||||
mkdirp(td.path().join(".git/info"));
|
||||
@@ -686,8 +1027,8 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn errored() {
|
||||
let td = TempDir::new("ignore-test-").unwrap();
|
||||
wfile(td.path().join(".gitignore"), "f**oo");
|
||||
let td = tmpdir();
|
||||
wfile(td.path().join(".gitignore"), "{foo");
|
||||
|
||||
let (_, err) = IgnoreBuilder::new().build().add_child(td.path());
|
||||
assert!(err.is_some());
|
||||
@@ -695,9 +1036,9 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn errored_both() {
|
||||
let td = TempDir::new("ignore-test-").unwrap();
|
||||
wfile(td.path().join(".gitignore"), "f**oo");
|
||||
wfile(td.path().join(".ignore"), "fo**o");
|
||||
let td = tmpdir();
|
||||
wfile(td.path().join(".gitignore"), "{foo");
|
||||
wfile(td.path().join(".ignore"), "{bar");
|
||||
|
||||
let (_, err) = IgnoreBuilder::new().build().add_child(td.path());
|
||||
assert_eq!(2, partial(err.expect("an error")).len());
|
||||
@@ -705,8 +1046,9 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn errored_partial() {
|
||||
let td = TempDir::new("ignore-test-").unwrap();
|
||||
wfile(td.path().join(".gitignore"), "f**oo\nbar");
|
||||
let td = tmpdir();
|
||||
mkdirp(td.path().join(".git"));
|
||||
wfile(td.path().join(".gitignore"), "{foo\nbar");
|
||||
|
||||
let (ig, err) = IgnoreBuilder::new().build().add_child(td.path());
|
||||
assert!(err.is_some());
|
||||
@@ -715,8 +1057,8 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn errored_partial_and_ignore() {
|
||||
let td = TempDir::new("ignore-test-").unwrap();
|
||||
wfile(td.path().join(".gitignore"), "f**oo\nbar");
|
||||
let td = tmpdir();
|
||||
wfile(td.path().join(".gitignore"), "{foo\nbar");
|
||||
wfile(td.path().join(".ignore"), "!bar");
|
||||
|
||||
let (ig, err) = IgnoreBuilder::new().build().add_child(td.path());
|
||||
@@ -726,7 +1068,7 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn not_present_empty() {
|
||||
let td = TempDir::new("ignore-test-").unwrap();
|
||||
let td = tmpdir();
|
||||
|
||||
let (_, err) = IgnoreBuilder::new().build().add_child(td.path());
|
||||
assert!(err.is_none());
|
||||
@@ -736,7 +1078,7 @@ mod tests {
|
||||
fn stops_at_git_dir() {
|
||||
// This tests that .gitignore files beyond a .git barrier aren't
|
||||
// matched, but .ignore files are.
|
||||
let td = TempDir::new("ignore-test-").unwrap();
|
||||
let td = tmpdir();
|
||||
mkdirp(td.path().join(".git"));
|
||||
mkdirp(td.path().join("foo/.git"));
|
||||
wfile(td.path().join(".gitignore"), "foo");
|
||||
@@ -757,7 +1099,7 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn absolute_parent() {
|
||||
let td = TempDir::new("ignore-test-").unwrap();
|
||||
let td = tmpdir();
|
||||
mkdirp(td.path().join(".git"));
|
||||
mkdirp(td.path().join("foo"));
|
||||
wfile(td.path().join(".gitignore"), "bar");
|
||||
@@ -780,7 +1122,7 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn absolute_parent_anchored() {
|
||||
let td = TempDir::new("ignore-test-").unwrap();
|
||||
let td = tmpdir();
|
||||
mkdirp(td.path().join(".git"));
|
||||
mkdirp(td.path().join("src/llvm"));
|
||||
wfile(td.path().join(".gitignore"), "/llvm/\nfoo");
|
||||
@@ -797,4 +1139,62 @@ mod tests {
|
||||
assert!(ig2.matched("foo", false).is_ignore());
|
||||
assert!(ig2.matched("src/foo", false).is_ignore());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn git_info_exclude_in_linked_worktree() {
|
||||
let td = tmpdir();
|
||||
let git_dir = td.path().join(".git");
|
||||
mkdirp(git_dir.join("info"));
|
||||
wfile(git_dir.join("info/exclude"), "ignore_me");
|
||||
mkdirp(git_dir.join("worktrees/linked-worktree"));
|
||||
let commondir_path =
|
||||
|| git_dir.join("worktrees/linked-worktree/commondir");
|
||||
mkdirp(td.path().join("linked-worktree"));
|
||||
let worktree_git_dir_abs = format!(
|
||||
"gitdir: {}",
|
||||
git_dir.join("worktrees/linked-worktree").to_str().unwrap(),
|
||||
);
|
||||
wfile(td.path().join("linked-worktree/.git"), &worktree_git_dir_abs);
|
||||
|
||||
// relative commondir
|
||||
wfile(commondir_path(), "../..");
|
||||
let ib = IgnoreBuilder::new().build();
|
||||
let (ignore, err) = ib.add_child(td.path().join("linked-worktree"));
|
||||
assert!(err.is_none());
|
||||
assert!(ignore.matched("ignore_me", false).is_ignore());
|
||||
|
||||
// absolute commondir
|
||||
wfile(commondir_path(), git_dir.to_str().unwrap());
|
||||
let (ignore, err) = ib.add_child(td.path().join("linked-worktree"));
|
||||
assert!(err.is_none());
|
||||
assert!(ignore.matched("ignore_me", false).is_ignore());
|
||||
|
||||
// missing commondir file
|
||||
assert!(fs::remove_file(commondir_path()).is_ok());
|
||||
let (_, err) = ib.add_child(td.path().join("linked-worktree"));
|
||||
assert!(err.is_some());
|
||||
assert!(match err {
|
||||
Some(Error::WithPath { path, err }) => {
|
||||
if path != commondir_path() {
|
||||
false
|
||||
} else {
|
||||
match *err {
|
||||
Error::Io(ioerr) => {
|
||||
ioerr.kind() == io::ErrorKind::NotFound
|
||||
}
|
||||
_ => false,
|
||||
}
|
||||
}
|
||||
}
|
||||
_ => false,
|
||||
});
|
||||
|
||||
wfile(td.path().join("linked-worktree/.git"), "garbage");
|
||||
let (_, err) = ib.add_child(td.path().join("linked-worktree"));
|
||||
assert!(err.is_none());
|
||||
|
||||
wfile(td.path().join("linked-worktree/.git"), "gitdir: garbage");
|
||||
let (_, err) = ib.add_child(td.path().join("linked-worktree"));
|
||||
assert!(err.is_some());
|
||||
}
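
The test above relies on how git lays out linked worktrees: the worktree's
`.git` is a plain file containing `gitdir: <private git dir>`, and that private
directory holds a `commondir` file pointing (relatively or absolutely) back at
the shared `.git` directory, where `info/exclude` lives. Below is a minimal,
self-contained sketch of that resolution; the helper name and error handling
are illustrative only and are not part of the crate's API.

use std::fs;
use std::io;
use std::path::{Path, PathBuf};

fn resolve_shared_git_dir(worktree: &Path) -> io::Result<PathBuf> {
    // The linked worktree's `.git` file, e.g. "gitdir: /repo/.git/worktrees/wt".
    let dotgit = fs::read_to_string(worktree.join(".git"))?;
    let gitdir = dotgit
        .strip_prefix("gitdir:")
        .map(|s| PathBuf::from(s.trim()))
        .ok_or_else(|| io::Error::new(io::ErrorKind::InvalidData, "missing gitdir"))?;
    // `commondir` may be relative to the private git dir (e.g. "../..").
    let common = PathBuf::from(fs::read_to_string(gitdir.join("commondir"))?.trim());
    Ok(if common.is_absolute() { common } else { gitdir.join(common) })
}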
|
||||
}
|
||||
@@ -66,6 +66,11 @@ impl Glob {
|
||||
pub fn is_only_dir(&self) -> bool {
|
||||
self.is_only_dir
|
||||
}
|
||||
|
||||
/// Returns true if and only if this glob has a `**/` prefix.
|
||||
fn has_doublestar_prefix(&self) -> bool {
|
||||
self.actual.starts_with("**/") || self.actual == "**"
|
||||
}
|
||||
}
|
||||
|
||||
/// Gitignore is a matcher for the globs in one or more gitignore files
|
||||
@@ -77,7 +82,7 @@ pub struct Gitignore {
|
||||
globs: Vec<Glob>,
|
||||
num_ignores: u64,
|
||||
num_whitelists: u64,
|
||||
matches: Arc<ThreadLocal<RefCell<Vec<usize>>>>,
|
||||
matches: Option<Arc<ThreadLocal<RefCell<Vec<usize>>>>>,
|
||||
}
|
||||
|
||||
impl Gitignore {
@@ -121,23 +126,21 @@ impl Gitignore {
/// `$XDG_CONFIG_HOME/git/ignore` is read. If `$XDG_CONFIG_HOME` is not
/// set or is empty, then `$HOME/.config/git/ignore` is used instead.
pub fn global() -> (Gitignore, Option<Error>) {
match gitconfig_excludes_path() {
None => (Gitignore::empty(), None),
Some(path) => {
if !path.is_file() {
(Gitignore::empty(), None)
} else {
Gitignore::new(path)
}
}
}
GitignoreBuilder::new("").build_global()
}
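
A brief usage sketch (not part of the diff): `global()` now just delegates to
`GitignoreBuilder::build_global`, so callers still get a matcher plus an
optional, non-fatal error describing problems reading git's configured
excludes file.

use ignore::gitignore::Gitignore;

fn main() {
    let (gi, err) = Gitignore::global();
    if let Some(err) = err {
        eprintln!("problem loading global gitignore: {}", err);
    }
    // An empty matcher is returned when no global excludes file is configured.
    println!("globally ignored? {}", gi.matched("target", true).is_ignore());
}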

/// Creates a new empty gitignore matcher that never matches anything.
///
/// Its path is empty.
pub fn empty() -> Gitignore {
GitignoreBuilder::new("").build().unwrap()
Gitignore {
set: GlobSet::empty(),
root: PathBuf::from(""),
globs: vec![],
num_ignores: 0,
num_whitelists: 0,
matches: None,
}
}
|
||||
|
||||
/// Returns the directory containing this gitignore matcher.
|
||||
@@ -169,8 +172,8 @@ impl Gitignore {
|
||||
self.num_whitelists
|
||||
}
|
||||
|
||||
/// Returns whether the given file path matched a pattern in this gitignore
|
||||
/// matcher.
|
||||
/// Returns whether the given path (file or directory) matched a pattern in
|
||||
/// this gitignore matcher.
|
||||
///
|
||||
/// `is_dir` should be true if the path refers to a directory and false
|
||||
/// otherwise.
|
||||
@@ -191,6 +194,51 @@ impl Gitignore {
|
||||
self.matched_stripped(self.strip(path.as_ref()), is_dir)
|
||||
}
|
||||
|
||||
/// Returns whether the given path (file or directory, and expected to be
|
||||
/// under the root) or any of its parent directories (up to the root)
|
||||
/// matched a pattern in this gitignore matcher.
|
||||
///
|
||||
/// NOTE: This method is more expensive than walking the directory hierarchy
/// top-to-bottom and matching the entries. But it is easier to use in cases
/// when a list of paths is available without a hierarchy.
|
||||
///
|
||||
/// `is_dir` should be true if the path refers to a directory and false
|
||||
/// otherwise.
|
||||
///
|
||||
/// The given path is matched relative to the path given when building
|
||||
/// the matcher. Specifically, before matching `path`, its prefix (as
|
||||
/// determined by a common suffix of the directory containing this
|
||||
/// gitignore) is stripped. If there is no common suffix/prefix overlap,
|
||||
/// then `path` is assumed to be relative to this matcher.
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// This method panics if the given file path is not under the root path
|
||||
/// of this matcher.
|
||||
pub fn matched_path_or_any_parents<P: AsRef<Path>>(
|
||||
&self,
|
||||
path: P,
|
||||
is_dir: bool,
|
||||
) -> Match<&Glob> {
|
||||
if self.is_empty() {
|
||||
return Match::None;
|
||||
}
|
||||
let mut path = self.strip(path.as_ref());
|
||||
assert!(!path.has_root(), "path is expected to be under the root");
|
||||
|
||||
match self.matched_stripped(path, is_dir) {
|
||||
Match::None => (), // walk up
|
||||
a_match => return a_match,
|
||||
}
|
||||
while let Some(parent) = path.parent() {
|
||||
match self.matched_stripped(parent, /* is_dir */ true) {
|
||||
Match::None => path = parent, // walk up
|
||||
a_match => return a_match,
|
||||
}
|
||||
}
|
||||
Match::None
|
||||
}
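
A usage sketch of the new method (paths and globs here are illustrative): the
key difference from `matched` is that every parent directory up to the root is
also tested, which matters when only a flat list of paths is available.

use ignore::gitignore::GitignoreBuilder;

fn main() {
    let mut builder = GitignoreBuilder::new("ROOT");
    builder.add_line(None, "target/").unwrap();
    let gi = builder.build().unwrap();

    // `matched` only inspects the path itself...
    assert!(gi.matched("ROOT/target/debug/app", false).is_none());
    // ...while `matched_path_or_any_parents` also checks `target` as a
    // directory on the way up, and so reports the file as ignored.
    assert!(gi
        .matched_path_or_any_parents("ROOT/target/debug/app", false)
        .is_ignore());
}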
|
||||
|
||||
/// Like matched, but takes a path that has already been stripped.
|
||||
fn matched_stripped<P: AsRef<Path>>(
|
||||
&self,
|
||||
@@ -201,7 +249,7 @@ impl Gitignore {
|
||||
return Match::None;
|
||||
}
|
||||
let path = path.as_ref();
|
||||
let _matches = self.matches.get_default();
|
||||
let _matches = self.matches.as_ref().unwrap().get_or_default();
|
||||
let mut matches = _matches.borrow_mut();
|
||||
let candidate = Candidate::new(path);
|
||||
self.set.matches_candidate_into(&candidate, &mut *matches);
|
||||
@@ -236,7 +284,10 @@ impl Gitignore {
|
||||
// BUT, a file name might not have any directory components to it,
|
||||
// in which case, we don't want to accidentally strip any part of the
|
||||
// file name.
|
||||
if !is_file_name(path) {
|
||||
//
|
||||
// As an additional special case, if the root is just `.`, then we
|
||||
// shouldn't try to strip anything, e.g., when path begins with a `.`.
|
||||
if self.root != Path::new(".") && !is_file_name(path) {
|
||||
if let Some(p) = strip_prefix(&self.root, path) {
|
||||
path = p;
|
||||
// If we're left with a leading slash, get rid of it.
|
||||
@@ -250,10 +301,12 @@ impl Gitignore {
|
||||
}
|
||||
|
||||
/// Builds a matcher for a single set of globs from a .gitignore file.
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct GitignoreBuilder {
|
||||
builder: GlobSetBuilder,
|
||||
root: PathBuf,
|
||||
globs: Vec<Glob>,
|
||||
case_insensitive: bool,
|
||||
}
|
||||
|
||||
impl GitignoreBuilder {
|
||||
@@ -269,6 +322,7 @@ impl GitignoreBuilder {
|
||||
builder: GlobSetBuilder::new(),
|
||||
root: strip_prefix("./", root).unwrap_or(root).to_path_buf(),
|
||||
globs: vec![],
|
||||
case_insensitive: false,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -278,18 +332,50 @@ impl GitignoreBuilder {
|
||||
pub fn build(&self) -> Result<Gitignore, Error> {
|
||||
let nignore = self.globs.iter().filter(|g| !g.is_whitelist()).count();
|
||||
let nwhite = self.globs.iter().filter(|g| g.is_whitelist()).count();
|
||||
let set = try!(
|
||||
self.builder.build().map_err(|err| Error::Glob(err.to_string())));
|
||||
let set = self
|
||||
.builder
|
||||
.build()
|
||||
.map_err(|err| Error::Glob { glob: None, err: err.to_string() })?;
|
||||
Ok(Gitignore {
|
||||
set: set,
|
||||
root: self.root.clone(),
|
||||
globs: self.globs.clone(),
|
||||
num_ignores: nignore as u64,
|
||||
num_whitelists: nwhite as u64,
|
||||
matches: Arc::new(ThreadLocal::default()),
|
||||
matches: Some(Arc::new(ThreadLocal::default())),
|
||||
})
|
||||
}
|
||||
|
||||
/// Build a global gitignore matcher using the configuration in this
|
||||
/// builder.
|
||||
///
|
||||
/// This consumes ownership of the builder, unlike `build`, because it
|
||||
/// must mutate the builder to add the global gitignore globs.
|
||||
///
|
||||
/// Note that this ignores the path given to this builder's constructor
|
||||
/// and instead derives the path automatically from git's global
|
||||
/// configuration.
|
||||
pub fn build_global(mut self) -> (Gitignore, Option<Error>) {
|
||||
match gitconfig_excludes_path() {
|
||||
None => (Gitignore::empty(), None),
|
||||
Some(path) => {
|
||||
if !path.is_file() {
|
||||
(Gitignore::empty(), None)
|
||||
} else {
|
||||
let mut errs = PartialErrorBuilder::default();
|
||||
errs.maybe_push_ignore_io(self.add(path));
|
||||
match self.build() {
|
||||
Ok(gi) => (gi, errs.into_error_option()),
|
||||
Err(err) => {
|
||||
errs.push(err);
|
||||
(Gitignore::empty(), errs.into_error_option())
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
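
A hedged usage sketch: unlike `build`, `build_global` consumes the builder, and
the root passed to the constructor is ignored in favor of git's own
configuration. Options set beforehand (such as case-insensitivity) still apply
to the globs that get loaded.

use ignore::gitignore::GitignoreBuilder;

fn main() {
    let mut builder = GitignoreBuilder::new("");
    builder.case_insensitive(true).unwrap();
    let (gi, err) = builder.build_global();
    if let Some(err) = err {
        eprintln!("partial problem reading global excludes: {}", err);
    }
    println!("loaded {} ignore globs", gi.num_ignores());
}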
|
||||
|
||||
/// Add each glob from the file path given.
|
||||
///
|
||||
/// The file given should be formatted as a `gitignore` file.
|
||||
@@ -334,7 +420,7 @@ impl GitignoreBuilder {
|
||||
gitignore: &str,
|
||||
) -> Result<&mut GitignoreBuilder, Error> {
|
||||
for line in gitignore.lines() {
|
||||
try!(self.add_line(from.clone(), line));
|
||||
self.add_line(from.clone(), line)?;
|
||||
}
|
||||
Ok(self)
|
||||
}
|
||||
@@ -350,6 +436,8 @@ impl GitignoreBuilder {
|
||||
from: Option<PathBuf>,
|
||||
mut line: &str,
|
||||
) -> Result<&mut GitignoreBuilder, Error> {
|
||||
#![allow(deprecated)]
|
||||
|
||||
if line.starts_with("#") {
|
||||
return Ok(self);
|
||||
}
|
||||
@@ -366,8 +454,6 @@ impl GitignoreBuilder {
|
||||
is_whitelist: false,
|
||||
is_only_dir: false,
|
||||
};
|
||||
let mut literal_separator = false;
|
||||
let has_slash = line.chars().any(|c| c == '/');
|
||||
let mut is_absolute = false;
|
||||
if line.starts_with("\\!") || line.starts_with("\\#") {
|
||||
line = &line[1..];
|
||||
@@ -382,7 +468,6 @@ impl GitignoreBuilder {
|
||||
// then the glob can only match the beginning of a path
|
||||
// (relative to the location of gitignore). We achieve this by
|
||||
// simply banning wildcards from matching /.
|
||||
literal_separator = true;
|
||||
line = &line[1..];
|
||||
is_absolute = true;
|
||||
}
|
||||
@@ -395,18 +480,13 @@ impl GitignoreBuilder {
|
||||
line = &line[..i];
|
||||
}
|
||||
}
|
||||
// If there is a literal slash, then we note that so that globbing
|
||||
// doesn't let wildcards match slashes.
|
||||
glob.actual = line.to_string();
|
||||
if has_slash {
|
||||
literal_separator = true;
|
||||
}
|
||||
// If there was a leading slash, then this is a glob that must
|
||||
// match the entire path name. Otherwise, we should let it match
|
||||
// anywhere, so use a **/ prefix.
|
||||
if !is_absolute {
|
||||
// If there is a literal slash, then this is a glob that must match the
|
||||
// entire path name. Otherwise, we should let it match anywhere, so use
|
||||
// a **/ prefix.
|
||||
if !is_absolute && !line.chars().any(|c| c == '/') {
|
||||
// ... but only if we don't already have a **/ prefix.
|
||||
if !glob.actual.starts_with("**/") {
|
||||
if !glob.has_doublestar_prefix() {
|
||||
glob.actual = format!("**/{}", glob.actual);
|
||||
}
|
||||
}
|
||||
@@ -416,31 +496,62 @@ impl GitignoreBuilder {
|
||||
if glob.actual.ends_with("/**") {
|
||||
glob.actual = format!("{}/*", glob.actual);
|
||||
}
|
||||
let parsed = try!(
|
||||
GlobBuilder::new(&glob.actual)
|
||||
.literal_separator(literal_separator)
|
||||
.build()
|
||||
.map_err(|err| Error::Glob(err.to_string())));
|
||||
let parsed = GlobBuilder::new(&glob.actual)
|
||||
.literal_separator(true)
|
||||
.case_insensitive(self.case_insensitive)
|
||||
.backslash_escape(true)
|
||||
.build()
|
||||
.map_err(|err| Error::Glob {
|
||||
glob: Some(glob.original.clone()),
|
||||
err: err.kind().to_string(),
|
||||
})?;
|
||||
self.builder.add(parsed);
|
||||
self.globs.push(glob);
|
||||
Ok(self)
|
||||
}
|
||||
|
||||
/// Toggle whether the globs should be matched case insensitively or not.
|
||||
///
|
||||
/// When this option is changed, only globs added after the change will be
|
||||
/// affected.
|
||||
///
|
||||
/// This is disabled by default.
|
||||
pub fn case_insensitive(
|
||||
&mut self,
|
||||
yes: bool,
|
||||
) -> Result<&mut GitignoreBuilder, Error> {
|
||||
// TODO: This should not return a `Result`. Fix this in the next semver
|
||||
// release.
|
||||
self.case_insensitive = yes;
|
||||
Ok(self)
|
||||
}
|
||||
}
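
A small sketch of the semantics implemented above, using hypothetical globs:
an anchored pattern (leading `/`) matches only at the matcher's root, an
unanchored one receives a `**/` prefix so it matches anywhere below it, and a
later `!` glob re-includes what an earlier glob ignored.

use ignore::gitignore::GitignoreBuilder;

fn main() {
    let mut b = GitignoreBuilder::new("/repo");
    b.add_line(None, "/build").unwrap();
    b.add_line(None, "*.log").unwrap();
    b.add_line(None, "!keep.log").unwrap();
    let gi = b.build().unwrap();

    assert!(gi.matched("/repo/build", true).is_ignore());
    assert!(gi.matched("/repo/sub/build", true).is_none());
    assert!(gi.matched("/repo/sub/trace.log", false).is_ignore());
    assert!(gi.matched("/repo/keep.log", false).is_whitelist());
}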
|
||||
|
||||
/// Return the file path of the current environment's global gitignore file.
|
||||
///
|
||||
/// Note that the file path returned may not exist.
|
||||
fn gitconfig_excludes_path() -> Option<PathBuf> {
|
||||
gitconfig_contents()
|
||||
.and_then(|data| parse_excludes_file(&data))
|
||||
.or_else(excludes_file_default)
|
||||
// git supports $HOME/.gitconfig and $XDG_CONFIG_HOME/git/config. Notably,
// both can be active at the same time, where $HOME/.gitconfig takes
// precedence. So if $HOME/.gitconfig defines a `core.excludesFile`, then
// we're done.
|
||||
match gitconfig_home_contents().and_then(|x| parse_excludes_file(&x)) {
|
||||
Some(path) => return Some(path),
|
||||
None => {}
|
||||
}
|
||||
match gitconfig_xdg_contents().and_then(|x| parse_excludes_file(&x)) {
|
||||
Some(path) => return Some(path),
|
||||
None => {}
|
||||
}
|
||||
excludes_file_default()
|
||||
}
|
||||
|
||||
/// Returns the file contents of git's global config file, if one exists.
|
||||
fn gitconfig_contents() -> Option<Vec<u8>> {
|
||||
let home = match env::var_os("HOME") {
|
||||
/// Returns the file contents of git's global config file, if one exists, in
|
||||
/// the user's home directory.
|
||||
fn gitconfig_home_contents() -> Option<Vec<u8>> {
|
||||
let home = match home_dir() {
|
||||
None => return None,
|
||||
Some(home) => PathBuf::from(home),
|
||||
Some(home) => home,
|
||||
};
|
||||
let mut file = match File::open(home.join(".gitconfig")) {
|
||||
Err(_) => return None,
|
||||
@@ -450,13 +561,28 @@ fn gitconfig_contents() -> Option<Vec<u8>> {
|
||||
file.read_to_end(&mut contents).ok().map(|_| contents)
|
||||
}
|
||||
|
||||
/// Returns the file contents of git's global config file, if one exists, in
|
||||
/// the user's XDG_CONFIG_HOME directory.
|
||||
fn gitconfig_xdg_contents() -> Option<Vec<u8>> {
|
||||
let path = env::var_os("XDG_CONFIG_HOME")
|
||||
.and_then(|x| if x.is_empty() { None } else { Some(PathBuf::from(x)) })
|
||||
.or_else(|| home_dir().map(|p| p.join(".config")))
|
||||
.map(|x| x.join("git/config"));
|
||||
let mut file = match path.and_then(|p| File::open(p).ok()) {
|
||||
None => return None,
|
||||
Some(file) => io::BufReader::new(file),
|
||||
};
|
||||
let mut contents = vec![];
|
||||
file.read_to_end(&mut contents).ok().map(|_| contents)
|
||||
}
|
||||
|
||||
/// Returns the default file path for a global .gitignore file.
|
||||
///
|
||||
/// Specifically, this respects XDG_CONFIG_HOME.
|
||||
fn excludes_file_default() -> Option<PathBuf> {
|
||||
env::var_os("XDG_CONFIG_HOME")
|
||||
.and_then(|x| if x.is_empty() { None } else { Some(PathBuf::from(x)) })
|
||||
.or_else(|| env::home_dir().map(|p| p.join(".config")))
|
||||
.or_else(|| home_dir().map(|p| p.join(".config")))
|
||||
.map(|x| x.join("git/ignore"))
|
||||
}
|
||||
|
||||
@@ -467,8 +593,8 @@ fn parse_excludes_file(data: &[u8]) -> Option<PathBuf> {
|
||||
// probably works in more circumstances. I guess we would ideally have
|
||||
// a full INI parser. Yuck.
|
||||
lazy_static! {
|
||||
static ref RE: Regex = Regex::new(
|
||||
r"(?ium)^\s*excludesfile\s*=\s*(.+)\s*$").unwrap();
|
||||
static ref RE: Regex =
|
||||
Regex::new(r"(?im)^\s*excludesfile\s*=\s*(.+)\s*$").unwrap();
|
||||
};
|
||||
let caps = match RE.captures(data) {
|
||||
None => return None,
|
||||
@@ -479,17 +605,26 @@ fn parse_excludes_file(data: &[u8]) -> Option<PathBuf> {
|
||||
|
||||
/// Expands ~ in file paths to the value of $HOME.
|
||||
fn expand_tilde(path: &str) -> String {
|
||||
let home = match env::var("HOME") {
|
||||
Err(_) => return path.to_string(),
|
||||
Ok(home) => home,
|
||||
let home = match home_dir() {
|
||||
None => return path.to_string(),
|
||||
Some(home) => home.to_string_lossy().into_owned(),
|
||||
};
|
||||
path.replace("~", &home)
|
||||
}
|
||||
|
||||
/// Returns the location of the user's home directory.
|
||||
fn home_dir() -> Option<PathBuf> {
|
||||
// We're fine with using env::home_dir for now. Its bugs are, IMO, pretty
|
||||
// minor corner cases. We should still probably eventually migrate to
|
||||
// the `dirs` crate to get a proper implementation.
|
||||
#![allow(deprecated)]
|
||||
env::home_dir()
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::path::Path;
|
||||
use super::{Gitignore, GitignoreBuilder};
|
||||
use std::path::Path;
|
||||
|
||||
fn gi_from_str<P: AsRef<Path>>(root: P, s: &str) -> Gitignore {
|
||||
let mut builder = GitignoreBuilder::new(root);
|
||||
@@ -552,9 +687,23 @@ mod tests {
|
||||
ignored!(ig25, ROOT, "Cargo.lock", "./tabwriter-bin/Cargo.lock");
|
||||
ignored!(ig26, ROOT, "/foo/bar/baz", "./foo/bar/baz");
|
||||
ignored!(ig27, ROOT, "foo/", "xyz/foo", true);
|
||||
ignored!(ig28, ROOT, "src/*.rs", "src/grep/src/main.rs");
|
||||
ignored!(ig29, "./src", "/llvm/", "./src/llvm", true);
|
||||
ignored!(ig30, ROOT, "node_modules/ ", "node_modules", true);
|
||||
ignored!(ig28, "./src", "/llvm/", "./src/llvm", true);
|
||||
ignored!(ig29, ROOT, "node_modules/ ", "node_modules", true);
|
||||
ignored!(ig30, ROOT, "**/", "foo/bar", true);
|
||||
ignored!(ig31, ROOT, "path1/*", "path1/foo");
|
||||
ignored!(ig32, ROOT, ".a/b", ".a/b");
|
||||
ignored!(ig33, "./", ".a/b", ".a/b");
|
||||
ignored!(ig34, ".", ".a/b", ".a/b");
|
||||
ignored!(ig35, "./.", ".a/b", ".a/b");
|
||||
ignored!(ig36, "././", ".a/b", ".a/b");
|
||||
ignored!(ig37, "././.", ".a/b", ".a/b");
|
||||
ignored!(ig38, ROOT, "\\[", "[");
|
||||
ignored!(ig39, ROOT, "\\?", "?");
|
||||
ignored!(ig40, ROOT, "\\*", "*");
|
||||
ignored!(ig41, ROOT, "\\a", "a");
|
||||
ignored!(ig42, ROOT, "s*.rs", "sfoo.rs");
|
||||
ignored!(ig43, ROOT, "**", "foo.rs");
|
||||
ignored!(ig44, ROOT, "**/**/*", "a/foo.rs");
|
||||
|
||||
not_ignored!(ignot1, ROOT, "amonths", "months");
|
||||
not_ignored!(ignot2, ROOT, "monthsa", "months");
|
||||
@@ -570,9 +719,16 @@ mod tests {
|
||||
not_ignored!(ignot12, ROOT, "\n\n\n", "foo");
|
||||
not_ignored!(ignot13, ROOT, "foo/**", "foo", true);
|
||||
not_ignored!(
|
||||
ignot14, "./third_party/protobuf", "m4/ltoptions.m4",
|
||||
"./third_party/protobuf/csharp/src/packages/repositories.config");
|
||||
ignot14,
|
||||
"./third_party/protobuf",
|
||||
"m4/ltoptions.m4",
|
||||
"./third_party/protobuf/csharp/src/packages/repositories.config"
|
||||
);
|
||||
not_ignored!(ignot15, ROOT, "!/bar", "foo/bar");
|
||||
not_ignored!(ignot16, ROOT, "*\n!**/", "foo", true);
|
||||
not_ignored!(ignot17, ROOT, "src/*.rs", "src/grep/src/main.rs");
|
||||
not_ignored!(ignot18, ROOT, "path1/*", "path2/path1/foo");
|
||||
not_ignored!(ignot19, ROOT, "s*.rs", "src/foo.rs");
|
||||
|
||||
fn bytes(s: &str) -> Vec<u8> {
|
||||
s.to_string().into_bytes()
|
||||
@@ -607,4 +763,24 @@ mod tests {
|
||||
fn regression_106() {
|
||||
gi_from_str("/", " ");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn case_insensitive() {
|
||||
let gi = GitignoreBuilder::new(ROOT)
|
||||
.case_insensitive(true)
|
||||
.unwrap()
|
||||
.add_str(None, "*.html")
|
||||
.unwrap()
|
||||
.build()
|
||||
.unwrap();
|
||||
assert!(gi.matched("foo.html", false).is_ignore());
|
||||
assert!(gi.matched("foo.HTML", false).is_ignore());
|
||||
assert!(!gi.matched("foo.htm", false).is_ignore());
|
||||
assert!(!gi.matched("foo.HTM", false).is_ignore());
|
||||
}
|
||||
|
||||
ignored!(cs1, ROOT, "*.html", "foo.html");
|
||||
not_ignored!(cs2, ROOT, "*.html", "foo.HTML");
|
||||
not_ignored!(cs3, ROOT, "*.html", "foo.htm");
|
||||
not_ignored!(cs4, ROOT, "*.html", "foo.HTM");
|
||||
}
|
||||
@@ -46,7 +46,7 @@ See the documentation for `WalkBuilder` for many other options.
|
||||
|
||||
#![deny(missing_docs)]
|
||||
|
||||
extern crate crossbeam;
|
||||
extern crate crossbeam_channel as channel;
|
||||
extern crate globset;
|
||||
#[macro_use]
|
||||
extern crate lazy_static;
|
||||
@@ -54,22 +54,27 @@ extern crate lazy_static;
|
||||
extern crate log;
|
||||
extern crate memchr;
|
||||
extern crate regex;
|
||||
#[cfg(test)]
|
||||
extern crate tempdir;
|
||||
extern crate same_file;
|
||||
extern crate thread_local;
|
||||
extern crate walkdir;
|
||||
#[cfg(windows)]
|
||||
extern crate winapi_util;
|
||||
|
||||
use std::error;
|
||||
use std::fmt;
|
||||
use std::io;
|
||||
use std::path::{Path, PathBuf};
|
||||
|
||||
pub use walk::{DirEntry, Walk, WalkBuilder, WalkParallel, WalkState};
|
||||
pub use walk::{
|
||||
DirEntry, ParallelVisitor, ParallelVisitorBuilder, Walk, WalkBuilder,
|
||||
WalkParallel, WalkState,
|
||||
};
|
||||
|
||||
mod default_types;
|
||||
mod dir;
|
||||
pub mod gitignore;
|
||||
mod pathutil;
|
||||
pub mod overrides;
|
||||
mod pathutil;
|
||||
pub mod types;
|
||||
mod walk;
|
||||
|
||||
@@ -112,13 +117,55 @@ pub enum Error {
|
||||
/// An error that occurs when doing I/O, such as reading an ignore file.
|
||||
Io(io::Error),
|
||||
/// An error that occurs when trying to parse a glob.
|
||||
Glob(String),
|
||||
Glob {
|
||||
/// The original glob that caused this error. This glob, when
|
||||
/// available, always corresponds to the glob provided by an end user.
|
||||
/// e.g., It is the glob as written in a `.gitignore` file.
|
||||
///
|
||||
/// (This glob may be distinct from the glob that is actually
|
||||
/// compiled, after accounting for `gitignore` semantics.)
|
||||
glob: Option<String>,
|
||||
/// The underlying glob error as a string.
|
||||
err: String,
|
||||
},
|
||||
/// A type selection for a file type that is not defined.
|
||||
UnrecognizedFileType(String),
|
||||
/// A user specified file type definition could not be parsed.
|
||||
InvalidDefinition,
|
||||
}
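
For illustration, the new struct-style `Glob` variant can be matched on to
report which user-written glob failed to compile; the invalid glob below is the
same one used by the tests earlier in this diff.

use ignore::gitignore::GitignoreBuilder;
use ignore::Error;

fn main() {
    let mut b = GitignoreBuilder::new("/repo");
    let err = b.add_line(None, "{foo").unwrap_err();
    match err {
        Error::Glob { glob, err } => println!("bad glob {:?}: {}", glob, err),
        other => println!("unexpected error: {}", other),
    }
}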
|
||||
|
||||
impl Clone for Error {
|
||||
fn clone(&self) -> Error {
|
||||
match *self {
|
||||
Error::Partial(ref errs) => Error::Partial(errs.clone()),
|
||||
Error::WithLineNumber { line, ref err } => {
|
||||
Error::WithLineNumber { line: line, err: err.clone() }
|
||||
}
|
||||
Error::WithPath { ref path, ref err } => {
|
||||
Error::WithPath { path: path.clone(), err: err.clone() }
|
||||
}
|
||||
Error::WithDepth { depth, ref err } => {
|
||||
Error::WithDepth { depth: depth, err: err.clone() }
|
||||
}
|
||||
Error::Loop { ref ancestor, ref child } => Error::Loop {
|
||||
ancestor: ancestor.clone(),
|
||||
child: child.clone(),
|
||||
},
|
||||
Error::Io(ref err) => match err.raw_os_error() {
|
||||
Some(e) => Error::Io(io::Error::from_raw_os_error(e)),
|
||||
None => Error::Io(io::Error::new(err.kind(), err.to_string())),
|
||||
},
|
||||
Error::Glob { ref glob, ref err } => {
|
||||
Error::Glob { glob: glob.clone(), err: err.clone() }
|
||||
}
|
||||
Error::UnrecognizedFileType(ref err) => {
|
||||
Error::UnrecognizedFileType(err.clone())
|
||||
}
|
||||
Error::InvalidDefinition => Error::InvalidDefinition,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Error {
|
||||
/// Returns true if this is a partial error.
|
||||
///
|
||||
@@ -144,7 +191,7 @@ impl Error {
|
||||
Error::WithDepth { ref err, .. } => err.is_io(),
|
||||
Error::Loop { .. } => false,
|
||||
Error::Io(_) => true,
|
||||
Error::Glob(_) => false,
|
||||
Error::Glob { .. } => false,
|
||||
Error::UnrecognizedFileType(_) => false,
|
||||
Error::InvalidDefinition => false,
|
||||
}
|
||||
@@ -170,27 +217,43 @@ impl Error {
|
||||
|
||||
/// Turn an error into a tagged error with the given depth.
|
||||
fn with_depth(self, depth: usize) -> Error {
|
||||
Error::WithDepth {
|
||||
depth: depth,
|
||||
err: Box::new(self),
|
||||
}
|
||||
Error::WithDepth { depth: depth, err: Box::new(self) }
|
||||
}
|
||||
|
||||
/// Turn an error into a tagged error with the given file path and line
|
||||
/// number. If path is empty, then it is omitted from the error.
|
||||
fn tagged<P: AsRef<Path>>(self, path: P, lineno: u64) -> Error {
|
||||
let errline = Error::WithLineNumber {
|
||||
line: lineno,
|
||||
err: Box::new(self),
|
||||
};
|
||||
let errline =
|
||||
Error::WithLineNumber { line: lineno, err: Box::new(self) };
|
||||
if path.as_ref().as_os_str().is_empty() {
|
||||
return errline;
|
||||
}
|
||||
errline.with_path(path)
|
||||
}
|
||||
|
||||
/// Build an error from a walkdir error.
|
||||
fn from_walkdir(err: walkdir::Error) -> Error {
|
||||
let depth = err.depth();
|
||||
if let (Some(anc), Some(child)) = (err.loop_ancestor(), err.path()) {
|
||||
return Error::WithDepth {
|
||||
depth: depth,
|
||||
err: Box::new(Error::Loop {
|
||||
ancestor: anc.to_path_buf(),
|
||||
child: child.to_path_buf(),
|
||||
}),
|
||||
};
|
||||
}
|
||||
let path = err.path().map(|p| p.to_path_buf());
|
||||
let mut ig_err = Error::Io(io::Error::from(err));
|
||||
if let Some(path) = path {
|
||||
ig_err = Error::WithPath { path: path, err: Box::new(ig_err) };
|
||||
}
|
||||
ig_err
|
||||
}
|
||||
}
|
||||
|
||||
impl error::Error for Error {
|
||||
#[allow(deprecated)]
|
||||
fn description(&self) -> &str {
|
||||
match *self {
|
||||
Error::Partial(_) => "partial error",
|
||||
@@ -199,7 +262,7 @@ impl error::Error for Error {
|
||||
Error::WithDepth { ref err, .. } => err.description(),
|
||||
Error::Loop { .. } => "file system loop found",
|
||||
Error::Io(ref err) => err.description(),
|
||||
Error::Glob(ref msg) => msg,
|
||||
Error::Glob { ref err, .. } => err,
|
||||
Error::UnrecognizedFileType(_) => "unrecognized file type",
|
||||
Error::InvalidDefinition => "invalid definition",
|
||||
}
|
||||
@@ -221,20 +284,26 @@ impl fmt::Display for Error {
|
||||
write!(f, "{}: {}", path.display(), err)
|
||||
}
|
||||
Error::WithDepth { ref err, .. } => err.fmt(f),
|
||||
Error::Loop { ref ancestor, ref child } => {
|
||||
write!(f, "File system loop found: \
|
||||
Error::Loop { ref ancestor, ref child } => write!(
|
||||
f,
|
||||
"File system loop found: \
|
||||
{} points to an ancestor {}",
|
||||
child.display(), ancestor.display())
|
||||
}
|
||||
child.display(),
|
||||
ancestor.display()
|
||||
),
|
||||
Error::Io(ref err) => err.fmt(f),
|
||||
Error::Glob(ref msg) => write!(f, "{}", msg),
|
||||
Error::Glob { glob: None, ref err } => write!(f, "{}", err),
|
||||
Error::Glob { glob: Some(ref glob), ref err } => {
|
||||
write!(f, "error parsing glob '{}': {}", glob, err)
|
||||
}
|
||||
Error::UnrecognizedFileType(ref ty) => {
|
||||
write!(f, "unrecognized file type: {}", ty)
|
||||
}
|
||||
Error::InvalidDefinition => {
|
||||
write!(f, "invalid definition (format is type:glob, e.g., \
|
||||
html:*.html)")
|
||||
}
|
||||
Error::InvalidDefinition => write!(
|
||||
f,
|
||||
"invalid definition (format is type:glob, e.g., \
|
||||
html:*.html)"
|
||||
),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -245,30 +314,6 @@ impl From<io::Error> for Error {
|
||||
}
|
||||
}
|
||||
|
||||
impl From<walkdir::Error> for Error {
|
||||
fn from(err: walkdir::Error) -> Error {
|
||||
let depth = err.depth();
|
||||
if let (Some(anc), Some(child)) = (err.loop_ancestor(), err.path()) {
|
||||
return Error::WithDepth {
|
||||
depth: depth,
|
||||
err: Box::new(Error::Loop {
|
||||
ancestor: anc.to_path_buf(),
|
||||
child: child.to_path_buf(),
|
||||
}),
|
||||
};
|
||||
}
|
||||
let path = err.path().map(|p| p.to_path_buf());
|
||||
let mut ig_err = Error::Io(io::Error::from(err));
|
||||
if let Some(path) = path {
|
||||
ig_err = Error::WithPath {
|
||||
path: path,
|
||||
err: Box::new(ig_err),
|
||||
};
|
||||
}
|
||||
ig_err
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Default)]
|
||||
struct PartialErrorBuilder(Vec<Error>);
|
||||
|
||||
@@ -389,3 +434,66 @@ impl<T> Match<T> {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::env;
|
||||
use std::error;
|
||||
use std::fs;
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::result;
|
||||
|
||||
/// A convenient result type alias.
|
||||
pub type Result<T> =
|
||||
result::Result<T, Box<dyn error::Error + Send + Sync>>;
|
||||
|
||||
macro_rules! err {
|
||||
($($tt:tt)*) => {
|
||||
Box::<dyn error::Error + Send + Sync>::from(format!($($tt)*))
|
||||
}
|
||||
}
|
||||
|
||||
/// A simple wrapper for creating a temporary directory that is
|
||||
/// automatically deleted when it's dropped.
|
||||
///
|
||||
/// We use this in lieu of tempfile because tempfile brings in too many
|
||||
/// dependencies.
|
||||
#[derive(Debug)]
|
||||
pub struct TempDir(PathBuf);
|
||||
|
||||
impl Drop for TempDir {
|
||||
fn drop(&mut self) {
|
||||
fs::remove_dir_all(&self.0).unwrap();
|
||||
}
|
||||
}
|
||||
|
||||
impl TempDir {
|
||||
/// Create a new empty temporary directory under the system's configured
|
||||
/// temporary directory.
|
||||
pub fn new() -> Result<TempDir> {
|
||||
use std::sync::atomic::{AtomicUsize, Ordering};
|
||||
|
||||
static TRIES: usize = 100;
|
||||
static COUNTER: AtomicUsize = AtomicUsize::new(0);
|
||||
|
||||
let tmpdir = env::temp_dir();
|
||||
for _ in 0..TRIES {
|
||||
let count = COUNTER.fetch_add(1, Ordering::SeqCst);
|
||||
let path = tmpdir.join("rust-ignore").join(count.to_string());
|
||||
if path.is_dir() {
|
||||
continue;
|
||||
}
|
||||
fs::create_dir_all(&path).map_err(|e| {
|
||||
err!("failed to create {}: {}", path.display(), e)
|
||||
})?;
|
||||
return Ok(TempDir(path));
|
||||
}
|
||||
Err(err!("failed to create temp dir after {} tries", TRIES))
|
||||
}
|
||||
|
||||
/// Return the underlying path to this temporary directory.
|
||||
pub fn path(&self) -> &Path {
|
||||
&self.0
|
||||
}
|
||||
}
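
A minimal usage sketch of this test-only helper (the test body is illustrative,
not part of the diff): the directory is created eagerly and removed when the
value is dropped.

#[test]
fn tempdir_smoke() {
    let td = TempDir::new().unwrap();
    std::fs::write(td.path().join("hello.txt"), b"hi").unwrap();
    assert!(td.path().join("hello.txt").exists());
    // `td` is dropped here, deleting the directory and its contents.
}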
|
||||
}
|
||||
@@ -115,16 +115,14 @@ impl OverrideBuilder {
|
||||
///
|
||||
/// Matching is done relative to the directory path provided.
|
||||
pub fn new<P: AsRef<Path>>(path: P) -> OverrideBuilder {
|
||||
OverrideBuilder {
|
||||
builder: GitignoreBuilder::new(path),
|
||||
}
|
||||
OverrideBuilder { builder: GitignoreBuilder::new(path) }
|
||||
}
|
||||
|
||||
/// Builds a new override matcher from the globs added so far.
|
||||
///
|
||||
/// Once a matcher is built, no new globs can be added to it.
|
||||
pub fn build(&self) -> Result<Override, Error> {
|
||||
Ok(Override(try!(self.builder.build())))
|
||||
Ok(Override(self.builder.build()?))
|
||||
}
|
||||
|
||||
/// Add a glob to the set of overrides.
|
||||
@@ -134,7 +132,22 @@ impl OverrideBuilder {
|
||||
/// namely, `!` at the beginning of a glob will ignore a file. Without `!`,
|
||||
/// all matches of the glob provided are treated as whitelist matches.
|
||||
pub fn add(&mut self, glob: &str) -> Result<&mut OverrideBuilder, Error> {
|
||||
try!(self.builder.add_line(None, glob));
|
||||
self.builder.add_line(None, glob)?;
|
||||
Ok(self)
|
||||
}
|
||||
|
||||
/// Toggle whether the globs should be matched case insensitively or not.
|
||||
///
|
||||
/// When this option is changed, only globs added after the change will be affected.
|
||||
///
|
||||
/// This is disabled by default.
|
||||
pub fn case_insensitive(
|
||||
&mut self,
|
||||
yes: bool,
|
||||
) -> Result<&mut OverrideBuilder, Error> {
|
||||
// TODO: This should not return a `Result`. Fix this in the next semver
|
||||
// release.
|
||||
self.builder.case_insensitive(yes)?;
|
||||
Ok(self)
|
||||
}
|
||||
}
|
||||
@@ -192,8 +205,9 @@ mod tests {
|
||||
#[test]
|
||||
fn gitignore() {
|
||||
let ov = ov(&["/foo", "bar/*.rs", "baz/**"]);
|
||||
assert!(ov.matched("bar/lib.rs", false).is_whitelist());
|
||||
assert!(ov.matched("bar/wat/lib.rs", false).is_ignore());
|
||||
assert!(ov.matched("wat/bar/lib.rs", false).is_whitelist());
|
||||
assert!(ov.matched("wat/bar/lib.rs", false).is_ignore());
|
||||
assert!(ov.matched("foo", false).is_whitelist());
|
||||
assert!(ov.matched("wat/foo", false).is_ignore());
|
||||
assert!(ov.matched("baz", false).is_ignore());
|
||||
@@ -220,4 +234,29 @@ mod tests {
|
||||
let ov = ov(&["!/bar"]);
|
||||
assert!(ov.matched("./foo/bar", false).is_none());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn case_insensitive() {
|
||||
let ov = OverrideBuilder::new(ROOT)
|
||||
.case_insensitive(true)
|
||||
.unwrap()
|
||||
.add("*.html")
|
||||
.unwrap()
|
||||
.build()
|
||||
.unwrap();
|
||||
assert!(ov.matched("foo.html", false).is_whitelist());
|
||||
assert!(ov.matched("foo.HTML", false).is_whitelist());
|
||||
assert!(ov.matched("foo.htm", false).is_ignore());
|
||||
assert!(ov.matched("foo.HTM", false).is_ignore());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn default_case_sensitive() {
|
||||
let ov =
|
||||
OverrideBuilder::new(ROOT).add("*.html").unwrap().build().unwrap();
|
||||
assert!(ov.matched("foo.html", false).is_whitelist());
|
||||
assert!(ov.matched("foo.HTML", false).is_ignore());
|
||||
assert!(ov.matched("foo.htm", false).is_ignore());
|
||||
assert!(ov.matched("foo.HTM", false).is_ignore());
|
||||
}
|
||||
}
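
Overrides are typically consumed through the walker rather than matched
directly; a hedged wiring sketch follows (paths and globs are illustrative, and
the exact set of directory entries yielded follows the override semantics
above):

use ignore::overrides::OverrideBuilder;
use ignore::WalkBuilder;

fn main() {
    let mut ob = OverrideBuilder::new("./");
    ob.add("*.rs").unwrap();
    ob.add("!target/").unwrap();
    let overrides = ob.build().unwrap();

    for result in WalkBuilder::new("./").overrides(overrides).build() {
        if let Ok(entry) = result {
            println!("{}", entry.path().display());
        }
    }
}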
|
||||
@@ -1,22 +1,56 @@
|
||||
use std::ffi::OsStr;
|
||||
use std::path::Path;
|
||||
|
||||
/// Returns true if and only if this file path is considered to be hidden.
|
||||
use walk::DirEntry;
|
||||
|
||||
/// Returns true if and only if this entry is considered to be hidden.
|
||||
///
|
||||
/// This only returns true if the base name of the path starts with a `.`.
|
||||
///
|
||||
/// On Unix, this implements a more optimized check.
|
||||
#[cfg(unix)]
|
||||
pub fn is_hidden<P: AsRef<Path>>(path: P) -> bool {
|
||||
pub fn is_hidden(dent: &DirEntry) -> bool {
|
||||
use std::os::unix::ffi::OsStrExt;
|
||||
|
||||
if let Some(name) = file_name(path.as_ref()) {
|
||||
if let Some(name) = file_name(dent.path()) {
|
||||
name.as_bytes().get(0) == Some(&b'.')
|
||||
} else {
|
||||
false
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns true if and only if this file path is considered to be hidden.
|
||||
#[cfg(not(unix))]
|
||||
pub fn is_hidden<P: AsRef<Path>>(path: P) -> bool {
|
||||
if let Some(name) = file_name(path.as_ref()) {
|
||||
/// Returns true if and only if this entry is considered to be hidden.
|
||||
///
|
||||
/// On Windows, this returns true if one of the following is true:
|
||||
///
|
||||
/// * The base name of the path starts with a `.`.
|
||||
/// * The file attributes have the `HIDDEN` property set.
|
||||
#[cfg(windows)]
|
||||
pub fn is_hidden(dent: &DirEntry) -> bool {
|
||||
use std::os::windows::fs::MetadataExt;
|
||||
use winapi_util::file;
|
||||
|
||||
// This looks like we're doing an extra stat call, but on Windows, the
|
||||
// directory traverser reuses the metadata retrieved from each directory
|
||||
// entry and stores it on the DirEntry itself. So this is "free."
|
||||
if let Ok(md) = dent.metadata() {
|
||||
if file::is_hidden(md.file_attributes() as u64) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
if let Some(name) = file_name(dent.path()) {
|
||||
name.to_str().map(|s| s.starts_with(".")).unwrap_or(false)
|
||||
} else {
|
||||
false
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns true if and only if this entry is considered to be hidden.
|
||||
///
|
||||
/// This only returns true if the base name of the path starts with a `.`.
|
||||
#[cfg(not(any(unix, windows)))]
|
||||
pub fn is_hidden(dent: &DirEntry) -> bool {
|
||||
if let Some(name) = file_name(dent.path()) {
|
||||
name.to_str().map(|s| s.starts_with(".")).unwrap_or(false)
|
||||
} else {
|
||||
false
|
||||
@@ -57,8 +91,8 @@ pub fn strip_prefix<'a, P: AsRef<Path> + ?Sized>(
|
||||
/// the empty string.
|
||||
#[cfg(unix)]
|
||||
pub fn is_file_name<P: AsRef<Path>>(path: P) -> bool {
|
||||
use std::os::unix::ffi::OsStrExt;
|
||||
use memchr::memchr;
|
||||
use std::os::unix::ffi::OsStrExt;
|
||||
|
||||
let path = path.as_ref().as_os_str().as_bytes();
|
||||
memchr(b'/', path).is_none()
|
||||
@@ -79,8 +113,8 @@ pub fn is_file_name<P: AsRef<Path>>(path: P) -> bool {
|
||||
pub fn file_name<'a, P: AsRef<Path> + ?Sized>(
|
||||
path: &'a P,
|
||||
) -> Option<&'a OsStr> {
|
||||
use std::os::unix::ffi::OsStrExt;
|
||||
use memchr::memrchr;
|
||||
use std::os::unix::ffi::OsStrExt;
|
||||
|
||||
let path = path.as_ref().as_os_str().as_bytes();
|
||||
if path.is_empty() {
|
||||
@@ -93,112 +93,10 @@ use globset::{GlobBuilder, GlobSet, GlobSetBuilder};
|
||||
use regex::Regex;
|
||||
use thread_local::ThreadLocal;
|
||||
|
||||
use default_types::DEFAULT_TYPES;
|
||||
use pathutil::file_name;
|
||||
use {Error, Match};
|
||||
|
||||
const DEFAULT_TYPES: &'static [(&'static str, &'static [&'static str])] = &[
|
||||
("agda", &["*.agda", "*.lagda"]),
|
||||
("asciidoc", &["*.adoc", "*.asc", "*.asciidoc"]),
|
||||
("asm", &["*.asm", "*.s", "*.S"]),
|
||||
("awk", &["*.awk"]),
|
||||
("c", &["*.c", "*.h", "*.H"]),
|
||||
("cbor", &["*.cbor"]),
|
||||
("ceylon", &["*.ceylon"]),
|
||||
("clojure", &["*.clj", "*.cljc", "*.cljs", "*.cljx"]),
|
||||
("cmake", &["*.cmake", "CMakeLists.txt"]),
|
||||
("coffeescript", &["*.coffee"]),
|
||||
("creole", &["*.creole"]),
|
||||
("config", &["*.config"]),
|
||||
("cpp", &[
|
||||
"*.C", "*.cc", "*.cpp", "*.cxx",
|
||||
"*.h", "*.H", "*.hh", "*.hpp",
|
||||
]),
|
||||
("crystal", &["Projectfile", "*.cr"]),
|
||||
("cs", &["*.cs"]),
|
||||
("csharp", &["*.cs"]),
|
||||
("css", &["*.css", "*.scss"]),
|
||||
("cython", &["*.pyx"]),
|
||||
("dart", &["*.dart"]),
|
||||
("d", &["*.d"]),
|
||||
("elisp", &["*.el"]),
|
||||
("elixir", &["*.ex", "*.eex", "*.exs"]),
|
||||
("erlang", &["*.erl", "*.hrl"]),
|
||||
("fish", &["*.fish"]),
|
||||
("fortran", &[
|
||||
"*.f", "*.F", "*.f77", "*.F77", "*.pfo",
|
||||
"*.f90", "*.F90", "*.f95", "*.F95",
|
||||
]),
|
||||
("fsharp", &["*.fs", "*.fsx", "*.fsi"]),
|
||||
("go", &["*.go"]),
|
||||
("groovy", &["*.groovy", "*.gradle"]),
|
||||
("h", &["*.h", "*.hpp"]),
|
||||
("hbs", &["*.hbs"]),
|
||||
("haskell", &["*.hs", "*.lhs"]),
|
||||
("html", &["*.htm", "*.html", "*.ejs"]),
|
||||
("java", &["*.java"]),
|
||||
("jinja", &["*.jinja", "*.jinja2"]),
|
||||
("js", &[
|
||||
"*.js", "*.jsx", "*.vue",
|
||||
]),
|
||||
("json", &["*.json"]),
|
||||
("jsonl", &["*.jsonl"]),
|
||||
("kotlin", &["*.kt", "*.kts"]),
|
||||
("less", &["*.less"]),
|
||||
("lisp", &["*.el", "*.jl", "*.lisp", "*.lsp", "*.sc", "*.scm"]),
|
||||
("log", &["*.log"]),
|
||||
("lua", &["*.lua"]),
|
||||
("m4", &["*.ac", "*.m4"]),
|
||||
("make", &["gnumakefile", "Gnumakefile", "makefile", "Makefile", "*.mk", "*.mak"]),
|
||||
("markdown", &["*.markdown", "*.md", "*.mdown", "*.mkdn"]),
|
||||
("md", &["*.markdown", "*.md", "*.mdown", "*.mkdn"]),
|
||||
("matlab", &["*.m"]),
|
||||
("mk", &["mkfile"]),
|
||||
("ml", &["*.ml"]),
|
||||
("nim", &["*.nim"]),
|
||||
("objc", &["*.h", "*.m"]),
|
||||
("objcpp", &["*.h", "*.mm"]),
|
||||
("ocaml", &["*.ml", "*.mli", "*.mll", "*.mly"]),
|
||||
("org", &["*.org"]),
|
||||
("perl", &["*.perl", "*.pl", "*.PL", "*.plh", "*.plx", "*.pm", "*.t"]),
|
||||
("pdf", &["*.pdf"]),
|
||||
("php", &["*.php", "*.php3", "*.php4", "*.php5", "*.phtml"]),
|
||||
("pod", &["*.pod"]),
|
||||
("ps", &["*.cdxml", "*.ps1", "*.ps1xml", "*.psd1", "*.psm1"]),
|
||||
("py", &["*.py"]),
|
||||
("readme", &["README*", "*README"]),
|
||||
("r", &["*.R", "*.r", "*.Rmd", "*.Rnw"]),
|
||||
("rdoc", &["*.rdoc"]),
|
||||
("rst", &["*.rst"]),
|
||||
("ruby", &["Gemfile", "*.gemspec", ".irbrc", "Rakefile", "*.rb"]),
|
||||
("rust", &["*.rs"]),
|
||||
("sass", &["*.sass", "*.scss"]),
|
||||
("scala", &["*.scala"]),
|
||||
("sh", &["*.bash", "*.csh", "*.ksh", "*.sh", "*.tcsh"]),
|
||||
("spark", &["*.spark"]),
|
||||
("stylus", &["*.styl"]),
|
||||
("sql", &["*.sql"]),
|
||||
("sv", &["*.v", "*.vg", "*.sv", "*.svh", "*.h"]),
|
||||
("svg", &["*.svg"]),
|
||||
("swift", &["*.swift"]),
|
||||
("swig", &["*.def", "*.i"]),
|
||||
("taskpaper", &["*.taskpaper"]),
|
||||
("tcl", &["*.tcl"]),
|
||||
("tex", &["*.tex", "*.ltx", "*.cls", "*.sty", "*.bib"]),
|
||||
("textile", &["*.textile"]),
|
||||
("ts", &["*.ts", "*.tsx"]),
|
||||
("txt", &["*.txt"]),
|
||||
("toml", &["*.toml", "Cargo.lock"]),
|
||||
("twig", &["*.twig"]),
|
||||
("vala", &["*.vala"]),
|
||||
("vb", &["*.vb"]),
|
||||
("vimscript", &["*.vim"]),
|
||||
("wiki", &["*.mediawiki", "*.wiki"]),
|
||||
("xml", &["*.xml"]),
|
||||
("yacc", &["*.y"]),
|
||||
("yaml", &["*.yaml", "*.yml"]),
|
||||
("zsh", &["zshenv", ".zshenv", "zprofile", ".zprofile", "zshrc", ".zshrc", "zlogin", ".zlogin", "zlogout", ".zlogout", "*.zsh"]),
|
||||
];
|
||||
|
||||
/// Glob represents a single glob in a set of file type definitions.
|
||||
///
|
||||
/// There may be more than one glob for a particular file type.
|
||||
@@ -228,13 +126,23 @@ enum GlobInner<'a> {
|
||||
which: usize,
|
||||
/// Whether the selection was negated or not.
|
||||
negated: bool,
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
impl<'a> Glob<'a> {
|
||||
fn unmatched() -> Glob<'a> {
|
||||
Glob(GlobInner::UnmatchedIgnore)
|
||||
}
|
||||
|
||||
/// Return the file type definition that matched, if one exists. A file type
|
||||
/// definition always exists when a specific definition matches a file
|
||||
/// path.
|
||||
pub fn file_type_def(&self) -> Option<&FileTypeDef> {
|
||||
match self {
|
||||
Glob(GlobInner::UnmatchedIgnore) => None,
|
||||
Glob(GlobInner::Matched { def, .. }) => Some(def),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// A single file type definition.
|
||||
@@ -379,7 +287,7 @@ impl Types {
|
||||
return Match::None;
|
||||
}
|
||||
};
|
||||
let mut matches = self.matches.get_default().borrow_mut();
|
||||
let mut matches = self.matches.get_or_default().borrow_mut();
|
||||
self.set.matches_into(name, &mut *matches);
|
||||
// The highest precedent match is the last one.
|
||||
if let Some(&i) = matches.last() {
|
||||
@@ -418,10 +326,7 @@ impl TypesBuilder {
|
||||
/// of default type definitions can be added with `add_defaults`, and
|
||||
/// additional type definitions can be added with `select` and `negate`.
|
||||
pub fn new() -> TypesBuilder {
|
||||
TypesBuilder {
|
||||
types: HashMap::new(),
|
||||
selections: vec![],
|
||||
}
|
||||
TypesBuilder { types: HashMap::new(), selections: vec![] }
|
||||
}
|
||||
|
||||
/// Build the current set of file type definitions *and* selections into
|
||||
@@ -442,18 +347,22 @@ impl TypesBuilder {
|
||||
}
|
||||
};
|
||||
for (iglob, glob) in def.globs.iter().enumerate() {
|
||||
build_set.add(try!(
|
||||
build_set.add(
|
||||
GlobBuilder::new(glob)
|
||||
.literal_separator(true)
|
||||
.build()
|
||||
.map_err(|err| Error::Glob(err.to_string()))));
|
||||
.map_err(|err| Error::Glob {
|
||||
glob: Some(glob.to_string()),
|
||||
err: err.kind().to_string(),
|
||||
})?,
|
||||
);
|
||||
glob_to_selection.push((isel, iglob));
|
||||
}
|
||||
selections.push(selection.clone().map(move |_| def));
|
||||
}
|
||||
let set = try!(build_set.build().map_err(|err| {
|
||||
Error::Glob(err.to_string())
|
||||
}));
|
||||
let set = build_set
|
||||
.build()
|
||||
.map_err(|err| Error::Glob { glob: None, err: err.to_string() })?;
|
||||
Ok(Types {
|
||||
defs: defs,
|
||||
selections: selections,
|
||||
@@ -525,9 +434,14 @@ impl TypesBuilder {
|
||||
return Err(Error::InvalidDefinition);
|
||||
}
|
||||
let (key, glob) = (name.to_string(), glob.to_string());
|
||||
self.types.entry(key).or_insert_with(|| {
|
||||
FileTypeDef { name: name.to_string(), globs: vec![] }
|
||||
}).globs.push(glob);
|
||||
self.types
|
||||
.entry(key)
|
||||
.or_insert_with(|| FileTypeDef {
|
||||
name: name.to_string(),
|
||||
globs: vec![],
|
||||
})
|
||||
.globs
|
||||
.push(glob);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -554,7 +468,10 @@ impl TypesBuilder {
|
||||
3 => {
|
||||
let name = parts[0];
|
||||
let types_string = parts[2];
|
||||
if name.is_empty() || parts[1] != "include" || types_string.is_empty() {
|
||||
if name.is_empty()
|
||||
|| parts[1] != "include"
|
||||
|| types_string.is_empty()
|
||||
{
|
||||
return Err(Error::InvalidDefinition);
|
||||
}
|
||||
let types = types_string.split(',');
|
||||
@@ -564,14 +481,15 @@ impl TypesBuilder {
|
||||
return Err(Error::InvalidDefinition);
|
||||
}
|
||||
for type_name in types {
|
||||
let globs = self.types.get(type_name).unwrap().globs.clone();
|
||||
let globs =
|
||||
self.types.get(type_name).unwrap().globs.clone();
|
||||
for glob in globs {
|
||||
try!(self.add(name, &glob));
|
||||
self.add(name, &glob)?;
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
_ => Err(Error::InvalidDefinition)
|
||||
_ => Err(Error::InvalidDefinition),
|
||||
}
|
||||
}
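
A usage sketch based on the `name:glob` and `name:include:a,b` formats parsed
above; the asserted outcomes are illustrative of the selection semantics rather
than normative.

use ignore::types::TypesBuilder;

fn main() {
    let mut btypes = TypesBuilder::new();
    btypes.add_defaults();
    btypes.add_def("proto:*.proto").unwrap();
    btypes.add_def("web:include:html,js,css").unwrap();
    // Only paths matching the `web` composite type are kept.
    btypes.select("web");
    let types = btypes.build().unwrap();

    assert!(types.matched("index.html", false).is_whitelist());
    assert!(types.matched("main.rs", false).is_ignore());
}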
|
||||
|
||||
@@ -628,7 +546,7 @@ mod tests {
|
||||
"rust:*.rs",
|
||||
"js:*.js",
|
||||
"foo:*.{rs,foo}",
|
||||
"combo:include:html,rust"
|
||||
"combo:include:html,rust",
|
||||
]
|
||||
}
|
||||
|
||||
@@ -662,7 +580,7 @@ mod tests {
|
||||
"combo:include:html,python",
|
||||
// Bad format
|
||||
"combo:foobar:html,rust",
|
||||
""
|
||||
"",
|
||||
];
|
||||
for def in bad_defs {
|
||||
assert!(btypes.add_def(def).is_err());
|
||||
crates/ignore/src/walk.rs (new file, 2164 lines); diff suppressed because it is too large.
@@ -0,0 +1,216 @@
|
||||
# Based on https://github.com/behnam/gitignore-test/blob/master/.gitignore
|
||||
|
||||
### file in root
|
||||
|
||||
# MATCH /file_root_1
|
||||
file_root_00
|
||||
|
||||
# NO_MATCH
|
||||
file_root_01/
|
||||
|
||||
# NO_MATCH
|
||||
file_root_02/*
|
||||
|
||||
# NO_MATCH
|
||||
file_root_03/**
|
||||
|
||||
|
||||
# MATCH /file_root_10
|
||||
/file_root_10
|
||||
|
||||
# NO_MATCH
|
||||
/file_root_11/
|
||||
|
||||
# NO_MATCH
|
||||
/file_root_12/*
|
||||
|
||||
# NO_MATCH
|
||||
/file_root_13/**
|
||||
|
||||
|
||||
# NO_MATCH
|
||||
*/file_root_20
|
||||
|
||||
# NO_MATCH
|
||||
*/file_root_21/
|
||||
|
||||
# NO_MATCH
|
||||
*/file_root_22/*
|
||||
|
||||
# NO_MATCH
|
||||
*/file_root_23/**
|
||||
|
||||
|
||||
# MATCH /file_root_30
|
||||
**/file_root_30
|
||||
|
||||
# NO_MATCH
|
||||
**/file_root_31/
|
||||
|
||||
# NO_MATCH
|
||||
**/file_root_32/*
|
||||
|
||||
# NO_MATCH
|
||||
**/file_root_33/**
|
||||
|
||||
|
||||
### file in sub-dir
|
||||
|
||||
# MATCH /parent_dir/file_deep_1
|
||||
file_deep_00
|
||||
|
||||
# NO_MATCH
|
||||
file_deep_01/
|
||||
|
||||
# NO_MATCH
|
||||
file_deep_02/*
|
||||
|
||||
# NO_MATCH
|
||||
file_deep_03/**
|
||||
|
||||
|
||||
# NO_MATCH
|
||||
/file_deep_10
|
||||
|
||||
# NO_MATCH
|
||||
/file_deep_11/
|
||||
|
||||
# NO_MATCH
|
||||
/file_deep_12/*
|
||||
|
||||
# NO_MATCH
|
||||
/file_deep_13/**
|
||||
|
||||
|
||||
# MATCH /parent_dir/file_deep_20
|
||||
*/file_deep_20
|
||||
|
||||
# NO_MATCH
|
||||
*/file_deep_21/
|
||||
|
||||
# NO_MATCH
|
||||
*/file_deep_22/*
|
||||
|
||||
# NO_MATCH
|
||||
*/file_deep_23/**
|
||||
|
||||
|
||||
# MATCH /parent_dir/file_deep_30
|
||||
**/file_deep_30
|
||||
|
||||
# NO_MATCH
|
||||
**/file_deep_31/
|
||||
|
||||
# NO_MATCH
|
||||
**/file_deep_32/*
|
||||
|
||||
# NO_MATCH
|
||||
**/file_deep_33/**
|
||||
|
||||
|
||||
### dir in root
|
||||
|
||||
# MATCH /dir_root_00
|
||||
dir_root_00
|
||||
|
||||
# MATCH /dir_root_01
|
||||
dir_root_01/
|
||||
|
||||
# MATCH /dir_root_02
|
||||
dir_root_02/*
|
||||
|
||||
# MATCH /dir_root_03
|
||||
dir_root_03/**
|
||||
|
||||
|
||||
# MATCH /dir_root_10
|
||||
/dir_root_10
|
||||
|
||||
# MATCH /dir_root_11
|
||||
/dir_root_11/
|
||||
|
||||
# MATCH /dir_root_12
|
||||
/dir_root_12/*
|
||||
|
||||
# MATCH /dir_root_13
|
||||
/dir_root_13/**
|
||||
|
||||
|
||||
# NO_MATCH
|
||||
*/dir_root_20
|
||||
|
||||
# NO_MATCH
|
||||
*/dir_root_21/
|
||||
|
||||
# NO_MATCH
|
||||
*/dir_root_22/*
|
||||
|
||||
# NO_MATCH
|
||||
*/dir_root_23/**
|
||||
|
||||
|
||||
# MATCH /dir_root_30
|
||||
**/dir_root_30
|
||||
|
||||
# MATCH /dir_root_31
|
||||
**/dir_root_31/
|
||||
|
||||
# MATCH /dir_root_32
|
||||
**/dir_root_32/*
|
||||
|
||||
# MATCH /dir_root_33
|
||||
**/dir_root_33/**
|
||||
|
||||
|
||||
### dir in sub-dir
|
||||
|
||||
# MATCH /parent_dir/dir_deep_00
|
||||
dir_deep_00
|
||||
|
||||
# MATCH /parent_dir/dir_deep_01
|
||||
dir_deep_01/
|
||||
|
||||
# NO_MATCH
|
||||
dir_deep_02/*
|
||||
|
||||
# NO_MATCH
|
||||
dir_deep_03/**
|
||||
|
||||
|
||||
# NO_MATCH
|
||||
/dir_deep_10
|
||||
|
||||
# NO_MATCH
|
||||
/dir_deep_11/
|
||||
|
||||
# NO_MATCH
|
||||
/dir_deep_12/*
|
||||
|
||||
# NO_MATCH
|
||||
/dir_deep_13/**
|
||||
|
||||
|
||||
# MATCH /parent_dir/dir_deep_20
|
||||
*/dir_deep_20
|
||||
|
||||
# MATCH /parent_dir/dir_deep_21
|
||||
*/dir_deep_21/
|
||||
|
||||
# MATCH /parent_dir/dir_deep_22
|
||||
*/dir_deep_22/*
|
||||
|
||||
# MATCH /parent_dir/dir_deep_23
|
||||
*/dir_deep_23/**
|
||||
|
||||
|
||||
# MATCH /parent_dir/dir_deep_30
|
||||
**/dir_deep_30
|
||||
|
||||
# MATCH /parent_dir/dir_deep_31
|
||||
**/dir_deep_31/
|
||||
|
||||
# MATCH /parent_dir/dir_deep_32
|
||||
**/dir_deep_32/*
|
||||
|
||||
# MATCH /parent_dir/dir_deep_33
|
||||
**/dir_deep_33/**
|
||||
@@ -0,0 +1,300 @@
|
||||
extern crate ignore;
|
||||
|
||||
use std::path::Path;
|

use ignore::gitignore::{Gitignore, GitignoreBuilder};

const IGNORE_FILE: &'static str =
    "tests/gitignore_matched_path_or_any_parents_tests.gitignore";

fn get_gitignore() -> Gitignore {
    let mut builder = GitignoreBuilder::new("ROOT");
    let error = builder.add(IGNORE_FILE);
    assert!(error.is_none(), "failed to open gitignore file");
    builder.build().unwrap()
}

#[test]
#[should_panic(expected = "path is expected to be under the root")]
fn test_path_should_be_under_root() {
    let gitignore = get_gitignore();
    let path = "/tmp/some_file";
    gitignore.matched_path_or_any_parents(Path::new(path), false);
    assert!(false);
}

#[test]
fn test_files_in_root() {
    let gitignore = get_gitignore();
    let m = |path: &str| {
        gitignore.matched_path_or_any_parents(Path::new(path), false)
    };

    // 0x
    assert!(m("ROOT/file_root_00").is_ignore());
    assert!(m("ROOT/file_root_01").is_none());
    assert!(m("ROOT/file_root_02").is_none());
    assert!(m("ROOT/file_root_03").is_none());

    // 1x
    assert!(m("ROOT/file_root_10").is_ignore());
    assert!(m("ROOT/file_root_11").is_none());
    assert!(m("ROOT/file_root_12").is_none());
    assert!(m("ROOT/file_root_13").is_none());

    // 2x
    assert!(m("ROOT/file_root_20").is_none());
    assert!(m("ROOT/file_root_21").is_none());
    assert!(m("ROOT/file_root_22").is_none());
    assert!(m("ROOT/file_root_23").is_none());

    // 3x
    assert!(m("ROOT/file_root_30").is_ignore());
    assert!(m("ROOT/file_root_31").is_none());
    assert!(m("ROOT/file_root_32").is_none());
    assert!(m("ROOT/file_root_33").is_none());
}

#[test]
fn test_files_in_deep() {
    let gitignore = get_gitignore();
    let m = |path: &str| {
        gitignore.matched_path_or_any_parents(Path::new(path), false)
    };

    // 0x
    assert!(m("ROOT/parent_dir/file_deep_00").is_ignore());
    assert!(m("ROOT/parent_dir/file_deep_01").is_none());
    assert!(m("ROOT/parent_dir/file_deep_02").is_none());
    assert!(m("ROOT/parent_dir/file_deep_03").is_none());

    // 1x
    assert!(m("ROOT/parent_dir/file_deep_10").is_none());
    assert!(m("ROOT/parent_dir/file_deep_11").is_none());
    assert!(m("ROOT/parent_dir/file_deep_12").is_none());
    assert!(m("ROOT/parent_dir/file_deep_13").is_none());

    // 2x
    assert!(m("ROOT/parent_dir/file_deep_20").is_ignore());
    assert!(m("ROOT/parent_dir/file_deep_21").is_none());
    assert!(m("ROOT/parent_dir/file_deep_22").is_none());
    assert!(m("ROOT/parent_dir/file_deep_23").is_none());

    // 3x
    assert!(m("ROOT/parent_dir/file_deep_30").is_ignore());
    assert!(m("ROOT/parent_dir/file_deep_31").is_none());
    assert!(m("ROOT/parent_dir/file_deep_32").is_none());
    assert!(m("ROOT/parent_dir/file_deep_33").is_none());
}

#[test]
fn test_dirs_in_root() {
    let gitignore = get_gitignore();
    let m = |path: &str, is_dir: bool| {
        gitignore.matched_path_or_any_parents(Path::new(path), is_dir)
    };

    // 00
    assert!(m("ROOT/dir_root_00", true).is_ignore());
    assert!(m("ROOT/dir_root_00/file", false).is_ignore());
    assert!(m("ROOT/dir_root_00/child_dir", true).is_ignore());
    assert!(m("ROOT/dir_root_00/child_dir/file", false).is_ignore());

    // 01
    assert!(m("ROOT/dir_root_01", true).is_ignore());
    assert!(m("ROOT/dir_root_01/file", false).is_ignore());
    assert!(m("ROOT/dir_root_01/child_dir", true).is_ignore());
    assert!(m("ROOT/dir_root_01/child_dir/file", false).is_ignore());

    // 02
    assert!(m("ROOT/dir_root_02", true).is_none()); // dir itself doesn't match
    assert!(m("ROOT/dir_root_02/file", false).is_ignore());
    assert!(m("ROOT/dir_root_02/child_dir", true).is_ignore());
    assert!(m("ROOT/dir_root_02/child_dir/file", false).is_ignore());

    // 03
    assert!(m("ROOT/dir_root_03", true).is_none()); // dir itself doesn't match
    assert!(m("ROOT/dir_root_03/file", false).is_ignore());
    assert!(m("ROOT/dir_root_03/child_dir", true).is_ignore());
    assert!(m("ROOT/dir_root_03/child_dir/file", false).is_ignore());

    // 10
    assert!(m("ROOT/dir_root_10", true).is_ignore());
    assert!(m("ROOT/dir_root_10/file", false).is_ignore());
    assert!(m("ROOT/dir_root_10/child_dir", true).is_ignore());
    assert!(m("ROOT/dir_root_10/child_dir/file", false).is_ignore());

    // 11
    assert!(m("ROOT/dir_root_11", true).is_ignore());
    assert!(m("ROOT/dir_root_11/file", false).is_ignore());
    assert!(m("ROOT/dir_root_11/child_dir", true).is_ignore());
    assert!(m("ROOT/dir_root_11/child_dir/file", false).is_ignore());

    // 12
    assert!(m("ROOT/dir_root_12", true).is_none()); // dir itself doesn't match
    assert!(m("ROOT/dir_root_12/file", false).is_ignore());
    assert!(m("ROOT/dir_root_12/child_dir", true).is_ignore());
    assert!(m("ROOT/dir_root_12/child_dir/file", false).is_ignore());

    // 13
    assert!(m("ROOT/dir_root_13", true).is_none());
    assert!(m("ROOT/dir_root_13/file", false).is_ignore());
    assert!(m("ROOT/dir_root_13/child_dir", true).is_ignore());
    assert!(m("ROOT/dir_root_13/child_dir/file", false).is_ignore());

    // 20
    assert!(m("ROOT/dir_root_20", true).is_none());
    assert!(m("ROOT/dir_root_20/file", false).is_none());
    assert!(m("ROOT/dir_root_20/child_dir", true).is_none());
    assert!(m("ROOT/dir_root_20/child_dir/file", false).is_none());

    // 21
    assert!(m("ROOT/dir_root_21", true).is_none());
    assert!(m("ROOT/dir_root_21/file", false).is_none());
    assert!(m("ROOT/dir_root_21/child_dir", true).is_none());
    assert!(m("ROOT/dir_root_21/child_dir/file", false).is_none());

    // 22
    assert!(m("ROOT/dir_root_22", true).is_none());
    assert!(m("ROOT/dir_root_22/file", false).is_none());
    assert!(m("ROOT/dir_root_22/child_dir", true).is_none());
    assert!(m("ROOT/dir_root_22/child_dir/file", false).is_none());

    // 23
    assert!(m("ROOT/dir_root_23", true).is_none());
    assert!(m("ROOT/dir_root_23/file", false).is_none());
    assert!(m("ROOT/dir_root_23/child_dir", true).is_none());
    assert!(m("ROOT/dir_root_23/child_dir/file", false).is_none());

    // 30
    assert!(m("ROOT/dir_root_30", true).is_ignore());
    assert!(m("ROOT/dir_root_30/file", false).is_ignore());
    assert!(m("ROOT/dir_root_30/child_dir", true).is_ignore());
    assert!(m("ROOT/dir_root_30/child_dir/file", false).is_ignore());

    // 31
    assert!(m("ROOT/dir_root_31", true).is_ignore());
    assert!(m("ROOT/dir_root_31/file", false).is_ignore());
    assert!(m("ROOT/dir_root_31/child_dir", true).is_ignore());
    assert!(m("ROOT/dir_root_31/child_dir/file", false).is_ignore());

    // 32
    assert!(m("ROOT/dir_root_32", true).is_none()); // dir itself doesn't match
    assert!(m("ROOT/dir_root_32/file", false).is_ignore());
    assert!(m("ROOT/dir_root_32/child_dir", true).is_ignore());
    assert!(m("ROOT/dir_root_32/child_dir/file", false).is_ignore());

    // 33
    assert!(m("ROOT/dir_root_33", true).is_none()); // dir itself doesn't match
    assert!(m("ROOT/dir_root_33/file", false).is_ignore());
    assert!(m("ROOT/dir_root_33/child_dir", true).is_ignore());
    assert!(m("ROOT/dir_root_33/child_dir/file", false).is_ignore());
}

#[test]
fn test_dirs_in_deep() {
    let gitignore = get_gitignore();
    let m = |path: &str, is_dir: bool| {
        gitignore.matched_path_or_any_parents(Path::new(path), is_dir)
    };

    // 00
    assert!(m("ROOT/parent_dir/dir_deep_00", true).is_ignore());
    assert!(m("ROOT/parent_dir/dir_deep_00/file", false).is_ignore());
    assert!(m("ROOT/parent_dir/dir_deep_00/child_dir", true).is_ignore());
    assert!(m("ROOT/parent_dir/dir_deep_00/child_dir/file", false).is_ignore());

    // 01
    assert!(m("ROOT/parent_dir/dir_deep_01", true).is_ignore());
    assert!(m("ROOT/parent_dir/dir_deep_01/file", false).is_ignore());
    assert!(m("ROOT/parent_dir/dir_deep_01/child_dir", true).is_ignore());
    assert!(m("ROOT/parent_dir/dir_deep_01/child_dir/file", false).is_ignore());

    // 02
    assert!(m("ROOT/parent_dir/dir_deep_02", true).is_none());
    assert!(m("ROOT/parent_dir/dir_deep_02/file", false).is_none());
    assert!(m("ROOT/parent_dir/dir_deep_02/child_dir", true).is_none());
    assert!(m("ROOT/parent_dir/dir_deep_02/child_dir/file", false).is_none());

    // 03
    assert!(m("ROOT/parent_dir/dir_deep_03", true).is_none());
    assert!(m("ROOT/parent_dir/dir_deep_03/file", false).is_none());
    assert!(m("ROOT/parent_dir/dir_deep_03/child_dir", true).is_none());
    assert!(m("ROOT/parent_dir/dir_deep_03/child_dir/file", false).is_none());

    // 10
    assert!(m("ROOT/parent_dir/dir_deep_10", true).is_none());
    assert!(m("ROOT/parent_dir/dir_deep_10/file", false).is_none());
    assert!(m("ROOT/parent_dir/dir_deep_10/child_dir", true).is_none());
    assert!(m("ROOT/parent_dir/dir_deep_10/child_dir/file", false).is_none());

    // 11
    assert!(m("ROOT/parent_dir/dir_deep_11", true).is_none());
    assert!(m("ROOT/parent_dir/dir_deep_11/file", false).is_none());
    assert!(m("ROOT/parent_dir/dir_deep_11/child_dir", true).is_none());
    assert!(m("ROOT/parent_dir/dir_deep_11/child_dir/file", false).is_none());

    // 12
    assert!(m("ROOT/parent_dir/dir_deep_12", true).is_none());
    assert!(m("ROOT/parent_dir/dir_deep_12/file", false).is_none());
    assert!(m("ROOT/parent_dir/dir_deep_12/child_dir", true).is_none());
    assert!(m("ROOT/parent_dir/dir_deep_12/child_dir/file", false).is_none());

    // 13
    assert!(m("ROOT/parent_dir/dir_deep_13", true).is_none());
    assert!(m("ROOT/parent_dir/dir_deep_13/file", false).is_none());
    assert!(m("ROOT/parent_dir/dir_deep_13/child_dir", true).is_none());
    assert!(m("ROOT/parent_dir/dir_deep_13/child_dir/file", false).is_none());

    // 20
    assert!(m("ROOT/parent_dir/dir_deep_20", true).is_ignore());
    assert!(m("ROOT/parent_dir/dir_deep_20/file", false).is_ignore());
    assert!(m("ROOT/parent_dir/dir_deep_20/child_dir", true).is_ignore());
    assert!(m("ROOT/parent_dir/dir_deep_20/child_dir/file", false).is_ignore());

    // 21
    assert!(m("ROOT/parent_dir/dir_deep_21", true).is_ignore());
    assert!(m("ROOT/parent_dir/dir_deep_21/file", false).is_ignore());
    assert!(m("ROOT/parent_dir/dir_deep_21/child_dir", true).is_ignore());
    assert!(m("ROOT/parent_dir/dir_deep_21/child_dir/file", false).is_ignore());

    // 22
    // dir itself doesn't match
    assert!(m("ROOT/parent_dir/dir_deep_22", true).is_none());
    assert!(m("ROOT/parent_dir/dir_deep_22/file", false).is_ignore());
    assert!(m("ROOT/parent_dir/dir_deep_22/child_dir", true).is_ignore());
    assert!(m("ROOT/parent_dir/dir_deep_22/child_dir/file", false).is_ignore());

    // 23
    // dir itself doesn't match
    assert!(m("ROOT/parent_dir/dir_deep_23", true).is_none());
    assert!(m("ROOT/parent_dir/dir_deep_23/file", false).is_ignore());
    assert!(m("ROOT/parent_dir/dir_deep_23/child_dir", true).is_ignore());
    assert!(m("ROOT/parent_dir/dir_deep_23/child_dir/file", false).is_ignore());

    // 30
    assert!(m("ROOT/parent_dir/dir_deep_30", true).is_ignore());
    assert!(m("ROOT/parent_dir/dir_deep_30/file", false).is_ignore());
    assert!(m("ROOT/parent_dir/dir_deep_30/child_dir", true).is_ignore());
    assert!(m("ROOT/parent_dir/dir_deep_30/child_dir/file", false).is_ignore());

    // 31
    assert!(m("ROOT/parent_dir/dir_deep_31", true).is_ignore());
    assert!(m("ROOT/parent_dir/dir_deep_31/file", false).is_ignore());
    assert!(m("ROOT/parent_dir/dir_deep_31/child_dir", true).is_ignore());
    assert!(m("ROOT/parent_dir/dir_deep_31/child_dir/file", false).is_ignore());

    // 32
    // dir itself doesn't match
    assert!(m("ROOT/parent_dir/dir_deep_32", true).is_none());
    assert!(m("ROOT/parent_dir/dir_deep_32/file", false).is_ignore());
    assert!(m("ROOT/parent_dir/dir_deep_32/child_dir", true).is_ignore());
    assert!(m("ROOT/parent_dir/dir_deep_32/child_dir/file", false).is_ignore());

    // 33
    // dir itself doesn't match
    assert!(m("ROOT/parent_dir/dir_deep_33", true).is_none());
    assert!(m("ROOT/parent_dir/dir_deep_33/file", false).is_ignore());
    assert!(m("ROOT/parent_dir/dir_deep_33/child_dir", true).is_ignore());
    assert!(m("ROOT/parent_dir/dir_deep_33/child_dir/file", false).is_ignore());
}
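The tests above exercise `matched_path_or_any_parents` against a `.gitignore` fixture file that is not shown in this diff. As a rough, self-contained sketch of the same API, the matcher can also be built from an in-memory pattern with `GitignoreBuilder::add_line`; the pattern and paths below are illustrative only and are not taken from the fixture:

```rust
use std::path::Path;

use ignore::gitignore::GitignoreBuilder;

fn main() {
    // Build a matcher rooted at "ROOT" from an in-memory pattern instead of
    // reading a .gitignore file from disk.
    let mut builder = GitignoreBuilder::new("ROOT");
    builder.add_line(None, "target/").unwrap();
    let gitignore = builder.build().unwrap();

    // A file inside an ignored directory is reported as ignored because one
    // of its parent directories matches, even though the file itself doesn't.
    assert!(gitignore
        .matched_path_or_any_parents(Path::new("ROOT/target/debug/app"), false)
        .is_ignore());

    // A path with no matching parent is not ignored.
    assert!(gitignore
        .matched_path_or_any_parents(Path::new("ROOT/src/main.rs"), false)
        .is_none());
}
```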
crates/matcher/Cargo.toml (new file, 24 lines)
@@ -0,0 +1,24 @@
[package]
name = "grep-matcher"
version = "0.1.4" #:version
authors = ["Andrew Gallant <jamslam@gmail.com>"]
description = """
A trait for regular expressions, with a focus on line oriented search.
"""
documentation = "https://docs.rs/grep-matcher"
homepage = "https://github.com/BurntSushi/ripgrep/tree/master/crates/matcher"
repository = "https://github.com/BurntSushi/ripgrep/tree/master/crates/matcher"
readme = "README.md"
keywords = ["regex", "pattern", "trait"]
license = "Unlicense/MIT"
autotests = false

[dependencies]
memchr = "2.1"

[dev-dependencies]
regex = "1.1"

[[test]]
name = "integration"
path = "tests/tests.rs"
crates/matcher/LICENSE-MIT (new file, 21 lines)
@@ -0,0 +1,21 @@
The MIT License (MIT)

Copyright (c) 2015 Andrew Gallant

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
crates/matcher/README.md (new file, 36 lines)
@@ -0,0 +1,36 @@
grep-matcher
------------
This crate provides a low level interface for describing regular expression
matchers. The `grep` crate uses this interface in order to make the regex
engine it uses pluggable.

[](https://travis-ci.org/BurntSushi/ripgrep)
[](https://ci.appveyor.com/project/BurntSushi/ripgrep)
[](https://crates.io/crates/grep-matcher)

Dual-licensed under MIT or the [UNLICENSE](http://unlicense.org).

### Documentation

[https://docs.rs/grep-matcher](https://docs.rs/grep-matcher)

**NOTE:** You probably don't want to use this crate directly. Instead, you
should prefer the facade defined in the
[`grep`](https://docs.rs/grep)
crate.


### Usage

Add this to your `Cargo.toml`:

```toml
[dependencies]
grep-matcher = "0.1"
```

and this to your crate root:

```rust
extern crate grep_matcher;
```
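The README describes this crate as the interface that makes the regex engine pluggable. As a hedged sketch of what a minimal implementation of that interface could look like, the hypothetical matcher below reports occurrences of a fixed byte string; it assumes the crate's `Matcher` trait with its required `find_at`/`new_captures` methods and the `NoCaptures`/`NoError` helper types:

```rust
use grep_matcher::{Match, Matcher, NoCaptures, NoError};

/// A toy matcher that finds non-overlapping occurrences of a fixed needle.
/// (Illustrative only; not part of the crate.)
struct SubstringMatcher {
    needle: Vec<u8>,
}

impl Matcher for SubstringMatcher {
    type Captures = NoCaptures;
    type Error = NoError;

    fn find_at(
        &self,
        haystack: &[u8],
        at: usize,
    ) -> Result<Option<Match>, NoError> {
        // Naive scan starting at `at`; a real matcher would do better.
        let found = haystack[at..]
            .windows(self.needle.len())
            .position(|w| w == self.needle.as_slice())
            .map(|i| Match::new(at + i, at + i + self.needle.len()));
        Ok(found)
    }

    fn new_captures(&self) -> Result<NoCaptures, NoError> {
        // This matcher does not support capture groups.
        Ok(NoCaptures::new())
    }
}

fn main() {
    let matcher = SubstringMatcher { needle: b"simpson".to_vec() };
    // `find` is one of the trait's provided methods, built on top of `find_at`.
    let m = matcher.find(b"homer simpson").unwrap().unwrap();
    assert_eq!((m.start(), m.end()), (6, 13));
}
```

Because it opts out of captures via `NoCaptures`, the capture-related provided methods fail fast, which is exactly the behavior the `no_captures` test later in this diff checks.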
crates/matcher/UNLICENSE (new file, 24 lines)
@@ -0,0 +1,24 @@
This is free and unencumbered software released into the public domain.

Anyone is free to copy, modify, publish, use, compile, sell, or
distribute this software, either in source code form or as a compiled
binary, for any purpose, commercial or non-commercial, and by any
means.

In jurisdictions that recognize copyright laws, the author or authors
of this software dedicate any and all copyright interest in the
software to the public domain. We make this dedication for the benefit
of the public at large and to the detriment of our heirs and
successors. We intend this dedication to be an overt act of
relinquishment in perpetuity of all present and future rights to this
software under copyright law.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.

For more information, please refer to <http://unlicense.org/>
crates/matcher/src/interpolate.rs (new file, 328 lines)
@@ -0,0 +1,328 @@
use std::str;

use memchr::memchr;

/// Interpolate capture references in `replacement` and write the interpolation
/// result to `dst`. References in `replacement` take the form of $N or $name,
/// where `N` is a capture group index and `name` is a capture group name. The
/// function provided, `name_to_index`, maps capture group names to indices.
///
/// The `append` function given is responsible for writing the replacement
/// to the `dst` buffer. That is, it is called with the capture group index
/// of a capture group reference and is expected to resolve the index to its
/// corresponding matched text. If no such match exists, then `append` should
/// not write anything to its given buffer.
pub fn interpolate<A, N>(
    mut replacement: &[u8],
    mut append: A,
    mut name_to_index: N,
    dst: &mut Vec<u8>,
) where
    A: FnMut(usize, &mut Vec<u8>),
    N: FnMut(&str) -> Option<usize>,
{
    while !replacement.is_empty() {
        match memchr(b'$', replacement) {
            None => break,
            Some(i) => {
                dst.extend(&replacement[..i]);
                replacement = &replacement[i..];
            }
        }
        if replacement.get(1).map_or(false, |&b| b == b'$') {
            dst.push(b'$');
            replacement = &replacement[2..];
            continue;
        }
        debug_assert!(!replacement.is_empty());
        let cap_ref = match find_cap_ref(replacement) {
            Some(cap_ref) => cap_ref,
            None => {
                dst.push(b'$');
                replacement = &replacement[1..];
                continue;
            }
        };
        replacement = &replacement[cap_ref.end..];
        match cap_ref.cap {
            Ref::Number(i) => append(i, dst),
            Ref::Named(name) => {
                if let Some(i) = name_to_index(name) {
                    append(i, dst);
                }
            }
        }
    }
    dst.extend(replacement);
}

/// `CaptureRef` represents a reference to a capture group inside some text.
/// The reference is either a capture group name or a number.
///
/// It is also tagged with the position in the text immediately following the
/// capture reference.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
struct CaptureRef<'a> {
    cap: Ref<'a>,
    end: usize,
}

/// A reference to a capture group in some text.
///
/// e.g., `$2`, `$foo`, `${foo}`.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
enum Ref<'a> {
    Named(&'a str),
    Number(usize),
}

impl<'a> From<&'a str> for Ref<'a> {
    fn from(x: &'a str) -> Ref<'a> {
        Ref::Named(x)
    }
}

impl From<usize> for Ref<'static> {
    fn from(x: usize) -> Ref<'static> {
        Ref::Number(x)
    }
}

/// Parses a possible reference to a capture group name in the given text,
/// starting at the beginning of `replacement`.
///
/// If no such valid reference could be found, None is returned.
fn find_cap_ref(replacement: &[u8]) -> Option<CaptureRef> {
    let mut i = 0;
    if replacement.len() <= 1 || replacement[0] != b'$' {
        return None;
    }
    let mut brace = false;
    i += 1;
    if replacement[i] == b'{' {
        brace = true;
        i += 1;
    }
    let mut cap_end = i;
    while replacement.get(cap_end).map_or(false, is_valid_cap_letter) {
        cap_end += 1;
    }
    if cap_end == i {
        return None;
    }
    // We just verified that the range 0..cap_end is valid ASCII, so it must
    // therefore be valid UTF-8. If we really cared, we could avoid this UTF-8
    // check with an unchecked conversion or by parsing the number straight
    // from &[u8].
    let cap = str::from_utf8(&replacement[i..cap_end])
        .expect("valid UTF-8 capture name");
    if brace {
        if !replacement.get(cap_end).map_or(false, |&b| b == b'}') {
            return None;
        }
        cap_end += 1;
    }
    Some(CaptureRef {
        cap: match cap.parse::<u32>() {
            Ok(i) => Ref::Number(i as usize),
            Err(_) => Ref::Named(cap),
        },
        end: cap_end,
    })
}

/// Returns true if and only if the given byte is allowed in a capture name.
fn is_valid_cap_letter(b: &u8) -> bool {
    match *b {
        b'0'..=b'9' | b'a'..=b'z' | b'A'..=b'Z' | b'_' => true,
        _ => false,
    }
}

#[cfg(test)]
mod tests {
    use super::{find_cap_ref, interpolate, CaptureRef};

    macro_rules! find {
        ($name:ident, $text:expr) => {
            #[test]
            fn $name() {
                assert_eq!(None, find_cap_ref($text.as_bytes()));
            }
        };
        ($name:ident, $text:expr, $capref:expr) => {
            #[test]
            fn $name() {
                assert_eq!(Some($capref), find_cap_ref($text.as_bytes()));
            }
        };
    }

    macro_rules! c {
        ($name_or_number:expr, $pos:expr) => {
            CaptureRef { cap: $name_or_number.into(), end: $pos }
        };
    }

    find!(find_cap_ref1, "$foo", c!("foo", 4));
    find!(find_cap_ref2, "${foo}", c!("foo", 6));
    find!(find_cap_ref3, "$0", c!(0, 2));
    find!(find_cap_ref4, "$5", c!(5, 2));
    find!(find_cap_ref5, "$10", c!(10, 3));
    find!(find_cap_ref6, "$42a", c!("42a", 4));
    find!(find_cap_ref7, "${42}a", c!(42, 5));
    find!(find_cap_ref8, "${42");
    find!(find_cap_ref9, "${42 ");
    find!(find_cap_ref10, " $0 ");
    find!(find_cap_ref11, "$");
    find!(find_cap_ref12, " ");
    find!(find_cap_ref13, "");

    // A convenience routine for using interpolate's unwieldy but flexible API.
    fn interpolate_string(
        mut name_to_index: Vec<(&'static str, usize)>,
        caps: Vec<&'static str>,
        replacement: &str,
    ) -> String {
        name_to_index.sort_by_key(|x| x.0);

        let mut dst = vec![];
        interpolate(
            replacement.as_bytes(),
            |i, dst| {
                if let Some(&s) = caps.get(i) {
                    dst.extend(s.as_bytes());
                }
            },
            |name| -> Option<usize> {
                name_to_index
                    .binary_search_by_key(&name, |x| x.0)
                    .ok()
                    .map(|i| name_to_index[i].1)
            },
            &mut dst,
        );
        String::from_utf8(dst).unwrap()
    }

    macro_rules! interp {
        ($name:ident, $map:expr, $caps:expr, $hay:expr, $expected:expr $(,)*) => {
            #[test]
            fn $name() {
                assert_eq!($expected, interpolate_string($map, $caps, $hay));
            }
        };
    }

    interp!(
        interp1,
        vec![("foo", 2)],
        vec!["", "", "xxx"],
        "test $foo test",
        "test xxx test",
    );

    interp!(
        interp2,
        vec![("foo", 2)],
        vec!["", "", "xxx"],
        "test$footest",
        "test",
    );

    interp!(
        interp3,
        vec![("foo", 2)],
        vec!["", "", "xxx"],
        "test${foo}test",
        "testxxxtest",
    );

    interp!(
        interp4,
        vec![("foo", 2)],
        vec!["", "", "xxx"],
        "test$2test",
        "test",
    );

    interp!(
        interp5,
        vec![("foo", 2)],
        vec!["", "", "xxx"],
        "test${2}test",
        "testxxxtest",
    );

    interp!(
        interp6,
        vec![("foo", 2)],
        vec!["", "", "xxx"],
        "test $$foo test",
        "test $foo test",
    );

    interp!(
        interp7,
        vec![("foo", 2)],
        vec!["", "", "xxx"],
        "test $foo",
        "test xxx",
    );

    interp!(
        interp8,
        vec![("foo", 2)],
        vec!["", "", "xxx"],
        "$foo test",
        "xxx test",
    );

    interp!(
        interp9,
        vec![("bar", 1), ("foo", 2)],
        vec!["", "yyy", "xxx"],
        "test $bar$foo",
        "test yyyxxx",
    );

    interp!(
        interp10,
        vec![("bar", 1), ("foo", 2)],
        vec!["", "yyy", "xxx"],
        "test $ test",
        "test $ test",
    );

    interp!(
        interp11,
        vec![("bar", 1), ("foo", 2)],
        vec!["", "yyy", "xxx"],
        "test ${} test",
        "test ${} test",
    );

    interp!(
        interp12,
        vec![("bar", 1), ("foo", 2)],
        vec!["", "yyy", "xxx"],
        "test ${ } test",
        "test ${ } test",
    );

    interp!(
        interp13,
        vec![("bar", 1), ("foo", 2)],
        vec!["", "yyy", "xxx"],
        "test ${a b} test",
        "test ${a b} test",
    );

    interp!(
        interp14,
        vec![("bar", 1), ("foo", 2)],
        vec!["", "yyy", "xxx"],
        "test ${a} test",
        "test test",
    );
}
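The unit tests above drive `interpolate` through the `interpolate_string` helper and the `interp!` macro. As a more direct illustration of the documented `$N`, `${name}` and `$$` handling, here is a sketch of one additional test that could sit in the same module (it relies on the module's existing `use super::interpolate` import; the capture data is made up for the example):

```rust
// Sketch: call `interpolate` directly with plain closures. Capture index 1 is
// pretended to have matched "world"; no named groups are known.
#[test]
fn interpolate_sketch() {
    let mut dst = vec![];
    interpolate(
        b"hello $1, $$name, ${oops}",
        // `append`: resolve a capture index to its matched text.
        |i, dst| {
            if i == 1 {
                dst.extend(b"world");
            }
        },
        // `name_to_index`: no named capture groups in this example.
        |_name| None,
        &mut dst,
    );
    // "$1" is resolved via `append`, "$$" becomes a literal "$", and the
    // unknown "${oops}" reference expands to nothing.
    assert_eq!(dst, b"hello world, $name, ".to_vec());
}
```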
crates/matcher/src/lib.rs (new file, 1151 lines)
(File diff suppressed because it is too large.)
crates/matcher/tests/test_matcher.rs (new file, 230 lines)
@@ -0,0 +1,230 @@
use grep_matcher::{Captures, Match, Matcher};
use regex::bytes::Regex;

use util::{RegexMatcher, RegexMatcherNoCaps};

fn matcher(pattern: &str) -> RegexMatcher {
    RegexMatcher::new(Regex::new(pattern).unwrap())
}

fn matcher_no_caps(pattern: &str) -> RegexMatcherNoCaps {
    RegexMatcherNoCaps(Regex::new(pattern).unwrap())
}

fn m(start: usize, end: usize) -> Match {
    Match::new(start, end)
}

#[test]
fn find() {
    let matcher = matcher(r"(\w+)\s+(\w+)");
    assert_eq!(matcher.find(b" homer simpson ").unwrap(), Some(m(1, 14)));
}

#[test]
fn find_iter() {
    let matcher = matcher(r"(\w+)\s+(\w+)");
    let mut matches = vec![];
    matcher
        .find_iter(b"aa bb cc dd", |m| {
            matches.push(m);
            true
        })
        .unwrap();
    assert_eq!(matches, vec![m(0, 5), m(6, 11)]);

    // Test that find_iter respects short circuiting.
    matches.clear();
    matcher
        .find_iter(b"aa bb cc dd", |m| {
            matches.push(m);
            false
        })
        .unwrap();
    assert_eq!(matches, vec![m(0, 5)]);
}

#[test]
fn try_find_iter() {
    #[derive(Clone, Debug, Eq, PartialEq)]
    struct MyError;

    let matcher = matcher(r"(\w+)\s+(\w+)");
    let mut matches = vec![];
    let err = matcher
        .try_find_iter(b"aa bb cc dd", |m| {
            if matches.is_empty() {
                matches.push(m);
                Ok(true)
            } else {
                Err(MyError)
            }
        })
        .unwrap()
        .unwrap_err();
    assert_eq!(matches, vec![m(0, 5)]);
    assert_eq!(err, MyError);
}

#[test]
fn shortest_match() {
    let matcher = matcher(r"a+");
    // This tests that the default impl isn't doing anything smart, and simply
    // defers to `find`.
    assert_eq!(matcher.shortest_match(b"aaa").unwrap(), Some(3));
    // The actual underlying regex is smarter.
    assert_eq!(matcher.re.shortest_match(b"aaa"), Some(1));
}

#[test]
fn captures() {
    let matcher = matcher(r"(?P<a>\w+)\s+(?P<b>\w+)");
    assert_eq!(matcher.capture_count(), 3);
    assert_eq!(matcher.capture_index("a"), Some(1));
    assert_eq!(matcher.capture_index("b"), Some(2));
    assert_eq!(matcher.capture_index("nada"), None);

    let mut caps = matcher.new_captures().unwrap();
    assert!(matcher.captures(b" homer simpson ", &mut caps).unwrap());
    assert_eq!(caps.get(0), Some(m(1, 14)));
    assert_eq!(caps.get(1), Some(m(1, 6)));
    assert_eq!(caps.get(2), Some(m(7, 14)));
}

#[test]
fn captures_iter() {
    let matcher = matcher(r"(?P<a>\w+)\s+(?P<b>\w+)");
    let mut caps = matcher.new_captures().unwrap();
    let mut matches = vec![];
    matcher
        .captures_iter(b"aa bb cc dd", &mut caps, |caps| {
            matches.push(caps.get(0).unwrap());
            matches.push(caps.get(1).unwrap());
            matches.push(caps.get(2).unwrap());
            true
        })
        .unwrap();
    assert_eq!(
        matches,
        vec![m(0, 5), m(0, 2), m(3, 5), m(6, 11), m(6, 8), m(9, 11),]
    );

    // Test that captures_iter respects short circuiting.
    matches.clear();
    matcher
        .captures_iter(b"aa bb cc dd", &mut caps, |caps| {
            matches.push(caps.get(0).unwrap());
            matches.push(caps.get(1).unwrap());
            matches.push(caps.get(2).unwrap());
            false
        })
        .unwrap();
    assert_eq!(matches, vec![m(0, 5), m(0, 2), m(3, 5),]);
}

#[test]
fn try_captures_iter() {
    #[derive(Clone, Debug, Eq, PartialEq)]
    struct MyError;

    let matcher = matcher(r"(?P<a>\w+)\s+(?P<b>\w+)");
    let mut caps = matcher.new_captures().unwrap();
    let mut matches = vec![];
    let err = matcher
        .try_captures_iter(b"aa bb cc dd", &mut caps, |caps| {
            if matches.is_empty() {
                matches.push(caps.get(0).unwrap());
                matches.push(caps.get(1).unwrap());
                matches.push(caps.get(2).unwrap());
                Ok(true)
            } else {
                Err(MyError)
            }
        })
        .unwrap()
        .unwrap_err();
    assert_eq!(matches, vec![m(0, 5), m(0, 2), m(3, 5)]);
    assert_eq!(err, MyError);
}

// Test that our default impls for capturing are correct. Namely, when
// capturing isn't supported by the underlying matcher, then all of the
// various capturing related APIs fail fast.
#[test]
fn no_captures() {
    let matcher = matcher_no_caps(r"(?P<a>\w+)\s+(?P<b>\w+)");
    assert_eq!(matcher.capture_count(), 0);
    assert_eq!(matcher.capture_index("a"), None);
    assert_eq!(matcher.capture_index("b"), None);
    assert_eq!(matcher.capture_index("nada"), None);

    let mut caps = matcher.new_captures().unwrap();
    assert!(!matcher.captures(b"homer simpson", &mut caps).unwrap());

    let mut called = false;
    matcher
        .captures_iter(b"homer simpson", &mut caps, |_| {
            called = true;
            true
        })
        .unwrap();
    assert!(!called);
}

#[test]
fn replace() {
    let matcher = matcher(r"(\w+)\s+(\w+)");
    let mut dst = vec![];
    matcher
        .replace(b"aa bb cc dd", &mut dst, |_, dst| {
            dst.push(b'z');
            true
        })
        .unwrap();
    assert_eq!(dst, b"z z");

    // Test that replacements respect short circuiting.
    dst.clear();
    matcher
        .replace(b"aa bb cc dd", &mut dst, |_, dst| {
            dst.push(b'z');
            false
        })
        .unwrap();
    assert_eq!(dst, b"z cc dd");
}

#[test]
fn replace_with_captures() {
    let matcher = matcher(r"(\w+)\s+(\w+)");
    let haystack = b"aa bb cc dd";
    let mut caps = matcher.new_captures().unwrap();
    let mut dst = vec![];
    matcher
        .replace_with_captures(haystack, &mut caps, &mut dst, |caps, dst| {
            caps.interpolate(
                |name| matcher.capture_index(name),
                haystack,
                b"$2 $1",
                dst,
            );
            true
        })
        .unwrap();
    assert_eq!(dst, b"bb aa dd cc");

    // Test that replacements respect short circuiting.
    dst.clear();
    matcher
        .replace_with_captures(haystack, &mut caps, &mut dst, |caps, dst| {
            caps.interpolate(
                |name| matcher.capture_index(name),
                haystack,
                b"$2 $1",
                dst,
            );
            false
        })
        .unwrap();
    assert_eq!(dst, b"bb aa cc dd");
}
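The grep-matcher README above points users at the higher-level `grep` facade rather than this trait directly. For context only, a hedged sketch of that typical path, assuming the companion `grep-regex` (`RegexMatcher::new`) and `grep-searcher` (`Searcher`, `sinks::UTF8`) APIs; none of this code is part of the diff itself:

```rust
use grep_regex::RegexMatcher;
use grep_searcher::sinks::UTF8;
use grep_searcher::Searcher;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // A matcher from grep-regex; it implements grep-matcher's `Matcher`
    // trait, which is what lets the searcher drive it.
    let matcher = RegexMatcher::new(r"Simpson")?;

    let haystack = b"Homer Simpson\nC. Montgomery Burns\nLisa Simpson\n";
    let mut hits = vec![];
    Searcher::new().search_slice(
        &matcher,
        haystack,
        UTF8(|line_number, line| {
            // Collect each matching line with its line number.
            hits.push((line_number, line.trim_end().to_string()));
            Ok(true) // keep searching
        }),
    )?;

    assert_eq!(
        hits,
        vec![
            (1, "Homer Simpson".to_string()),
            (3, "Lisa Simpson".to_string()),
        ]
    );
    Ok(())
}
```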
Some files were not shown because too many files have changed in this diff.