Mirror of https://github.com/BurntSushi/ripgrep.git (synced 2025-08-19 14:13:49 -07:00)
Compare commits: globset-0. ... grep-print (858 commits)
Commit list (Author / SHA1 / Date): 858 commits, from b80947a8b3 through 56341973ee.
.cargo/config.toml (new file, +8)
@@ -0,0 +1,8 @@
# On Windows MSVC, statically link the C runtime so that the resulting EXE does
# not depend on the vcruntime DLL.
#
# See: https://github.com/BurntSushi/ripgrep/pull/1613
[target.x86_64-pc-windows-msvc]
rustflags = ["-C", "target-feature=+crt-static"]
[target.i686-pc-windows-msvc]
rustflags = ["-C", "target-feature=+crt-static"]

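One way to confirm that the static CRT setting took effect is to inspect the built executable's DLL imports. This is only a sketch, assuming an MSVC toolchain with `dumpbin` on `PATH`; the paths are illustrative:

```
# Build for the MSVC target covered by the config above.
cargo build --release --target x86_64-pc-windows-msvc

# List imported DLLs; with crt-static in effect, no vcruntime DLL should appear.
dumpbin /dependents target\x86_64-pc-windows-msvc\release\rg.exe
```
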
.github/ISSUE_TEMPLATE/bug_report.md (vendored)
@@ -1,23 +1,31 @@
---
name: Bug report
about: An issue with ripgrep or any of its crates (ignore, globset, etc.)
title: ''
labels: ''
assignees: ''
---


#### What version of ripgrep are you using?

Replace this text with the output of `rg --version`.

#### How did you install ripgrep?

If you installed ripgrep with snap and are getting strange file permission or
file not found errors, then please do not file a bug. Instead, use one of the
Github binary releases.

#### What operating system are you using ripgrep on?

Replace this text with your operating system and version.

#### Describe your question, feature request, or bug.
#### Describe your bug.

If a question, please describe the problem you're trying to solve and give
as much context as possible.
Give a high level description of the bug.

If a feature request, please describe the behavior you want and the motivation.
Please also provide an example of how ripgrep would be used if your feature
request were added.

If a bug, please see below.

#### If this is a bug, what are the steps to reproduce the behavior?
#### What are the steps to reproduce the behavior?

If possible, please include both your search patterns and the corpus on which
you are searching. Unless the bug is very obvious, then it is unlikely that it
@@ -26,7 +34,7 @@ will be fixed if the ripgrep maintainers cannot reproduce it.
If the corpus is too big and you cannot decrease its size, file the bug anyway
and the ripgrep maintainers will help figure out next steps.

#### If this is a bug, what is the actual behavior?
#### What is the actual behavior?

Show the command you ran and the actual output. Include the `--debug` flag in
your invocation of ripgrep.
@@ -42,6 +50,6 @@ goes
here
```

#### If this is a bug, what is the expected behavior?
#### What is the expected behavior?

What do you think ripgrep should have done?

.github/ISSUE_TEMPLATE/config.yml (vendored, new file, +6)
@@ -0,0 +1,6 @@
blank_issues_enabled: true
contact_links:
  - name: Ask a question
    about: |
      You've come to seek help or want to discuss something related to ripgrep.
    url: https://github.com/BurntSushi/ripgrep/discussions/new

.github/ISSUE_TEMPLATE/feature_request.md (vendored, new file, +20)
@@ -0,0 +1,20 @@
---
name: Feature request
about: Suggest a new feature for ripgrep
title: ''
labels: ''
assignees: ''
---


#### Describe your feature request

Please describe the behavior you want and the motivation. Please also provide
examples of how ripgrep would be used if your feature request were added.

If you're not sure what to write here, then try imagining what the ideal
documentation of your new feature would look like in ripgrep's man page. Then
try to write it.

If you're requesting the addition or change of default file types, please open
a PR. We can discuss it there if necessary.

.github/dependabot.yml (vendored, new file, +6)
@@ -0,0 +1,6 @@
version: 2
updates:
  - package-ecosystem: "github-actions"
    directory: "/"
    schedule:
      interval: "weekly"

.github/workflows/ci.yml (vendored, new file, +206)
@@ -0,0 +1,206 @@
name: ci
on:
  pull_request:
  push:
    branches:
    - master
  schedule:
  - cron: '00 01 * * *'
jobs:
  test:
    name: test
    env:
      # For some builds, we use cross to test on 32-bit and big-endian
      # systems.
      CARGO: cargo
      # When CARGO is set to CROSS, this is set to `--target matrix.target`.
      TARGET_FLAGS:
      # When CARGO is set to CROSS, TARGET_DIR includes matrix.target.
      TARGET_DIR: ./target
      # Emit backtraces on panics.
      RUST_BACKTRACE: 1
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
        build:
        # We test ripgrep on a pinned version of Rust, along with the moving
        # targets of 'stable' and 'beta' for good measure.
        - pinned
        - stable
        - beta
        # Our release builds are generated by a nightly compiler to take
        # advantage of the latest optimizations/compile time improvements. So
        # we test all of them here. (We don't do mips releases, but test on
        # mips for big-endian coverage.)
        - nightly
        - nightly-musl
        - nightly-32
        - nightly-mips
        - nightly-arm
        - macos
        - win-msvc
        - win-gnu
        include:
        - build: pinned
          os: ubuntu-22.04
          rust: 1.65.0
        - build: stable
          os: ubuntu-22.04
          rust: stable
        - build: beta
          os: ubuntu-22.04
          rust: beta
        - build: nightly
          os: ubuntu-22.04
          rust: nightly
        - build: nightly-musl
          os: ubuntu-22.04
          rust: nightly
          target: x86_64-unknown-linux-musl
        - build: nightly-32
          os: ubuntu-22.04
          rust: nightly
          target: i686-unknown-linux-gnu
        - build: nightly-mips
          os: ubuntu-22.04
          rust: nightly
          target: mips64-unknown-linux-gnuabi64
        - build: nightly-arm
          os: ubuntu-22.04
          rust: nightly
          # For stripping release binaries:
          # docker run --rm -v $PWD/target:/target:Z \
          #   rustembedded/cross:arm-unknown-linux-gnueabihf \
          #   arm-linux-gnueabihf-strip \
          #   /target/arm-unknown-linux-gnueabihf/debug/rg
          target: arm-unknown-linux-gnueabihf
        - build: macos
          os: macos-12
          rust: nightly
        - build: win-msvc
          os: windows-2022
          rust: nightly
        - build: win-gnu
          os: windows-2022
          rust: nightly-x86_64-gnu
    steps:
    - name: Checkout repository
      uses: actions/checkout@v3

    - name: Install packages (Ubuntu)
      if: matrix.os == 'ubuntu-22.04'
      run: |
        ci/ubuntu-install-packages

    - name: Install packages (macOS)
      if: matrix.os == 'macos-12'
      run: |
        ci/macos-install-packages

    - name: Install Rust
      uses: dtolnay/rust-toolchain@master
      with:
        toolchain: ${{ matrix.rust }}

    - name: Use Cross
      if: matrix.target != ''
      run: |
        cargo install cross
        echo "CARGO=cross" >> $GITHUB_ENV
        echo "TARGET_FLAGS=--target ${{ matrix.target }}" >> $GITHUB_ENV
        echo "TARGET_DIR=./target/${{ matrix.target }}" >> $GITHUB_ENV

    - name: Show command used for Cargo
      run: |
        echo "cargo command is: ${{ env.CARGO }}"
        echo "target flag is: ${{ env.TARGET_FLAGS }}"

    - name: Build ripgrep and all crates
      run: ${{ env.CARGO }} build --verbose --workspace ${{ env.TARGET_FLAGS }}

    - name: Build ripgrep with PCRE2
      run: ${{ env.CARGO }} build --verbose --workspace --features pcre2 ${{ env.TARGET_FLAGS }}

    # This is useful for debugging problems when the expected build artifacts
    # (like shell completions and man pages) aren't generated.
    - name: Show build.rs stderr
      shell: bash
      run: |
        set +x
        stderr="$(find "${{ env.TARGET_DIR }}/debug" -name stderr -print0 | xargs -0 ls -t | head -n1)"
        if [ -s "$stderr" ]; then
          echo "===== $stderr ===== "
          cat "$stderr"
          echo "====="
        fi
        set -x

    - name: Run tests with PCRE2 (sans cross)
      if: matrix.target == ''
      run: ${{ env.CARGO }} test --verbose --workspace --features pcre2 ${{ env.TARGET_FLAGS }}

    - name: Run tests without PCRE2 (with cross)
      # These tests should actually work, but they almost double the runtime.
      # Every integration test spins up qemu to run 'rg', and when PCRE2 is
      # enabled, every integration test is run twice: one with the default
      # regex engine and once with PCRE2.
      if: matrix.target != ''
      run: ${{ env.CARGO }} test --verbose --workspace ${{ env.TARGET_FLAGS }}

    - name: Test for existence of build artifacts (Windows)
      if: matrix.os == 'windows-2022'
      shell: bash
      run: |
        outdir="$(ci/cargo-out-dir "${{ env.TARGET_DIR }}")"
        ls "$outdir/_rg.ps1" && file "$outdir/_rg.ps1"

    - name: Test for existence of build artifacts (Unix)
      if: matrix.os != 'windows-2022'
      shell: bash
      run: |
        outdir="$(ci/cargo-out-dir "${{ env.TARGET_DIR }}")"
        # TODO: Check for the man page generation here. For whatever reason,
        # it seems to be intermittently failing in CI. No idea why.
        # for f in rg.bash rg.fish rg.1; do
        for f in rg.bash rg.fish; do
          # We could use file -E here, but it isn't supported on macOS.
          ls "$outdir/$f" && file "$outdir/$f"
        done

    - name: Test zsh shell completions (Unix, sans cross)
      # We could test this when using Cross, but we'd have to execute the
      # 'rg' binary (done in test-complete) with qemu, which is a pain and
      # doesn't really gain us much. If shell completion works in one place,
      # it probably works everywhere.
      if: matrix.target == '' && matrix.os != 'windows-2022'
      shell: bash
      run: ci/test-complete

  rustfmt:
    name: rustfmt
    runs-on: ubuntu-22.04
    steps:
    - name: Checkout repository
      uses: actions/checkout@v3
    - name: Install Rust
      uses: dtolnay/rust-toolchain@master
      with:
        toolchain: stable
        components: rustfmt
    - name: Check formatting
      run: cargo fmt --all --check

  docs:
    name: Docs
    runs-on: ubuntu-22.04
    steps:
    - name: Checkout repository
      uses: actions/checkout@v3
    - name: Install Rust
      uses: dtolnay/rust-toolchain@master
      with:
        toolchain: stable
    - name: Check documentation
      env:
        RUSTDOCFLAGS: -D warnings
      run: cargo doc --no-deps --document-private-items --workspace

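The cross-based jobs above can be approximated locally. A minimal sketch, assuming Docker is available and using one of the targets from the matrix (the `nightly-32` job's target):

```
# Install the same cross tool the workflow installs.
cargo install cross

# Build and test the workspace for a 32-bit target, mirroring the CI steps.
cross build --verbose --workspace --target i686-unknown-linux-gnu
cross test --verbose --workspace --target i686-unknown-linux-gnu
```
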
.github/workflows/release.yml (vendored, new file, +180)
@@ -0,0 +1,180 @@
# The way this works is the following:
#
# The create-release job runs purely to initialize the GitHub release itself
# and to output upload_url for the following job.
#
# The build-release job runs only once create-release is finished. It gets the
# release upload URL from create-release job outputs, then builds the release
# executables for each supported platform and attaches them as release assets
# to the previously created release.
#
# The key here is that we create the release only once.
#
# Reference:
# https://eugene-babichenko.github.io/blog/2020/05/09/github-actions-cross-platform-auto-releases/

name: release
on:
  push:
    # Enable when testing release infrastructure on a branch.
    # branches:
    # - ag/work
    tags:
    - "[0-9]+.[0-9]+.[0-9]+"
jobs:
  create-release:
    name: create-release
    runs-on: ubuntu-22.04
    # env:
      # Set to force version number, e.g., when no tag exists.
      # RG_VERSION: TEST-0.0.0
    outputs:
      upload_url: ${{ steps.release.outputs.upload_url }}
      rg_version: ${{ env.RG_VERSION }}
    steps:
    - name: Get the release version from the tag
      shell: bash
      if: env.RG_VERSION == ''
      run: |
        # Apparently, this is the right way to get a tag name. Really?
        #
        # See: https://github.community/t5/GitHub-Actions/How-to-get-just-the-tag-name/m-p/32167/highlight/true#M1027
        echo "RG_VERSION=${GITHUB_REF#refs/tags/}" >> $GITHUB_ENV
        echo "version is: ${{ env.RG_VERSION }}"
    - name: Create GitHub release
      id: release
      uses: actions/create-release@v1
      env:
        GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
      with:
        tag_name: ${{ env.RG_VERSION }}
        release_name: ${{ env.RG_VERSION }}

  build-release:
    name: build-release
    needs: ['create-release']
    runs-on: ${{ matrix.os }}
    env:
      # For some builds, we use cross to test on 32-bit and big-endian
      # systems.
      CARGO: cargo
      # When CARGO is set to CROSS, this is set to `--target matrix.target`.
      TARGET_FLAGS: ""
      # When CARGO is set to CROSS, TARGET_DIR includes matrix.target.
      TARGET_DIR: ./target
      # Emit backtraces on panics.
      RUST_BACKTRACE: 1
      # Build static releases with PCRE2.
      PCRE2_SYS_STATIC: 1
    strategy:
      matrix:
        build: [linux, linux-arm, macos, win-msvc, win-gnu, win32-msvc]
        include:
        - build: linux
          os: ubuntu-22.04
          rust: nightly
          target: x86_64-unknown-linux-musl
        - build: linux-arm
          os: ubuntu-22.04
          rust: nightly
          target: arm-unknown-linux-gnueabihf
        - build: macos
          os: macos-12
          rust: nightly
          target: x86_64-apple-darwin
        - build: win-msvc
          os: windows-2022
          rust: nightly
          target: x86_64-pc-windows-msvc
        - build: win-gnu
          os: windows-2022
          rust: nightly-x86_64-gnu
          target: x86_64-pc-windows-gnu
        - build: win32-msvc
          os: windows-2022
          rust: nightly
          target: i686-pc-windows-msvc

    steps:
    - name: Checkout repository
      uses: actions/checkout@v3

    - name: Install packages (Ubuntu)
      if: matrix.os == 'ubuntu-22.04'
      run: |
        ci/ubuntu-install-packages

    - name: Install packages (macOS)
      if: matrix.os == 'macos-12'
      run: |
        ci/macos-install-packages

    - name: Install Rust
      uses: dtolnay/rust-toolchain@master
      with:
        toolchain: ${{ matrix.rust }}
        target: ${{ matrix.target }}

    - name: Use Cross
      shell: bash
      run: |
        cargo install cross
        echo "CARGO=cross" >> $GITHUB_ENV
        echo "TARGET_FLAGS=--target ${{ matrix.target }}" >> $GITHUB_ENV
        echo "TARGET_DIR=./target/${{ matrix.target }}" >> $GITHUB_ENV

    - name: Show command used for Cargo
      run: |
        echo "cargo command is: ${{ env.CARGO }}"
        echo "target flag is: ${{ env.TARGET_FLAGS }}"
        echo "target dir is: ${{ env.TARGET_DIR }}"

    - name: Build release binary
      run: ${{ env.CARGO }} build --verbose --release --features pcre2 ${{ env.TARGET_FLAGS }}

    - name: Strip release binary (linux and macos)
      if: matrix.build == 'linux' || matrix.build == 'macos'
      run: strip "target/${{ matrix.target }}/release/rg"

    - name: Strip release binary (arm)
      if: matrix.build == 'linux-arm'
      run: |
        docker run --rm -v \
          "$PWD/target:/target:Z" \
          rustembedded/cross:arm-unknown-linux-gnueabihf \
          arm-linux-gnueabihf-strip \
          /target/arm-unknown-linux-gnueabihf/release/rg

    - name: Build archive
      shell: bash
      run: |
        outdir="$(ci/cargo-out-dir "${{ env.TARGET_DIR }}")"
        staging="ripgrep-${{ needs.create-release.outputs.rg_version }}-${{ matrix.target }}"
        mkdir -p "$staging"/{complete,doc}

        cp {README.md,COPYING,UNLICENSE,LICENSE-MIT} "$staging/"
        cp {CHANGELOG.md,FAQ.md,GUIDE.md} "$staging/doc/"
        cp "$outdir"/{rg.bash,rg.fish,_rg.ps1} "$staging/complete/"
        cp complete/_rg "$staging/complete/"

        if [ "${{ matrix.os }}" = "windows-2022" ]; then
          cp "target/${{ matrix.target }}/release/rg.exe" "$staging/"
          7z a "$staging.zip" "$staging"
          echo "ASSET=$staging.zip" >> $GITHUB_ENV
        else
          # The man page is only generated on Unix systems. ¯\_(ツ)_/¯
          cp "$outdir"/rg.1 "$staging/doc/"
          cp "target/${{ matrix.target }}/release/rg" "$staging/"
          tar czf "$staging.tar.gz" "$staging"
          echo "ASSET=$staging.tar.gz" >> $GITHUB_ENV
        fi

    - name: Upload release archive
      uses: actions/upload-release-asset@v1.0.2
      env:
        GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
      with:
        upload_url: ${{ needs.create-release.outputs.upload_url }}
        asset_path: ${{ env.ASSET }}
        asset_name: ${{ env.ASSET }}
        asset_content_type: application/octet-stream

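Given the `on.push.tags` pattern above, a release run is started by pushing a bare version tag. A sketch, with the version number purely illustrative:

```
# Tag the release commit and push the tag; the workflow then creates the
# GitHub release and uploads one archive per target in the matrix.
git tag 13.0.0
git push origin 13.0.0
```
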
.gitignore (vendored, +4)
@@ -15,3 +15,7 @@ parts
*.snap
*.pyc
ripgrep*_source.tar.bz2

# Cargo timings
cargo-timing-*.html
cargo-timing.html

.travis.yml (deleted, 93 lines)
@@ -1,93 +0,0 @@
language: rust
env:
  global:
    - PROJECT_NAME: ripgrep
    - RUST_BACKTRACE: full
addons:
  apt:
    packages:
      # For generating man page.
      - libxslt1-dev
      - asciidoc
      - docbook-xsl
      - xsltproc
      - libxml2-utils
      # Needed for completion-function test.
      - zsh
      # Needed for testing decompression search.
      - xz-utils
matrix:
  fast_finish: true
  include:
    # Nightly channel.
    # All *nix releases are done on the nightly channel to take advantage
    # of the regex library's multiple pattern SIMD search.
    - os: linux
      rust: nightly
      env: TARGET=i686-unknown-linux-musl
    - os: linux
      rust: nightly
      env: TARGET=x86_64-unknown-linux-musl
    - os: osx
      rust: nightly
      env: TARGET=x86_64-apple-darwin
    - os: linux
      rust: nightly
      env: TARGET=arm-unknown-linux-gnueabihf GCC_VERSION=4.8
      addons:
        apt:
          packages:
            - gcc-4.8-arm-linux-gnueabihf
            - binutils-arm-linux-gnueabihf
            - libc6-armhf-cross
            - libc6-dev-armhf-cross
    # Beta channel. We enable these to make sure there are no regressions in
    # Rust beta releases.
    - os: linux
      rust: beta
      env: TARGET=x86_64-unknown-linux-musl
    - os: linux
      rust: beta
      env: TARGET=x86_64-unknown-linux-gnu
    # Minimum Rust supported channel. We enable these to make sure ripgrep
    # continues to work on the advertised minimum Rust version.
    - os: linux
      rust: 1.20.0
      env: TARGET=x86_64-unknown-linux-gnu
    - os: linux
      rust: 1.20.0
      env: TARGET=x86_64-unknown-linux-musl
    - os: linux
      rust: 1.20.0
      env: TARGET=arm-unknown-linux-gnueabihf GCC_VERSION=4.8
      addons:
        apt:
          packages:
            - gcc-4.8-arm-linux-gnueabihf
            - binutils-arm-linux-gnueabihf
            - libc6-armhf-cross
            - libc6-dev-armhf-cross
install: ci/install.sh
script: ci/script.sh
before_deploy: ci/before_deploy.sh
deploy:
  provider: releases
  file_glob: true
  file: deployment/${PROJECT_NAME}-${TRAVIS_TAG}-${TARGET}.tar.gz
  skip_cleanup: true
  on:
    condition: $TRAVIS_RUST_VERSION = nightly
    branch: master
    tags: true
  api_key:
    secure: "IbSnsbGkxSydR/sozOf1/SRvHplzwRUHzcTjM7BKnr7GccL86gRPUrsrvD103KjQUGWIc1TnK1YTq5M0Onswg/ORDjqa1JEJPkPdPnVh9ipbF7M2De/7IlB4X4qXLKoApn8+bx2x/mfYXu4G+G1/2QdbaKK2yfXZKyjz0YFx+6CNrVCT2Nk8q7aHvOOzAL58vsG8iPDpupuhxlMDDn/UhyOWVInmPPQ0iJR1ZUJN8xJwXvKvBbfp3AhaBiAzkhXHNLgBR8QC5noWWMXnuVDMY3k4f3ic0V+p/qGUCN/nhptuceLxKFicMCYObSZeUzE5RAI0/OBW7l3z2iCoc+TbAnn+JrX/ObJCfzgAOXAU3tLaBFMiqQPGFKjKg1ltSYXomOFP/F7zALjpvFp4lYTBajRR+O3dqaxA9UQuRjw27vOeUpMcga4ZzL4VXFHzrxZKBHN//XIGjYAVhJ1NSSeGpeJV5/+jYzzWKfwSagRxQyVCzMooYFFXzn8Yxdm3PJlmp3GaAogNkdB9qKcrEvRINCelalzALPi0hD/HUDi8DD2PNTCLLMo6VSYtvc685Zbe+KgNzDV1YyTrRCUW6JotrS0r2ULLwnsh40hSB//nNv3XmwNmC/CmW5QAnIGj8cBMF4S2t6ohADIndojdAfNiptmaZOIT6owK7bWMgPMyopo="
branches:
  only:
    # Pushes and PR to the master branch
    - master
    # Ruby regex to match tags. Required, or travis won't trigger deploys when
    # a new tag is pushed.
    - /^\d+\.\d+\.\d+.*$/
notifications:
  email:
    on_success: never

CHANGELOG.md (850 changes)
@@ -1,3 +1,851 @@
TBD
===
Unreleased changes. Release notes have not yet been written.

Bug fixes:

* [BUG #1891](https://github.com/BurntSushi/ripgrep/issues/1891):
  Fix bug when using `-w` with a regex that can match the empty string.
* [BUG #1911](https://github.com/BurntSushi/ripgrep/issues/1911):
  Disable mmap searching in all non-64-bit environments.
* [BUG #2236](https://github.com/BurntSushi/ripgrep/issues/2236):
  Fix gitignore parsing bug where a trailing `\/` resulted in an error.


13.0.0 (2021-06-12)
===================
ripgrep 13 is a new major version release of ripgrep that primarily contains
bug fixes, some performance improvements and a few minor breaking changes.
There is also a fix for a security vulnerability on Windows
([CVE-2021-3013](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-3013)).

Some highlights:

A new short flag, `-.`, has been added. It is an alias for the `--hidden` flag,
which instructs ripgrep to search hidden files and directories.

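For example, the following two invocations are equivalent as of this release (the pattern and path are illustrative):

```
# Search hidden files and directories too; '-.' is the new short alias.
rg --hidden 'TODO' ./src
rg -. 'TODO' ./src
```
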
ripgrep is now using a new
[vectorized implementation of `memmem`](https://github.com/BurntSushi/memchr/pull/82),
which accelerates many common searches. If you notice any performance
regressions (or major improvements), I'd love to hear about them through an
issue report!

Also, for Windows users targeting MSVC, Cargo will now build fully static
executables of ripgrep. The release binaries for ripgrep 13 have been compiled
using this configuration.

**BREAKING CHANGES**:

**Binary detection output has changed slightly.**

In this release, a small tweak has been made to the output format when a binary
file is detected. Previously, it looked like this:

```
Binary file FOO matches (found "\0" byte around offset XXX)
```

Now it looks like this:

```
FOO: binary file matches (found "\0" byte around offset XXX)
```

**vimgrep output in multi-line now only prints the first line for each match.**

See [issue 1866](https://github.com/BurntSushi/ripgrep/issues/1866) for more
discussion on this. Previously, every line in a match was duplicated, even
when it spanned multiple lines. There are no changes to vimgrep output when
multi-line mode is disabled.

**In multi-line mode, --count is now equivalent to --count-matches.**

This appears to match how `pcre2grep` implements `--count`. Previously, ripgrep
would produce outright incorrect counts. Another alternative would be to simply
count the number of lines---even if it's more than the number of matches---but
that seems highly unintuitive.

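A small sketch of the new behavior; the file name and contents are illustrative:

```
# A line on which the pattern matches twice.
printf 'foo bar foo\n' > sample.txt

# In multiline mode, --count now reports matches (2) rather than matching
# lines (1), the same number --count-matches reports.
rg -U --count foo sample.txt
rg -U --count-matches foo sample.txt
```
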
**FULL LIST OF FIXES AND IMPROVEMENTS:**

Security fixes:

* [CVE-2021-3013](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-3013):
  Fixes a security hole on Windows where running ripgrep with either the
  `-z/--search-zip` or `--pre` flags can result in running arbitrary
  executables from the current directory.
* [VULN #1773](https://github.com/BurntSushi/ripgrep/issues/1773):
  This is the public facing issue tracking CVE-2021-3013. ripgrep's README
  now contains a section describing how to report a vulnerability.

Performance improvements:

* [PERF #1657](https://github.com/BurntSushi/ripgrep/discussions/1657):
  Check if a file should be ignored first before issuing stat calls.
* [PERF memchr#82](https://github.com/BurntSushi/memchr/pull/82):
  ripgrep now uses a new vectorized implementation of `memmem`.

Feature enhancements:

* Added or improved file type filtering for ASP, Bazel, dvc, FlatBuffers,
  Futhark, minified files, Mint, pofiles (from GNU gettext) Racket, Red, Ruby,
  VCL, Yang.
* [FEATURE #1404](https://github.com/BurntSushi/ripgrep/pull/1404):
  ripgrep now prints a warning if nothing is searched.
* [FEATURE #1613](https://github.com/BurntSushi/ripgrep/pull/1613):
  Cargo will now produce static executables on Windows when using MSVC.
* [FEATURE #1680](https://github.com/BurntSushi/ripgrep/pull/1680):
  Add `-.` as a short flag alias for `--hidden`.
* [FEATURE #1842](https://github.com/BurntSushi/ripgrep/issues/1842):
  Add `--field-{context,match}-separator` for customizing field delimiters.
* [FEATURE #1856](https://github.com/BurntSushi/ripgrep/pull/1856):
  The README now links to a
  [Spanish translation](https://github.com/UltiRequiem/traducciones/tree/master/ripgrep).

Bug fixes:

* [BUG #1277](https://github.com/BurntSushi/ripgrep/issues/1277):
  Document cygwin path translation behavior in the FAQ.
* [BUG #1739](https://github.com/BurntSushi/ripgrep/issues/1739):
  Fix bug where replacements were buggy if the regex matched a line terminator.
* [BUG #1311](https://github.com/BurntSushi/ripgrep/issues/1311):
  Fix multi-line bug where a search & replace for `\n` didn't work as expected.
* [BUG #1401](https://github.com/BurntSushi/ripgrep/issues/1401):
  Fix buggy interaction between PCRE2 look-around and `-o/--only-matching`.
* [BUG #1412](https://github.com/BurntSushi/ripgrep/issues/1412):
  Fix multi-line bug with searches using look-around past matching lines.
* [BUG #1577](https://github.com/BurntSushi/ripgrep/issues/1577):
  Fish shell completions will continue to be auto-generated.
* [BUG #1642](https://github.com/BurntSushi/ripgrep/issues/1642):
  Fixes a bug where using `-m` and `-A` printed more matches than the limit.
* [BUG #1703](https://github.com/BurntSushi/ripgrep/issues/1703):
  Clarify the function of `-u/--unrestricted`.
* [BUG #1708](https://github.com/BurntSushi/ripgrep/issues/1708):
  Clarify how `-S/--smart-case` works.
* [BUG #1730](https://github.com/BurntSushi/ripgrep/issues/1730):
  Clarify that CLI invocation must always be valid, regardless of config file.
* [BUG #1741](https://github.com/BurntSushi/ripgrep/issues/1741):
  Fix stdin detection when using PowerShell in UNIX environments.
* [BUG #1756](https://github.com/BurntSushi/ripgrep/pull/1756):
  Fix bug where `foo/**` would match `foo`, but it shouldn't.
* [BUG #1765](https://github.com/BurntSushi/ripgrep/issues/1765):
  Fix panic when `--crlf` is used in some cases.
* [BUG #1638](https://github.com/BurntSushi/ripgrep/issues/1638):
  Correctly sniff UTF-8 and do transcoding, like we do for UTF-16.
* [BUG #1816](https://github.com/BurntSushi/ripgrep/issues/1816):
  Add documentation for glob alternate syntax, e.g., `{a,b,..}`.
* [BUG #1847](https://github.com/BurntSushi/ripgrep/issues/1847):
  Clarify how the `--hidden` flag works.
* [BUG #1866](https://github.com/BurntSushi/ripgrep/issues/1866#issuecomment-841635553):
  Fix bug when computing column numbers in `--vimgrep` mode.
* [BUG #1868](https://github.com/BurntSushi/ripgrep/issues/1868):
  Fix bug where `--passthru` and `-A/-B/-C` did not override each other.
* [BUG #1869](https://github.com/BurntSushi/ripgrep/pull/1869):
  Clarify docs for `--files-with-matches` and `--files-without-match`.
* [BUG #1878](https://github.com/BurntSushi/ripgrep/issues/1878):
  Fix bug where `\A` could produce unanchored matches in multiline search.
* [BUG 94e4b8e3](https://github.com/BurntSushi/ripgrep/commit/94e4b8e3):
  Fix column numbers with `--vimgrep` is used with `-U/--multiline`.

12.1.1 (2020-05-29)
===================
ripgrep 12.1.1 is a patch release that fixes a couple small bugs. In
particular, the ripgrep 12.1.0 release did not tag new releases for all of its
in-tree dependencies. As a result, ripgrep built dependencies from crates.io
would produce a different build than compiling ripgrep from source on the
`12.1.0` tag. Namely, some crates like `grep-cli` had unreleased changes.

Bug fixes:

* [BUG #1581](https://github.com/BurntSushi/ripgrep/issues/1581):
  Corrects some egregious markup output in `--help`.
* [BUG #1591](https://github.com/BurntSushi/ripgrep/issues/1591):
  Mention the special `$0` capture group in docs for the `-r/--replace` flag.
* [BUG #1602](https://github.com/BurntSushi/ripgrep/issues/1602):
  Fix failing test resulting from out-of-sync dependencies.


12.1.0 (2020-05-09)
===================
ripgrep 12.1.0 is a small minor version release that mostly includes bug fixes
and documentation improvements. This release also contains some important
notices for downstream packagers.

**Notices for downstream ripgrep package maintainers:**

* Fish shell completions will be removed in the ripgrep 13 release.
  See [#1577](https://github.com/BurntSushi/ripgrep/issues/1577)
  for more details.
* ripgrep has switched from `a2x` to `asciidoctor` to generate the man page.
  If `asciidoctor` is not present, then ripgrep will currently fall back to
  `a2x`. Support for `a2x` will be dropped in the ripgrep 13 release.
  See [#1544](https://github.com/BurntSushi/ripgrep/issues/1544)
  for more details.

Feature enhancements:

* [FEATURE #1547](https://github.com/BurntSushi/ripgrep/pull/1547):
  Support decompressing `.Z` files via `uncompress`.

Bug fixes:

* [BUG #1252](https://github.com/BurntSushi/ripgrep/issues/1252):
  Add a section on the `--pre` flag to the GUIDE.
* [BUG #1339](https://github.com/BurntSushi/ripgrep/issues/1339):
  Improve error message when a pattern with invalid UTF-8 is provided.
* [BUG #1524](https://github.com/BurntSushi/ripgrep/issues/1524):
  Note how to escape a `$` when using `--replace`.
* [BUG #1537](https://github.com/BurntSushi/ripgrep/issues/1537):
  Fix match bug caused by inner literal optimization.
* [BUG #1544](https://github.com/BurntSushi/ripgrep/issues/1544):
  ripgrep now uses `asciidoctor` instead of `a2x` to generate its man page.
* [BUG #1550](https://github.com/BurntSushi/ripgrep/issues/1550):
  Substantially reduce peak memory usage when searching wide directories.
* [BUG #1571](https://github.com/BurntSushi/ripgrep/issues/1571):
  Add note about configuration files in `--type-{add,clear}` docs.
* [BUG #1573](https://github.com/BurntSushi/ripgrep/issues/1573):
  Fix incorrect `--count-matches` output when using look-around.

12.0.1 (2020-03-29)
===================
ripgrep 12.0.1 is a small patch release that includes a minor bug fix relating
to superfluous error messages when searching git repositories with sub-modules.
This was a regression introduced in the 12.0.0 release.

Bug fixes:

* [BUG #1520](https://github.com/BurntSushi/ripgrep/issues/1520):
  Don't emit spurious error messages in git repositories with submodules.


12.0.0 (2020-03-15)
===================
ripgrep 12 is a new major version release of ripgrep that contains many bug
fixes, several important performance improvements and a few minor new features.

In a near future release, I am hoping to add an
[indexing feature](https://github.com/BurntSushi/ripgrep/issues/1497)
to ripgrep, which will dramatically speed up searching by building an index.
Feedback would very much be appreciated, especially on the user experience
which will be difficult to get right.

This release has no known breaking changes.

Deprecations:

* The `--no-pcre2-unicode` flag is deprecated. Instead, use the `--no-unicode`
  flag, which applies to both the default regex engine and PCRE2. For now,
  `--no-pcre2-unicode` and `--pcre2-unicode` are aliases to `--no-unicode`
  and `--unicode`, respectively. The `--[no-]pcre2-unicode` flags may be
  removed in a future release.
* The `--auto-hybrid-regex` flag is deprecated. Instead, use the new `--engine`
  flag with the `auto` value.

Performance improvements:

* [PERF #1087](https://github.com/BurntSushi/ripgrep/pull/1087):
  ripgrep is smarter when detected literals are whitespace.
* [PERF #1381](https://github.com/BurntSushi/ripgrep/pull/1381):
  Directory traversal is sped up with speculative ignore-file existence checks.
* [PERF cd8ec38a](https://github.com/BurntSushi/ripgrep/commit/cd8ec38a):
  Improve inner literal detection to cover more cases more effectively.
  e.g., ` +Sherlock Holmes +` now has ` Sherlock Holmes ` extracted instead
  of ` `.
* [PERF 6a0e0147](https://github.com/BurntSushi/ripgrep/commit/6a0e0147):
  Improve literal detection when the `-w/--word-regexp` flag is used.
* [PERF ad97e9c9](https://github.com/BurntSushi/ripgrep/commit/ad97e9c9):
  Improve overall performance of the `-w/--word-regexp` flag.

Feature enhancements:

* Added or improved file type filtering for erb, diff, Gradle, HAML, Org,
  Postscript, Skim, Slim, Slime, RPM Spec files, Typoscript, xml.
* [FEATURE #1370](https://github.com/BurntSushi/ripgrep/pull/1370):
  Add `--include-zero` flag that shows files searched without matches.
* [FEATURE #1390](https://github.com/BurntSushi/ripgrep/pull/1390):
  Add `--no-context-separator` flag that always hides context separators.
* [FEATURE #1414](https://github.com/BurntSushi/ripgrep/pull/1414):
  Add `--no-require-git` flag to allow ripgrep to respect gitignores anywhere.
* [FEATURE #1420](https://github.com/BurntSushi/ripgrep/pull/1420):
  Add `--no-ignore-exclude` to disregard rules in `.git/info/exclude` files.
* [FEATURE #1466](https://github.com/BurntSushi/ripgrep/pull/1466):
  Add `--no-ignore-files` flag to disable all `--ignore-file` flags.
* [FEATURE #1488](https://github.com/BurntSushi/ripgrep/pull/1488):
  Add '--engine' flag for easier switching between regex engines.
* [FEATURE 75cbe88f](https://github.com/BurntSushi/ripgrep/commit/75cbe88f):
  Add `--no-unicode` flag. This works on all supported regex engines.

Bug fixes:

* [BUG #1291](https://github.com/BurntSushi/ripgrep/issues/1291):
  ripgrep now works in non-existent directories.
* [BUG #1319](https://github.com/BurntSushi/ripgrep/issues/1319):
  Fix match bug due to errant literal detection.
* [**BUG #1335**](https://github.com/BurntSushi/ripgrep/issues/1335):
  Fixes a performance bug when searching plain text files with very long lines.
  This was a serious performance regression in some cases.
* [BUG #1344](https://github.com/BurntSushi/ripgrep/issues/1344):
  Document usage of `--type all`.
* [BUG #1389](https://github.com/BurntSushi/ripgrep/issues/1389):
  Fixes a bug where ripgrep would panic when searching a symlinked directory.
* [BUG #1439](https://github.com/BurntSushi/ripgrep/issues/1439):
  Improve documentation for ripgrep's automatic stdin detection.
* [BUG #1441](https://github.com/BurntSushi/ripgrep/issues/1441):
  Remove CPU features from man page.
* [BUG #1442](https://github.com/BurntSushi/ripgrep/issues/1442),
  [BUG #1478](https://github.com/BurntSushi/ripgrep/issues/1478):
  Improve documentation of the `-g/--glob` flag.
* [BUG #1445](https://github.com/BurntSushi/ripgrep/issues/1445):
  ripgrep now respects ignore rules from .git/info/exclude in worktrees.
* [BUG #1485](https://github.com/BurntSushi/ripgrep/issues/1485):
  Fish shell completions from the release Debian package are now installed to
  `/usr/share/fish/vendor_completions.d/rg.fish`.

11.0.2 (2019-08-01)
|
||||
===================
|
||||
ripgrep 11.0.2 is a new patch release that fixes a few bugs, including a
|
||||
performance regression and a matching bug when using the `-F/--fixed-strings`
|
||||
flag.
|
||||
|
||||
Feature enhancements:
|
||||
|
||||
* [FEATURE #1293](https://github.com/BurntSushi/ripgrep/issues/1293):
|
||||
Added `--glob-case-insensitive` flag that makes `--glob` behave as `--iglob`.
|
||||
|
||||
Bug fixes:
|
||||
|
||||
* [BUG #1246](https://github.com/BurntSushi/ripgrep/issues/1246):
|
||||
Add translations to README, starting with an unofficial Chinese translation.
|
||||
* [BUG #1259](https://github.com/BurntSushi/ripgrep/issues/1259):
|
||||
Fix bug where the last byte of a `-f file` was stripped if it wasn't a `\n`.
|
||||
* [BUG #1261](https://github.com/BurntSushi/ripgrep/issues/1261):
|
||||
Document that no error is reported when searching for `\n` with `-P/--pcre2`.
|
||||
* [BUG #1284](https://github.com/BurntSushi/ripgrep/issues/1284):
|
||||
Mention `.ignore` and `.rgignore` more prominently in the README.
|
||||
* [BUG #1292](https://github.com/BurntSushi/ripgrep/issues/1292):
|
||||
Fix bug where `--with-filename` was sometimes enabled incorrectly.
|
||||
* [BUG #1268](https://github.com/BurntSushi/ripgrep/issues/1268):
|
||||
Fix major performance regression in GitHub `x86_64-linux` binary release.
|
||||
* [BUG #1302](https://github.com/BurntSushi/ripgrep/issues/1302):
|
||||
Show better error messages when a non-existent preprocessor command is given.
|
||||
* [BUG #1334](https://github.com/BurntSushi/ripgrep/issues/1334):
|
||||
Fix match regression with `-F` flag when patterns contain meta characters.
|
||||
|
||||
|
||||
11.0.1 (2019-04-16)
===================
ripgrep 11.0.1 is a new patch release that fixes a search regression introduced
in the previous 11.0.0 release. In particular, ripgrep can enter an infinite
loop for some search patterns when searching invalid UTF-8.

Bug fixes:

* [BUG #1247](https://github.com/BurntSushi/ripgrep/issues/1247):
  Fix search bug that can cause ripgrep to enter an infinite loop.


11.0.0 (2019-04-15)
===================
ripgrep 11 is a new major version release of ripgrep that contains many bug
fixes, some performance improvements and a few feature enhancements. Notably,
ripgrep's user experience for binary file filtering has been improved. See the
[guide's new section on binary data](GUIDE.md#binary-data) for more details.

This release also marks a change in ripgrep's versioning. Whereas the previous
version was `0.10.0`, this version is `11.0.0`. Moving forward, ripgrep's
major version will be increased a few times per year. ripgrep will continue to
be conservative with respect to backwards compatibility, but may occasionally
introduce breaking changes, which will always be documented in this CHANGELOG.
See [issue 1172](https://github.com/BurntSushi/ripgrep/issues/1172) for a bit
more detail on why this versioning change was made.

This release increases the **minimum supported Rust version** from 1.28.0 to
1.34.0.

**BREAKING CHANGES**:

* ripgrep has tweaked its exit status codes to be more like GNU grep's. Namely,
  if a non-fatal error occurs during a search, then ripgrep will now always
  emit a `2` exit status code, regardless of whether a match is found or not.
  Previously, ripgrep would only emit a `2` exit status code for a catastrophic
  error (e.g., regex syntax error). One exception to this is if ripgrep is run
  with `-q/--quiet`. In that case, if an error occurs and a match is found,
  then ripgrep will exit with a `0` exit status code. (A short sketch of
  checking these codes follows this list.)
* Supplying the `-u/--unrestricted` flag three times is now equivalent to
  supplying `--no-ignore --hidden --binary`. Previously, `-uuu` was equivalent
  to `--no-ignore --hidden --text`. The difference is that `--binary` disables
  binary file filtering without potentially dumping binary data into your
  terminal. That is, `rg -uuu foo` should now be equivalent to `grep -r foo`.
* The `avx-accel` feature of ripgrep has been removed since it is no longer
  necessary. All uses of AVX in ripgrep are now enabled automatically via
  runtime CPU feature detection. The `simd-accel` feature does remain available
  (only for enabling SIMD for transcoding); however, it substantially increases
  compilation times at the moment.
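
A minimal sketch of checking the new exit status convention from another
program, assuming only that an `rg` binary is on `PATH` (the pattern and
directory below are invented for illustration):

```rust
use std::process::Command;

fn main() -> std::io::Result<()> {
    // Run a search and map the documented exit codes:
    // 0 = at least one match, 1 = no match, 2 = an error occurred.
    let status = Command::new("rg").args(["some-pattern", "."]).status()?;
    match status.code() {
        Some(0) => println!("found at least one match"),
        Some(1) => println!("no matches found"),
        Some(2) => println!("a (possibly non-fatal) error occurred"),
        other => println!("terminated abnormally: {:?}", other),
    }
    Ok(())
}
```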

Performance improvements:

* [PERF #497](https://github.com/BurntSushi/ripgrep/issues/497),
  [PERF #838](https://github.com/BurntSushi/ripgrep/issues/838):
  Make `rg -F -f dictionary-of-literals` much faster.

Feature enhancements:

* Added or improved file type filtering for Apache Thrift, ASP, Bazel, Brotli,
  BuildStream, bzip2, C, C++, Cython, gzip, Java, Make, Postscript, QML, Tex,
  XML, xz, zig and zstd.
* [FEATURE #855](https://github.com/BurntSushi/ripgrep/issues/855):
  Add `--binary` flag for disabling binary file filtering.
* [FEATURE #1078](https://github.com/BurntSushi/ripgrep/pull/1078):
  Add `--max-columns-preview` flag for showing a preview of long lines.
* [FEATURE #1099](https://github.com/BurntSushi/ripgrep/pull/1099):
  Add support for Brotli and Zstd to the `-z/--search-zip` flag.
* [FEATURE #1138](https://github.com/BurntSushi/ripgrep/pull/1138):
  Add `--no-ignore-dot` flag for ignoring `.ignore` files.
* [FEATURE #1155](https://github.com/BurntSushi/ripgrep/pull/1155):
  Add `--auto-hybrid-regex` flag for automatically falling back to PCRE2.
* [FEATURE #1159](https://github.com/BurntSushi/ripgrep/pull/1159):
  ripgrep's exit status logic should now match GNU grep. See updated man page.
* [FEATURE #1164](https://github.com/BurntSushi/ripgrep/pull/1164):
  Add `--ignore-file-case-insensitive` for case insensitive ignore globs.
* [FEATURE #1185](https://github.com/BurntSushi/ripgrep/pull/1185):
  Add `-I` flag as a short option for the `--no-filename` flag.
* [FEATURE #1207](https://github.com/BurntSushi/ripgrep/pull/1207):
  Add `none` value to `-E/--encoding` to forcefully disable all transcoding.
* [FEATURE da9d7204](https://github.com/BurntSushi/ripgrep/commit/da9d7204):
  Add `--pcre2-version` for showing PCRE2 version information.

Bug fixes:

* [BUG #306](https://github.com/BurntSushi/ripgrep/issues/306),
  [BUG #855](https://github.com/BurntSushi/ripgrep/issues/855):
  Improve the user experience for ripgrep's binary file filtering.
* [BUG #373](https://github.com/BurntSushi/ripgrep/issues/373),
  [BUG #1098](https://github.com/BurntSushi/ripgrep/issues/1098):
  `**` is now accepted as valid syntax anywhere in a glob.
* [BUG #916](https://github.com/BurntSushi/ripgrep/issues/916):
  ripgrep no longer hangs when searching `/proc` with a zombie process present.
* [BUG #1052](https://github.com/BurntSushi/ripgrep/issues/1052):
  Fix bug where ripgrep could panic when transcoding UTF-16 files.
* [BUG #1055](https://github.com/BurntSushi/ripgrep/issues/1055):
  Suggest `-U/--multiline` when a pattern contains a `\n`.
* [BUG #1063](https://github.com/BurntSushi/ripgrep/issues/1063):
  Always strip a BOM if it's present, even for UTF-8.
* [BUG #1064](https://github.com/BurntSushi/ripgrep/issues/1064):
  Fix inner literal detection that could lead to incorrect matches.
* [BUG #1079](https://github.com/BurntSushi/ripgrep/issues/1079):
  Fixes a bug where the order of globs could result in missing a match.
* [BUG #1089](https://github.com/BurntSushi/ripgrep/issues/1089):
  Fix another bug where ripgrep could panic when transcoding UTF-16 files.
* [BUG #1091](https://github.com/BurntSushi/ripgrep/issues/1091):
  Add note about inverted flags to the man page.
* [BUG #1093](https://github.com/BurntSushi/ripgrep/pull/1093):
  Fix handling of literal slashes in gitignore patterns.
* [BUG #1095](https://github.com/BurntSushi/ripgrep/issues/1095):
  Fix corner cases involving the `--crlf` flag.
* [BUG #1101](https://github.com/BurntSushi/ripgrep/issues/1101):
  Fix AsciiDoc escaping for man page output.
* [BUG #1103](https://github.com/BurntSushi/ripgrep/issues/1103):
  Clarify what `--encoding auto` does.
* [BUG #1106](https://github.com/BurntSushi/ripgrep/issues/1106):
  `--files-with-matches` and `--files-without-match` work with one file.
* [BUG #1121](https://github.com/BurntSushi/ripgrep/issues/1121):
  Fix bug that was triggering Windows antimalware when using the `--files`
  flag.
* [BUG #1125](https://github.com/BurntSushi/ripgrep/issues/1125),
  [BUG #1159](https://github.com/BurntSushi/ripgrep/issues/1159):
  ripgrep shouldn't panic for `rg -h | rg` and should emit correct exit status.
* [BUG #1144](https://github.com/BurntSushi/ripgrep/issues/1144):
  Fixes a bug where line numbers could be wrong on big-endian machines.
* [BUG #1154](https://github.com/BurntSushi/ripgrep/issues/1154):
  Windows files with "hidden" attribute are now treated as hidden.
* [BUG #1173](https://github.com/BurntSushi/ripgrep/issues/1173):
  Fix handling of `**` patterns in gitignore files.
* [BUG #1174](https://github.com/BurntSushi/ripgrep/issues/1174):
  Fix handling of repeated `**` patterns in gitignore files.
* [BUG #1176](https://github.com/BurntSushi/ripgrep/issues/1176):
  Fix bug where `-F`/`-x` weren't applied to patterns given via `-f`.
* [BUG #1189](https://github.com/BurntSushi/ripgrep/issues/1189):
  Document cases where ripgrep may use a lot of memory.
* [BUG #1203](https://github.com/BurntSushi/ripgrep/issues/1203):
  Fix a matching bug related to the suffix literal optimization.
* [BUG 8f14cb18](https://github.com/BurntSushi/ripgrep/commit/8f14cb18):
  Increase the default stack size for PCRE2's JIT.


0.10.0 (2018-09-07)
===================
This is a new minor version release of ripgrep that contains some major new
features, a huge number of bug fixes, and is the first release based on
libripgrep. The entirety of ripgrep's core search and printing code has been
rewritten and generalized so that anyone can make use of it.

Major new features include PCRE2 support, multi-line search and a JSON output
format.

**BREAKING CHANGES**:

* The minimum version of Rust required to compile ripgrep now tracks the
  latest stable version of Rust. Patch releases will continue to compile with
  the same version of Rust as the previous patch release, but new minor
  versions will use the current stable version of the Rust compiler as their
  minimum supported version.
* The match semantics of `-w/--word-regexp` have changed slightly. They used
  to be `\b(?:<your pattern>)\b`, but now it's
  `(?:^|\W)(?:<your pattern>)(?:$|\W)`. This matches the behavior of GNU grep
  and is believed to be closer to the intended semantics of the flag. See
  [#389](https://github.com/BurntSushi/ripgrep/issues/389) for more details.
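
The practical difference shows up when a pattern itself starts or ends with a
non-word character, where `\b` cannot assert a boundary. A small sketch using
the `regex` crate (the pattern `-foo-` is an invented example, not from the
issue):

```rust
use regex::Regex;

fn main() {
    let haystack = " -foo- ";
    // Old rewrite: `\b` fails because both neighbors of `-` are non-word chars.
    let old = Regex::new(r"\b(?:-foo-)\b").unwrap();
    // New rewrite: behaves like GNU grep's -w.
    let new = Regex::new(r"(?:^|\W)(?:-foo-)(?:$|\W)").unwrap();
    assert!(!old.is_match(haystack));
    assert!(new.is_match(haystack));
}
```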

Feature enhancements:

* [FEATURE #162](https://github.com/BurntSushi/ripgrep/issues/162):
  libripgrep is now a thing. The primary crate is
  [`grep`](https://docs.rs/grep). (A brief usage sketch follows this list.)
* [FEATURE #176](https://github.com/BurntSushi/ripgrep/issues/176):
  Add `-U/--multiline` flag that permits matching over multiple lines.
* [FEATURE #188](https://github.com/BurntSushi/ripgrep/issues/188):
  Add `-P/--pcre2` flag that gives support for look-around and backreferences.
* [FEATURE #244](https://github.com/BurntSushi/ripgrep/issues/244):
  Add `--json` flag that prints results in a JSON Lines format.
* [FEATURE #321](https://github.com/BurntSushi/ripgrep/issues/321):
  Add `--one-file-system` flag to skip directories on different file systems.
* [FEATURE #404](https://github.com/BurntSushi/ripgrep/issues/404):
  Add `--sort` and `--sortr` flags for more sorting. Deprecate `--sort-files`.
* [FEATURE #416](https://github.com/BurntSushi/ripgrep/issues/416):
  Add `--crlf` flag to permit `$` to work with carriage returns on Windows.
* [FEATURE #917](https://github.com/BurntSushi/ripgrep/issues/917):
  The `--trim` flag strips prefix whitespace from all lines printed.
* [FEATURE #993](https://github.com/BurntSushi/ripgrep/issues/993):
  Add `--null-data` flag, which makes ripgrep use NUL as a line terminator.
* [FEATURE #997](https://github.com/BurntSushi/ripgrep/issues/997):
  The `--passthru` flag now works with the `--replace` flag.
* [FEATURE #1038-1](https://github.com/BurntSushi/ripgrep/issues/1038):
  Add `--line-buffered` and `--block-buffered` for forcing a buffer strategy.
* [FEATURE #1038-2](https://github.com/BurntSushi/ripgrep/issues/1038):
  Add `--pre-glob` for filtering files through the `--pre` flag.
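
To make the libripgrep entry above concrete, here is a minimal, hedged sketch
against the published `grep-regex` and `grep-searcher` crates (both re-exported
by the `grep` facade crate); the pattern and haystack are invented for
illustration:

```rust
use grep_regex::RegexMatcher;
use grep_searcher::{sinks::UTF8, Searcher};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let matcher = RegexMatcher::new(r"ripgrep")?;
    let haystack = b"libripgrep powers ripgrep\nand can power other tools\n";

    let mut hits = Vec::new();
    Searcher::new().search_slice(
        &matcher,
        haystack,
        UTF8(|line_number, line| {
            // Collect each matching line; returning Ok(true) keeps searching.
            hits.push((line_number, line.to_string()));
            Ok(true)
        }),
    )?;

    assert_eq!(hits.len(), 1);
    Ok(())
}
```

The same `Searcher` can also be pointed at readers or files, which is the path
ripgrep's own searching and printing now go through.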

Bug fixes:

* [BUG #2](https://github.com/BurntSushi/ripgrep/issues/2):
  Searching with non-zero context can now use memory maps if appropriate.
* [BUG #200](https://github.com/BurntSushi/ripgrep/issues/200):
  ripgrep will now stop correctly when its output pipe is closed.
* [BUG #389](https://github.com/BurntSushi/ripgrep/issues/389):
  The `-w/--word-regexp` flag now works more intuitively.
* [BUG #643](https://github.com/BurntSushi/ripgrep/issues/643):
  Detection of readable stdin has improved on Windows.
* [BUG #441](https://github.com/BurntSushi/ripgrep/issues/441),
  [BUG #690](https://github.com/BurntSushi/ripgrep/issues/690),
  [BUG #980](https://github.com/BurntSushi/ripgrep/issues/980):
  Matching empty lines now works correctly in several corner cases.
* [BUG #764](https://github.com/BurntSushi/ripgrep/issues/764):
  Color escape sequences now coalesce, which reduces output size.
* [BUG #842](https://github.com/BurntSushi/ripgrep/issues/842):
  Add man page to binary Debian package.
* [BUG #922](https://github.com/BurntSushi/ripgrep/issues/922):
  ripgrep is now more robust with respect to memory maps failing.
* [BUG #937](https://github.com/BurntSushi/ripgrep/issues/937):
  Color escape sequences are no longer emitted for empty matches.
* [BUG #940](https://github.com/BurntSushi/ripgrep/issues/940):
  Context from the `--passthru` flag should not impact process exit status.
* [BUG #984](https://github.com/BurntSushi/ripgrep/issues/984):
  Fixes bug in `ignore` crate where first path was always treated as a symlink.
* [BUG #990](https://github.com/BurntSushi/ripgrep/issues/990):
  Read stderr asynchronously when running a process.
* [BUG #1013](https://github.com/BurntSushi/ripgrep/issues/1013):
  Add compile time and runtime CPU features to `--version` output.
* [BUG #1028](https://github.com/BurntSushi/ripgrep/pull/1028):
  Don't complete bare pattern after `-f` in zsh.


0.9.0 (2018-08-03)
==================
This is a new minor version release of ripgrep that contains some minor new
features and a panoply of bug fixes.

Releases provided on GitHub for `x86_64` will now work on all target CPUs, and
will also automatically take advantage of features found on modern CPUs (such
as AVX2) for additional optimizations.

This release increases the **minimum supported Rust version** from 1.20.0 to
1.23.0.

It is anticipated that the next release of ripgrep (0.10.0) will provide
multi-line search support and a JSON output format.

**BREAKING CHANGES**:

* When `--count` and `--only-matching` are provided simultaneously, the
  behavior of ripgrep is as if the `--count-matches` flag was given. That is,
  the total number of matches is reported, where there may be multiple matches
  per line. Previously, the behavior of ripgrep was to report the total number
  of matching lines. (Note that this behavior diverges from the behavior of
  GNU grep. A small sketch of the distinction follows this list.)
* Octal syntax is no longer supported. ripgrep previously accepted expressions
  like `\1` as syntax for matching `U+0001`, but ripgrep will now report an
  error instead.
* The `--line-number-width` flag has been removed. Its functionality was not
  carefully considered with all ripgrep output formats.
  See [#795](https://github.com/BurntSushi/ripgrep/issues/795) for more
  details.
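
The distinction between counting matching lines and counting individual matches
can be seen in a small sketch with the `regex` crate (the haystack is invented
for illustration):

```rust
use regex::Regex;

fn main() {
    let haystack = "foo foo\nbar\nfoo\n";
    let re = Regex::new("foo").unwrap();
    // `--count` reports matching lines; `--count --only-matching` now reports
    // every individual match, like `--count-matches`.
    let matching_lines = haystack.lines().filter(|l| re.is_match(l)).count();
    let total_matches: usize = haystack.lines().map(|l| re.find_iter(l).count()).sum();
    assert_eq!(matching_lines, 2);
    assert_eq!(total_matches, 3);
}
```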

Feature enhancements:

* Added or improved file type filtering for Android, Bazel, Fuchsia, Haskell,
  Java and Puppet.
* [FEATURE #411](https://github.com/BurntSushi/ripgrep/issues/411):
  Add a `--stats` flag, which emits aggregate statistics after search results.
* [FEATURE #646](https://github.com/BurntSushi/ripgrep/issues/646):
  Add a `--no-ignore-messages` flag, which suppresses parse errors from reading
  `.ignore` and `.gitignore` files.
* [FEATURE #702](https://github.com/BurntSushi/ripgrep/issues/702):
  Support `\u{..}` Unicode escape sequences.
* [FEATURE #812](https://github.com/BurntSushi/ripgrep/issues/812):
  Add `-b/--byte-offset` flag that shows the byte offset of each matching line.
* [FEATURE #814](https://github.com/BurntSushi/ripgrep/issues/814):
  Add `--count-matches` flag, which is like `--count`, but for each match.
* [FEATURE #880](https://github.com/BurntSushi/ripgrep/issues/880):
  Add a `--no-column` flag, which disables column numbers in the output.
* [FEATURE #898](https://github.com/BurntSushi/ripgrep/issues/898):
  Add support for `lz4` when using the `-z/--search-zip` flag.
* [FEATURE #924](https://github.com/BurntSushi/ripgrep/issues/924):
  `termcolor` has moved to its own repository:
  https://github.com/BurntSushi/termcolor
* [FEATURE #934](https://github.com/BurntSushi/ripgrep/issues/934):
  Add a new flag, `--no-ignore-global`, that permits disabling global
  gitignores.
* [FEATURE #967](https://github.com/BurntSushi/ripgrep/issues/967):
  Rename `--maxdepth` to `--max-depth` for consistency. Keep `--maxdepth` for
  backwards compatibility.
* [FEATURE #978](https://github.com/BurntSushi/ripgrep/issues/978):
  Add a `--pre` option to filter inputs with an arbitrary program.
* [FEATURE fca9709d](https://github.com/BurntSushi/ripgrep/commit/fca9709d):
  Improve zsh completion.

Bug fixes:

* [BUG #135](https://github.com/BurntSushi/ripgrep/issues/135):
  Release portable binaries that conditionally use SSSE3, AVX2, etc., at
  runtime.
* [BUG #268](https://github.com/BurntSushi/ripgrep/issues/268):
  Print descriptive error message when trying to use look-around or
  backreferences.
* [BUG #395](https://github.com/BurntSushi/ripgrep/issues/395):
  Show comprehensible error messages for regexes like `\s*{`.
* [BUG #526](https://github.com/BurntSushi/ripgrep/issues/526):
  Support backslash escapes in globs.
* [BUG #795](https://github.com/BurntSushi/ripgrep/issues/795):
  Fix problems with `--line-number-width` by removing it.
* [BUG #832](https://github.com/BurntSushi/ripgrep/issues/832):
  Clarify usage instructions for `-f/--file` flag.
* [BUG #835](https://github.com/BurntSushi/ripgrep/issues/835):
  Fix small performance regression while crawling very large directory trees.
* [BUG #851](https://github.com/BurntSushi/ripgrep/issues/851):
  Fix `-S/--smart-case` detection once and for all.
* [BUG #852](https://github.com/BurntSushi/ripgrep/issues/852):
  Be robust with respect to `ENOMEM` errors returned by `mmap`.
* [BUG #853](https://github.com/BurntSushi/ripgrep/issues/853):
  Upgrade `grep` crate to `regex-syntax 0.6.0`.
* [BUG #893](https://github.com/BurntSushi/ripgrep/issues/893):
  Improve support for git submodules.
* [BUG #900](https://github.com/BurntSushi/ripgrep/issues/900):
  When no patterns are given, ripgrep should never match anything.
* [BUG #907](https://github.com/BurntSushi/ripgrep/issues/907):
  ripgrep will now stop traversing after the first file when `--quiet --files`
  is used.
* [BUG #918](https://github.com/BurntSushi/ripgrep/issues/918):
  Don't skip tar archives when `-z/--search-zip` is used.
* [BUG #934](https://github.com/BurntSushi/ripgrep/issues/934):
  Don't respect gitignore files when searching outside git repositories.
* [BUG #948](https://github.com/BurntSushi/ripgrep/issues/948):
  Use exit code 2 to indicate error, and use exit code 1 to indicate no
  matches.
* [BUG #951](https://github.com/BurntSushi/ripgrep/issues/951):
  Add stdin example to ripgrep usage documentation.
* [BUG #955](https://github.com/BurntSushi/ripgrep/issues/955):
  Use buffered writing when not printing to a tty, which fixes a performance
  regression.
* [BUG #957](https://github.com/BurntSushi/ripgrep/issues/957):
  Improve the error message shown for `--path-separator /` in some Windows
  shells.
* [BUG #964](https://github.com/BurntSushi/ripgrep/issues/964):
  Add a `--no-fixed-strings` flag to disable `-F/--fixed-strings`.
* [BUG #988](https://github.com/BurntSushi/ripgrep/issues/988):
  Fix a bug in the `ignore` crate that prevented the use of explicit ignore
  files after disabling all other ignore rules.
* [BUG #995](https://github.com/BurntSushi/ripgrep/issues/995):
  Respect `$XDG_CONFIG_HOME/git/config` for detecting `core.excludesFile`.


0.8.1 (2018-02-20)
==================
This is a patch release of ripgrep that primarily fixes regressions introduced
in 0.8.0 (#820 and #824) in directory traversal on Windows. These regressions
do not impact non-Windows users.

Feature enhancements:

* Added or improved file type filtering for csv and VHDL.
* [FEATURE #798](https://github.com/BurntSushi/ripgrep/issues/798):
  Add `underline` support to `termcolor` and ripgrep. See documentation on the
  `--colors` flag for details.

Bug fixes:

* [BUG #684](https://github.com/BurntSushi/ripgrep/issues/684):
  Improve documentation for the `--ignore-file` flag.
* [BUG #789](https://github.com/BurntSushi/ripgrep/issues/789):
  Don't show `(rev )` if the revision wasn't available during the build.
* [BUG #791](https://github.com/BurntSushi/ripgrep/issues/791):
  Add man page to ARM release.
* [BUG #797](https://github.com/BurntSushi/ripgrep/issues/797):
  Improve documentation for "intense" setting in `termcolor`.
* [BUG #800](https://github.com/BurntSushi/ripgrep/issues/800):
  Fix a bug in the `ignore` crate for custom ignore files. This had no impact
  on ripgrep.
* [BUG #807](https://github.com/BurntSushi/ripgrep/issues/807):
  Fix a bug where `rg --hidden .` behaved differently from `rg --hidden ./`.
* [BUG #815](https://github.com/BurntSushi/ripgrep/issues/815):
  Clarify a common failure mode in user guide.
* [BUG #820](https://github.com/BurntSushi/ripgrep/issues/820):
  Fixes a bug on Windows where symlinks were followed even if not requested.
* [BUG #824](https://github.com/BurntSushi/ripgrep/issues/824):
  Fix a performance regression in directory traversal on Windows.


0.8.0 (2018-02-11)
==================
This is a new minor version release of ripgrep that satisfies several popular
feature requests (config files, search compressed files, true colors), fixes
many bugs and improves the quality of life for ripgrep maintainers. This
release also includes greatly improved documentation in the form of a
[User Guide](GUIDE.md) and a [FAQ](FAQ.md).

This release increases the **minimum supported Rust version** from 1.17 to
1.20.

**BREAKING CHANGES**:

Note that these are all very minor and unlikely to impact most users.

* In order to support configuration files, flag overrides needed to be
  rethought. In some cases, this changed ripgrep's behavior. For example,
  in ripgrep 0.7.1, `rg foo -s -i` will perform a case sensitive search
  since the `-s/--case-sensitive` flag was defined to always take precedence
  over the `-i/--ignore-case` flag, regardless of position. In ripgrep 0.8.0,
  however, the override rule for all flags has changed to "the most recent
  flag wins among competing flags." That is, `rg foo -s -i` now performs a
  case insensitive search.
* The `-M/--max-columns` flag was tweaked so that specifying a value of `0`
  now makes ripgrep behave as if the flag was absent. This makes it possible
  to set a default value in a configuration file and then override it. The
  previous ripgrep behavior was to suppress all matching non-empty lines.
* In all globs, `[^...]` is now equivalent to `[!...]` (indicating class
  negation). Previously, `^` had no special significance in a character class.
  (A short sketch of the new behavior follows this list.)
* For **downstream packagers**, the directory hierarchy in ripgrep's archive
  releases has changed. The root directory now only contains the executable,
  README and license. There is now a new directory called `doc` which contains
  the man page (previously in the root), a user guide (new), a FAQ (new) and
  the CHANGELOG (previously not included in releases). The `complete`
  directory remains the same.
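
A hedged sketch of the new glob semantics, written against the `globset` crate
that ripgrep's glob handling is built on (the patterns and file names are
invented for illustration):

```rust
use globset::Glob;

fn main() -> Result<(), globset::Error> {
    // `[^o]` is now treated exactly like `[!o]`: both negate the class.
    let bang = Glob::new("*.[!o]")?.compile_matcher();
    let caret = Glob::new("*.[^o]")?.compile_matcher();

    for matcher in [&bang, &caret] {
        assert!(matcher.is_match("main.c"));
        assert!(!matcher.is_match("main.o"));
    }
    Ok(())
}
```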

Feature enhancements:

* Added or improved file type filtering for
  Apache Avro, C++, GN, Google Closure Templates, Jupyter notebooks, man pages,
  Protocol Buffers, Smarty and Web IDL.
* [FEATURE #196](https://github.com/BurntSushi/ripgrep/issues/196):
  Support a configuration file. See
  [the new user guide](GUIDE.md#configuration-file)
  for details.
* [FEATURE #261](https://github.com/BurntSushi/ripgrep/issues/261):
  Add extended or "true" color support. Works in Windows 10!
  [See the FAQ for details.](FAQ.md#colors)
* [FEATURE #539](https://github.com/BurntSushi/ripgrep/issues/539):
  Search gzip, bzip2, lzma or xz files when given `-z/--search-zip` flag.
* [FEATURE #544](https://github.com/BurntSushi/ripgrep/issues/544):
  Add support for line number alignment via a new `--line-number-width` flag.
* [FEATURE #654](https://github.com/BurntSushi/ripgrep/pull/654):
  Support linuxbrew in ripgrep's Brew tap.
* [FEATURE #673](https://github.com/BurntSushi/ripgrep/issues/673):
  Bring back `.rgignore` files. (A higher-precedence, application-specific
  version of `.ignore`.)
* [FEATURE #676](https://github.com/BurntSushi/ripgrep/issues/676):
  Provide ARM binaries. **WARNING:** This will be provided on a best effort
  basis.
* [FEATURE #709](https://github.com/BurntSushi/ripgrep/issues/709):
  Suggest `-F/--fixed-strings` flag on a regex syntax error.
* [FEATURE #740](https://github.com/BurntSushi/ripgrep/issues/740):
  Add a `--passthru` flag that causes ripgrep to print every line it reads.
* [FEATURE #785](https://github.com/BurntSushi/ripgrep/pull/785):
  Overhaul documentation. Cleaned up README, added user guide and FAQ.
* [FEATURE 7f5c07](https://github.com/BurntSushi/ripgrep/commit/7f5c07434be92103b5bf7e216b9c7494aed2d8cb):
  Add hidden flags for convenient overrides (e.g., `--no-text`).

Bug fixes:

* [BUG #553](https://github.com/BurntSushi/ripgrep/issues/553):
  Permit flags to be repeated.
* [BUG #633](https://github.com/BurntSushi/ripgrep/issues/633):
  Fix a bug where ripgrep would panic on Windows while following symlinks.
* [BUG #649](https://github.com/BurntSushi/ripgrep/issues/649):
  Fix handling of `!**/` in `.gitignore`.
* [BUG #663](https://github.com/BurntSushi/ripgrep/issues/663):
  **BREAKING CHANGE:** Support `[^...]` glob syntax (as identical to `[!...]`).
* [BUG #693](https://github.com/BurntSushi/ripgrep/issues/693):
  Don't display context separators when not printing matches.
* [BUG #705](https://github.com/BurntSushi/ripgrep/issues/705):
  Fix a bug that prevented ripgrep from searching OneDrive directories.
* [BUG #717](https://github.com/BurntSushi/ripgrep/issues/717):
  Improve `--smart-case` uppercase character detection.
* [BUG #725](https://github.com/BurntSushi/ripgrep/issues/725):
  Clarify that globs do not override explicitly given paths to search.
* [BUG #742](https://github.com/BurntSushi/ripgrep/pull/742):
  Write ANSI reset code as `\x1B[0m` instead of `\x1B[m`.
* [BUG #747](https://github.com/BurntSushi/ripgrep/issues/747):
  Remove `yarn.lock` from YAML file type.
* [BUG #760](https://github.com/BurntSushi/ripgrep/issues/760):
  ripgrep can now search `/sys/devices/system/cpu/vulnerabilities/*` files.
* [BUG #761](https://github.com/BurntSushi/ripgrep/issues/761):
  Fix handling of gitignore patterns that contain a `/`.
* [BUG #776](https://github.com/BurntSushi/ripgrep/pull/776):
  **BREAKING CHANGE:** `--max-columns=0` now disables the limit.
* [BUG #779](https://github.com/BurntSushi/ripgrep/issues/779):
  Clarify documentation for `--files-without-match`.
* [BUG #780](https://github.com/BurntSushi/ripgrep/issues/780),
  [BUG #781](https://github.com/BurntSushi/ripgrep/issues/781):
  Fix bug where ripgrep missed some matching lines.

Maintenance fixes:

* [MAINT #772](https://github.com/BurntSushi/ripgrep/pull/772):
  Drop `env_logger` in favor of a simpler logger to avoid many new dependencies.
* [MAINT #772](https://github.com/BurntSushi/ripgrep/pull/772):
  Add git revision hash to ripgrep's version string.
* [MAINT #772](https://github.com/BurntSushi/ripgrep/pull/772):
  (Seemingly) improve compile times.
* [MAINT #776](https://github.com/BurntSushi/ripgrep/pull/776):
  Automatically generate man page during build.
* [MAINT #786](https://github.com/BurntSushi/ripgrep/pull/786):
  Remove use of `unsafe` in `globset`. :tada:
* [MAINT e9d448](https://github.com/BurntSushi/ripgrep/commit/e9d448e93bb4e1fb3b0c1afc29adb5af6ed5283d):
  Add an issue template (has already drastically improved bug reports).
* [MAINT ae2d03](https://github.com/BurntSushi/ripgrep/commit/ae2d036dd4ba2a46acac9c2d77c32e7c667eb850):
  Remove the `compile` script.

Friends of ripgrep:

I'd like to extend my gratitude to
[@balajisivaraman](https://github.com/balajisivaraman)
for their recent hard work in a number of areas, and in particular, for
implementing the "search compressed files" feature. Their work in sketching out
a specification for that and other work has been exemplary.

Thanks
[@balajisivaraman](https://github.com/balajisivaraman)!


0.7.1 (2017-10-22)
==================
This is a patch release of ripgrep that includes a fix to a very bad regression

Feature enhancements:

* Added or improved file type filtering for VB, R, F#, Swift, Nim, JavaScript,
  TypeScript
* [FEATURE #20](https://github.com/BurntSushi/ripgrep/issues/20):
  Adds a `--no-filename` flag.

Cargo.lock (generated, 627 lines changed): the diff updates dependency
versions (for example aho-corasick 0.6.4 → 0.7.20, clap 2.29.4 → 2.34.0,
libc 0.2.36 → 0.2.139, memchr 2.0.1 → 2.5.0, regex 0.2.6 → 1.7.0 and
walkdir 2.1.3 → 2.3.2), switches the lockfile to format `version = 3`, and
moves the `ripgrep` crate itself from 0.7.1 to 13.0.0.
|
||||
"checksum num_cpus 1.8.0 (registry+https://github.com/rust-lang/crates.io-index)" = "c51a3322e4bca9d212ad9a158a02abc6934d005490c054a2778df73a70aa0a30"
|
||||
"checksum rand 0.3.22 (registry+https://github.com/rust-lang/crates.io-index)" = "15a732abf9d20f0ad8eeb6f909bf6868722d9a06e1e50802b6a70351f40b4eb1"
|
||||
"checksum rand 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "eba5f8cb59cc50ed56be8880a5c7b496bfd9bd26394e176bc67884094145c2c5"
|
||||
"checksum redox_syscall 0.1.37 (registry+https://github.com/rust-lang/crates.io-index)" = "0d92eecebad22b767915e4d529f89f28ee96dbbf5a4810d2b844373f136417fd"
|
||||
"checksum redox_termios 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "7e891cfe48e9100a70a3b6eb652fef28920c117d366339687bd5576160db0f76"
|
||||
"checksum regex 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)" = "5be5347bde0c48cfd8c3fdc0766cdfe9d8a755ef84d620d6794c778c91de8b2b"
|
||||
"checksum regex-syntax 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "8e931c58b93d86f080c734bfd2bce7dd0079ae2331235818133c8be7f422e20e"
|
||||
"checksum same-file 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "cfb6eded0b06a0b512c8ddbcf04089138c9b4362c2f696f3c3d76039d68f3637"
|
||||
"checksum simd 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "3dd0805c7363ab51a829a1511ad24b6ed0349feaa756c4bc2f977f9f496e6673"
|
||||
"checksum strsim 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "bb4f380125926a99e52bc279241539c018323fab05ad6368b56f93d9369ff550"
|
||||
"checksum tempdir 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)" = "87974a6f5c1dfb344d733055601650059a3363de2a6104819293baff662132d6"
|
||||
"checksum termion 1.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "689a3bdfaab439fd92bc87df5c4c78417d3cbe537487274e9b0b2dce76e92096"
|
||||
"checksum textwrap 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "c0b59b6b4b44d867f1370ef1bd91bfb262bf07bf0ae65c202ea2fbc16153b693"
|
||||
"checksum thread_local 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)" = "279ef31c19ededf577bfd12dfae728040a21f635b06a24cd670ff510edd38963"
|
||||
"checksum unicode-width 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "bf3a113775714a22dcb774d8ea3655c53a32debae63a063acc00a91cc586245f"
|
||||
"checksum unreachable 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "382810877fe448991dfc7f0dd6e3ae5d58088fd0ea5e35189655f84e6814fa56"
|
||||
"checksum utf8-ranges 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "662fab6525a98beff2921d7f61a39e7d59e0b425ebc7d0d9e66d316e55124122"
|
||||
"checksum void 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "6a02e4885ed3bc0f2de90ea6dd45ebcbb66dacffe03547fadbb0eeae2770887d"
|
||||
"checksum walkdir 2.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "b167e9a4420d8dddb260e70c90a4a375a1e5691f21f70e715553da87b6c2503a"
|
||||
"checksum winapi 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "04e3bd221fcbe8a271359c04f21a76db7d0c6028862d1bb5512d85e1e2eb5bb3"
|
||||
"checksum winapi-i686-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
|
||||
"checksum winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
|
||||
checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
|
||||
|
||||
135
Cargo.toml
@@ -1,28 +1,27 @@
[package]
name = "ripgrep"
version = "0.7.1" #:version
version = "13.0.0" #:version
authors = ["Andrew Gallant <jamslam@gmail.com>"]
description = """
Line oriented search tool using Rust's regex library. Combines the raw
performance of grep with the usability of the silver searcher.
ripgrep is a line-oriented search tool that recursively searches the current
directory for a regex pattern while respecting gitignore rules. ripgrep has
first class support on Windows, macOS and Linux.
"""
documentation = "https://github.com/BurntSushi/ripgrep"
homepage = "https://github.com/BurntSushi/ripgrep"
repository = "https://github.com/BurntSushi/ripgrep"
readme = "README.md"
keywords = ["regex", "grep", "egrep", "search", "pattern"]
categories = ["command-line-utilities", "text-processing"]
license = "Unlicense/MIT"
license = "Unlicense OR MIT"
exclude = ["HomebrewFormula"]
build = "build.rs"

[badges]
travis-ci = { repository = "BurntSushi/ripgrep" }
appveyor = { repository = "BurntSushi/ripgrep" }
autotests = false
edition = "2018"
rust-version = "1.65"

[[bin]]
bench = false
path = "src/main.rs"
path = "crates/core/main.rs"
name = "rg"

[[test]]
@@ -30,49 +29,79 @@ name = "integration"
path = "tests/tests.rs"

[workspace]
members = ["grep", "globset", "ignore", "termcolor", "wincolor"]

[dependencies]
atty = "0.2.2"
bytecount = "0.3.1"
encoding_rs = "0.7"
globset = { version = "0.3.0", path = "globset" }
grep = { version = "0.1.8", path = "grep" }
ignore = { version = "0.3.1", path = "ignore" }
lazy_static = "1"
libc = "0.2"
log = "0.4"
memchr = "2"
memmap = "0.6"
num_cpus = "1"
regex = "0.2.4"
same-file = "1"
termcolor = { version = "0.3.4", path = "termcolor" }

[dependencies.clap]
version = "2.29.4"
default-features = false
features = ["suggestions", "color"]

[target.'cfg(windows)'.dependencies.winapi]
version = "0.3"
features = ["std", "winnt"]

[build-dependencies]
lazy_static = "1"

[build-dependencies.clap]
version = "2.29.4"
default-features = false
features = ["suggestions", "color"]

[features]
avx-accel = ["bytecount/avx-accel"]
simd-accel = [
  "bytecount/simd-accel",
  "regex/simd-accel",
  "encoding_rs/simd-accel",
members = [
  "crates/globset",
  "crates/grep",
  "crates/cli",
  "crates/matcher",
  "crates/pcre2",
  "crates/printer",
  "crates/regex",
  "crates/searcher",
  "crates/ignore",
]

[dependencies]
bstr = "1.1.0"
grep = { version = "0.2.8", path = "crates/grep" }
ignore = { version = "0.4.19", path = "crates/ignore" }
lazy_static = "1.1.0"
log = "0.4.5"
regex = "1.3.5"
serde_json = "1.0.23"
termcolor = "1.1.0"

[dependencies.clap]
version = "2.33.0"
default-features = false
features = ["suggestions"]

[target.'cfg(all(target_env = "musl", target_pointer_width = "64"))'.dependencies.jemallocator]
version = "0.5.0"

[build-dependencies]
lazy_static = "1.1.0"

[build-dependencies.clap]
version = "2.33.0"
default-features = false
features = ["suggestions"]

[dev-dependencies]
serde = "1.0.77"
serde_derive = "1.0.77"
walkdir = "2"

[features]
simd-accel = ["grep/simd-accel"]
pcre2 = ["grep/pcre2"]

[profile.release]
debug = true
debug = 1

[package.metadata.deb]
features = ["pcre2"]
section = "utils"
assets = [
  ["target/release/rg", "usr/bin/", "755"],
  ["COPYING", "usr/share/doc/ripgrep/", "644"],
  ["LICENSE-MIT", "usr/share/doc/ripgrep/", "644"],
  ["UNLICENSE", "usr/share/doc/ripgrep/", "644"],
  ["CHANGELOG.md", "usr/share/doc/ripgrep/CHANGELOG", "644"],
  ["README.md", "usr/share/doc/ripgrep/README", "644"],
  ["FAQ.md", "usr/share/doc/ripgrep/FAQ", "644"],
  # The man page is automatically generated by ripgrep's build process, so
  # this file isn't actually committed. Instead, to create a dpkg, either
  # create a deployment/deb directory and copy the man page to it, or use the
  # 'ci/build-deb' script.
  ["deployment/deb/rg.1", "usr/share/man/man1/rg.1", "644"],
  # Similarly for shell completions.
  ["deployment/deb/rg.bash", "usr/share/bash-completion/completions/rg", "644"],
  ["deployment/deb/rg.fish", "usr/share/fish/vendor_completions.d/rg.fish", "644"],
  ["deployment/deb/_rg", "usr/share/zsh/vendor-completions/", "644"],
]
extended-description = """\
ripgrep (rg) recursively searches your current directory for a regex pattern.
By default, ripgrep will respect your .gitignore and automatically skip hidden
files/directories and binary files.
"""
11
Cross.toml
Normal file
@@ -0,0 +1,11 @@
[target.x86_64-unknown-linux-musl]
image = "burntsushi/cross:x86_64-unknown-linux-musl"

[target.i686-unknown-linux-gnu]
image = "burntsushi/cross:i686-unknown-linux-gnu"

[target.mips64-unknown-linux-gnuabi64]
image = "burntsushi/cross:mips64-unknown-linux-gnuabi64"

[target.arm-unknown-linux-gnueabihf]
image = "burntsushi/cross:arm-unknown-linux-gnueabihf"
614
FAQ.md
@@ -5,21 +5,28 @@
* [When is the next release?](#release)
* [Does ripgrep have a man page?](#manpage)
* [Does ripgrep have support for shell auto-completion?](#complete)
* [How do I use lookaround and/or backreferences?](#fancy)
* [How do I configure ripgrep's colors?](#colors)
* [How do I enable true colors on Windows?](#truecolors-windows)
* [How do I stop ripgrep from messing up colors when I kill it?](#stop-ripgrep)
* [How can I get results in a consistent order?](#order)
* [How do I search files that aren't UTF-8?](#encoding)
* [How do I search compressed files?](#compressed)
* [How do I search over multiple lines?](#multiline)
* [How do I use lookaround and/or backreferences?](#fancy)
* [How do I configure ripgrep's colors?](#colors)
* [How do I enable true colors on Windows?](#truecolors-windows)
* [How do I stop ripgrep from messing up colors when I kill it?](#stop-ripgrep)
* [Why does using a leading `/` on Windows fail?](#because-cygwin)
* [How do I get around the regex size limit?](#size-limit)
* [How do I make the `-f/--file` flag faster?](#dfa-size)
* [How do I make the output look like The Silver Searcher's output?](#silver-searcher-output)
* [Why does ripgrep get slower when I enabled PCRE2 regexes?](#pcre2-slow)
* [When I run `rg`, why does it execute some other command?](#rg-other-cmd)
* [How do I create an alias for ripgrep on Windows?](#rg-alias-windows)
* [How do I create a PowerShell profile?](#powershell-profile)
* [How do I pipe non-ASCII content to ripgrep on Windows?](#pipe-non-ascii-windows)
* [How can I search and replace with ripgrep?](#search-and-replace)
* [How is ripgrep licensed?](#license)
* [Can ripgrep replace grep?](#posix4ever)
* [What does the "rip" in ripgrep mean?](#intentcountsforsomething)
* [How can I donate to ripgrep or its maintainers?](#donations)


<h3 name="config">
@@ -45,18 +52,19 @@ ripgrep is a project whose contributors are volunteers. A release schedule
adds undue stress to said volunteers. Therefore, releases are made on a best
effort basis and no dates **will ever be given**.

One exception to this is high impact bugs. If a ripgrep release contains a
significant regression, then there will generally be a strong push to get a
patch release out with a fix.
An exception to this _can be_ high impact bugs. If a ripgrep release contains
a significant regression, then there will generally be a strong push to get a
patch release out with a fix. However, no promises are made.


<h3 name="manpage">
Does ripgrep have a man page?
</h3>

Yes! Whenever ripgrep is compiled on a system with `asciidoc` present, then a
man page is generated from ripgrep's argv parser. After compiling ripgrep, you
can find the man page like so from the root of the repository:
Yes! Whenever ripgrep is compiled on a system with `asciidoctor` or `asciidoc`
present, then a man page is generated from ripgrep's argv parser. After
compiling ripgrep, you can find the man page like so from the root of the
repository:

```
$ find ./target -name rg.1 -print0 | xargs -0 ls -t | head -n1
@@ -113,7 +121,7 @@ from run to run of ripgrep.
The only way to make the order of results consistent is to ask ripgrep to
sort the output. Currently, this will disable all parallelism. (On smaller
repositories, you might not notice much of a performance difference!) You
can achieve this with the `--sort-files` flag.
can achieve this with the `--sort path` flag.
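
For example, a sorted search is just an ordinary invocation plus the flag (the
pattern and directory below are placeholders, not anything from this
repository):

```
$ rg --sort path 'fn main' ./src
```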

There is more discussion on this topic here:
https://github.com/BurntSushi/ripgrep/issues/152
@@ -131,10 +139,10 @@ How do I search compressed files?
</h3>

ripgrep's `-z/--search-zip` flag will cause it to search compressed files
automatically. Currently, this supports gzip, bzip2, lzma and xz only and
requires the corresponding `gzip`, `bzip2` and `xz` binaries to be installed on
your system. (That is, ripgrep does decompression by shelling out to another
process.)
automatically. Currently, this supports gzip, bzip2, xz, lzma, lz4, Brotli and
Zstd. Each of these requires the corresponding `gzip`, `bzip2`, `xz`,
`lz4`, `brotli` and `zstd` binaries to be installed on your system. (That is,
ripgrep does decompression by shelling out to another process.)
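
As a small, purely illustrative example (the file name is hypothetical), a
rotated and compressed log can be searched in place:

```
$ rg -z 'connection reset' access.log.2.gz
```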

ripgrep currently does not search archive formats, so `*.tar.gz` files, for
example, are skipped.
@@ -144,22 +152,45 @@ example, are skipped.
How do I search over multiple lines?
</h3>

This isn't currently possible. ripgrep is fundamentally a line-oriented search
tool. With that said,
[multiline search is a planned opt-in feature](https://github.com/BurntSushi/ripgrep/issues/176).
The `-U/--multiline` flag enables ripgrep to report results that span over
multiple lines.
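
As a quick sketch (the pattern is only a placeholder), `-U` permits `\n` in the
pattern, so a match can start on one line and end on the next:

```
$ rg -U 'foo\nbar'
```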


<h3 name="fancy">
How do I use lookaround and/or backreferences?
</h3>

This isn't currently possible. ripgrep uses finite automata to implement
regular expression search, and in turn, guarantees linear time searching on all
inputs. It is difficult to efficiently support lookaround and backreferences in
finite automata engines, so ripgrep does not provide these features.
ripgrep's default regex engine does not support lookaround or backreferences.
This is primarily because the default regex engine is implemented using finite
state machines in order to guarantee a linear worst case time complexity on all
inputs. Backreferences are not possible to implement in this paradigm, and
lookaround appears difficult to do efficiently.

If a production quality regular expression engine with these features is ever
written in Rust, then it is possible ripgrep will provide it as an opt-in
However, ripgrep optionally supports using PCRE2 as the regex engine instead of
the default one based on finite state machines. You can enable PCRE2 with the
`-P/--pcre2` flag. For example, in the root of the ripgrep repo, you can easily
find all palindromes:

```
$ rg -P '(\w{10})\1'
tests/misc.rs
483: cmd.arg("--max-filesize").arg("44444444444444444444");
globset/src/glob.rs
1206: matches!(match7, "a*a*a*a*a*a*a*a*a", "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa");
```

If your version of ripgrep doesn't support PCRE2, then you'll get an error
message when you try to use the `-P/--pcre2` flag:

```
$ rg -P '(\w{10})\1'
PCRE2 is not available in this build of ripgrep
```

Most of the releases distributed by the ripgrep project here on GitHub will
come bundled with PCRE2 enabled. If you installed ripgrep through a different
means (like your system's package manager), then please reach out to the
maintainer of that package to see whether it's possible to enable the PCRE2
feature.


@@ -177,7 +208,7 @@ The `--color` flag accepts one of the following possible values: `never`,
ripgrep to only enable colors when it is printing to a terminal. But if you
pipe ripgrep to a file or some other process, then it will suppress colors.

The --colors` flag is a bit more complicated. The general format is:
The `--colors` flag is a bit more complicated. The general format is:

```
--colors '{type}:{attribute}:{value}'
@@ -189,10 +220,10 @@ The --colors` flag is a bit more complicated. The general format is:
* `{attribute}` should be one of `fg`, `bg` or `style`, corresponding to
  foreground color, background color, or miscellaneous styling (such as whether
  to bold the output or not).
* `{value}` is determined by the value of `{attribute}`. If `{attribute}` is
  `style`, then `{value}` should be one of `nobold`, `bold`, `nointense` or
  `intense`. If `{attribute}` is `fg` or `bg`, then `{value}` should be a
  color.
* `{value}` is determined by the value of `{attribute}`. If
  `{attribute}` is `style`, then `{value}` should be one of `nobold`,
  `bold`, `nointense`, `intense`, `nounderline` or `underline`. If
  `{attribute}` is `fg` or `bg`, then `{value}` should be a color.
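
Putting the pieces together, a concrete invocation might look like this (the
pattern `foo` is just a placeholder):

```
$ rg --colors 'match:fg:magenta' --colors 'match:style:bold' foo
```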

A color is specified by either one of eight of English names, a single 256-bit
number or an RGB triple (with over 16 million possible values, or "true
@@ -285,6 +316,26 @@ available
[here](https://github.com/BurntSushi/ripgrep/issues/281#issuecomment-269093893).


<h3 name="because-cygwin">
Why does using a leading `/` on Windows fail?
</h3>

If you're using cygwin on Windows and try to search for a pattern beginning
with a `/`, then it's possible that cygwin is mangling that pattern without
your knowledge. For example, if you tried running `rg /foo` in a cygwin shell
on Windows, then cygwin might mistakenly perform path translation on `/foo`,
which would result in `rg C:/msys64/foo` being searched instead.

You can fix this in one of three ways:

1. Stop using cygwin.
2. Escape the leading slash with an additional slash. e.g., `rg //foo`.
3. Temporarily disable path translation by setting `MSYS_NO_PATHCONV=1`. e.g.,
   `MSYS_NO_PATHCONV=1 rg /foo`.

For more details, see https://github.com/BurntSushi/ripgrep/issues/1277


<h3 name="size-limit">
How do I get around the regex size limit?
</h3>
@@ -364,6 +415,301 @@ $ RIPGREP_CONFIG_PATH=$HOME/.config/ripgrep/rc rg foo
```


<h3 name="pcre2-slow">
Why does ripgrep get slower when I enable PCRE2 regexes?
</h3>

When you use the `--pcre2` (`-P` for short) flag, ripgrep will use the PCRE2
regex engine instead of the default. Both regex engines are quite fast,
but PCRE2 provides a number of additional features such as look-around and
backreferences that many enjoy using. This is largely because PCRE2 uses
a backtracking implementation where as the default regex engine uses a finite
automaton based implementation. The former provides the ability to add lots of
bells and whistles over the latter, but the latter executes with worst case
linear time complexity.

With that out of the way, if you've used `-P` with ripgrep, you may have
noticed that it can be slower. The reasons for why this is are quite complex,
and they are complex because the optimizations that ripgrep uses to implement
fast search are complex.

The task ripgrep has before it is somewhat simple; all it needs to do is search
a file for occurrences of some pattern and then print the lines containing
those occurrences. The problem lies in what is considered a valid match and how
exactly we read the bytes from a file.

In terms of what is considered a valid match, remember that ripgrep will only
report matches spanning a single line by default. The problem here is that
some patterns can match across multiple lines, and ripgrep needs to prevent
that from happening. For example, `foo\sbar` will match `foo\nbar`. The most
obvious way to achieve this is to read the data from a file, and then apply
the pattern search to that data for each line. The problem with this approach
is that it can be quite slow; it would be much faster to let the pattern
search across as much data as possible. It's faster because it gets rid of the
overhead of finding the boundaries of every line, and also because it gets rid
of the overhead of starting and stopping the pattern search for every single
line. (This is operating under the general assumption that matching lines are
much rarer than non-matching lines.)

It turns out that we can use the faster approach by applying a very simple
restriction to the pattern: *statically prevent* the pattern from matching
through a `\n` character. Namely, when given a pattern like `foo\sbar`,
ripgrep will remove `\n` from the `\s` character class automatically. In some
cases, a simple removal is not so easy. For example, ripgrep will return an
error when your pattern includes a `\n` literal:

```
$ rg '\n'
the literal '"\n"' is not allowed in a regex
```

So what does this have to do with PCRE2? Well, ripgrep's default regex engine
exposes APIs for doing syntactic analysis on the pattern in a way that makes
it quite easy to strip `\n` from the pattern (or otherwise detect it and report
an error if stripping isn't possible). PCRE2 seemingly does not provide a
similar API, so ripgrep does not do any stripping when PCRE2 is enabled. This
forces ripgrep to use the "slow" search strategy of searching each line
individually.

OK, so if enabling PCRE2 slows down the default method of searching because it
forces matches to be limited to a single line, then why is PCRE2 also sometimes
slower when performing multiline searches? Well, that's because there are
*multiple* reasons why using PCRE2 in ripgrep can be slower than the default
regex engine. This time, blame PCRE2's Unicode support, which ripgrep enables
by default. In particular, PCRE2 cannot simultaneously enable Unicode support
and search arbitrary data. That is, when PCRE2's Unicode support is enabled,
the data **must** be valid UTF-8 (to do otherwise is to invoke undefined
behavior). This is in contrast to ripgrep's default regex engine, which can
enable Unicode support and still search arbitrary data. ripgrep's default
regex engine simply won't match invalid UTF-8 for a pattern that can otherwise
only match valid UTF-8. Why doesn't PCRE2 do the same? This author isn't
familiar with its internals, so we can't comment on it here.

The bottom line here is that we can't enable PCRE2's Unicode support without
simultaneously incurring a performance penalty for ensuring that we are
searching valid UTF-8. In particular, ripgrep will transcode the contents
of each file to UTF-8 while replacing invalid UTF-8 data with the Unicode
replacement codepoint. ripgrep then disables PCRE2's own internal UTF-8
checking, since we've guaranteed the data we hand it will be valid UTF-8. The
reason why ripgrep takes this approach is because if we do hand PCRE2 invalid
UTF-8, then it will report a match error if it comes across an invalid UTF-8
sequence. This is not good news for ripgrep, since it will stop it from
searching the rest of the file, and will also print potentially undesirable
error messages to users.

All right, the above is a lot of information to swallow if you aren't already
familiar with ripgrep internals. Let's make this concrete with some examples.
First, let's get some data big enough to magnify the performance differences:

```
$ curl -O 'https://burntsushi.net/stuff/subtitles2016-sample.gz'
$ gzip -d subtitles2016-sample
$ md5sum subtitles2016-sample
e3cb796a20bbc602fbfd6bb43bda45f5 subtitles2016-sample
```

To search this data, we will use the pattern `^\w{42}$`, which contains exactly
one hit in the file and has no literals. Having no literals is important,
because it ensures that the regex engine won't use literal optimizations to
speed up the search. In other words, it lets us reason coherently about the
actual task that the regex engine is performing.

Let's now walk through a few examples in light of the information above. First,
let's consider the default search using ripgrep's default regex engine and
then the same search with PCRE2:

```
$ time rg '^\w{42}$' subtitles2016-sample
21225780:EverymajordevelopmentinthehistoryofAmerica

real    0m1.783s
user    0m1.731s
sys     0m0.051s

$ time rg -P '^\w{42}$' subtitles2016-sample
21225780:EverymajordevelopmentinthehistoryofAmerica

real    0m2.458s
user    0m2.419s
sys     0m0.038s
```

In this particular example, both pattern searches are using a Unicode aware
`\w` character class and both are counting lines in order to report line
numbers. The key difference here is that the first search will not search
line by line, but the second one will. We can observe which strategy ripgrep
uses by passing the `--trace` flag:

```
$ rg '^\w{42}$' subtitles2016-sample --trace
[... snip ...]
TRACE|grep_searcher::searcher|grep-searcher/src/searcher/mod.rs:622: Some("subtitles2016-sample"): searching via memory map
TRACE|grep_searcher::searcher|grep-searcher/src/searcher/mod.rs:712: slice reader: searching via slice-by-line strategy
TRACE|grep_searcher::searcher::core|grep-searcher/src/searcher/core.rs:61: searcher core: will use fast line searcher
[... snip ...]

$ rg -P '^\w{42}$' subtitles2016-sample --trace
[... snip ...]
TRACE|grep_searcher::searcher|grep-searcher/src/searcher/mod.rs:622: Some("subtitles2016-sample"): searching via memory map
TRACE|grep_searcher::searcher|grep-searcher/src/searcher/mod.rs:705: slice reader: needs transcoding, using generic reader
TRACE|grep_searcher::searcher|grep-searcher/src/searcher/mod.rs:685: generic reader: searching via roll buffer strategy
TRACE|grep_searcher::searcher::core|grep-searcher/src/searcher/core.rs:63: searcher core: will use slow line searcher
[... snip ...]
```

The first says it is using the "fast line searcher" where as the latter says
it is using the "slow line searcher." The latter also shows that we are
decoding the contents of the file, which also impacts performance.

Interestingly, in this case, the pattern does not match a `\n` and the file
we're searching is valid UTF-8, so neither the slow line-by-line search
strategy nor the decoding are necessary. We could fix the former issue with
better PCRE2 introspection APIs. We can actually fix the latter issue with
ripgrep's `--no-encoding` flag, which prevents the automatic UTF-8 decoding,
but will enable PCRE2's own UTF-8 validity checking. Unfortunately, it's slower
in my build of ripgrep:

```
$ time rg -P '^\w{42}$' subtitles2016-sample --no-encoding
21225780:EverymajordevelopmentinthehistoryofAmerica

real    0m3.074s
user    0m3.021s
sys     0m0.051s
```

(Tip: use the `--trace` flag to verify that no decoding in ripgrep is
happening.)

A possible reason why PCRE2's UTF-8 checking is slower is because it might
not be better than the highly optimized UTF-8 checking routines found in the
[`encoding_rs`](https://github.com/hsivonen/encoding_rs) library, which is what
ripgrep uses for UTF-8 decoding. Moreover, my build of ripgrep enables
`encoding_rs`'s SIMD optimizations, which may be in play here.

Also, note that using the `--no-encoding` flag can cause PCRE2 to report
invalid UTF-8 errors, which causes ripgrep to stop searching the file:

```
$ cat invalid-utf8
foobar

$ xxd invalid-utf8
00000000: 666f 6fff 6261 720a  foo.bar.

$ rg foo invalid-utf8
1:foobar

$ rg -P foo invalid-utf8
1:foo<6F>bar

$ rg -P foo invalid-utf8 --no-encoding
invalid-utf8: PCRE2: error matching: UTF-8 error: illegal byte (0xfe or 0xff)
```

All right, so at this point, you might think that we could remove the penalty
for line-by-line searching by enabling multiline search. After all, our
particular pattern can't match across multiple lines anyway, so we'll still get
the results we want. Let's try it:

```
$ time rg -U '^\w{42}$' subtitles2016-sample
21225780:EverymajordevelopmentinthehistoryofAmerica

real    0m1.803s
user    0m1.748s
sys     0m0.054s

$ time rg -P -U '^\w{42}$' subtitles2016-sample
21225780:EverymajordevelopmentinthehistoryofAmerica

real    0m2.962s
user    0m2.246s
sys     0m0.713s
```

Search times remain the same with the default regex engine, but the PCRE2
search gets _slower_. What happened? The secrets can be revealed with the
`--trace` flag once again. In the former case, ripgrep actually detects that
the pattern can't match across multiple lines, and so will fall back to the
"fast line search" strategy as with our search without `-U`.

However, for PCRE2, things are much worse. Namely, since Unicode mode is still
enabled, ripgrep is still going to decode UTF-8 to ensure that it hands only
valid UTF-8 to PCRE2. Unfortunately, one key downside of multiline search is
that ripgrep cannot do it incrementally. Since matches can be arbitrarily long,
ripgrep actually needs the entire file in memory at once. Normally, we can use
a memory map for this, but because we need to UTF-8 decode the file before
searching it, ripgrep winds up reading the entire contents of the file on to
the heap before executing a search. Owch.

OK, so Unicode is killing us here. The file we're searching is _mostly_ ASCII,
so maybe we're OK with missing some data. (Try `rg '[\w--\p{ascii}]'` to see
non-ASCII word characters that an ASCII-only `\w` character class would miss.)
We can disable Unicode in both searches, but this is done differently depending
on the regex engine we use:

```
$ time rg '(?-u)^\w{42}$' subtitles2016-sample
21225780:EverymajordevelopmentinthehistoryofAmerica

real    0m1.714s
user    0m1.669s
sys     0m0.044s

$ time rg -P '^\w{42}$' subtitles2016-sample --no-pcre2-unicode
21225780:EverymajordevelopmentinthehistoryofAmerica

real    0m1.997s
user    0m1.958s
sys     0m0.037s
```

For the most part, ripgrep's default regex engine performs about the same.
PCRE2 does improve a little bit, and is now almost as fast as the default
regex engine. If you look at the output of `--trace`, you'll see that ripgrep
will no longer perform UTF-8 decoding, but it does still use the slow
line-by-line searcher.

At this point, we can combine all of our insights above: let's try to get off
of the slow line-by-line searcher by enabling multiline mode, and let's stop
UTF-8 decoding by disabling Unicode support:

```
$ time rg -U '(?-u)^\w{42}$' subtitles2016-sample
21225780:EverymajordevelopmentinthehistoryofAmerica

real    0m1.714s
user    0m1.655s
sys     0m0.058s

$ time rg -P -U '^\w{42}$' subtitles2016-sample --no-pcre2-unicode
21225780:EverymajordevelopmentinthehistoryofAmerica

real    0m1.121s
user    0m1.071s
sys     0m0.048s
```

Ah, there's PCRE2's JIT shining! ripgrep's default regex engine once again
remains about the same, but PCRE2 no longer needs to search line-by-line and it
no longer needs to do any kind of UTF-8 checks. This allows the file to get
memory mapped and passed right through PCRE2's JIT at impressive speeds. (As
a brief and interesting historical note, the configuration of "memory map +
multiline + no-Unicode" is exactly the configuration used by The Silver
Searcher. This analysis perhaps sheds some reasoning as to why that
configuration is useful!)

In summary, if you want PCRE2 to go as fast as possible and you don't care
about Unicode and you don't care about matches possibly spanning across
multiple lines, then enable multiline mode with `-U` and disable PCRE2's
Unicode support with the `--no-pcre2-unicode` flag.

Caveat emptor: This author is not a PCRE2 expert, so there may be APIs that can
improve performance that the author missed. Similarly, there may be alternative
designs for a searching tool that are more amenable to how PCRE2 works.


<h3 name="rg-other-cmd">
When I run <code>rg</code>, why does it execute some other command?
</h3>
@@ -466,3 +812,213 @@ that the console will use for printing to UTF-8 with
`[System.Console]::OutputEncoding = [System.Text.Encoding]::UTF8`. This
will also reset when PowerShell is restarted, so you can add that line
to your profile as well if you want to make the setting permanent.

<h3 name="search-and-replace">
How can I search and replace with ripgrep?
</h3>

Using ripgrep alone, you can't. ripgrep is a search tool that will never
touch your files. However, the output of ripgrep can be piped to other tools
that do modify files on disk. See
[this issue](https://github.com/BurntSushi/ripgrep/issues/74) for more
information.

sed is one such tool that can modify files on disk. sed can take a filename
and a substitution command to search and replace in the specified file.
Files containing matching patterns can be provided to sed using

```
rg foo --files-with-matches
```

The output of this command is a list of filenames that contain a match for
the `foo` pattern.

This list can be piped into `xargs`, which will split the filenames from
standard input into arguments for the command following xargs. You can use this
combination to pipe a list of filenames into sed for replacement. For example:

```
rg foo --files-with-matches | xargs sed -i 's/foo/bar/g'
```

will replace all instances of 'foo' with 'bar' in the files in which
ripgrep finds the foo pattern. The `-i` flag to sed indicates that you are
editing files in place, and `s/foo/bar/g` says that you are performing a
**s**ubstitution of the pattern `foo` for `bar`, and that you are doing this
substitution **g**lobally (all occurrences of the pattern in each file).

Note: the above command assumes that you are using GNU sed. If you are using
BSD sed (the default on macOS and FreeBSD) then you must modify the above
command to be the following:

```
rg foo --files-with-matches | xargs sed -i '' 's/foo/bar/g'
```

The `-i` flag in BSD sed requires a file extension to be given to make backups
for all modified files. Specifying the empty string prevents file backups from
being made.

Finally, if any of your file paths contain whitespace in them, then you might
need to delimit your file paths with a NUL terminator. This requires telling
ripgrep to output NUL bytes between each path, and telling xargs to read paths
delimited by NUL bytes:

```
rg foo --files-with-matches -0 | xargs -0 sed -i 's/foo/bar/g'
```

To learn more about sed, see the sed manual
[here](https://www.gnu.org/software/sed/manual/sed.html).

Additionally, Facebook has a tool called
[fastmod](https://github.com/facebookincubator/fastmod)
that uses some of the same libraries as ripgrep and might provide a more
ergonomic search-and-replace experience.


<h3 name="license">
How is ripgrep licensed?
</h3>

ripgrep is dual licensed under the
[Unlicense](https://unlicense.org/)
and MIT licenses. Specifically, you may use ripgrep under the terms of either
license.

The reason why ripgrep is dual licensed this way is two-fold:

1. I, as ripgrep's author, would like to participate in a small bit of
   ideological activism by promoting the Unlicense's goal: to disclaim
   copyright monopoly interest.
2. I, as ripgrep's author, would like as many people to use ripgrep as
   possible. Since the Unlicense is not a proven or well known license, ripgrep
   is also offered under the MIT license, which is ubiquitous and accepted by
   almost everyone.

More specifically, ripgrep and all its dependencies are compatible with this
licensing choice. In particular, ripgrep's dependencies (direct and transitive)
will always be limited to permissive licenses. That is, ripgrep will never
depend on code that is not permissively licensed. This means rejecting any
dependency that uses a copyleft license such as the GPL, LGPL, MPL or any of
the Creative Commons ShareAlike licenses. Whether the license is "weak"
copyleft or not does not matter; ripgrep will **not** depend on it.


<h3 name="posix4ever">
Can ripgrep replace grep?
</h3>

Yes and no.

If, upon hearing that "ripgrep can replace grep," you *actually* hear, "ripgrep
can be used in every instance grep can be used, in exactly the same way, for
the same use cases, with exactly the same bug-for-bug behavior," then no,
ripgrep trivially *cannot* replace grep. Moreover, ripgrep will *never* replace
grep.

If, upon hearing that "ripgrep can replace grep," you *actually* hear, "ripgrep
can replace grep in some cases and not in other use cases," then yes, that is
indeed true!

Let's go over some of those use cases in favor of ripgrep. Some of these may
not apply to you. That's OK. There may be other use cases not listed here that
do apply to you. That's OK too.

(For all claims related to performance in the following words, see my
[blog post](https://blog.burntsushi.net/ripgrep/)
introducing ripgrep.)

* Are you frequently searching a repository of code? If so, ripgrep might be a
  good choice since there's likely a good chunk of your repository that you
  don't want to search. grep can, of course, be made to filter files using
  recursive search, and if you don't mind writing out the requisite `--exclude`
  rules or writing wrapper scripts, then grep might be sufficient. (I'm not
  kidding, I myself did this with grep for almost a decade before writing
  ripgrep.) But if you instead enjoy having a search tool respect your
  `.gitignore`, then ripgrep might be perfect for you!
* Are you frequently searching non-ASCII text that is UTF-8 encoded? One of
  ripgrep's key features is that it can handle Unicode features in your
  patterns in a way that tends to be faster than GNU grep. Unicode features
  in ripgrep are enabled by default; there is no need to configure your locale
  settings to use ripgrep properly because ripgrep doesn't respect your locale
  settings.
* Do you need to search UTF-16 files and you don't want to bother explicitly
  transcoding them? Great. ripgrep does this for you automatically. No need
  to enable it.
* Do you need to search a large directory of large files? ripgrep uses
  parallelism by default, which tends to make it faster than a standard
  `grep -r` search. However, if you're OK writing the occasional
  `find ./ -print0 | xargs -P8 -0 grep` command, then maybe grep is good
  enough.

Here are some cases where you might *not* want to use ripgrep. The same caveats
for the previous section apply.

* Are you writing portable shell scripts intended to work in a variety of
  environments? Great, probably not a good idea to use ripgrep! ripgrep has
  nowhere near the ubiquity of grep, so if you do use ripgrep, you might need
  to futz with the installation process more than you would with grep.
* Do you care about POSIX compatibility? If so, then you can't use ripgrep
  because it never was, isn't and never will be POSIX compatible.
* Do you hate tools that try to do something smart? If so, ripgrep is all about
  being smart, so you might prefer to just stick with grep.
* Is there a particular feature of grep you rely on that ripgrep either doesn't
  have or never will have? If the former, file a bug report, maybe ripgrep can
  do it! If the latter, well, then, just use grep.


<h3 name="intentcountsforsomething">
What does the "rip" in ripgrep mean?
</h3>

When I first started writing ripgrep, I called it `rep`, intending it to be a
shorter variant of `grep`. Soon after, I renamed it to `xrep` since `rep`
wasn't obvious enough of a name for my taste. And also because adding `x` to
anything always makes it better, right?

Before ripgrep's first public release, I decided that I didn't like `xrep`. I
thought it was slightly awkward to type, and despite my previous praise of the
letter `x`, I kind of thought it was pretty lame. Being someone who really
likes Rust, I wanted to call it "rustgrep" or maybe "rgrep" for short. But I
thought that was just as lame, and maybe a little too in-your-face. But I
wanted to continue using `r` so I could at least pretend Rust had something to
do with it.

I spent a couple of days trying to think of very short words that began with
the letter `r` that were even somewhat related to the task of searching. I
don't remember how it popped into my head, but "rip" came up as something that
meant "fast," as in, "to rip through your text." The fact that RIP is also
an initialism for "Rest in Peace" (as in, "ripgrep kills grep") never really
dawned on me. Perhaps the coincidence is too striking to believe that, but
I didn't realize it until someone explicitly pointed it out to me after the
initial public release. I admit that I found it mildly amusing, but if I had
realized it myself before the public release, I probably would have pressed on
and chosen a different name. Alas, renaming things after a release is hard, so I
decided to mush on.

Given the fact that
[ripgrep never was, is or will be a 100% drop-in replacement for
grep](#posix4ever),
ripgrep is neither actually a "grep killer" nor was it ever intended to be. It
certainly does eat into some of its use cases, but that's nothing that other
tools like ack or The Silver Searcher weren't already doing.


<h3 name="donations">
How can I donate to ripgrep or its maintainers?
</h3>

As of now, you can't. While I believe the various efforts that are being
undertaken to help fund FOSS are extremely important, they aren't a good fit
for me. ripgrep is and I hope will remain a project of love that I develop in
my free time. As such, involving money---even in the form of donations given
without expectations---would severely change that dynamic for me personally.

Instead, I'd recommend donating to something else that is doing work that you
find meaningful. If you would like suggestions, then my favorites are:

* [The Internet Archive](https://archive.org/donate/)
* [Rails Girls](https://railsgirlssummerofcode.org/campaign/)
* [Wikipedia](https://wikimediafoundation.org/support/)

383
GUIDE.md
@@ -18,6 +18,8 @@ translatable to any command line shell environment.
* [Replacements](#replacements)
* [Configuration file](#configuration-file)
* [File encoding](#file-encoding)
* [Binary data](#binary-data)
* [Preprocessor](#preprocessor)
* [Common options](#common-options)


@@ -58,6 +60,10 @@ $ rg fast README.md
129: optimizations to make searching very fast.
```

(**Note:** If you see an error message from ripgrep saying that it didn't
search any files, then re-run ripgrep with the `--debug` flag. One likely cause
of this is that you have a `*` rule in a `$HOME/.gitignore` file.)

So what happened here? ripgrep read the contents of `README.md`, and for each
line that contained `fast`, ripgrep printed it to your terminal. ripgrep also
included the line number for each line by default. If your terminal supports
@@ -105,7 +111,7 @@ colors, you'll notice that `faster` will be highlighted instead of just the

It is beyond the scope of this guide to provide a full tutorial on regular
expressions, but ripgrep's specific syntax is documented here:
https://docs.rs/regex/0.2.5/regex/#syntax
https://docs.rs/regex/*/regex/#syntax


### Recursive search
@@ -171,16 +177,21 @@ After recursive search, ripgrep's most important feature is what it *doesn't*
search. By default, when you search a directory, ripgrep will ignore all of
the following:

1. Files and directories that match the rules in your `.gitignore` glob
   pattern.
1. Files and directories that match glob patterns in these three categories:
   1. gitignore globs (including global and repo-specific globs).
   2. `.ignore` globs, which take precedence over all gitignore globs
      when there's a conflict.
   3. `.rgignore` globs, which take precedence over all `.ignore` globs
      when there's a conflict.
2. Hidden files and directories.
3. Binary files. (ripgrep considers any file with a `NUL` byte to be binary.)
4. Symbolic links aren't followed.

All of these things can be toggled using various flags provided by ripgrep:

1. You can disable `.gitignore` handling with the `--no-ignore` flag.
2. Hidden files and directories can be searched with the `--hidden` flag.
1. You can disable all ignore-related filtering with the `--no-ignore` flag.
2. Hidden files and directories can be searched with the `--hidden` (`-.` for
   short) flag.
3. Binary files can be searched via the `--text` (`-a` for short) flag.
   Be careful with this flag! Binary files may emit control characters to your
   terminal, which might cause strange behavior.
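
As a rough sketch of how these flags combine (the pattern is just a
placeholder), the following disables ignore rules and also searches hidden
files and directories:

```
$ rg --no-ignore --hidden 'TODO'
```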
@@ -223,7 +234,7 @@ with the following contents:
```

ripgrep treats `.ignore` files with higher precedence than `.gitignore` files
(and treats `.rgignore` files with higher precdence than `.ignore` files).
(and treats `.rgignore` files with higher precedence than `.ignore` files).
This means ripgrep will see the `!log/` whitelist rule first and search that
directory.

@@ -231,6 +242,11 @@ Like `.gitignore`, a `.ignore` file can be placed in any directory. Its rules
will be processed with respect to the directory it resides in, just like
`.gitignore`.

To process `.gitignore` and `.ignore` files case insensitively, use the flag
`--ignore-file-case-insensitive`. This is especially useful on case insensitive
file systems like those on Windows and macOS. Note though that this can come
with a significant performance penalty, and is therefore disabled by default.

For a more in depth description of how glob patterns in a `.gitignore` file
are interpreted, please see `man gitignore`.

@@ -366,7 +382,7 @@ make: *.mak, *.mk, GNUmakefile, Gnumakefile, Makefile, gnumakefile, makefile
By default, ripgrep comes with a bunch of pre-defined types. Generally, these
types correspond to well known public formats. But you can define your own
types as well. For example, perhaps you frequently search "web" files, which
consist of Javascript, HTML and CSS:
consist of JavaScript, HTML and CSS:

```
$ rg --type-add 'web:*.html' --type-add 'web:*.css' --type-add 'web:*.js' -tweb title
@@ -401,6 +417,21 @@ alias rg="rg --type-add 'web:*.{html,css,js}'"
or add `--type-add=web:*.{html,css,js}` to your ripgrep configuration file.
([Configuration files](#configuration-file) are covered in more detail later.)

#### The special `all` file type

A special option supported by the `--type` flag is `all`. `--type all` looks
for a match in any of the supported file types listed by `--type-list`,
including those added on the command line using `--type-add`. It's equivalent
to the command `rg --type agda --type asciidoc --type asm ...`, where `...`
stands for a list of `--type` flags for the rest of the types in `--type-list`.

As an example, let's suppose you have a shell script in your current directory,
`my-shell-script`, which includes a shell library, `my-shell-library.bash`.
Both `rg --type sh` and `rg --type all` would only search for matches in
`my-shell-library.bash`, not `my-shell-script`, because the globs matched
by the `sh` file type don't include files without an extension. On the
other hand, `rg --type-not all` would search `my-shell-script` but not
`my-shell-library.bash`.
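
A purely illustrative pair of commands (the search term is a placeholder)
shows the contrast:

```
$ rg --type all title
$ rg --type-not all title
```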
|
||||
|
||||
### Replacements
|
||||
|
||||
@@ -516,9 +547,9 @@ config file. Once the environment variable is set, open the file and just type
|
||||
in the flags you want set automatically. There are only two rules for
|
||||
describing the format of the config file:
|
||||
|
||||
1. Every line is a shell argument, after trimming ASCII whitespace.
|
||||
2. Lines starting with `#` (optionally preceded by any amount of
|
||||
ASCII whitespace) are ignored.
|
||||
1. Every line is a shell argument, after trimming whitespace.
|
||||
2. Lines starting with `#` (optionally preceded by any amount of whitespace)
|
||||
are ignored.
|
||||
|
||||
In particular, there is no escaping. Each line is given to ripgrep as a single
|
||||
command line argument verbatim.
|
||||
@@ -528,13 +559,21 @@ formatting peculiarities:
|
||||
|
||||
```
|
||||
$ cat $HOME/.ripgreprc
|
||||
# Don't let ripgrep vomit really long lines to my terminal.
|
||||
# Don't let ripgrep vomit really long lines to my terminal, and show a preview.
|
||||
--max-columns=150
|
||||
--max-columns-preview
|
||||
|
||||
# Add my 'web' type.
|
||||
--type-add
|
||||
web:*.{html,css,js}*
|
||||
|
||||
# Using glob patterns to include/exclude files or folders
|
||||
--glob=!git/*
|
||||
|
||||
# or
|
||||
--glob
|
||||
!git/*
|
||||
|
||||
# Set the colors.
|
||||
--colors=line:none
|
||||
--colors=line:style:bold
|
||||
@@ -569,7 +608,7 @@ override it.
If you're confused about what configuration file ripgrep is reading arguments
from, then running ripgrep with the `--debug` flag should help clarify things.
The debug output should note what config file is being loaded and the arugments
The debug output should note what config file is being loaded and the arguments
that have been read from the configuration.
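For instance, something along these lines will surface the relevant portion of
the debug output (the pattern is a placeholder and the exact wording of the log
messages may vary between versions):

```
$ rg --debug foo 2>&1 | rg -i config
```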
|
||||
|
||||
Finally, if you want to make absolutely sure that ripgrep *isn't* reading a
|
||||
@@ -587,13 +626,14 @@ topic, but we can try to summarize its relevancy to ripgrep:
|
||||
* Files are generally just a bundle of bytes. There is no reliable way to know
|
||||
their encoding.
|
||||
* Either the encoding of the pattern must match the encoding of the files being
|
||||
searched, or a form of transcoding must be performed converts either the
|
||||
searched, or a form of transcoding must be performed that converts either the
|
||||
pattern or the file to the same encoding as the other.
|
||||
* ripgrep tends to work best on plain text files, and among plain text files,
|
||||
the most popular encodings likely consist of ASCII, latin1 or UTF-8. As
|
||||
a special exception, UTF-16 is prevalent in Windows environments
|
||||
|
||||
In light of the above, here is how ripgrep behaves:
|
||||
In light of the above, here is how ripgrep behaves when `--encoding auto` is
|
||||
given, which is the default:
|
||||
|
||||
* All input is assumed to be ASCII compatible (which means every byte that
|
||||
corresponds to an ASCII codepoint actually is an ASCII codepoint). This
|
||||
@@ -609,12 +649,15 @@ In light of the above, here is how ripgrep behaves:
|
||||
they correspond to a UTF-16 BOM, then ripgrep will transcode the contents of
|
||||
the file from UTF-16 to UTF-8, and then execute the search on the transcoded
|
||||
version of the file. (This incurs a performance penalty since transcoding
|
||||
is slower than regex searching.)
|
||||
is needed in addition to regex searching.) If the file contains invalid
|
||||
UTF-16, then the Unicode replacement codepoint is substituted in place of
|
||||
invalid code units.
|
||||
* To handle other cases, ripgrep provides a `-E/--encoding` flag, which permits
you to specify an encoding from the
[Encoding Standard](https://encoding.spec.whatwg.org/#concept-encoding-get).
ripgrep will assume *all* files searched are the encoding specified and
will perform a transcoding step just like in the UTF-16 case described above.
ripgrep will assume *all* files searched are the encoding specified (unless
the file has a BOM) and will perform a transcoding step just like in the
UTF-16 case described above.
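For example, if you have a latin-1 (ISO-8859-1) encoded file lying around (the
file name here is made up), a search for a non-ASCII pattern might look like
this:

```
$ rg -E latin1 'café' legacy-notes.txt
```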
|
||||
|
||||
By default, ripgrep will not require its input be valid UTF-8. That is, ripgrep
|
||||
can and will search arbitrary bytes. The key here is that if you're searching
|
||||
@@ -624,9 +667,26 @@ pattern won't find anything. With all that said, this mode of operation is
|
||||
important, because it lets you find ASCII or UTF-8 *within* files that are
|
||||
otherwise arbitrary bytes.
|
||||
|
||||
As a special case, the `-E/--encoding` flag supports the value `none`, which
|
||||
will completely disable all encoding related logic, including BOM sniffing.
|
||||
When `-E/--encoding` is set to `none`, ripgrep will search the raw bytes of
|
||||
the underlying file with no transcoding step. For example, here's how you might
|
||||
search the raw UTF-16 encoding of the string `Шерлок`:
|
||||
|
||||
```
|
||||
$ rg '(?-u)\(\x045\x04@\x04;\x04>\x04:\x04' -E none -a some-utf16-file
|
||||
```
|
||||
|
||||
Of course, that's just an example meant to show how one can drop down into
|
||||
raw bytes. Namely, the simpler command works as you might expect automatically:
|
||||
|
||||
```
|
||||
$ rg 'Шерлок' some-utf16-file
|
||||
```
|
||||
|
||||
Finally, it is possible to disable ripgrep's Unicode support from within the
|
||||
pattern regular expression. For example, let's say you wanted `.` to match any
|
||||
byte rather than any Unicode codepoint. (You might want this while searching a
|
||||
regular expression. For example, let's say you wanted `.` to match any byte
|
||||
rather than any Unicode codepoint. (You might want this while searching a
|
||||
binary file, since `.` by default will not match invalid UTF-8.) You could do
|
||||
this by disabling Unicode via a regular expression flag:
|
||||
|
||||
@@ -643,6 +703,282 @@ $ rg '\w(?-u:\w)\w'
|
||||
```
|
||||
|
||||
|
||||
### Binary data
|
||||
|
||||
In addition to skipping hidden files and files in your `.gitignore` by default,
|
||||
ripgrep also attempts to skip binary files. ripgrep does this by default
|
||||
because binary files (like PDFs or images) are typically not things you want to
|
||||
search when searching for regex matches. Moreover, if content in a binary file
|
||||
did match, then it's possible for undesirable binary data to be printed to your
|
||||
terminal and wreak havoc.
|
||||
|
||||
Unfortunately, unlike skipping hidden files and respecting your `.gitignore`
|
||||
rules, a file cannot as easily be classified as binary. In order to figure out
|
||||
whether a file is binary, the most effective heuristic that balances
|
||||
correctness with performance is to simply look for `NUL` bytes. At that point,
|
||||
the determination is simple: a file is considered "binary" if and only if it
|
||||
contains a `NUL` byte somewhere in its contents.
|
||||
|
||||
The issue is that while most binary files will have a `NUL` byte toward the
|
||||
beginning of its contents, this is not necessarily true. The `NUL` byte might
|
||||
be the very last byte in a large file, but that file is still considered
|
||||
binary. While this leads to a fair amount of complexity inside ripgrep's
|
||||
implementation, it also results in some unintuitive user experiences.
|
||||
|
||||
At a high level, ripgrep operates in three different modes with respect to
|
||||
binary files:
|
||||
|
||||
1. The default mode is to attempt to remove binary files from a search
|
||||
completely. This is meant to mirror how ripgrep removes hidden files and
|
||||
files in your `.gitignore` automatically. That is, as soon as a file is
|
||||
detected as binary, searching stops. If a match was already printed (because
|
||||
it was detected long before a `NUL` byte), then ripgrep will print a warning
|
||||
message indicating that the search stopped prematurely. This default mode
|
||||
**only applies to files searched by ripgrep as a result of recursive
|
||||
directory traversal**, which is consistent with ripgrep's other automatic
|
||||
filtering. For example, `rg foo .file` will search `.file` even though it
|
||||
is hidden. Similarly, `rg foo binary-file` will search `binary-file` in
|
||||
"binary" mode automatically.
|
||||
2. Binary mode is similar to the default mode, except it will not always
|
||||
stop searching after it sees a `NUL` byte. Namely, in this mode, ripgrep
|
||||
will continue searching a file that is known to be binary until the first
|
||||
of two conditions is met: 1) the end of the file has been reached or 2) a
|
||||
match is or has been seen. This means that in binary mode, if ripgrep
|
||||
reports no matches, then there are no matches in the file. When a match does
|
||||
occur, ripgrep prints a message similar to one it prints when in its default
|
||||
mode indicating that the search has stopped prematurely. This mode can be
|
||||
forcefully enabled for all files with the `--binary` flag. The purpose of
|
||||
binary mode is to provide a way to discover matches in all files, but to
|
||||
avoid having binary data dumped into your terminal.
|
||||
3. Text mode completely disables all binary detection and searches all files
|
||||
as if they were text. This is useful when searching a file that is
|
||||
predominantly text but contains a `NUL` byte, or if you are specifically
|
||||
trying to search binary data. This mode can be enabled with the `-a/--text`
|
||||
flag. Note that when using this mode on very large binary files, it is
|
||||
possible for ripgrep to use a lot of memory.
|
||||
|
||||
Unfortunately, there is one additional complexity in ripgrep that can make it
difficult to reason about binary files. That is, the way binary detection works
depends on the way that ripgrep searches your files. Specifically:

* When ripgrep uses memory maps, then binary detection is only performed on the
first few kilobytes of the file in addition to every matching line.
* When ripgrep doesn't use memory maps, then binary detection is performed on
all bytes searched.

This means that whether a file is detected as binary or not can change based
on the internal search strategy used by ripgrep. If you prefer to keep
ripgrep's binary file detection consistent, then you can disable memory maps
via the `--no-mmap` flag. (The cost will be a small performance regression when
searching very large files on some platforms.)
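Here is a rough sketch of how the three modes described above are selected on
the command line (`foo` is a placeholder pattern and the explicit file name is
made up):

```
$ rg foo                # default: binary files found via directory traversal are skipped
$ rg --binary foo       # binary mode: also report whether binary files contain a match
$ rg -a foo weird-file  # text mode: search the file as if it were plain text
```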
|
||||
|
||||
|
||||
### Preprocessor
|
||||
|
||||
In ripgrep, a preprocessor is any type of command that can be run to transform
|
||||
the input of every file before ripgrep searches it. This makes it possible to
|
||||
search virtually any kind of content that can be automatically converted to
|
||||
text without having to teach ripgrep how to read said content.
|
||||
|
||||
One common example is searching PDFs. PDFs are first and foremost meant to be
|
||||
displayed to users. But PDFs often have text streams in them that can be useful
|
||||
to search. In our case, we want to search Bruce Watson's excellent
|
||||
dissertation,
|
||||
[Taxonomies and Toolkits of Regular Language Algorithms](https://burntsushi.net/stuff/1995-watson.pdf).
|
||||
After downloading it, let's try searching it:
|
||||
|
||||
```
|
||||
$ rg 'The Commentz-Walter algorithm' 1995-watson.pdf
|
||||
$
|
||||
```
|
||||
|
||||
Surely, a dissertation on regular language algorithms would mention
|
||||
Commentz-Walter. Indeed it does, but our search isn't picking it up because
|
||||
PDFs are a binary format, and the text shown in the PDF may not be encoded as
|
||||
simple contiguous UTF-8. Namely, even passing the `-a/--text` flag to ripgrep
|
||||
will not make our search work.
|
||||
|
||||
One way to fix this is to convert the PDF to plain text first. This won't work
|
||||
well for all PDFs, but does great in a lot of cases. (Note that the tool we
|
||||
use, `pdftotext`, is part of the [poppler](https://poppler.freedesktop.org)
|
||||
PDF rendering library.)
|
||||
|
||||
```
|
||||
$ pdftotext 1995-watson.pdf > 1995-watson.txt
|
||||
$ rg 'The Commentz-Walter algorithm' 1995-watson.txt
|
||||
316:The Commentz-Walter algorithms : : : : : : : : : : : : : : :
|
||||
7165:4.4 The Commentz-Walter algorithms
|
||||
10062:in input string S , we obtain the Boyer-Moore algorithm. The Commentz-Walter algorithm
|
||||
17218:The Commentz-Walter algorithm (and its variants) displayed more interesting behaviour,
|
||||
17249:Aho-Corasick algorithms are used extensively. The Commentz-Walter algorithms are used
|
||||
17297: The Commentz-Walter algorithms (CW). In all versions of the CW algorithms, a common program skeleton is used with di erent shift functions. The CW algorithms are
|
||||
```
|
||||
|
||||
But having to explicitly convert every file can be a pain, especially when you
|
||||
have a directory full of PDF files. Instead, we can use ripgrep's preprocessor
|
||||
feature to search the PDF. ripgrep's `--pre` flag works by taking a single
|
||||
command name and then executing that command for every file that it searches.
|
||||
ripgrep passes the file path as the first and only argument to the command and
|
||||
also sends the contents of the file to stdin. So let's write a simple shell
|
||||
script that wraps `pdftotext` in a way that conforms to this interface:
|
||||
|
||||
```
|
||||
$ cat preprocess
|
||||
#!/bin/sh
|
||||
|
||||
exec pdftotext - -
|
||||
```
|
||||
|
||||
With `preprocess` in the same directory as `1995-watson.pdf`, we can now use it
|
||||
to search the PDF:
|
||||
|
||||
```
|
||||
$ rg --pre ./preprocess 'The Commentz-Walter algorithm' 1995-watson.pdf
|
||||
316:The Commentz-Walter algorithms : : : : : : : : : : : : : : :
|
||||
7165:4.4 The Commentz-Walter algorithms
|
||||
10062:in input string S , we obtain the Boyer-Moore algorithm. The Commentz-Walter algorithm
|
||||
17218:The Commentz-Walter algorithm (and its variants) displayed more interesting behaviour,
|
||||
17249:Aho-Corasick algorithms are used extensively. The Commentz-Walter algorithms are used
|
||||
17297: The Commentz-Walter algorithms (CW). In all versions of the CW algorithms, a common program skeleton is used with di erent shift functions. The CW algorithms are
|
||||
```
|
||||
|
||||
Note that `preprocess` must be resolvable to a command that ripgrep can read.
|
||||
The simplest way to do this is to put your preprocessor command in a directory
|
||||
that is in your `PATH` (or equivalent), or otherwise use an absolute path.
|
||||
|
||||
As a bonus, this turns out to be quite a bit faster than other specialized PDF
|
||||
grepping tools:
|
||||
|
||||
```
|
||||
$ time rg --pre ./preprocess 'The Commentz-Walter algorithm' 1995-watson.pdf -c
|
||||
6
|
||||
|
||||
real 0.697
|
||||
user 0.684
|
||||
sys 0.007
|
||||
maxmem 16 MB
|
||||
faults 0
|
||||
|
||||
$ time pdfgrep 'The Commentz-Walter algorithm' 1995-watson.pdf -c
|
||||
6
|
||||
|
||||
real 1.336
|
||||
user 1.310
|
||||
sys 0.023
|
||||
maxmem 16 MB
|
||||
faults 0
|
||||
```
|
||||
|
||||
If you wind up needing to search a lot of PDFs, then ripgrep's parallelism can
|
||||
make the speed difference even greater.
|
||||
|
||||
#### A more robust preprocessor
|
||||
|
||||
One of the problems with the aforementioned preprocessor is that it will fail
|
||||
if you try to search a file that isn't a PDF:
|
||||
|
||||
```
|
||||
$ echo foo > not-a-pdf
|
||||
$ rg --pre ./preprocess 'The Commentz-Walter algorithm' not-a-pdf
|
||||
not-a-pdf: preprocessor command failed: '"./preprocess" "not-a-pdf"':
|
||||
-------------------------------------------------------------------------------
|
||||
Syntax Warning: May not be a PDF file (continuing anyway)
|
||||
Syntax Error: Couldn't find trailer dictionary
|
||||
Syntax Error: Couldn't find trailer dictionary
|
||||
Syntax Error: Couldn't read xref table
|
||||
```
|
||||
|
||||
To fix this, we can make our preprocessor script a bit more robust by only
|
||||
running `pdftotext` when we think the input is a non-empty PDF:
|
||||
|
||||
```
|
||||
$ cat preprocessor
|
||||
#!/bin/sh
|
||||
|
||||
case "$1" in
|
||||
*.pdf)
|
||||
# The -s flag ensures that the file is non-empty.
|
||||
if [ -s "$1" ]; then
|
||||
exec pdftotext - -
|
||||
else
|
||||
exec cat
|
||||
fi
|
||||
;;
|
||||
*)
|
||||
exec cat
|
||||
;;
|
||||
esac
|
||||
```
|
||||
|
||||
We can even extend our preprocessor to search other kinds of files. Sometimes
we don't always know the file type from the file name, so we can use the `file`
utility to "sniff" the type of the file based on its contents:

```
$ cat processor
#!/bin/sh

case "$1" in
*.pdf)
  # The -s flag ensures that the file is non-empty.
  if [ -s "$1" ]; then
    exec pdftotext - -
  else
    exec cat
  fi
  ;;
*)
  case $(file "$1") in
  *Zstandard*)
    exec pzstd -cdq
    ;;
  *)
    exec cat
    ;;
  esac
  ;;
esac
```
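With that script saved as `processor` and marked executable, an invocation
might look like the following; the file names are only examples. Combining this
with `--pre-glob` (described next) avoids paying the preprocessor cost for
files that don't need it.

```
$ rg --pre ./processor 'needle' 1995-watson.pdf notes.txt logs.zst
```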
|
||||
|
||||
#### Reducing preprocessor overhead
|
||||
|
||||
There is one more problem with the above approach: it requires running a
preprocessor for every single file that ripgrep searches. If every file needs
a preprocessor, then this is OK. But if most don't, then this can substantially
slow down searches because of the overhead of launching new processes. You
can avoid this by telling ripgrep to only invoke the preprocessor when the file
path matches a glob. For example, consider the performance difference even when
searching a repository as small as ripgrep's:
|
||||
|
||||
```
|
||||
$ time rg --pre pre-rg 'fn is_empty' -c
|
||||
crates/globset/src/lib.rs:1
|
||||
crates/matcher/src/lib.rs:2
|
||||
crates/ignore/src/overrides.rs:1
|
||||
crates/ignore/src/gitignore.rs:1
|
||||
crates/ignore/src/types.rs:1
|
||||
|
||||
real 0.138
|
||||
user 0.485
|
||||
sys 0.209
|
||||
maxmem 7 MB
|
||||
faults 0
|
||||
|
||||
$ time rg --pre pre-rg --pre-glob '*.pdf' 'fn is_empty' -c
|
||||
crates/globset/src/lib.rs:1
|
||||
crates/ignore/src/types.rs:1
|
||||
crates/ignore/src/gitignore.rs:1
|
||||
crates/ignore/src/overrides.rs:1
|
||||
crates/matcher/src/lib.rs:2
|
||||
|
||||
real 0.008
|
||||
user 0.010
|
||||
sys 0.002
|
||||
maxmem 7 MB
|
||||
faults 0
|
||||
```
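If you use a preprocessor regularly, one way you might wire this up is via your
configuration file; `pre-rg` here stands in for whatever preprocessor script
you actually use:

```
--pre=pre-rg
--pre-glob=*.pdf
```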
|
||||
|
||||
|
||||
### Common options

ripgrep has a lot of flags. Too many to keep in your head at once. This section
@@ -657,6 +993,8 @@ used options that will likely impact how you use ripgrep on a regular basis.
* `-S/--smart-case`: This is similar to `--ignore-case`, but disables itself
  if the pattern contains any uppercase letters. Usually this flag is put into
  an alias or a config file.
* `-F/--fixed-strings`: Disable regular expression matching and treat the pattern
  as a literal string.
* `-w/--word-regexp`: Require that all matches of the pattern be surrounded
  by word boundaries. That is, given `pattern`, the `--word-regexp` flag will
  cause ripgrep to behave as if `pattern` were actually `\b(?:pattern)\b`.
@@ -664,10 +1002,11 @@ used options that will likely impact how you use ripgrep on a regular basis.
* `--files`: Print the files that ripgrep *would* search, but don't actually
  search them.
* `-a/--text`: Search binary files as if they were plain text.
* `-z/--search-zip`: Search compressed files (gzip, bzip2, lzma, xz). This is
  disabled by default.
* `-U/--multiline`: Permit matches to span multiple lines.
* `-z/--search-zip`: Search compressed files (gzip, bzip2, lzma, xz, lz4,
  brotli, zstd). This is disabled by default.
* `-C/--context`: Show the lines surrounding a match.
* `--sort-files`: Force ripgrep to sort its output by file name. (This disables
* `--sort path`: Force ripgrep to sort its output by file name. (This disables
  parallelism, so it might be slower.)
* `-L/--follow`: Follow symbolic links while recursively searching.
* `-M/--max-columns`: Limit the length of lines printed by ripgrep.
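And a couple of the flags above combined, just to show the shape of typical
invocations (the patterns and file type are arbitrary examples):

```
$ rg -S -C 2 -tpy 'parse_config'   # smart case, 2 lines of context, Python files only
$ rg -F 'foo(bar)'                 # fixed string: parentheses are not regex metacharacters
```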
|
||||
|
||||
README.md
@@ -1,17 +1,18 @@
ripgrep (rg)
------------
ripgrep is a line-oriented search tool that recursively searches your current
directory for a regex pattern while respecting your gitignore rules. ripgrep
has first class support on Windows, macOS and Linux, with binary downloads
available for [every release](https://github.com/BurntSushi/ripgrep/releases).
ripgrep is similar to other popular search tools like The Silver Searcher,
ack and grep.
ripgrep is a line-oriented search tool that recursively searches the current
directory for a regex pattern. By default, ripgrep will respect gitignore rules
and automatically skip hidden files/directories and binary files. (To disable
all automatic filtering by default, use `rg -uuu`.) ripgrep has first class
support on Windows, macOS and Linux, with binary downloads available for [every
release](https://github.com/BurntSushi/ripgrep/releases). ripgrep is similar to
other popular search tools like The Silver Searcher, ack and grep.
|
||||
|
||||
[](https://travis-ci.org/BurntSushi/ripgrep)
|
||||
[](https://ci.appveyor.com/project/BurntSushi/ripgrep)
|
||||
[](https://crates.io/crates/ripgrep)
|
||||
[](https://github.com/BurntSushi/ripgrep/actions)
|
||||
[](https://crates.io/crates/ripgrep)
|
||||
[](https://repology.org/project/ripgrep/badges)
|
||||
|
||||
Dual-licensed under MIT or the [UNLICENSE](http://unlicense.org).
|
||||
Dual-licensed under MIT or the [UNLICENSE](https://unlicense.org).
|
||||
|
||||
|
||||
### CHANGELOG
|
||||
@@ -23,126 +24,145 @@ Please see the [CHANGELOG](CHANGELOG.md) for a release history.
|
||||
* [Installation](#installation)
|
||||
* [User Guide](GUIDE.md)
|
||||
* [Frequently Asked Questions](FAQ.md)
|
||||
* [Regex syntax](https://docs.rs/regex/0.2.5/regex/#syntax)
|
||||
* [Regex syntax](https://docs.rs/regex/1/regex/#syntax)
|
||||
* [Configuration files](GUIDE.md#configuration-file)
|
||||
* [Shell completions](FAQ.md#complete)
|
||||
* [Building](#building)
|
||||
* [Translations](#translations)
|
||||
|
||||
|
||||
### Screenshot of search results
|
||||
|
||||
[](http://burntsushi.net/stuff/ripgrep1.png)
|
||||
[](https://burntsushi.net/stuff/ripgrep1.png)
|
||||
|
||||
|
||||
### Quick examples comparing tools
|
||||
|
||||
This example searches the entire Linux kernel source tree (after running
|
||||
`make defconfig && make -j8`) for `[A-Z]+_SUSPEND`, where all matches must be
|
||||
words. Timings were collected on a system with an Intel i7-6900K 3.2 GHz, and
|
||||
ripgrep was compiled with SIMD enabled.
|
||||
This example searches the entire
|
||||
[Linux kernel source tree](https://github.com/BurntSushi/linux)
|
||||
(after running `make defconfig && make -j8`) for `[A-Z]+_SUSPEND`, where
|
||||
all matches must be words. Timings were collected on a system with an Intel
|
||||
i7-6900K 3.2 GHz.
|
||||
|
||||
Please remember that a single benchmark is never enough! See my
|
||||
[blog post on ripgrep](http://blog.burntsushi.net/ripgrep/)
|
||||
[blog post on ripgrep](https://blog.burntsushi.net/ripgrep/)
|
||||
for a very detailed comparison with more benchmarks and analysis.
|
||||
|
||||
| Tool | Command | Line count | Time |
|
||||
| ---- | ------- | ---------- | ---- |
|
||||
| ripgrep (Unicode) | `rg -n -w '[A-Z]+_SUSPEND'` | 450 | **0.106s** |
|
||||
| [git grep](https://www.kernel.org/pub/software/scm/git/docs/git-grep.html) | `LC_ALL=C git grep -E -n -w '[A-Z]+_SUSPEND'` | 450 | 0.553s |
|
||||
| [The Silver Searcher](https://github.com/ggreer/the_silver_searcher) | `ag -w '[A-Z]+_SUSPEND'` | 450 | 0.589s |
|
||||
| [git grep (Unicode)](https://www.kernel.org/pub/software/scm/git/docs/git-grep.html) | `LC_ALL=en_US.UTF-8 git grep -E -n -w '[A-Z]+_SUSPEND'` | 450 | 2.266s |
|
||||
| [sift](https://github.com/svent/sift) | `sift --git -n -w '[A-Z]+_SUSPEND'` | 450 | 3.505s |
|
||||
| [ack](https://github.com/petdance/ack2) | `ack -w '[A-Z]+_SUSPEND'` | 1878 | 6.823s |
|
||||
| [The Platinum Searcher](https://github.com/monochromegane/the_platinum_searcher) | `pt -w -e '[A-Z]+_SUSPEND'` | 450 | 14.208s |
|
||||
| ripgrep (Unicode) | `rg -n -w '[A-Z]+_SUSPEND'` | 452 | **0.136s** |
|
||||
| [git grep](https://www.kernel.org/pub/software/scm/git/docs/git-grep.html) | `git grep -P -n -w '[A-Z]+_SUSPEND'` | 452 | 0.348s |
|
||||
| [ugrep (Unicode)](https://github.com/Genivia/ugrep) | `ugrep -r --ignore-files --no-hidden -I -w '[A-Z]+_SUSPEND'` | 452 | 0.506s |
|
||||
| [The Silver Searcher](https://github.com/ggreer/the_silver_searcher) | `ag -w '[A-Z]+_SUSPEND'` | 452 | 0.654s |
|
||||
| [git grep](https://www.kernel.org/pub/software/scm/git/docs/git-grep.html) | `LC_ALL=C git grep -E -n -w '[A-Z]+_SUSPEND'` | 452 | 1.150s |
|
||||
| [ack](https://github.com/beyondgrep/ack3) | `ack -w '[A-Z]+_SUSPEND'` | 452 | 4.054s |
|
||||
| [git grep (Unicode)](https://www.kernel.org/pub/software/scm/git/docs/git-grep.html) | `LC_ALL=en_US.UTF-8 git grep -E -n -w '[A-Z]+_SUSPEND'` | 452 | 4.205s |
|
||||
|
||||
(Yes, `ack` [has](https://github.com/petdance/ack2/issues/445) a
|
||||
[bug](https://github.com/petdance/ack2/issues/14).)
|
||||
|
||||
Here's another benchmark that disregards gitignore files and searches with a
|
||||
whitelist instead. The corpus is the same as in the previous benchmark, and the
|
||||
flags passed to each command ensure that they are doing equivalent work:
|
||||
Here's another benchmark on the same corpus as above that disregards gitignore
|
||||
files and searches with a whitelist instead. The corpus is the same as in the
|
||||
previous benchmark, and the flags passed to each command ensure that they are
|
||||
doing equivalent work:
|
||||
|
||||
| Tool | Command | Line count | Time |
|
||||
| ---- | ------- | ---------- | ---- |
|
||||
| ripgrep | `rg -L -u -tc -n -w '[A-Z]+_SUSPEND'` | 404 | **0.079s** |
|
||||
| [ucg](https://github.com/gvansickle/ucg) | `ucg --type=cc -w '[A-Z]+_SUSPEND'` | 390 | 0.163s |
|
||||
| [GNU grep](https://www.gnu.org/software/grep/) | `egrep -R -n --include='*.c' --include='*.h' -w '[A-Z]+_SUSPEND'` | 404 | 0.611s |
|
||||
| ripgrep | `rg -uuu -tc -n -w '[A-Z]+_SUSPEND'` | 388 | **0.096s** |
|
||||
| [ugrep](https://github.com/Genivia/ugrep) | `ugrep -r -n --include='*.c' --include='*.h' -w '[A-Z]+_SUSPEND'` | 388 | 0.493s |
|
||||
| [GNU grep](https://www.gnu.org/software/grep/) | `egrep -r -n --include='*.c' --include='*.h' -w '[A-Z]+_SUSPEND'` | 388 | 0.806s |
|
||||
|
||||
(`ucg` [has slightly different behavior in the presence of symbolic links](https://github.com/gvansickle/ucg/issues/106).)
|
||||
|
||||
And finally, a straight-up comparison between ripgrep and GNU grep on a single
|
||||
large file (~9.3GB,
|
||||
[`OpenSubtitles2016.raw.en.gz`](http://opus.lingfil.uu.se/OpenSubtitles2016/mono/OpenSubtitles2016.raw.en.gz)):
|
||||
And finally, a straight-up comparison between ripgrep, ugrep and GNU grep on a
|
||||
single large file cached in memory
|
||||
(~13GB, [`OpenSubtitles.raw.en.gz`](http://opus.nlpl.eu/download.php?f=OpenSubtitles/v2018/mono/OpenSubtitles.raw.en.gz)):
|
||||
|
||||
| Tool | Command | Line count | Time |
|
||||
| ---- | ------- | ---------- | ---- |
|
||||
| ripgrep | `rg -w 'Sherlock [A-Z]\w+'` | 5268 | **2.108s** |
|
||||
| [GNU grep](https://www.gnu.org/software/grep/) | `LC_ALL=C egrep -w 'Sherlock [A-Z]\w+'` | 5268 | 7.014s |
|
||||
| ripgrep | `rg -w 'Sherlock [A-Z]\w+'` | 7882 | **2.769s** |
|
||||
| [ugrep](https://github.com/Genivia/ugrep) | `ugrep -w 'Sherlock [A-Z]\w+'` | 7882 | 6.802s |
|
||||
| [GNU grep](https://www.gnu.org/software/grep/) | `LC_ALL=en_US.UTF-8 egrep -w 'Sherlock [A-Z]\w+'` | 7882 | 9.027s |
|
||||
|
||||
In the above benchmark, passing the `-n` flag (for showing line numbers)
|
||||
increases the times to `2.640s` for ripgrep and `10.277s` for GNU grep.
|
||||
increases the times to `3.423s` for ripgrep and `13.031s` for GNU grep. ugrep
|
||||
times are unaffected by the presence or absence of `-n`.
|
||||
|
||||
|
||||
### Why should I use ripgrep?
|
||||
|
||||
* It can replace both The Silver Searcher and GNU grep because it is generally
|
||||
faster than both. (N.B. It is not, strictly speaking, a "drop-in" replacement
|
||||
for both, but the feature sets are far more similar than different.)
|
||||
* Like The Silver Searcher, ripgrep defaults to recursive directory search
|
||||
and won't search files ignored by your `.gitignore` files. It also ignores
|
||||
hidden and binary files by default. ripgrep also implements full support
|
||||
for `.gitignore`, whereas there are many bugs related to that functionality
|
||||
in The Silver Searcher.
|
||||
* ripgrep can search specific types of files. For example, `rg -tpy foo`
|
||||
limits your search to Python files and `rg -Tjs foo` excludes Javascript
|
||||
files from your search. ripgrep can be taught about new file types with
|
||||
custom matching rules.
|
||||
* It can replace many use cases served by other search tools
|
||||
because it contains most of their features and is generally faster. (See
|
||||
[the FAQ](FAQ.md#posix4ever) for more details on whether ripgrep can truly
|
||||
replace grep.)
|
||||
* Like other tools specialized to code search, ripgrep defaults to
|
||||
[recursive search](GUIDE.md#recursive-search) and does [automatic
|
||||
filtering](GUIDE.md#automatic-filtering). Namely, ripgrep won't search files
|
||||
ignored by your `.gitignore`/`.ignore`/`.rgignore` files, it won't search
|
||||
hidden files and it won't search binary files. Automatic filtering can be
|
||||
disabled with `rg -uuu`.
|
||||
* ripgrep can [search specific types of files](GUIDE.md#manual-filtering-file-types).
|
||||
For example, `rg -tpy foo` limits your search to Python files and `rg -Tjs
|
||||
foo` excludes JavaScript files from your search. ripgrep can be taught about
|
||||
new file types with custom matching rules.
|
||||
* ripgrep supports many features found in `grep`, such as showing the context
|
||||
of search results, searching multiple patterns, highlighting matches with
|
||||
color and full Unicode support. Unlike GNU grep, ripgrep stays fast while
|
||||
supporting Unicode (which is always on).
|
||||
* ripgrep supports searching files in text encodings other than UTF-8, such
|
||||
as UTF-16, latin-1, GBK, EUC-JP, Shift_JIS and more. (Some support for
|
||||
automatically detecting UTF-16 is provided. Other text encodings must be
|
||||
specifically specified with the `-E/--encoding` flag.)
|
||||
* ripgrep supports searching files compressed in a common format (gzip, xz,
|
||||
lzma or bzip2 current) with the `-z/--search-zip` flag.
|
||||
* ripgrep has optional support for switching its regex engine to use PCRE2.
  Among other things, this makes it possible to use look-around and
  backreferences in your patterns, which are not supported in ripgrep's default
  regex engine. PCRE2 support can be enabled with `-P/--pcre2` (use PCRE2
  always) or `--auto-hybrid-regex` (use PCRE2 only if needed). An alternative
  syntax is provided via the `--engine (default|pcre2|auto-hybrid)` option.
  (See the short example after this list.)
||||
* ripgrep has [rudimentary support for replacements](GUIDE.md#replacements),
|
||||
which permit rewriting output based on what was matched.
|
||||
* ripgrep supports [searching files in text encodings](GUIDE.md#file-encoding)
|
||||
other than UTF-8, such as UTF-16, latin-1, GBK, EUC-JP, Shift_JIS and more.
|
||||
(Some support for automatically detecting UTF-16 is provided. Other text
|
||||
encodings must be specifically specified with the `-E/--encoding` flag.)
|
||||
* ripgrep supports searching files compressed in a common format (brotli,
|
||||
bzip2, gzip, lz4, lzma, xz, or zstandard) with the `-z/--search-zip` flag.
|
||||
* ripgrep supports
|
||||
[arbitrary input preprocessing filters](GUIDE.md#preprocessor)
|
||||
which could be PDF text extraction, less supported decompression, decrypting,
|
||||
automatic encoding detection and so on.
|
||||
* ripgrep can be configured via a
|
||||
[configuration file](GUIDE.md#configuration-file).
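To illustrate the PCRE2 bullet above (the patterns are arbitrary examples, and
`-P` only works in builds compiled with PCRE2 support):

```
$ rg -P '(?<=fn )is_empty' src/           # look-behind requires PCRE2
$ rg --auto-hybrid-regex '(\w+) \1' src/  # backreference: falls back to PCRE2 only when needed
```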
|
||||
|
||||
In other words, use ripgrep if you like speed, filtering by default, fewer
|
||||
bugs, and Unicode support.
|
||||
bugs and Unicode support.
|
||||
|
||||
|
||||
### Why shouldn't I use ripgrep?
|
||||
|
||||
I'd like to try to convince you why you *shouldn't* use ripgrep. This should
|
||||
give you a glimpse at some important downsides or missing features of
|
||||
ripgrep.
|
||||
Despite initially not wanting to add every feature under the sun to ripgrep,
|
||||
over time, ripgrep has grown support for most features found in other file
|
||||
searching tools. This includes searching for results spanning across multiple
|
||||
lines, and opt-in support for PCRE2, which provides look-around and
|
||||
backreference support.
|
||||
|
||||
* ripgrep uses a regex engine based on finite automata, so if you want fancy
|
||||
regex features such as backreferences or lookaround, ripgrep won't provide
|
||||
them to you. ripgrep does support lots of things though, including, but not
|
||||
limited to: lazy quantification (e.g., `a+?`), repetitions (e.g., `a{2,5}`),
|
||||
begin/end assertions (e.g., `^\w+$`), word boundaries (e.g., `\bfoo\b`), and
|
||||
support for Unicode categories (e.g., `\p{Sc}` to match currency symbols or
|
||||
`\p{Lu}` to match any uppercase letter). (Fancier regexes will never be
|
||||
supported.)
|
||||
* ripgrep doesn't have multiline search. (Will happen as an opt-in feature.)
|
||||
At this point, the primary reasons not to use ripgrep probably consist of one
|
||||
or more of the following:
|
||||
|
||||
In other words, if you like fancy regexes or multiline search, then ripgrep
|
||||
may not quite meet your needs (yet).
|
||||
* You need a portable and ubiquitous tool. While ripgrep works on Windows,
|
||||
macOS and Linux, it is not ubiquitous and it does not conform to any
|
||||
standard such as POSIX. The best tool for this job is good old grep.
|
||||
* There still exists some other feature (or bug) not listed in this README that
|
||||
you rely on that's in another tool that isn't in ripgrep.
|
||||
* There is a performance edge case where ripgrep doesn't do well where another
|
||||
tool does do well. (Please file a bug report!)
|
||||
* ripgrep isn't possible to install on your machine or isn't available for your
|
||||
platform. (Please file a bug report!)
|
||||
|
||||
|
||||
### Is it really faster than everything else?
|
||||
|
||||
Generally, yes. A large number of benchmarks with detailed analysis for each is
|
||||
[available on my blog](http://blog.burntsushi.net/ripgrep/).
|
||||
[available on my blog](https://blog.burntsushi.net/ripgrep/).
|
||||
|
||||
Summarizing, ripgrep is fast because:
|
||||
|
||||
* It is built on top of
|
||||
[Rust's regex engine](https://github.com/rust-lang-nursery/regex).
|
||||
[Rust's regex engine](https://github.com/rust-lang/regex).
|
||||
Rust's regex engine uses finite automata, SIMD and aggressive literal
|
||||
optimizations to make searching very fast.
|
||||
optimizations to make searching very fast. (PCRE2 support can be opted into
|
||||
with the `-P/--pcre2` flag.)
|
||||
* Rust's regex library maintains performance with full Unicode support by
|
||||
building UTF-8 decoding directly into its deterministic finite automaton
|
||||
engine.
|
||||
@@ -151,7 +171,7 @@ Summarizing, ripgrep is fast because:
|
||||
latter is better for large directories. ripgrep chooses the best searching
|
||||
strategy for you automatically.
|
||||
* Applies your ignore patterns in `.gitignore` files using a
|
||||
[`RegexSet`](https://doc.rust-lang.org/regex/regex/struct.RegexSet.html).
|
||||
[`RegexSet`](https://docs.rs/regex/1/regex/struct.RegexSet.html).
|
||||
That means a single file path can be matched against multiple glob patterns
|
||||
simultaneously.
|
||||
* It uses a lock-free parallel recursive directory iterator, courtesy of
|
||||
@@ -165,70 +185,78 @@ Andy Lester, author of [ack](https://beyondgrep.com/), has published an
|
||||
excellent table comparing the features of ack, ag, git-grep, GNU grep and
|
||||
ripgrep: https://beyondgrep.com/feature-comparison/
|
||||
|
||||
Note that ripgrep has grown a few significant new features recently that
|
||||
are not yet present in Andy's table. This includes, but is not limited to,
|
||||
configuration files, passthru, support for searching compressed files,
|
||||
multiline search and opt-in fancy regex support via PCRE2.
|
||||
|
||||
|
||||
### Installation
|
||||
|
||||
The binary name for ripgrep is `rg`.
|
||||
|
||||
**[Archives of precompiled binaries for ripgrep are available for Windows,
|
||||
macOS and Linux.](https://github.com/BurntSushi/ripgrep/releases)** Users of
|
||||
platforms not explicitly mentioned below (such as Debian) are advised
|
||||
to download one of these archives.
|
||||
macOS and Linux.](https://github.com/BurntSushi/ripgrep/releases)** Linux and
|
||||
Windows binaries are static executables. Users of platforms not explicitly
|
||||
mentioned below are advised to download one of these archives.
|
||||
|
||||
Linux binaries are static executables. Windows binaries are available either as
|
||||
built with MinGW (GNU) or with Microsoft Visual C++ (MSVC). When possible,
|
||||
prefer MSVC over GNU, but you'll need to have the [Microsoft VC++ 2015
|
||||
redistributable](https://www.microsoft.com/en-us/download/details.aspx?id=48145)
|
||||
installed.
|
||||
|
||||
If you're a **macOS Homebrew** or a **Linuxbrew** user,
|
||||
then you can install ripgrep either
|
||||
from homebrew-core, (compiled with rust stable, no SIMD):
|
||||
If you're a **macOS Homebrew** or a **Linuxbrew** user, then you can install
|
||||
ripgrep from homebrew-core:
|
||||
|
||||
```
|
||||
$ brew install ripgrep
|
||||
```
|
||||
|
||||
or you can install a binary compiled with rust nightly (including SIMD and all
|
||||
optimizations) by utilizing a custom tap:
|
||||
If you're a **MacPorts** user, then you can install ripgrep from the
|
||||
[official ports](https://www.macports.org/ports.php?by=name&substr=ripgrep):
|
||||
|
||||
```
|
||||
$ brew tap burntsushi/ripgrep https://github.com/BurntSushi/ripgrep.git
|
||||
$ brew install burntsushi/ripgrep/ripgrep-bin
|
||||
$ sudo port install ripgrep
|
||||
```
|
||||
|
||||
If you're a **Windows Chocolatey** user, then you can install ripgrep from the [official repo](https://chocolatey.org/packages/ripgrep):
|
||||
If you're a **Windows Chocolatey** user, then you can install ripgrep from the
|
||||
[official repo](https://chocolatey.org/packages/ripgrep):
|
||||
|
||||
```
|
||||
$ choco install ripgrep
|
||||
```
|
||||
|
||||
If you're a **Windows Scoop** user, then you can install ripgrep from the
|
||||
[official bucket](https://github.com/ScoopInstaller/Main/blob/master/bucket/ripgrep.json):
|
||||
|
||||
```
|
||||
$ scoop install ripgrep
|
||||
```
|
||||
|
||||
If you're an **Arch Linux** user, then you can install ripgrep from the official repos:
|
||||
|
||||
```
|
||||
$ pacman -S ripgrep
|
||||
```
|
||||
|
||||
If you're a **Gentoo** user, you can install ripgrep from the [official repo](https://packages.gentoo.org/packages/sys-apps/ripgrep):
|
||||
If you're a **Gentoo** user, you can install ripgrep from the
|
||||
[official repo](https://packages.gentoo.org/packages/sys-apps/ripgrep):
|
||||
|
||||
```
|
||||
$ emerge sys-apps/ripgrep
|
||||
```
|
||||
|
||||
If you're a **Fedora 27+** user, you can install ripgrep from official repositories.
|
||||
If you're a **Fedora** user, you can install ripgrep from official
|
||||
repositories.
|
||||
|
||||
```
|
||||
$ sudo dnf install ripgrep
|
||||
```
|
||||
|
||||
If you're a **Fedora 24+** user, you can install ripgrep from [copr](https://copr.fedorainfracloud.org/coprs/carlwgeorge/ripgrep/):
|
||||
If you're an **openSUSE** user, ripgrep is included in **openSUSE Tumbleweed**
|
||||
and **openSUSE Leap** since 15.1.
|
||||
|
||||
```
|
||||
$ sudo dnf copr enable carlwgeorge/ripgrep
|
||||
$ sudo dnf install ripgrep
|
||||
$ sudo zypper install ripgrep
|
||||
```
|
||||
|
||||
If you're a **RHEL/CentOS 7** user, you can install ripgrep from [copr](https://copr.fedorainfracloud.org/coprs/carlwgeorge/ripgrep/):
|
||||
If you're a **RHEL/CentOS 7/8** user, you can install ripgrep from
|
||||
[copr](https://copr.fedorainfracloud.org/coprs/carlwgeorge/ripgrep/):
|
||||
|
||||
```
|
||||
$ sudo yum-config-manager --add-repo=https://copr.fedorainfracloud.org/coprs/carlwgeorge/ripgrep/repo/epel-7/carlwgeorge-ripgrep-epel-7.repo
|
||||
@@ -243,17 +271,79 @@ $ nix-env --install ripgrep
|
||||
$ # (Or using the attribute name, which is also ripgrep.)
|
||||
```
|
||||
|
||||
If you're an **Ubuntu** user, ripgrep can be installed from the `snap` store.
|
||||
* Note that if you are using `16.04 LTS` or later, snap is already installed.
|
||||
* For older versions you can install snap using
|
||||
[this guide](https://docs.snapcraft.io/core/install-ubuntu).
|
||||
If you're a **Guix** user, you can install ripgrep from the official
|
||||
package collection:
|
||||
|
||||
```
|
||||
sudo snap install rg
|
||||
$ guix install ripgrep
|
||||
```
|
||||
|
||||
If you're a **Debian** user (or a user of a Debian derivative like **Ubuntu**),
|
||||
then ripgrep can be installed using a binary `.deb` file provided in each
|
||||
[ripgrep release](https://github.com/BurntSushi/ripgrep/releases).
|
||||
|
||||
```
|
||||
$ curl -LO https://github.com/BurntSushi/ripgrep/releases/download/13.0.0/ripgrep_13.0.0_amd64.deb
|
||||
$ sudo dpkg -i ripgrep_13.0.0_amd64.deb
|
||||
```
|
||||
|
||||
If you run Debian Buster (currently Debian stable) or Debian sid, ripgrep is
|
||||
[officially maintained by Debian](https://tracker.debian.org/pkg/rust-ripgrep).
|
||||
```
|
||||
$ sudo apt-get install ripgrep
|
||||
```
|
||||
|
||||
If you're an **Ubuntu Cosmic (18.10)** (or newer) user, ripgrep is
|
||||
[available](https://launchpad.net/ubuntu/+source/rust-ripgrep) using the same
|
||||
packaging as Debian:
|
||||
|
||||
```
|
||||
$ sudo apt-get install ripgrep
|
||||
```
|
||||
|
||||
(N.B. Various snaps for ripgrep on Ubuntu are also available, but none of them
|
||||
seem to work right and generate a number of very strange bug reports that I
|
||||
don't know how to fix and don't have the time to fix. Therefore, it is no
|
||||
longer a recommended installation option.)
|
||||
|
||||
If you're a **FreeBSD** user, then you can install ripgrep from the
|
||||
[official ports](https://www.freshports.org/textproc/ripgrep/):
|
||||
|
||||
```
|
||||
# pkg install ripgrep
|
||||
```
|
||||
|
||||
If you're an **OpenBSD** user, then you can install ripgrep from the
|
||||
[official ports](https://openports.se/textproc/ripgrep):
|
||||
|
||||
```
|
||||
$ doas pkg_add ripgrep
|
||||
```
|
||||
|
||||
If you're a **NetBSD** user, then you can install ripgrep from
|
||||
[pkgsrc](https://pkgsrc.se/textproc/ripgrep):
|
||||
|
||||
```
|
||||
# pkgin install ripgrep
|
||||
```
|
||||
|
||||
If you're a **Haiku x86_64** user, then you can install ripgrep from the
|
||||
[official ports](https://github.com/haikuports/haikuports/tree/master/sys-apps/ripgrep):
|
||||
|
||||
```
|
||||
$ pkgman install ripgrep
|
||||
```
|
||||
|
||||
If you're a **Haiku x86_gcc2** user, then you can install ripgrep from the
|
||||
same port as Haiku x86_64 using the x86 secondary architecture build:
|
||||
|
||||
```
|
||||
$ pkgman install ripgrep_x86
|
||||
```
|
||||
|
||||
If you're a **Rust programmer**, ripgrep can be installed with `cargo`.
|
||||
* Note that the minimum supported version of Rust for ripgrep is **1.20**,
|
||||
|
||||
* Note that the minimum supported version of Rust for ripgrep is **1.34.0**,
|
||||
although ripgrep may work with older versions.
|
||||
* Note that the binary may be bigger than expected because it contains debug
|
||||
symbols. This is intentional. To remove debug symbols and therefore reduce
|
||||
@@ -263,15 +353,15 @@ If you're a **Rust programmer**, ripgrep can be installed with `cargo`.
|
||||
$ cargo install ripgrep
|
||||
```
|
||||
|
||||
ripgrep isn't currently in any other package repositories.
|
||||
[I'd like to change that](https://github.com/BurntSushi/ripgrep/issues/10).
|
||||
|
||||
|
||||
### Building
|
||||
|
||||
ripgrep is written in Rust, so you'll need to grab a
|
||||
[Rust installation](https://www.rust-lang.org/) in order to compile it.
|
||||
ripgrep compiles with Rust 1.20 (stable) or newer. Building is easy:
|
||||
ripgrep compiles with Rust 1.65.0 (stable) or newer. In general, ripgrep tracks
|
||||
the latest stable release of the Rust compiler.
|
||||
|
||||
To build ripgrep:
|
||||
|
||||
```
|
||||
$ git clone https://github.com/BurntSushi/ripgrep
|
||||
@@ -282,14 +372,50 @@ $ ./target/release/rg --version
|
||||
```
|
||||
|
||||
If you have a Rust nightly compiler and a recent Intel CPU, then you can enable
|
||||
optional SIMD acceleration like so:
|
||||
additional optional SIMD acceleration like so:
|
||||
|
||||
```
|
||||
RUSTFLAGS="-C target-cpu=native" cargo build --release --features 'simd-accel avx-accel'
|
||||
RUSTFLAGS="-C target-cpu=native" cargo build --release --features 'simd-accel'
|
||||
```
|
||||
|
||||
If your machine doesn't support AVX instructions, then simply remove
|
||||
`avx-accel` from the features list. Similarly for SIMD.
|
||||
The `simd-accel` feature enables SIMD support in certain ripgrep dependencies
|
||||
(responsible for transcoding). They are not necessary to get SIMD optimizations
|
||||
for search; those are enabled automatically. Hopefully, some day, the
|
||||
`simd-accel` feature will similarly become unnecessary. **WARNING:** Currently,
|
||||
enabling this option can increase compilation times dramatically.
|
||||
|
||||
Finally, optional PCRE2 support can be built with ripgrep by enabling the
|
||||
`pcre2` feature:
|
||||
|
||||
```
|
||||
$ cargo build --release --features 'pcre2'
|
||||
```
|
||||
|
||||
(Tip: use `--features 'pcre2 simd-accel'` to also include compile time SIMD
|
||||
optimizations, which will only work with a nightly compiler.)
|
||||
|
||||
Enabling the PCRE2 feature works with a stable Rust compiler and will
|
||||
attempt to automatically find and link with your system's PCRE2 library via
|
||||
`pkg-config`. If one doesn't exist, then ripgrep will build PCRE2 from source
|
||||
using your system's C compiler and then statically link it into the final
|
||||
executable. Static linking can be forced even when there is an available PCRE2
|
||||
system library by either building ripgrep with the MUSL target or by setting
|
||||
`PCRE2_SYS_STATIC=1`.
|
||||
|
||||
ripgrep can be built with the MUSL target on Linux by first installing the MUSL
|
||||
library on your system (consult your friendly neighborhood package manager).
|
||||
Then you just need to add MUSL support to your Rust toolchain and rebuild
|
||||
ripgrep, which yields a fully static executable:
|
||||
|
||||
```
|
||||
$ rustup target add x86_64-unknown-linux-musl
|
||||
$ cargo build --release --target x86_64-unknown-linux-musl
|
||||
```
|
||||
|
||||
Applying the `--features` flag from above works as expected. If you want to
build a static executable with MUSL and with PCRE2, then you will need to have
`musl-gcc` installed, which might be in a separate package from the actual
MUSL library, depending on your Linux distribution.
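For instance, a fully static build with PCRE2 might look something like this
(assuming the MUSL target and `musl-gcc` are already installed; setting
`PCRE2_SYS_STATIC=1` is redundant with the MUSL target per the note above, but
makes the intent explicit):

```
$ PCRE2_SYS_STATIC=1 cargo build --release --target x86_64-unknown-linux-musl --features 'pcre2'
```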
|
||||
|
||||
|
||||
### Running tests
|
||||
@@ -302,3 +428,20 @@ $ cargo test --all
|
||||
```
|
||||
|
||||
from the repository root.
|
||||
|
||||
|
||||
### Vulnerability reporting
|
||||
|
||||
For reporting a security vulnerability, please
|
||||
[contact Andrew Gallant](https://blog.burntsushi.net/about/),
|
||||
which has my email address and PGP public key if you wish to send an encrypted
|
||||
message.
|
||||
|
||||
|
||||
### Translations
|
||||
|
||||
The following is a list of known translations of ripgrep's documentation. These
|
||||
are unofficially maintained and may not be up to date.
|
||||
|
||||
* [Chinese](https://github.com/chinanf-boy/ripgrep-zh#%E6%9B%B4%E6%96%B0-)
|
||||
* [Spanish](https://github.com/UltiRequiem/traducciones/tree/master/ripgrep)
|
||||
|
||||
RELEASE-CHECKLIST.md (new file)
@@ -0,0 +1,56 @@
|
||||
Release Checklist
|
||||
-----------------
|
||||
* Ensure local `master` is up to date with respect to `origin/master`.
|
||||
* Run `cargo update` and review dependency updates. Commit updated
|
||||
`Cargo.lock`.
|
||||
* Run `cargo outdated` and review semver incompatible updates. Unless there is
|
||||
a strong motivation otherwise, review and update every dependency. Also
|
||||
run `--aggressive`, but don't update to crates that are still in beta.
|
||||
* Review changes for every crate in `crates` since the last ripgrep release.
|
||||
If the set of changes is non-empty, issue a new release for that crate. Check
|
||||
crates in the following order. After updating a crate, ensure minimal
|
||||
versions are updated as appropriate in dependents. If an update is required,
|
||||
run `cargo-up --no-push crates/{CRATE}/Cargo.toml`.
|
||||
* crates/globset
|
||||
* crates/ignore
|
||||
* crates/cli
|
||||
* crates/matcher
|
||||
* crates/regex
|
||||
* crates/pcre2
|
||||
* crates/searcher
|
||||
* crates/printer
|
||||
* crates/grep (bump minimal versions as necessary)
|
||||
* crates/core (do **not** bump version, but update dependencies as needed)
|
||||
* Update the CHANGELOG as appropriate.
|
||||
* Edit the `Cargo.toml` to set the new ripgrep version. Run
|
||||
`cargo update -p ripgrep` so that the `Cargo.lock` is updated. Commit the
|
||||
changes and create a new signed tag. Alternatively, use
|
||||
`cargo-up --no-push --no-release Cargo.toml {VERSION}` to automate this.
|
||||
* Push changes to GitHub, NOT including the tag. (But do not publish new
  version of ripgrep to crates.io yet.)
* Once CI for `master` finishes successfully, push the version tag. (Trying to
  do this in one step seems to result in GitHub Actions not seeing the tag
  push and thus not running the release workflow.) A sketch of this sequence
  appears at the end of this checklist.
|
||||
* Wait for CI to finish creating the release. If the release build fails, then
|
||||
delete the tag from GitHub, make fixes, re-tag, delete the release and push.
|
||||
* Copy the relevant section of the CHANGELOG to the tagged release notes.
|
||||
Include this blurb describing what ripgrep is:
|
||||
> In case you haven't heard of it before, ripgrep is a line-oriented search
|
||||
> tool that recursively searches the current directory for a regex pattern.
|
||||
> By default, ripgrep will respect gitignore rules and automatically skip
|
||||
> hidden files/directories and binary files.
|
||||
* Run `ci/build-deb` locally and manually upload the deb package to the
|
||||
release.
|
||||
* Run `cargo publish`.
|
||||
* Run `ci/sha256-releases {VERSION} >> pkg/brew/ripgrep-bin.rb`. Then edit
|
||||
`pkg/brew/ripgrep-bin.rb` to update the version number and sha256 hashes.
|
||||
Remove extraneous stuff added by `ci/sha256-releases`. Commit changes.
|
||||
* Add TBD section to the top of the CHANGELOG:
|
||||
```
|
||||
TBD
|
||||
===
|
||||
Unreleased changes. Release notes have not yet been written.
|
||||
```
|
||||
|
||||
Note that
|
||||
[`cargo-up` can be found in BurntSushi's dotfiles](https://github.com/BurntSushi/dotfiles/blob/master/bin/cargo-up).
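A sketch of the push-then-tag sequence referenced above (the version number is
only an example):

```
$ git commit -am '14.0.0'
$ git tag -s 14.0.0 -m '14.0.0'
$ git push origin master    # push the release commit, but not the tag yet
$ # once CI for master is green:
$ git push origin 14.0.0    # pushing the tag triggers the release workflow
```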
|
||||
appveyor.yml (deleted file)
@@ -1,85 +0,0 @@
|
||||
# Inspired from https://github.com/habitat-sh/habitat/blob/master/appveyor.yml
|
||||
cache:
|
||||
- c:\cargo\registry
|
||||
- c:\cargo\git
|
||||
- c:\projects\ripgrep\target
|
||||
|
||||
init:
|
||||
- mkdir c:\cargo
|
||||
- mkdir c:\rustup
|
||||
- SET PATH=c:\cargo\bin;%PATH%
|
||||
|
||||
clone_folder: c:\projects\ripgrep
|
||||
|
||||
environment:
|
||||
CARGO_HOME: "c:\\cargo"
|
||||
RUSTUP_HOME: "c:\\rustup"
|
||||
CARGO_TARGET_DIR: "c:\\projects\\ripgrep\\target"
|
||||
global:
|
||||
PROJECT_NAME: ripgrep
|
||||
RUST_BACKTRACE: full
|
||||
matrix:
|
||||
- TARGET: i686-pc-windows-gnu
|
||||
CHANNEL: stable
|
||||
- TARGET: i686-pc-windows-msvc
|
||||
CHANNEL: stable
|
||||
- TARGET: x86_64-pc-windows-gnu
|
||||
CHANNEL: stable
|
||||
- TARGET: x86_64-pc-windows-msvc
|
||||
CHANNEL: stable
|
||||
|
||||
matrix:
|
||||
fast_finish: true
|
||||
|
||||
# Install Rust and Cargo
|
||||
# (Based on from https://github.com/rust-lang/libc/blob/master/appveyor.yml)
|
||||
install:
|
||||
- curl -sSf -o rustup-init.exe https://win.rustup.rs/
|
||||
- rustup-init.exe -y --default-host %TARGET% --no-modify-path
|
||||
- if defined MSYS2_BITS set PATH=%PATH%;C:\msys64\mingw%MSYS2_BITS%\bin
|
||||
- rustc -V
|
||||
- cargo -V
|
||||
|
||||
# ???
|
||||
build: false
|
||||
|
||||
# Equivalent to Travis' `script` phase
|
||||
# TODO modify this phase as you see fit
|
||||
test_script:
|
||||
- cargo test --verbose --all
|
||||
|
||||
before_deploy:
|
||||
# Generate artifacts for release
|
||||
# TODO(burntsushi): How can we enable SSSE3 on Windows?
|
||||
- cargo build --release
|
||||
- mkdir staging
|
||||
- copy target\release\rg.exe staging
|
||||
- ps: copy target\release\build\ripgrep-*\out\_rg.ps1 staging
|
||||
- cd staging
|
||||
# release zipfile will look like 'rust-everywhere-v1.2.3-x86_64-pc-windows-msvc'
|
||||
- 7z a ../%PROJECT_NAME%-%APPVEYOR_REPO_TAG_NAME%-%TARGET%.zip *
|
||||
- appveyor PushArtifact ../%PROJECT_NAME%-%APPVEYOR_REPO_TAG_NAME%-%TARGET%.zip
|
||||
|
||||
deploy:
|
||||
description: 'Automatically deployed release'
|
||||
# All the zipped artifacts will be deployed
|
||||
artifact: /.*\.zip/
|
||||
auth_token:
|
||||
secure: vv4vBCEosGlyQjaEC1+kraP2P6O4CQSa+Tw50oHWFTGcmuXxaWS0/yEXbxsIRLpw
|
||||
provider: GitHub
|
||||
# deploy when a new tag is pushed and only on the stable channel
|
||||
on:
|
||||
# channel to use to produce the release artifacts
|
||||
# NOTE make sure you only release *once* per target
|
||||
# TODO you may want to pick a different channel
|
||||
CHANNEL: stable
|
||||
appveyor_repo_tag: true
|
||||
|
||||
branches:
|
||||
only:
|
||||
- /\d+\.\d+\.\d+/
|
||||
- master
|
||||
# - appveyor
|
||||
# - /\d+\.\d+\.\d+/
|
||||
# except:
|
||||
# - master
|
||||
@@ -23,16 +23,16 @@ import time
|
||||
# strategies used to increase the relevance of results returned.
|
||||
|
||||
SUBTITLES_DIR = 'subtitles'
|
||||
SUBTITLES_EN_NAME = 'OpenSubtitles2016.raw.en'
|
||||
SUBTITLES_EN_NAME_SAMPLE = 'OpenSubtitles2016.raw.sample.en'
|
||||
SUBTITLES_EN_NAME = 'en.txt'
|
||||
SUBTITLES_EN_NAME_SAMPLE = 'en.sample.txt'
|
||||
SUBTITLES_EN_NAME_GZ = '%s.gz' % SUBTITLES_EN_NAME
|
||||
SUBTITLES_EN_URL = 'http://opus.lingfil.uu.se/OpenSubtitles2016/mono/OpenSubtitles2016.raw.en.gz' # noqa
|
||||
SUBTITLES_RU_NAME = 'OpenSubtitles2016.raw.ru'
|
||||
SUBTITLES_EN_URL = 'https://object.pouta.csc.fi/OPUS-OpenSubtitles/v2016/mono/en.txt.gz' # noqa
|
||||
SUBTITLES_RU_NAME = 'ru.txt'
|
||||
SUBTITLES_RU_NAME_GZ = '%s.gz' % SUBTITLES_RU_NAME
|
||||
SUBTITLES_RU_URL = 'http://opus.lingfil.uu.se/OpenSubtitles2016/mono/OpenSubtitles2016.raw.ru.gz' # noqa
|
||||
SUBTITLES_RU_URL = 'https://object.pouta.csc.fi/OPUS-OpenSubtitles/v2016/mono/ru.txt.gz' # noqa
|
||||
|
||||
LINUX_DIR = 'linux'
|
||||
LINUX_CLONE = 'git://github.com/BurntSushi/linux'
|
||||
LINUX_CLONE = 'https://github.com/BurntSushi/linux'
|
||||
|
||||
# Grep takes locale settings from the environment. There is a *substantial*
|
||||
# performance impact for enabling Unicode, so we need to handle this explicitly
|
||||
@@ -55,8 +55,10 @@ def bench_linux_literal_default(suite_dir):
|
||||
Benchmark the speed of a literal using *default* settings.
|
||||
|
||||
This is a purposefully unfair benchmark for use in performance
|
||||
analysis, but it is pedagogically useful to demonstrate how
|
||||
default behaviors differ.
|
||||
analysis, but it is pedagogically useful to demonstrate how default
|
||||
behaviors differ. For example, ugrep and grep don't do any smart
|
||||
filtering by default, so they will invariably search more files
|
||||
than ripgrep, ag or git grep.
|
||||
'''
|
||||
require(suite_dir, 'linux')
|
||||
cwd = path.join(suite_dir, LINUX_DIR)
|
||||
@@ -69,16 +71,11 @@ def bench_linux_literal_default(suite_dir):
|
||||
return Benchmark(pattern=pat, commands=[
|
||||
mkcmd('rg', ['rg', pat]),
|
||||
mkcmd('ag', ['ag', pat]),
|
||||
# ucg reports the exact same matches as ag and rg even though it
|
||||
# doesn't read gitignore files. Instead, it has a file whitelist
|
||||
# that happens to match up exactly with the gitignores for this search.
|
||||
mkcmd('ucg', ['ucg', pat]),
|
||||
# I guess setting LC_ALL=en_US.UTF-8 probably isn't necessarily the
|
||||
# default, but I'd guess it to be on most desktop systems.
|
||||
mkcmd('pt', ['pt', pat]),
|
||||
# sift reports an extra line here for a binary file matched.
|
||||
mkcmd('sift', ['sift', pat]),
|
||||
mkcmd('git grep', ['git', 'grep', pat], env={'LC_ALL': 'en_US.UTF-8'}),
|
||||
mkcmd('git grep', ['git', 'grep', pat], env=GREP_UNICODE),
|
||||
mkcmd('ugrep', ['ugrep', '-r', pat, './']),
|
||||
mkcmd('grep', ['grep', '-r', pat, './'], env=GREP_UNICODE),
|
||||
])
|
||||
|
||||
|
||||
@@ -100,16 +97,16 @@ def bench_linux_literal(suite_dir):
|
||||
return Command(*args, **kwargs)
|
||||
|
||||
return Benchmark(pattern=pat, commands=[
|
||||
mkcmd('rg (ignore)', ['rg', '-n', pat]),
|
||||
mkcmd('rg (ignore) (mmap)', ['rg', '-n', '--mmap', pat]),
|
||||
mkcmd('ag (ignore) (mmap)', ['ag', '-s', pat]),
|
||||
mkcmd('pt (ignore)', ['pt', pat]),
|
||||
mkcmd('sift (ignore)', SIFT + ['-n', '--git', pat]),
|
||||
mkcmd('git grep (ignore)', [
|
||||
mkcmd('rg', ['rg', '-n', pat]),
|
||||
mkcmd('rg (mmap)', ['rg', '-n', '--mmap', pat]),
|
||||
mkcmd('ag (mmap)', ['ag', '-s', pat]),
|
||||
mkcmd('git grep', [
|
||||
'git', 'grep', '-I', '-n', pat,
|
||||
], env={'LC_ALL': 'C'}),
|
||||
mkcmd('rg (whitelist)', ['rg', '-n', '--no-ignore', '-tall', pat]),
|
||||
mkcmd('ucg (whitelist)', ['ucg', '--nosmart-case', pat]),
|
||||
mkcmd('ugrep', [
|
||||
'ugrep', '-r', '--ignore-files', '--no-hidden', '-I',
|
||||
'-n', pat, './',
|
||||
])
|
||||
])
|
||||
|
||||
|
||||
@@ -129,31 +126,26 @@ def bench_linux_literal_casei(suite_dir):
|
||||
return Command(*args, **kwargs)
|
||||
|
||||
return Benchmark(pattern=pat, commands=[
|
||||
mkcmd('rg (ignore)', ['rg', '-n', '-i', pat]),
|
||||
mkcmd('rg (ignore) (mmap)', ['rg', '-n', '-i', '--mmap', pat]),
|
||||
mkcmd('ag (ignore) (mmap)', ['ag', '-i', pat]),
|
||||
mkcmd('pt (ignore)', ['pt', '-i', pat]),
|
||||
mkcmd('sift (ignore)', SIFT + ['-n', '-i', '--git', pat]),
|
||||
mkcmd('rg', ['rg', '-n', '-i', pat]),
|
||||
mkcmd('rg (mmap)', ['rg', '-n', '-i', '--mmap', pat]),
|
||||
mkcmd('ag (mmap)', ['ag', '-i', pat]),
|
||||
# It'd technically be more appropriate to set LC_ALL=en_US.UTF-8 here,
|
||||
# since that is certainly what ripgrep is doing, but this is for an
|
||||
# ASCII literal, so we should give `git grep` all the opportunity to
|
||||
# do its best.
|
||||
mkcmd('git grep (ignore)', [
|
||||
mkcmd('git grep', [
|
||||
'git', 'grep', '-I', '-n', '-i', pat,
|
||||
], env={'LC_ALL': 'C'}),
|
||||
mkcmd('rg (whitelist)', [
|
||||
'rg', '-n', '-i', '--no-ignore', '-tall', pat,
|
||||
]),
|
||||
mkcmd('ucg (whitelist)', ['ucg', '-i', pat]),
|
||||
mkcmd('ugrep', [
|
||||
'ugrep', '-r', '--ignore-files', '--no-hidden', '-I',
|
||||
'-n', '-i', pat, './',
|
||||
])
|
||||
])
|
||||
|
||||
|
||||
def bench_linux_re_literal_suffix(suite_dir):
|
||||
'''
|
||||
Benchmark the speed of a literal inside a regex.
|
||||
|
||||
This, for example, inhibits a prefix byte optimization used
|
||||
inside of Go's regex engine (relevant for sift and pt).
|
||||
'''
|
||||
require(suite_dir, 'linux')
|
||||
cwd = path.join(suite_dir, LINUX_DIR)
|
||||
@@ -164,26 +156,23 @@ def bench_linux_re_literal_suffix(suite_dir):
|
||||
return Command(*args, **kwargs)
|
||||
|
||||
return Benchmark(pattern=pat, commands=[
|
||||
mkcmd('rg (ignore)', ['rg', '-n', pat]),
|
||||
mkcmd('ag (ignore)', ['ag', '-s', pat]),
|
||||
mkcmd('pt (ignore)', ['pt', '-e', pat]),
|
||||
mkcmd('sift (ignore)', SIFT + ['-n', '--git', pat]),
|
||||
mkcmd('rg', ['rg', '-n', pat]),
|
||||
mkcmd('ag', ['ag', '-s', pat]),
|
||||
mkcmd(
|
||||
'git grep (ignore)',
|
||||
'git grep',
|
||||
['git', 'grep', '-E', '-I', '-n', pat],
|
||||
env={'LC_ALL': 'C'},
|
||||
),
|
||||
mkcmd('rg (whitelist)', ['rg', '-n', '--no-ignore', '-tall', pat]),
|
||||
mkcmd('ucg (whitelist)', ['ucg', '--nosmart-case', pat]),
|
||||
mkcmd('ugrep', [
|
||||
'ugrep', '-r', '--ignore-files', '--no-hidden', '-I',
|
||||
'-n', pat, './',
|
||||
])
|
||||
])
|
||||
|
||||
|
||||
def bench_linux_word(suite_dir):
|
||||
'''
|
||||
Benchmark use of the -w ("match word") flag in each tool.
|
||||
|
||||
sift has a lot of trouble with this because it forces it into Go's
|
||||
regex engine by surrounding the pattern with \b assertions.
|
||||
'''
|
||||
require(suite_dir, 'linux')
|
||||
cwd = path.join(suite_dir, LINUX_DIR)
|
||||
@@ -194,28 +183,23 @@ def bench_linux_word(suite_dir):
|
||||
return Command(*args, **kwargs)
|
||||
|
||||
return Benchmark(pattern=pat, commands=[
|
||||
mkcmd('rg (ignore)', ['rg', '-n', '-w', pat]),
|
||||
mkcmd('ag (ignore)', ['ag', '-s', '-w', pat]),
|
||||
mkcmd('pt (ignore)', ['pt', '-w', pat]),
|
||||
mkcmd('sift (ignore)', SIFT + ['-n', '-w', '--git', pat]),
|
||||
mkcmd('rg', ['rg', '-n', '-w', pat]),
|
||||
mkcmd('ag', ['ag', '-s', '-w', pat]),
|
||||
mkcmd(
|
||||
'git grep (ignore)',
|
||||
'git grep',
|
||||
['git', 'grep', '-E', '-I', '-n', '-w', pat],
|
||||
env={'LC_ALL': 'C'},
|
||||
),
|
||||
mkcmd('rg (whitelist)', [
|
||||
'rg', '-n', '-w', '--no-ignore', '-tall', pat,
|
||||
]),
|
||||
mkcmd('ucg (whitelist)', ['ucg', '--nosmart-case', '-w', pat]),
|
||||
mkcmd('ugrep', [
|
||||
'ugrep', '-r', '--ignore-files', '--no-hidden', '-I',
|
||||
'-n', '-w', pat, './',
|
||||
])
|
||||
])
|
||||
|
||||
|
||||
def bench_linux_unicode_greek(suite_dir):
|
||||
'''
|
||||
Benchmark matching of a Unicode category.
|
||||
|
||||
Only three tools (ripgrep, sift and pt) support this. We omit
|
||||
pt because it is too slow.
|
||||
'''
|
||||
require(suite_dir, 'linux')
|
||||
cwd = path.join(suite_dir, LINUX_DIR)
|
||||
@@ -227,8 +211,10 @@ def bench_linux_unicode_greek(suite_dir):
|
||||
|
||||
return Benchmark(pattern=pat, commands=[
|
||||
mkcmd('rg', ['rg', '-n', pat]),
|
||||
mkcmd('pt', ['pt', '-e', pat]),
|
||||
mkcmd('sift', SIFT + ['-n', '--git', pat]),
|
||||
mkcmd('ugrep', [
|
||||
'ugrep', '-r', '--ignore-files', '--no-hidden', '-I',
|
||||
'-n', pat, './',
|
||||
])
|
||||
])
|
||||
|
||||
|
||||
@@ -248,18 +234,20 @@ def bench_linux_unicode_greek_casei(suite_dir):
|
||||
|
||||
return Benchmark(pattern=pat, commands=[
|
||||
mkcmd('rg', ['rg', '-n', '-i', pat]),
|
||||
mkcmd('pt', ['pt', '-i', '-e', pat]),
|
||||
mkcmd('sift', SIFT + ['-n', '-i', '--git', pat]),
|
||||
mkcmd('ugrep', [
|
||||
'ugrep', '-r', '--ignore-files', '--no-hidden', '-I',
|
||||
'-n', '-i', pat, './',
|
||||
])
|
||||
])
|
||||
|
||||
|
||||
def bench_linux_unicode_word(suite_dir):
|
||||
'''
|
||||
Benchmark Unicode aware \w character class.
|
||||
Benchmark Unicode aware \\w character class.
|
||||
|
||||
Only ripgrep and git-grep (with LC_ALL=en_US.UTF-8) actually get
|
||||
this right. Everything else uses the standard ASCII interpretation
|
||||
of \w.
|
||||
of \\w.
|
||||
'''
|
||||
require(suite_dir, 'linux')
|
||||
cwd = path.join(suite_dir, LINUX_DIR)
|
||||
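The docstring above is about semantics rather than speed: with a Unicode-aware \w, non-ASCII letters count as word characters. A quick illustration using Python's own re module (my own example, not part of the benchsuite) with the \wAh pattern this benchmark runs:

    import re

    line = 'SomeФAh value'
    # Unicode \w (the default in Python 3) treats the Cyrillic letter as a
    # word character, so the match succeeds; the ASCII interpretation does not.
    print(re.findall(r'\wAh', line))            # ['ФAh']
    print(re.findall(r'\wAh', line, re.ASCII))  # []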
@@ -270,26 +258,27 @@ def bench_linux_unicode_word(suite_dir):
|
||||
return Command(*args, **kwargs)
|
||||
|
||||
return Benchmark(pattern=pat, commands=[
|
||||
mkcmd('rg (ignore)', ['rg', '-n', pat]),
|
||||
mkcmd('rg (ignore) (ASCII)', ['rg', '-n', '(?-u)' + pat]),
|
||||
mkcmd('ag (ignore) (ASCII)', ['ag', '-s', pat]),
|
||||
mkcmd('pt (ignore) (ASCII)', ['pt', '-e', pat]),
|
||||
mkcmd('sift (ignore) (ASCII)', SIFT + ['-n', '--git', pat]),
|
||||
mkcmd('rg', ['rg', '-n', pat]),
|
||||
mkcmd('rg (ASCII)', ['rg', '-n', '(?-u)' + pat]),
|
||||
mkcmd('ag (ASCII)', ['ag', '-s', pat]),
|
||||
mkcmd(
|
||||
'git grep (ignore)',
|
||||
'git grep',
|
||||
['git', 'grep', '-E', '-I', '-n', pat],
|
||||
env={'LC_ALL': 'en_US.UTF-8'},
|
||||
),
|
||||
mkcmd(
|
||||
'git grep (ignore) (ASCII)',
|
||||
'git grep (ASCII)',
|
||||
['git', 'grep', '-E', '-I', '-n', pat],
|
||||
env={'LC_ALL': 'C'},
|
||||
),
|
||||
mkcmd('rg (whitelist)', ['rg', '-n', '--no-ignore', '-tall', pat]),
|
||||
mkcmd('rg (whitelist) (ASCII)', [
|
||||
'rg', '-n', '--no-ignore', '-tall', '(?-u)' + pat,
|
||||
mkcmd('ugrep', [
|
||||
'ugrep', '-r', '--ignore-files', '--no-hidden', '-I',
|
||||
'-n', pat, './',
|
||||
]),
|
||||
mkcmd('ugrep (ASCII)', [
|
||||
'ugrep', '-r', '--ignore-files', '--no-hidden', '-I',
|
||||
'-n', '-U', pat, './',
|
||||
]),
|
||||
mkcmd('ucg (ASCII)', ['ucg', '--nosmart-case', pat]),
|
||||
])
|
||||
|
||||
|
||||
@@ -311,26 +300,27 @@ def bench_linux_no_literal(suite_dir):
|
||||
return Command(*args, **kwargs)
|
||||
|
||||
return Benchmark(pattern=pat, commands=[
|
||||
mkcmd('rg (ignore)', ['rg', '-n', pat]),
|
||||
mkcmd('rg (ignore) (ASCII)', ['rg', '-n', '(?-u)' + pat]),
|
||||
mkcmd('ag (ignore) (ASCII)', ['ag', '-s', pat]),
|
||||
mkcmd('pt (ignore) (ASCII)', ['pt', '-e', pat]),
|
||||
mkcmd('sift (ignore) (ASCII)', SIFT + ['-n', '--git', pat]),
|
||||
mkcmd('rg', ['rg', '-n', pat]),
|
||||
mkcmd('rg (ASCII)', ['rg', '-n', '(?-u)' + pat]),
|
||||
mkcmd('ag (ASCII)', ['ag', '-s', pat]),
|
||||
mkcmd(
|
||||
'git grep (ignore)',
|
||||
'git grep',
|
||||
['git', 'grep', '-E', '-I', '-n', pat],
|
||||
env={'LC_ALL': 'en_US.UTF-8'},
|
||||
),
|
||||
mkcmd(
|
||||
'git grep (ignore) (ASCII)',
|
||||
'git grep (ASCII)',
|
||||
['git', 'grep', '-E', '-I', '-n', pat],
|
||||
env={'LC_ALL': 'C'},
|
||||
),
|
||||
mkcmd('rg (whitelist)', ['rg', '-n', '--no-ignore', '-tall', pat]),
|
||||
mkcmd('rg (whitelist) (ASCII)', [
|
||||
'rg', '-n', '--no-ignore', '-tall', '(?-u)' + pat,
|
||||
mkcmd('ugrep', [
|
||||
'ugrep', '-r', '--ignore-files', '--no-hidden', '-I',
|
||||
'-n', pat, './',
|
||||
]),
|
||||
mkcmd('ugrep (ASCII)', [
|
||||
'ugrep', '-r', '--ignore-files', '--no-hidden', '-I',
|
||||
'-n', '-U', pat, './',
|
||||
]),
|
||||
mkcmd('ucg (whitelist) (ASCII)', ['ucg', '--nosmart-case', pat]),
|
||||
])
|
||||
|
||||
|
||||
@@ -352,15 +342,17 @@ def bench_linux_alternates(suite_dir):
|
||||
return Command(*args, **kwargs)
|
||||
|
||||
return Benchmark(pattern=pat, commands=[
|
||||
mkcmd('rg (ignore)', ['rg', '-n', pat]),
|
||||
mkcmd('ag (ignore)', ['ag', '-s', pat]),
|
||||
mkcmd('rg', ['rg', '-n', pat]),
|
||||
mkcmd('ag', ['ag', '-s', pat]),
|
||||
mkcmd(
|
||||
'git grep (ignore)',
|
||||
'git grep',
|
||||
['git', 'grep', '-E', '-I', '-n', pat],
|
||||
env={'LC_ALL': 'C'},
|
||||
),
|
||||
mkcmd('rg (whitelist)', ['rg', '--no-ignore', '-n', pat]),
|
||||
mkcmd('ucg (whitelist)', ['ucg', '--nosmart-case', pat]),
|
||||
mkcmd('ugrep', [
|
||||
'ugrep', '-r', '--ignore-files', '--no-hidden', '-I',
|
||||
'-n', pat, './',
|
||||
])
|
||||
])
|
||||
|
||||
|
||||
@@ -375,15 +367,17 @@ def bench_linux_alternates_casei(suite_dir):
|
||||
return Command(*args, **kwargs)
|
||||
|
||||
return Benchmark(pattern=pat, commands=[
|
||||
mkcmd('rg (ignore)', ['rg', '-n', '-i', pat]),
|
||||
mkcmd('ag (ignore)', ['ag', '-i', pat]),
|
||||
mkcmd('rg', ['rg', '-n', '-i', pat]),
|
||||
mkcmd('ag', ['ag', '-i', pat]),
|
||||
mkcmd(
|
||||
'git grep (ignore)',
|
||||
'git grep',
|
||||
['git', 'grep', '-E', '-I', '-n', '-i', pat],
|
||||
env={'LC_ALL': 'C'},
|
||||
),
|
||||
mkcmd('rg (whitelist)', ['rg', '--no-ignore', '-n', '-i', pat]),
|
||||
mkcmd('ucg (whitelist)', ['ucg', '-i', pat]),
|
||||
mkcmd('ugrep', [
|
||||
'ugrep', '-r', '--ignore-files', '--no-hidden', '-I',
|
||||
'-n', '-i', pat, './',
|
||||
])
|
||||
])
|
||||
|
||||
|
||||
@@ -398,15 +392,11 @@ def bench_subtitles_en_literal(suite_dir):
|
||||
return Benchmark(pattern=pat, commands=[
|
||||
Command('rg', ['rg', pat, en]),
|
||||
Command('rg (no mmap)', ['rg', '--no-mmap', pat, en]),
|
||||
Command('pt', ['pt', '-N', pat, en]),
|
||||
Command('sift', ['sift', pat, en]),
|
||||
Command('grep', ['grep', '-a', pat, en], env=GREP_ASCII),
|
||||
Command('grep', ['grep', pat, en], env=GREP_ASCII),
|
||||
Command('rg (lines)', ['rg', '-n', pat, en]),
|
||||
Command('ag (lines)', ['ag', '-s', pat, en]),
|
||||
Command('ucg (lines)', ['ucg', '--nosmart-case', pat, en]),
|
||||
Command('pt (lines)', ['pt', pat, en]),
|
||||
Command('sift (lines)', ['sift', '-n', pat, en]),
|
||||
Command('grep (lines)', ['grep', '-an', pat, en], env=GREP_ASCII),
|
||||
Command('grep (lines)', ['grep', '-n', pat, en], env=GREP_ASCII),
|
||||
Command('ugrep (lines)', ['ugrep', '-n', pat, en])
|
||||
])
|
||||
|
||||
|
||||
@@ -420,13 +410,11 @@ def bench_subtitles_en_literal_casei(suite_dir):
|
||||
|
||||
return Benchmark(pattern=pat, commands=[
|
||||
Command('rg', ['rg', '-i', pat, en]),
|
||||
Command('grep', ['grep', '-ai', pat, en], env=GREP_UNICODE),
|
||||
Command('grep (ASCII)', [
|
||||
'grep', '-E', '-ai', pat, en,
|
||||
], env=GREP_ASCII),
|
||||
Command('grep', ['grep', '-i', pat, en], env=GREP_UNICODE),
|
||||
Command('grep (ASCII)', ['grep', '-E', '-i', pat, en], env=GREP_ASCII),
|
||||
Command('rg (lines)', ['rg', '-n', '-i', pat, en]),
|
||||
Command('ag (lines) (ASCII)', ['ag', '-i', pat, en]),
|
||||
Command('ucg (lines) (ASCII)', ['ucg', '-i', pat, en]),
|
||||
Command('ugrep (lines)', ['ugrep', '-n', '-i', pat, en])
|
||||
])
|
||||
|
||||
|
||||
@@ -443,12 +431,10 @@ def bench_subtitles_en_literal_word(suite_dir):
|
||||
'rg', '-n', r'(?-u:\b)' + pat + r'(?-u:\b)', en,
|
||||
]),
|
||||
Command('ag (ASCII)', ['ag', '-sw', pat, en]),
|
||||
Command('ucg (ASCII)', ['ucg', '--nosmart-case', pat, en]),
|
||||
Command('grep (ASCII)', [
|
||||
'grep', '-anw', pat, en,
|
||||
], env=GREP_ASCII),
|
||||
Command('grep (ASCII)', ['grep', '-nw', pat, en], env=GREP_ASCII),
|
||||
Command('ugrep (ASCII)', ['ugrep', '-nw', pat, en]),
|
||||
Command('rg', ['rg', '-nw', pat, en]),
|
||||
Command('grep', ['grep', '-anw', pat, en], env=GREP_UNICODE),
|
||||
Command('grep', ['grep', '-nw', pat, en], env=GREP_UNICODE),
|
||||
])
|
||||
|
||||
|
||||
@@ -469,14 +455,10 @@ def bench_subtitles_en_alternate(suite_dir):
|
||||
return Benchmark(pattern=pat, commands=[
|
||||
Command('rg (lines)', ['rg', '-n', pat, en]),
|
||||
Command('ag (lines)', ['ag', '-s', pat, en]),
|
||||
Command('ucg (lines)', ['ucg', '--nosmart-case', pat, en]),
|
||||
Command('grep (lines)', [
|
||||
'grep', '-E', '-an', pat, en,
|
||||
], env=GREP_ASCII),
|
||||
Command('grep (lines)', ['grep', '-E', '-n', pat, en], env=GREP_ASCII),
|
||||
Command('ugrep (lines)', ['ugrep', '-n', pat, en]),
|
||||
Command('rg', ['rg', pat, en]),
|
||||
Command('grep', [
|
||||
'grep', '-E', '-a', pat, en,
|
||||
], env=GREP_ASCII),
|
||||
Command('grep', ['grep', '-E', pat, en], env=GREP_ASCII),
|
||||
])
|
||||
|
||||
|
||||
@@ -496,12 +478,12 @@ def bench_subtitles_en_alternate_casei(suite_dir):
|
||||
|
||||
return Benchmark(pattern=pat, commands=[
|
||||
Command('ag (ASCII)', ['ag', '-s', '-i', pat, en]),
|
||||
Command('ucg (ASCII)', ['ucg', '-i', pat, en]),
|
||||
Command('grep (ASCII)', [
|
||||
'grep', '-E', '-ani', pat, en,
|
||||
'grep', '-E', '-ni', pat, en,
|
||||
], env=GREP_ASCII),
|
||||
Command('ugrep (ASCII)', ['ugrep', '-n', '-i', pat, en]),
|
||||
Command('rg', ['rg', '-n', '-i', pat, en]),
|
||||
Command('grep', ['grep', '-E', '-ani', pat, en], env=GREP_UNICODE),
|
||||
Command('grep', ['grep', '-E', '-ni', pat, en], env=GREP_UNICODE),
|
||||
])
|
||||
|
||||
|
||||
@@ -515,13 +497,12 @@ def bench_subtitles_en_surrounding_words(suite_dir):
|
||||
|
||||
return Benchmark(pattern=pat, commands=[
|
||||
Command('rg', ['rg', '-n', pat, en]),
|
||||
Command('grep', ['grep', '-E', '-an', pat, en], env=GREP_UNICODE),
|
||||
Command('grep', ['grep', '-E', '-n', pat, en], env=GREP_UNICODE),
|
||||
Command('ugrep', ['ugrep', '-n', pat, en]),
|
||||
Command('rg (ASCII)', ['rg', '-n', '(?-u)' + pat, en]),
|
||||
Command('ag (ASCII)', ['ag', '-s', pat, en]),
|
||||
Command('ucg (ASCII)', ['ucg', '--nosmart-case', pat, en]),
|
||||
Command('grep (ASCII)', [
|
||||
'grep', '-E', '-an', pat, en,
|
||||
], env=GREP_ASCII),
|
||||
Command('grep (ASCII)', ['grep', '-E', '-n', pat, en], env=GREP_ASCII),
|
||||
Command('ugrep (ASCII)', ['ugrep', '-n', '-U', pat, en])
|
||||
])
|
||||
|
||||
|
||||
@@ -540,12 +521,11 @@ def bench_subtitles_en_no_literal(suite_dir):
|
||||
|
||||
return Benchmark(pattern=pat, commands=[
|
||||
Command('rg', ['rg', '-n', pat, en]),
|
||||
Command('ugrep', ['ugrep', '-n', pat, en]),
|
||||
Command('rg (ASCII)', ['rg', '-n', '(?-u)' + pat, en]),
|
||||
Command('ag (ASCII)', ['ag', '-s', pat, en]),
|
||||
Command('ucg (ASCII)', ['ucg', '--nosmart-case', pat, en]),
|
||||
Command('grep (ASCII)', [
|
||||
'grep', '-E', '-an', pat, en,
|
||||
], env=GREP_ASCII),
|
||||
Command('grep (ASCII)', ['grep', '-E', '-n', pat, en], env=GREP_ASCII),
|
||||
Command('ugrep (ASCII)', ['ugrep', '-n', '-U', pat, en])
|
||||
])
|
||||
|
||||
|
||||
@@ -560,15 +540,15 @@ def bench_subtitles_ru_literal(suite_dir):
|
||||
return Benchmark(pattern=pat, commands=[
|
||||
Command('rg', ['rg', pat, ru]),
|
||||
Command('rg (no mmap)', ['rg', '--no-mmap', pat, ru]),
|
||||
Command('pt', ['pt', '-N', pat, ru]),
|
||||
Command('sift', ['sift', pat, ru]),
|
||||
Command('grep', ['grep', '-a', pat, ru], env=GREP_ASCII),
|
||||
Command('grep', ['grep', pat, ru], env=GREP_ASCII),
|
||||
Command('rg (lines)', ['rg', '-n', pat, ru]),
|
||||
Command('ag (lines)', ['ag', '-s', pat, ru]),
|
||||
Command('ucg (lines)', ['ucg', '--nosmart-case', pat, ru]),
|
||||
Command('pt (lines)', ['pt', pat, ru]),
|
||||
Command('sift (lines)', ['sift', '-n', pat, ru]),
|
||||
Command('grep (lines)', ['grep', '-an', pat, ru], env=GREP_ASCII),
|
||||
Command('grep (lines)', ['grep', '-n', pat, ru], env=GREP_ASCII),
|
||||
# ugrep incorrectly identifies this corpus as binary, but it is
|
||||
# entirely valid UTF-8. So we tell ugrep to always treat the corpus
|
||||
# as text even though this technically gives it an edge over other
|
||||
# tools. (It no longer needs to check for binary data.)
|
||||
Command('ugrep (lines)', ['ugrep', '-a', '-n', pat, ru])
|
||||
])
|
||||
|
||||
|
||||
@@ -582,13 +562,12 @@ def bench_subtitles_ru_literal_casei(suite_dir):
|
||||
|
||||
return Benchmark(pattern=pat, commands=[
|
||||
Command('rg', ['rg', '-i', pat, ru]),
|
||||
Command('grep', ['grep', '-ai', pat, ru], env=GREP_UNICODE),
|
||||
Command('grep (ASCII)', [
|
||||
'grep', '-E', '-ai', pat, ru,
|
||||
], env=GREP_ASCII),
|
||||
Command('grep', ['grep', '-i', pat, ru], env=GREP_UNICODE),
|
||||
Command('grep (ASCII)', ['grep', '-E', '-i', pat, ru], env=GREP_ASCII),
|
||||
Command('rg (lines)', ['rg', '-n', '-i', pat, ru]),
|
||||
Command('ag (lines) (ASCII)', ['ag', '-i', pat, ru]),
|
||||
Command('ucg (lines) (ASCII)', ['ucg', '-i', pat, ru]),
|
||||
# See bench_subtitles_ru_literal for why we use '-a' here.
|
||||
Command('ugrep (lines) (ASCII)', ['ugrep', '-a', '-n', '-i', pat, ru])
|
||||
])
|
||||
|
||||
|
||||
@@ -602,15 +581,20 @@ def bench_subtitles_ru_literal_word(suite_dir):
|
||||
|
||||
return Benchmark(pattern=pat, commands=[
|
||||
Command('rg (ASCII)', [
|
||||
'rg', '-n', r'(?-u:\b)' + pat + r'(?-u:\b)', ru,
|
||||
# You might think we'd use \b here for word boundaries, but both
|
||||
# GNU grep and ripgrep implement -w with the formulation below.
|
||||
# Since we can't use Unicode in a pattern and disable Unicode word
|
||||
# boundaries, we just hand-jam this ourselves.
|
||||
'rg', '-n', r'(?-u:^|\W)' + pat + r'(?-u:$|\W)', ru,
|
||||
]),
|
||||
Command('ag (ASCII)', ['ag', '-sw', pat, ru]),
|
||||
Command('ucg (ASCII)', ['ucg', '--nosmart-case', pat, ru]),
|
||||
Command('grep (ASCII)', [
|
||||
'grep', '-anw', pat, ru,
|
||||
'grep', '-nw', pat, ru,
|
||||
], env=GREP_ASCII),
|
||||
# See bench_subtitles_ru_literal for why we use '-a' here.
|
||||
Command('ugrep (ASCII)', ['ugrep', '-anw', pat, ru]),
|
||||
Command('rg', ['rg', '-nw', pat, ru]),
|
||||
Command('grep', ['grep', '-anw', pat, ru], env=GREP_UNICODE),
|
||||
Command('grep', ['grep', '-nw', pat, ru], env=GREP_UNICODE),
|
||||
])
|
||||
|
||||
|
||||
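The comment in the hunk above explains why the rg (ASCII) command hand-rolls its word boundaries instead of using \b: the pattern stays Unicode-capable while the boundary check is forced to ASCII. A small sketch of the construction (the literal is only a placeholder, not necessarily the pattern the benchmark uses):

    # Emulate -w the way the comment describes GNU grep and ripgrep doing it,
    # with the boundary assertions explicitly switched to ASCII via (?-u:...).
    pat = 'Шерлок'  # placeholder literal for illustration
    word_pat = r'(?-u:^|\W)' + pat + r'(?-u:$|\W)'
    cmd = ['rg', '-n', word_pat, 'ru.txt']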
@@ -631,14 +615,11 @@ def bench_subtitles_ru_alternate(suite_dir):
|
||||
return Benchmark(pattern=pat, commands=[
|
||||
Command('rg (lines)', ['rg', '-n', pat, ru]),
|
||||
Command('ag (lines)', ['ag', '-s', pat, ru]),
|
||||
Command('ucg (lines)', ['ucg', '--nosmart-case', pat, ru]),
|
||||
Command('grep (lines)', [
|
||||
'grep', '-E', '-an', pat, ru,
|
||||
], env=GREP_ASCII),
|
||||
Command('grep (lines)', ['grep', '-E', '-n', pat, ru], env=GREP_ASCII),
|
||||
# See bench_subtitles_ru_literal for why we use '-a' here.
|
||||
Command('ugrep (lines)', ['ugrep', '-an', pat, ru]),
|
||||
Command('rg', ['rg', pat, ru]),
|
||||
Command('grep', [
|
||||
'grep', '-E', '-a', pat, ru,
|
||||
], env=GREP_ASCII),
|
||||
Command('grep', ['grep', '-E', pat, ru], env=GREP_ASCII),
|
||||
])
|
||||
|
||||
|
||||
@@ -658,12 +639,13 @@ def bench_subtitles_ru_alternate_casei(suite_dir):
|
||||
|
||||
return Benchmark(pattern=pat, commands=[
|
||||
Command('ag (ASCII)', ['ag', '-s', '-i', pat, ru]),
|
||||
Command('ucg (ASCII)', ['ucg', '-i', pat, ru]),
|
||||
Command('grep (ASCII)', [
|
||||
'grep', '-E', '-ani', pat, ru,
|
||||
'grep', '-E', '-ni', pat, ru,
|
||||
], env=GREP_ASCII),
|
||||
# See bench_subtitles_ru_literal for why we use '-a' here.
|
||||
Command('ugrep (ASCII)', ['ugrep', '-ani', pat, ru]),
|
||||
Command('rg', ['rg', '-n', '-i', pat, ru]),
|
||||
Command('grep', ['grep', '-E', '-ani', pat, ru], env=GREP_UNICODE),
|
||||
Command('grep', ['grep', '-E', '-ni', pat, ru], env=GREP_UNICODE),
|
||||
])
|
||||
|
||||
|
||||
@@ -677,12 +659,12 @@ def bench_subtitles_ru_surrounding_words(suite_dir):
|
||||
|
||||
return Benchmark(pattern=pat, commands=[
|
||||
Command('rg', ['rg', '-n', pat, ru]),
|
||||
Command('grep', ['grep', '-E', '-an', pat, ru], env=GREP_UNICODE),
|
||||
Command('grep', ['grep', '-E', '-n', pat, ru], env=GREP_UNICODE),
|
||||
Command('ugrep', ['ugrep', '-an', pat, ru]),
|
||||
Command('ag (ASCII)', ['ag', '-s', pat, ru]),
|
||||
Command('ucg (ASCII)', ['ucg', '--nosmart-case', pat, ru]),
|
||||
Command('grep (ASCII)', [
|
||||
'grep', '-E', '-an', pat, ru,
|
||||
], env=GREP_ASCII),
|
||||
Command('grep (ASCII)', ['grep', '-E', '-n', pat, ru], env=GREP_ASCII),
|
||||
# See bench_subtitles_ru_literal for why we use '-a' here.
|
||||
Command('ugrep (ASCII)', ['ugrep', '-a', '-n', '-U', pat, ru]),
|
||||
])
|
||||
|
||||
|
||||
@@ -701,12 +683,13 @@ def bench_subtitles_ru_no_literal(suite_dir):
|
||||
|
||||
return Benchmark(pattern=pat, commands=[
|
||||
Command('rg', ['rg', '-n', pat, ru]),
|
||||
# See bench_subtitles_ru_literal for why we use '-a' here.
|
||||
Command('ugrep', ['ugrep', '-an', pat, ru]),
|
||||
Command('rg (ASCII)', ['rg', '-n', '(?-u)' + pat, ru]),
|
||||
Command('ag (ASCII)', ['ag', '-s', pat, ru]),
|
||||
Command('ucg (ASCII)', ['ucg', '--nosmart-case', pat, ru]),
|
||||
Command('grep (ASCII)', [
|
||||
'grep', '-E', '-an', pat, ru,
|
||||
], env=GREP_ASCII),
|
||||
Command('grep (ASCII)', ['grep', '-E', '-n', pat, ru], env=GREP_ASCII),
|
||||
# See bench_subtitles_ru_literal for why we use '-a' here.
|
||||
Command('ugrep (ASCII)', ['ugrep', '-anU', pat, ru])
|
||||
])
|
||||
|
||||
|
||||
@@ -756,7 +739,7 @@ class Benchmark(object):
def __init__(self, name=None, pattern=None, commands=None,
warmup_count=1, count=3, line_count=True,
allow_missing_commands=False,
disabled_cmds=None):
disabled_cmds=None, order=0):
'''
Create a single benchmark.

@@ -792,6 +775,8 @@ class Benchmark(object):
will simply skip it.
:param list(str) disabled_cmds:
A list of commands to skip.
:param int order:
An integer indicating the sequence number of this benchmark.
'''
self.name = name
self.pattern = pattern
@@ -801,6 +786,7 @@ class Benchmark(object):
self.line_count = line_count
self.allow_missing_commands = allow_missing_commands
self.disabled_cmds = set(disabled_cmds or [])
self.order = order

def raise_if_missing(self):
'''
@@ -894,7 +880,7 @@ class Result(object):
'''
Create a new set of results, initially empty.

:param Benchmarl benchmark:
:param Benchmark benchmark:
The benchmark that produced these results.
'''
self.benchmark = benchmark
@@ -1088,7 +1074,7 @@ def download_subtitles_en(suite_dir):
# benchmarks finish in a reasonable time.
with open(path.join(subtitle_dir, en_path_sample), 'wb+') as f:
run_cmd(
['head', '-n', '32722372', en_path],
['head', '-n', '55000000', en_path],
cwd=subtitle_dir, stdout=f)


@@ -1163,19 +1149,22 @@ def collect_benchmarks(suite_dir, filter_pat=None,
requires corpora that are missing, then a log message is
emitted to stderr and it is not yielded.
'''
for fun in sorted(globals()):
if not fun.startswith('bench_'):
benchmarks = []
for global_name in globals():
if not global_name.startswith('bench_'):
continue
name = re.sub('^bench_', '', fun)
name = re.sub('^bench_', '', global_name)
if filter_pat is not None and not re.search(filter_pat, name):
continue
try:
benchmark = globals()[fun](suite_dir)
fun = globals()[global_name]
benchmark = fun(suite_dir)
benchmark.name = name
benchmark.warmup_count = warmup_iter
benchmark.count = bench_iter
benchmark.allow_missing_commands = allow_missing_commands
benchmark.disabled_cmds = disabled_cmds
benchmark.order = fun.__code__.co_firstlineno
benchmark.raise_if_missing()
except MissingDependencies as e:
eprint(
@@ -1190,7 +1179,8 @@ def collect_benchmarks(suite_dir, filter_pat=None,
'(run with --allow-missing to run incomplete benchmarks)'
eprint(fmt % (', '.join(e.missing_names), name))
continue
yield benchmark
benchmarks.append(benchmark)
return sorted(benchmarks, key=lambda b: b.order)


def main():

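The collect_benchmarks change above stops relying on alphabetical function names and instead records each benchmark's position in the file: a function object knows the line its def statement starts on, so sorting on that number reproduces definition order. A standalone sketch of the same trick:

    # Sorting callables by the line where they are defined, independent of
    # their names. This is what `benchmark.order = fun.__code__.co_firstlineno`
    # combined with `sorted(benchmarks, key=lambda b: b.order)` achieves.
    def bench_zebra(): pass
    def bench_apple(): pass

    funs = [f for name, f in globals().items()
            if name.startswith('bench_') and callable(f)]
    funs.sort(key=lambda f: f.__code__.co_firstlineno)
    print([f.__name__ for f in funs])  # ['bench_zebra', 'bench_apple']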
benchsuite/runs/2020-10-14-archlinux-frink/README.md (new file, 37 lines)
@@ -0,0 +1,37 @@
This directory contains updated benchmarks as of 2020-10-14. They were captured
via the benchsuite script at `benchsuite/benchsuite` from the root of this
repository. The command that was run:

$ ./benchsuite \
--dir /tmp/benchsuite \
--raw runs/2020-10-14-archlinux-frink/raw.csv \
--warmup-iter 1 \
--bench-iter 5

The versions of each tool are as follows:

$ rg --version
ripgrep 12.1.1 (rev def993bad1)
-SIMD -AVX (compiled)
+SIMD +AVX (runtime)

$ grep -V
grep (GNU grep) 3.4

$ ag -V
ag version 2.2.0

Features:
+jit +lzma +zlib

$ git --version
git version 2.28.0

$ ugrep --version
ugrep 3.0.2 x86_64-pc-linux-gnu +avx2 +pcre2_jit +zlib +bzip2 +lzma +lz4
License BSD-3-Clause: <https://opensource.org/licenses/BSD-3-Clause>
Written by Robert van Engelen and others: <https://github.com/Genivia/ugrep>

The version of ripgrep used was compiled from source on commit def993bad1:

$ cargo build --release --features 'pcre2'
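The raw.csv file added below records one row per timed run, with the columns shown in its header line (benchmark, warmup_iter, iter, name, command, duration, lines, env). A small, self-contained way to summarize it into per-tool means, should you want to recompute aggregate timings yourself (the mean aggregation here is my choice, not necessarily what benchsuite itself reports):

    import csv
    from collections import defaultdict
    from statistics import mean

    durations = defaultdict(list)
    with open('runs/2020-10-14-archlinux-frink/raw.csv', newline='') as f:
        for row in csv.DictReader(f):
            # Group the timed iterations of each (benchmark, tool) pair.
            durations[(row['benchmark'], row['name'])].append(
                float(row['duration']))

    for (bench, tool), times in sorted(durations.items()):
        print('%-28s %-22s %.3fs' % (bench, tool, mean(times)))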
benchsuite/runs/2020-10-14-archlinux-frink/raw.csv (new file, 671 lines)
@@ -0,0 +1,671 @@
benchmark,warmup_iter,iter,name,command,duration,lines,env
|
||||
linux_literal_default,1,5,rg,rg PM_RESUME,0.12675833702087402,19,
|
||||
linux_literal_default,1,5,rg,rg PM_RESUME,0.1196434497833252,19,
|
||||
linux_literal_default,1,5,rg,rg PM_RESUME,0.12096214294433594,19,
|
||||
linux_literal_default,1,5,rg,rg PM_RESUME,0.1257617473602295,19,
|
||||
linux_literal_default,1,5,rg,rg PM_RESUME,0.12903356552124023,19,
|
||||
linux_literal_default,1,5,ag,ag PM_RESUME,0.8575565814971924,19,
|
||||
linux_literal_default,1,5,ag,ag PM_RESUME,0.9113664627075195,19,
|
||||
linux_literal_default,1,5,ag,ag PM_RESUME,0.944256067276001,19,
|
||||
linux_literal_default,1,5,ag,ag PM_RESUME,0.5309450626373291,19,
|
||||
linux_literal_default,1,5,ag,ag PM_RESUME,0.6105470657348633,19,
|
||||
linux_literal_default,1,5,git grep,git grep PM_RESUME,0.49039149284362793,19,LC_ALL=en_US.UTF-8
|
||||
linux_literal_default,1,5,git grep,git grep PM_RESUME,0.48095154762268066,19,LC_ALL=en_US.UTF-8
|
||||
linux_literal_default,1,5,git grep,git grep PM_RESUME,0.48927950859069824,19,LC_ALL=en_US.UTF-8
|
||||
linux_literal_default,1,5,git grep,git grep PM_RESUME,0.47182321548461914,19,LC_ALL=en_US.UTF-8
|
||||
linux_literal_default,1,5,git grep,git grep PM_RESUME,0.46923041343688965,19,LC_ALL=en_US.UTF-8
|
||||
linux_literal_default,1,5,ugrep,ugrep -r PM_RESUME ./,0.13612771034240723,19,
|
||||
linux_literal_default,1,5,ugrep,ugrep -r PM_RESUME ./,0.13677191734313965,19,
|
||||
linux_literal_default,1,5,ugrep,ugrep -r PM_RESUME ./,0.13688087463378906,19,
|
||||
linux_literal_default,1,5,ugrep,ugrep -r PM_RESUME ./,0.13218474388122559,19,
|
||||
linux_literal_default,1,5,ugrep,ugrep -r PM_RESUME ./,0.13851046562194824,19,
|
||||
linux_literal_default,1,5,grep,grep -r PM_RESUME ./,1.1436240673065186,19,LC_ALL=en_US.UTF-8
|
||||
linux_literal_default,1,5,grep,grep -r PM_RESUME ./,1.1436970233917236,19,LC_ALL=en_US.UTF-8
|
||||
linux_literal_default,1,5,grep,grep -r PM_RESUME ./,1.1542651653289795,19,LC_ALL=en_US.UTF-8
|
||||
linux_literal_default,1,5,grep,grep -r PM_RESUME ./,1.14790940284729,19,LC_ALL=en_US.UTF-8
|
||||
linux_literal_default,1,5,grep,grep -r PM_RESUME ./,1.1441664695739746,19,LC_ALL=en_US.UTF-8
|
||||
linux_literal,1,5,rg,rg -n PM_RESUME,0.134232759475708,19,
|
||||
linux_literal,1,5,rg,rg -n PM_RESUME,0.12477993965148926,19,
|
||||
linux_literal,1,5,rg,rg -n PM_RESUME,0.11790871620178223,19,
|
||||
linux_literal,1,5,rg,rg -n PM_RESUME,0.13471150398254395,19,
|
||||
linux_literal,1,5,rg,rg -n PM_RESUME,0.13730239868164062,19,
|
||||
linux_literal,1,5,rg (mmap),rg -n --mmap PM_RESUME,1.2953157424926758,19,
|
||||
linux_literal,1,5,rg (mmap),rg -n --mmap PM_RESUME,1.3263885974884033,19,
|
||||
linux_literal,1,5,rg (mmap),rg -n --mmap PM_RESUME,1.320932388305664,19,
|
||||
linux_literal,1,5,rg (mmap),rg -n --mmap PM_RESUME,1.3446438312530518,19,
|
||||
linux_literal,1,5,rg (mmap),rg -n --mmap PM_RESUME,1.3919141292572021,19,
|
||||
linux_literal,1,5,ag (mmap),ag -s PM_RESUME,0.7901346683502197,19,
|
||||
linux_literal,1,5,ag (mmap),ag -s PM_RESUME,0.9647164344787598,19,
|
||||
linux_literal,1,5,ag (mmap),ag -s PM_RESUME,0.8800022602081299,19,
|
||||
linux_literal,1,5,ag (mmap),ag -s PM_RESUME,0.9307558536529541,19,
|
||||
linux_literal,1,5,ag (mmap),ag -s PM_RESUME,0.8346366882324219,19,
|
||||
linux_literal,1,5,git grep,git grep -I -n PM_RESUME,0.4694955348968506,19,LC_ALL=C
|
||||
linux_literal,1,5,git grep,git grep -I -n PM_RESUME,0.4620368480682373,19,LC_ALL=C
|
||||
linux_literal,1,5,git grep,git grep -I -n PM_RESUME,0.4673285484313965,19,LC_ALL=C
|
||||
linux_literal,1,5,git grep,git grep -I -n PM_RESUME,0.4570960998535156,19,LC_ALL=C
|
||||
linux_literal,1,5,git grep,git grep -I -n PM_RESUME,0.4648761749267578,19,LC_ALL=C
|
||||
linux_literal,1,5,ugrep,ugrep -r --ignore-files --no-hidden -I -n PM_RESUME ./,0.3233473300933838,19,
|
||||
linux_literal,1,5,ugrep,ugrep -r --ignore-files --no-hidden -I -n PM_RESUME ./,0.3199331760406494,19,
|
||||
linux_literal,1,5,ugrep,ugrep -r --ignore-files --no-hidden -I -n PM_RESUME ./,0.29825615882873535,19,
|
||||
linux_literal,1,5,ugrep,ugrep -r --ignore-files --no-hidden -I -n PM_RESUME ./,0.3003232479095459,19,
|
||||
linux_literal,1,5,ugrep,ugrep -r --ignore-files --no-hidden -I -n PM_RESUME ./,0.30283141136169434,19,
|
||||
linux_literal_casei,1,5,rg,rg -n -i PM_RESUME,0.1349015235900879,456,
|
||||
linux_literal_casei,1,5,rg,rg -n -i PM_RESUME,0.1277780532836914,456,
|
||||
linux_literal_casei,1,5,rg,rg -n -i PM_RESUME,0.1251516342163086,456,
|
||||
linux_literal_casei,1,5,rg,rg -n -i PM_RESUME,0.12959671020507812,456,
|
||||
linux_literal_casei,1,5,rg,rg -n -i PM_RESUME,0.1374528408050537,456,
|
||||
linux_literal_casei,1,5,rg (mmap),rg -n -i --mmap PM_RESUME,1.3468265533447266,456,
|
||||
linux_literal_casei,1,5,rg (mmap),rg -n -i --mmap PM_RESUME,1.3552894592285156,456,
|
||||
linux_literal_casei,1,5,rg (mmap),rg -n -i --mmap PM_RESUME,1.3028552532196045,456,
|
||||
linux_literal_casei,1,5,rg (mmap),rg -n -i --mmap PM_RESUME,1.336735725402832,456,
|
||||
linux_literal_casei,1,5,rg (mmap),rg -n -i --mmap PM_RESUME,1.338634729385376,456,
|
||||
linux_literal_casei,1,5,ag (mmap),ag -i PM_RESUME,0.5562450885772705,456,
|
||||
linux_literal_casei,1,5,ag (mmap),ag -i PM_RESUME,0.7324790954589844,456,
|
||||
linux_literal_casei,1,5,ag (mmap),ag -i PM_RESUME,0.8382794857025146,456,
|
||||
linux_literal_casei,1,5,ag (mmap),ag -i PM_RESUME,0.5817627906799316,456,
|
||||
linux_literal_casei,1,5,ag (mmap),ag -i PM_RESUME,0.5771033763885498,456,
|
||||
linux_literal_casei,1,5,git grep,git grep -I -n -i PM_RESUME,0.48885059356689453,456,LC_ALL=C
|
||||
linux_literal_casei,1,5,git grep,git grep -I -n -i PM_RESUME,0.4838893413543701,456,LC_ALL=C
|
||||
linux_literal_casei,1,5,git grep,git grep -I -n -i PM_RESUME,0.48733997344970703,456,LC_ALL=C
|
||||
linux_literal_casei,1,5,git grep,git grep -I -n -i PM_RESUME,0.4765594005584717,456,LC_ALL=C
|
||||
linux_literal_casei,1,5,git grep,git grep -I -n -i PM_RESUME,0.47402334213256836,456,LC_ALL=C
|
||||
linux_literal_casei,1,5,ugrep,ugrep -r --ignore-files --no-hidden -I -n -i PM_RESUME ./,0.3075406551361084,456,
|
||||
linux_literal_casei,1,5,ugrep,ugrep -r --ignore-files --no-hidden -I -n -i PM_RESUME ./,0.2922379970550537,456,
|
||||
linux_literal_casei,1,5,ugrep,ugrep -r --ignore-files --no-hidden -I -n -i PM_RESUME ./,0.2901036739349365,456,
|
||||
linux_literal_casei,1,5,ugrep,ugrep -r --ignore-files --no-hidden -I -n -i PM_RESUME ./,0.2723674774169922,456,
|
||||
linux_literal_casei,1,5,ugrep,ugrep -r --ignore-files --no-hidden -I -n -i PM_RESUME ./,0.2762429714202881,456,
|
||||
linux_re_literal_suffix,1,5,rg,rg -n [A-Z]+_RESUME,0.12853646278381348,1944,
|
||||
linux_re_literal_suffix,1,5,rg,rg -n [A-Z]+_RESUME,0.1190040111541748,1944,
|
||||
linux_re_literal_suffix,1,5,rg,rg -n [A-Z]+_RESUME,0.14054393768310547,1944,
|
||||
linux_re_literal_suffix,1,5,rg,rg -n [A-Z]+_RESUME,0.12263894081115723,1944,
|
||||
linux_re_literal_suffix,1,5,rg,rg -n [A-Z]+_RESUME,0.12101268768310547,1944,
|
||||
linux_re_literal_suffix,1,5,ag,ag -s [A-Z]+_RESUME,0.9220716953277588,1944,
|
||||
linux_re_literal_suffix,1,5,ag,ag -s [A-Z]+_RESUME,1.009810209274292,1944,
|
||||
linux_re_literal_suffix,1,5,ag,ag -s [A-Z]+_RESUME,0.9654982089996338,1944,
|
||||
linux_re_literal_suffix,1,5,ag,ag -s [A-Z]+_RESUME,1.2758586406707764,1944,
|
||||
linux_re_literal_suffix,1,5,ag,ag -s [A-Z]+_RESUME,1.0480666160583496,1944,
|
||||
linux_re_literal_suffix,1,5,git grep,git grep -E -I -n [A-Z]+_RESUME,1.1811027526855469,1944,LC_ALL=C
|
||||
linux_re_literal_suffix,1,5,git grep,git grep -E -I -n [A-Z]+_RESUME,1.1824719905853271,1944,LC_ALL=C
|
||||
linux_re_literal_suffix,1,5,git grep,git grep -E -I -n [A-Z]+_RESUME,1.2052066326141357,1944,LC_ALL=C
|
||||
linux_re_literal_suffix,1,5,git grep,git grep -E -I -n [A-Z]+_RESUME,1.224193811416626,1944,LC_ALL=C
|
||||
linux_re_literal_suffix,1,5,git grep,git grep -E -I -n [A-Z]+_RESUME,1.2896029949188232,1944,LC_ALL=C
|
||||
linux_re_literal_suffix,1,5,ugrep,ugrep -r --ignore-files --no-hidden -I -n [A-Z]+_RESUME ./,0.5580098628997803,1944,
|
||||
linux_re_literal_suffix,1,5,ugrep,ugrep -r --ignore-files --no-hidden -I -n [A-Z]+_RESUME ./,0.5409820079803467,1944,
|
||||
linux_re_literal_suffix,1,5,ugrep,ugrep -r --ignore-files --no-hidden -I -n [A-Z]+_RESUME ./,0.5436761379241943,1944,
|
||||
linux_re_literal_suffix,1,5,ugrep,ugrep -r --ignore-files --no-hidden -I -n [A-Z]+_RESUME ./,0.5317332744598389,1944,
|
||||
linux_re_literal_suffix,1,5,ugrep,ugrep -r --ignore-files --no-hidden -I -n [A-Z]+_RESUME ./,0.5662341117858887,1944,
|
||||
linux_word,1,5,rg,rg -n -w PM_RESUME,0.13112211227416992,6,
|
||||
linux_word,1,5,rg,rg -n -w PM_RESUME,0.13633346557617188,6,
|
||||
linux_word,1,5,rg,rg -n -w PM_RESUME,0.1308743953704834,6,
|
||||
linux_word,1,5,rg,rg -n -w PM_RESUME,0.13691973686218262,6,
|
||||
linux_word,1,5,rg,rg -n -w PM_RESUME,0.1369326114654541,6,
|
||||
linux_word,1,5,ag,ag -s -w PM_RESUME,0.5965347290039062,6,
|
||||
linux_word,1,5,ag,ag -s -w PM_RESUME,0.8891518115997314,6,
|
||||
linux_word,1,5,ag,ag -s -w PM_RESUME,0.5207972526550293,6,
|
||||
linux_word,1,5,ag,ag -s -w PM_RESUME,0.5551142692565918,6,
|
||||
linux_word,1,5,ag,ag -s -w PM_RESUME,0.5308854579925537,6,
|
||||
linux_word,1,5,git grep,git grep -E -I -n -w PM_RESUME,0.45984363555908203,6,LC_ALL=C
|
||||
linux_word,1,5,git grep,git grep -E -I -n -w PM_RESUME,0.47351694107055664,6,LC_ALL=C
|
||||
linux_word,1,5,git grep,git grep -E -I -n -w PM_RESUME,0.5011758804321289,6,LC_ALL=C
|
||||
linux_word,1,5,git grep,git grep -E -I -n -w PM_RESUME,0.45740509033203125,6,LC_ALL=C
|
||||
linux_word,1,5,git grep,git grep -E -I -n -w PM_RESUME,0.46122002601623535,6,LC_ALL=C
|
||||
linux_word,1,5,ugrep,ugrep -r --ignore-files --no-hidden -I -n -w PM_RESUME ./,0.3174629211425781,6,
|
||||
linux_word,1,5,ugrep,ugrep -r --ignore-files --no-hidden -I -n -w PM_RESUME ./,0.32368993759155273,6,
|
||||
linux_word,1,5,ugrep,ugrep -r --ignore-files --no-hidden -I -n -w PM_RESUME ./,0.3131399154663086,6,
|
||||
linux_word,1,5,ugrep,ugrep -r --ignore-files --no-hidden -I -n -w PM_RESUME ./,0.2834908962249756,6,
|
||||
linux_word,1,5,ugrep,ugrep -r --ignore-files --no-hidden -I -n -w PM_RESUME ./,0.2899782657623291,6,
|
||||
linux_unicode_greek,1,5,rg,rg -n \p{Greek},0.2624638080596924,105,
|
||||
linux_unicode_greek,1,5,rg,rg -n \p{Greek},0.26248669624328613,105,
|
||||
linux_unicode_greek,1,5,rg,rg -n \p{Greek},0.26514244079589844,105,
|
||||
linux_unicode_greek,1,5,rg,rg -n \p{Greek},0.26303768157958984,105,
|
||||
linux_unicode_greek,1,5,rg,rg -n \p{Greek},0.2612752914428711,105,
|
||||
linux_unicode_greek,1,5,ugrep,ugrep -r --ignore-files --no-hidden -I -n \p{Greek} ./,0.2842683792114258,105,
|
||||
linux_unicode_greek,1,5,ugrep,ugrep -r --ignore-files --no-hidden -I -n \p{Greek} ./,0.2718374729156494,105,
|
||||
linux_unicode_greek,1,5,ugrep,ugrep -r --ignore-files --no-hidden -I -n \p{Greek} ./,0.26900339126586914,105,
|
||||
linux_unicode_greek,1,5,ugrep,ugrep -r --ignore-files --no-hidden -I -n \p{Greek} ./,0.267728328704834,105,
|
||||
linux_unicode_greek,1,5,ugrep,ugrep -r --ignore-files --no-hidden -I -n \p{Greek} ./,0.27019381523132324,105,
|
||||
linux_unicode_greek_casei,1,5,rg,rg -n -i \p{Greek},0.24460315704345703,225,
|
||||
linux_unicode_greek_casei,1,5,rg,rg -n -i \p{Greek},0.2752077579498291,225,
|
||||
linux_unicode_greek_casei,1,5,rg,rg -n -i \p{Greek},0.25118350982666016,225,
|
||||
linux_unicode_greek_casei,1,5,rg,rg -n -i \p{Greek},0.2610158920288086,225,
|
||||
linux_unicode_greek_casei,1,5,rg,rg -n -i \p{Greek},0.24675774574279785,225,
|
||||
linux_unicode_greek_casei,1,5,ugrep,ugrep -r --ignore-files --no-hidden -I -n -i \p{Greek} ./,0.26882410049438477,105,
|
||||
linux_unicode_greek_casei,1,5,ugrep,ugrep -r --ignore-files --no-hidden -I -n -i \p{Greek} ./,0.2770118713378906,105,
|
||||
linux_unicode_greek_casei,1,5,ugrep,ugrep -r --ignore-files --no-hidden -I -n -i \p{Greek} ./,0.2694118022918701,105,
|
||||
linux_unicode_greek_casei,1,5,ugrep,ugrep -r --ignore-files --no-hidden -I -n -i \p{Greek} ./,0.2690916061401367,105,
|
||||
linux_unicode_greek_casei,1,5,ugrep,ugrep -r --ignore-files --no-hidden -I -n -i \p{Greek} ./,0.2686276435852051,105,
|
||||
linux_unicode_word,1,5,rg,rg -n \wAh,0.13727664947509766,229,
|
||||
linux_unicode_word,1,5,rg,rg -n \wAh,0.1450798511505127,229,
|
||||
linux_unicode_word,1,5,rg,rg -n \wAh,0.13819336891174316,229,
|
||||
linux_unicode_word,1,5,rg,rg -n \wAh,0.1422877311706543,229,
|
||||
linux_unicode_word,1,5,rg,rg -n \wAh,0.13657712936401367,229,
|
||||
linux_unicode_word,1,5,rg (ASCII),rg -n (?-u)\wAh,0.1487271785736084,216,
|
||||
linux_unicode_word,1,5,rg (ASCII),rg -n (?-u)\wAh,0.1459641456604004,216,
|
||||
linux_unicode_word,1,5,rg (ASCII),rg -n (?-u)\wAh,0.13515281677246094,216,
|
||||
linux_unicode_word,1,5,rg (ASCII),rg -n (?-u)\wAh,0.12724566459655762,216,
|
||||
linux_unicode_word,1,5,rg (ASCII),rg -n (?-u)\wAh,0.13360023498535156,216,
|
||||
linux_unicode_word,1,5,ag (ASCII),ag -s \wAh,1.2160453796386719,216,
|
||||
linux_unicode_word,1,5,ag (ASCII),ag -s \wAh,1.230163335800171,216,
|
||||
linux_unicode_word,1,5,ag (ASCII),ag -s \wAh,1.2649273872375488,216,
|
||||
linux_unicode_word,1,5,ag (ASCII),ag -s \wAh,1.224984884262085,216,
|
||||
linux_unicode_word,1,5,ag (ASCII),ag -s \wAh,1.4559555053710938,216,
|
||||
linux_unicode_word,1,5,git grep,git grep -E -I -n \wAh,8.233768224716187,229,LC_ALL=en_US.UTF-8
|
||||
linux_unicode_word,1,5,git grep,git grep -E -I -n \wAh,8.191053867340088,229,LC_ALL=en_US.UTF-8
|
||||
linux_unicode_word,1,5,git grep,git grep -E -I -n \wAh,8.175920724868774,229,LC_ALL=en_US.UTF-8
|
||||
linux_unicode_word,1,5,git grep,git grep -E -I -n \wAh,8.167959451675415,229,LC_ALL=en_US.UTF-8
|
||||
linux_unicode_word,1,5,git grep,git grep -E -I -n \wAh,8.1710205078125,229,LC_ALL=en_US.UTF-8
|
||||
linux_unicode_word,1,5,git grep (ASCII),git grep -E -I -n \wAh,2.3747494220733643,216,LC_ALL=C
|
||||
linux_unicode_word,1,5,git grep (ASCII),git grep -E -I -n \wAh,2.3170926570892334,216,LC_ALL=C
|
||||
linux_unicode_word,1,5,git grep (ASCII),git grep -E -I -n \wAh,2.3430888652801514,216,LC_ALL=C
|
||||
linux_unicode_word,1,5,git grep (ASCII),git grep -E -I -n \wAh,2.3219168186187744,216,LC_ALL=C
|
||||
linux_unicode_word,1,5,git grep (ASCII),git grep -E -I -n \wAh,2.3155832290649414,216,LC_ALL=C
|
||||
linux_unicode_word,1,5,ugrep,ugrep -r --ignore-files --no-hidden -I -n \wAh ./,0.2722008228302002,229,
|
||||
linux_unicode_word,1,5,ugrep,ugrep -r --ignore-files --no-hidden -I -n \wAh ./,0.27547430992126465,229,
|
||||
linux_unicode_word,1,5,ugrep,ugrep -r --ignore-files --no-hidden -I -n \wAh ./,0.2771613597869873,229,
|
||||
linux_unicode_word,1,5,ugrep,ugrep -r --ignore-files --no-hidden -I -n \wAh ./,0.27692317962646484,229,
|
||||
linux_unicode_word,1,5,ugrep,ugrep -r --ignore-files --no-hidden -I -n \wAh ./,0.27749085426330566,229,
|
||||
linux_unicode_word,1,5,ugrep (ASCII),ugrep -r --ignore-files --no-hidden -I -n -U \wAh ./,0.2744929790496826,216,
|
||||
linux_unicode_word,1,5,ugrep (ASCII),ugrep -r --ignore-files --no-hidden -I -n -U \wAh ./,0.2725999355316162,216,
|
||||
linux_unicode_word,1,5,ugrep (ASCII),ugrep -r --ignore-files --no-hidden -I -n -U \wAh ./,0.27443718910217285,216,
|
||||
linux_unicode_word,1,5,ugrep (ASCII),ugrep -r --ignore-files --no-hidden -I -n -U \wAh ./,0.2668039798736572,216,
|
||||
linux_unicode_word,1,5,ugrep (ASCII),ugrep -r --ignore-files --no-hidden -I -n -U \wAh ./,0.27918338775634766,216,
|
||||
linux_no_literal,1,5,rg,rg -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.38802123069763184,611,
|
||||
linux_no_literal,1,5,rg,rg -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.40351152420043945,611,
|
||||
linux_no_literal,1,5,rg,rg -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.40592288970947266,611,
|
||||
linux_no_literal,1,5,rg,rg -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.40622901916503906,611,
|
||||
linux_no_literal,1,5,rg,rg -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.40683722496032715,611,
|
||||
linux_no_literal,1,5,rg (ASCII),rg -n (?-u)\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.2553420066833496,610,
|
||||
linux_no_literal,1,5,rg (ASCII),rg -n (?-u)\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.2511327266693115,610,
|
||||
linux_no_literal,1,5,rg (ASCII),rg -n (?-u)\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.2530384063720703,610,
|
||||
linux_no_literal,1,5,rg (ASCII),rg -n (?-u)\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.2420644760131836,610,
|
||||
linux_no_literal,1,5,rg (ASCII),rg -n (?-u)\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.2691671848297119,610,
|
||||
linux_no_literal,1,5,ag (ASCII),ag -s \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.9446702003479004,971,
|
||||
linux_no_literal,1,5,ag (ASCII),ag -s \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.9380638599395752,971,
|
||||
linux_no_literal,1,5,ag (ASCII),ag -s \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.9273786544799805,971,
|
||||
linux_no_literal,1,5,ag (ASCII),ag -s \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.9271430969238281,971,
|
||||
linux_no_literal,1,5,ag (ASCII),ag -s \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.9307007789611816,971,
|
||||
linux_no_literal,1,5,git grep,git grep -E -I -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},14.531656265258789,611,LC_ALL=en_US.UTF-8
|
||||
linux_no_literal,1,5,git grep,git grep -E -I -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},14.570266008377075,611,LC_ALL=en_US.UTF-8
|
||||
linux_no_literal,1,5,git grep,git grep -E -I -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},14.51328158378601,611,LC_ALL=en_US.UTF-8
|
||||
linux_no_literal,1,5,git grep,git grep -E -I -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},14.644389629364014,611,LC_ALL=en_US.UTF-8
|
||||
linux_no_literal,1,5,git grep,git grep -E -I -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},14.694648027420044,611,LC_ALL=en_US.UTF-8
|
||||
linux_no_literal,1,5,git grep (ASCII),git grep -E -I -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},3.164829730987549,610,LC_ALL=C
|
||||
linux_no_literal,1,5,git grep (ASCII),git grep -E -I -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},3.2377045154571533,610,LC_ALL=C
|
||||
linux_no_literal,1,5,git grep (ASCII),git grep -E -I -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},3.1798932552337646,610,LC_ALL=C
|
||||
linux_no_literal,1,5,git grep (ASCII),git grep -E -I -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},3.142343044281006,610,LC_ALL=C
|
||||
linux_no_literal,1,5,git grep (ASCII),git grep -E -I -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},3.185952663421631,610,LC_ALL=C
|
||||
linux_no_literal,1,5,ugrep,ugrep -r --ignore-files --no-hidden -I -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} ./,6.241358041763306,973,
|
||||
linux_no_literal,1,5,ugrep,ugrep -r --ignore-files --no-hidden -I -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} ./,6.213250637054443,973,
|
||||
linux_no_literal,1,5,ugrep,ugrep -r --ignore-files --no-hidden -I -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} ./,6.242088079452515,973,
|
||||
linux_no_literal,1,5,ugrep,ugrep -r --ignore-files --no-hidden -I -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} ./,6.126717567443848,973,
|
||||
linux_no_literal,1,5,ugrep,ugrep -r --ignore-files --no-hidden -I -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} ./,6.15744948387146,973,
|
||||
linux_no_literal,1,5,ugrep (ASCII),ugrep -r --ignore-files --no-hidden -I -n -U \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} ./,0.3647449016571045,972,
|
||||
linux_no_literal,1,5,ugrep (ASCII),ugrep -r --ignore-files --no-hidden -I -n -U \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} ./,0.36277341842651367,972,
|
||||
linux_no_literal,1,5,ugrep (ASCII),ugrep -r --ignore-files --no-hidden -I -n -U \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} ./,0.3670034408569336,972,
|
||||
linux_no_literal,1,5,ugrep (ASCII),ugrep -r --ignore-files --no-hidden -I -n -U \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} ./,0.3563535213470459,972,
|
||||
linux_no_literal,1,5,ugrep (ASCII),ugrep -r --ignore-files --no-hidden -I -n -U \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} ./,0.36490702629089355,972,
|
||||
linux_alternates,1,5,rg,rg -n ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.14299488067626953,112,
|
||||
linux_alternates,1,5,rg,rg -n ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.15548348426818848,112,
|
||||
linux_alternates,1,5,rg,rg -n ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.14477276802062988,112,
|
||||
linux_alternates,1,5,rg,rg -n ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.12926578521728516,112,
|
||||
linux_alternates,1,5,rg,rg -n ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.13896560668945312,112,
|
||||
linux_alternates,1,5,ag,ag -s ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.9893472194671631,112,
|
||||
linux_alternates,1,5,ag,ag -s ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,1.016686201095581,112,
|
||||
linux_alternates,1,5,ag,ag -s ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.9755496978759766,112,
|
||||
linux_alternates,1,5,ag,ag -s ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.9718713760375977,112,
|
||||
linux_alternates,1,5,ag,ag -s ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,1.0030465126037598,112,
|
||||
linux_alternates,1,5,git grep,git grep -E -I -n ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.5737886428833008,112,LC_ALL=C
|
||||
linux_alternates,1,5,git grep,git grep -E -I -n ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.562185525894165,112,LC_ALL=C
|
||||
linux_alternates,1,5,git grep,git grep -E -I -n ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.5762710571289062,112,LC_ALL=C
|
||||
linux_alternates,1,5,git grep,git grep -E -I -n ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.5561251640319824,112,LC_ALL=C
|
||||
linux_alternates,1,5,git grep,git grep -E -I -n ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.5849525928497314,112,LC_ALL=C
|
||||
linux_alternates,1,5,ugrep,ugrep -r --ignore-files --no-hidden -I -n ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT ./,0.3186032772064209,112,
|
||||
linux_alternates,1,5,ugrep,ugrep -r --ignore-files --no-hidden -I -n ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT ./,0.2896738052368164,112,
|
||||
linux_alternates,1,5,ugrep,ugrep -r --ignore-files --no-hidden -I -n ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT ./,0.28582000732421875,112,
|
||||
linux_alternates,1,5,ugrep,ugrep -r --ignore-files --no-hidden -I -n ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT ./,0.2837677001953125,112,
|
||||
linux_alternates,1,5,ugrep,ugrep -r --ignore-files --no-hidden -I -n ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT ./,0.27143406867980957,112,
|
||||
linux_alternates_casei,1,5,rg,rg -n -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.21955585479736328,203,
|
||||
linux_alternates_casei,1,5,rg,rg -n -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.22631502151489258,203,
|
||||
linux_alternates_casei,1,5,rg,rg -n -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.23458337783813477,203,
|
||||
linux_alternates_casei,1,5,rg,rg -n -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.21781086921691895,203,
|
||||
linux_alternates_casei,1,5,rg,rg -n -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.231217622756958,203,
|
||||
linux_alternates_casei,1,5,ag,ag -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.7170076370239258,203,
|
||||
linux_alternates_casei,1,5,ag,ag -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.7032256126403809,203,
|
||||
linux_alternates_casei,1,5,ag,ag -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.6868026256561279,203,
|
||||
linux_alternates_casei,1,5,ag,ag -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.6965539455413818,203,
|
||||
linux_alternates_casei,1,5,ag,ag -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.6966633796691895,203,
|
||||
linux_alternates_casei,1,5,git grep,git grep -E -I -n -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.9774580001831055,203,LC_ALL=C
|
||||
linux_alternates_casei,1,5,git grep,git grep -E -I -n -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.9654648303985596,203,LC_ALL=C
|
||||
linux_alternates_casei,1,5,git grep,git grep -E -I -n -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.967714786529541,203,LC_ALL=C
|
||||
linux_alternates_casei,1,5,git grep,git grep -E -I -n -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.9789888858795166,203,LC_ALL=C
|
||||
linux_alternates_casei,1,5,git grep,git grep -E -I -n -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.9938976764678955,203,LC_ALL=C
|
||||
linux_alternates_casei,1,5,ugrep,ugrep -r --ignore-files --no-hidden -I -n -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT ./,0.2825000286102295,203,
|
||||
linux_alternates_casei,1,5,ugrep,ugrep -r --ignore-files --no-hidden -I -n -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT ./,0.27024054527282715,203,
|
||||
linux_alternates_casei,1,5,ugrep,ugrep -r --ignore-files --no-hidden -I -n -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT ./,0.27353668212890625,203,
|
||||
linux_alternates_casei,1,5,ugrep,ugrep -r --ignore-files --no-hidden -I -n -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT ./,0.27333736419677734,203,
|
||||
linux_alternates_casei,1,5,ugrep,ugrep -r --ignore-files --no-hidden -I -n -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT ./,0.2730555534362793,203,
|
||||
subtitles_en_literal,1,5,rg,rg Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,0.2259538173675537,830,
|
||||
subtitles_en_literal,1,5,rg,rg Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,0.22034168243408203,830,
|
||||
subtitles_en_literal,1,5,rg,rg Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,0.22986674308776855,830,
|
||||
subtitles_en_literal,1,5,rg,rg Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,0.22815775871276855,830,
|
||||
subtitles_en_literal,1,5,rg,rg Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,0.2238922119140625,830,
|
||||
subtitles_en_literal,1,5,rg (no mmap),rg --no-mmap Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,0.36427783966064453,830,
|
||||
subtitles_en_literal,1,5,rg (no mmap),rg --no-mmap Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,0.37499117851257324,830,
|
||||
subtitles_en_literal,1,5,rg (no mmap),rg --no-mmap Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,0.36223769187927246,830,
|
||||
subtitles_en_literal,1,5,rg (no mmap),rg --no-mmap Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,0.3646128177642822,830,
|
||||
subtitles_en_literal,1,5,rg (no mmap),rg --no-mmap Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,0.36281347274780273,830,
|
||||
subtitles_en_literal,1,5,grep,grep Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,0.8064453601837158,830,LC_ALL=C
|
||||
subtitles_en_literal,1,5,grep,grep Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,0.8001935482025146,830,LC_ALL=C
|
||||
subtitles_en_literal,1,5,grep,grep Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,0.8018591403961182,830,LC_ALL=C
|
||||
subtitles_en_literal,1,5,grep,grep Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,0.7978458404541016,830,LC_ALL=C
|
||||
subtitles_en_literal,1,5,grep,grep Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,0.7912843227386475,830,LC_ALL=C
|
||||
subtitles_en_literal,1,5,rg (lines),rg -n Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,0.31099891662597656,830,
|
||||
subtitles_en_literal,1,5,rg (lines),rg -n Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,0.3145768642425537,830,
|
||||
subtitles_en_literal,1,5,rg (lines),rg -n Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,0.30507469177246094,830,
|
||||
subtitles_en_literal,1,5,rg (lines),rg -n Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,0.3450126647949219,830,
|
||||
subtitles_en_literal,1,5,rg (lines),rg -n Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,0.31091880798339844,830,
|
||||
subtitles_en_literal,1,5,ag (lines),ag -s Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,2.5518174171447754,830,
|
||||
subtitles_en_literal,1,5,ag (lines),ag -s Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,2.551568031311035,830,
|
||||
subtitles_en_literal,1,5,ag (lines),ag -s Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,2.5306365489959717,830,
|
||||
subtitles_en_literal,1,5,ag (lines),ag -s Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,2.537529468536377,830,
|
||||
subtitles_en_literal,1,5,ag (lines),ag -s Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,2.5627124309539795,830,
|
||||
subtitles_en_literal,1,5,grep (lines),grep -n Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,1.2934913635253906,830,LC_ALL=C
|
||||
subtitles_en_literal,1,5,grep (lines),grep -n Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,1.2990975379943848,830,LC_ALL=C
|
||||
subtitles_en_literal,1,5,grep (lines),grep -n Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,1.2942156791687012,830,LC_ALL=C
|
||||
subtitles_en_literal,1,5,grep (lines),grep -n Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,1.2887969017028809,830,LC_ALL=C
|
||||
subtitles_en_literal,1,5,grep (lines),grep -n Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,1.2922444343566895,830,LC_ALL=C
|
||||
subtitles_en_literal,1,5,ugrep (lines),ugrep -n Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,0.3939177989959717,830,
|
||||
subtitles_en_literal,1,5,ugrep (lines),ugrep -n Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,0.3916018009185791,830,
|
||||
subtitles_en_literal,1,5,ugrep (lines),ugrep -n Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,0.40460968017578125,830,
|
||||
subtitles_en_literal,1,5,ugrep (lines),ugrep -n Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,0.41738367080688477,830,
|
||||
subtitles_en_literal,1,5,ugrep (lines),ugrep -n Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,0.41339826583862305,830,
|
||||
subtitles_en_literal_casei,1,5,rg,rg -i Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,0.37847900390625,871,
|
||||
subtitles_en_literal_casei,1,5,rg,rg -i Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,0.3692331314086914,871,
|
||||
subtitles_en_literal_casei,1,5,rg,rg -i Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,0.40493106842041016,871,
|
||||
subtitles_en_literal_casei,1,5,rg,rg -i Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,0.4074361324310303,871,
|
||||
subtitles_en_literal_casei,1,5,rg,rg -i Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,0.4297189712524414,871,
|
||||
subtitles_en_literal_casei,1,5,grep,grep -i Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,3.63842511177063,871,LC_ALL=en_US.UTF-8
|
||||
subtitles_en_literal_casei,1,5,grep,grep -i Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,3.6366350650787354,871,LC_ALL=en_US.UTF-8
|
||||
subtitles_en_literal_casei,1,5,grep,grep -i Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,3.6044440269470215,871,LC_ALL=en_US.UTF-8
|
||||
subtitles_en_literal_casei,1,5,grep,grep -i Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,3.6123127937316895,871,LC_ALL=en_US.UTF-8
|
||||
subtitles_en_literal_casei,1,5,grep,grep -i Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,3.6119742393493652,871,LC_ALL=en_US.UTF-8
|
||||
subtitles_en_literal_casei,1,5,grep (ASCII),grep -E -i Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,0.917151689529419,871,LC_ALL=C
|
||||
subtitles_en_literal_casei,1,5,grep (ASCII),grep -E -i Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,0.9379458427429199,871,LC_ALL=C
|
||||
subtitles_en_literal_casei,1,5,grep (ASCII),grep -E -i Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,0.9703550338745117,871,LC_ALL=C
|
||||
subtitles_en_literal_casei,1,5,grep (ASCII),grep -E -i Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,0.9309988021850586,871,LC_ALL=C
|
||||
subtitles_en_literal_casei,1,5,grep (ASCII),grep -E -i Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,0.9328129291534424,871,LC_ALL=C
|
||||
subtitles_en_literal_casei,1,5,rg (lines),rg -n -i Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,0.5196061134338379,871,
|
||||
subtitles_en_literal_casei,1,5,rg (lines),rg -n -i Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,0.5225450992584229,871,
|
||||
subtitles_en_literal_casei,1,5,rg (lines),rg -n -i Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,0.4856400489807129,871,
|
||||
subtitles_en_literal_casei,1,5,rg (lines),rg -n -i Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,0.5204241275787354,871,
|
||||
subtitles_en_literal_casei,1,5,rg (lines),rg -n -i Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,0.5224106311798096,871,
|
||||
subtitles_en_literal_casei,1,5,ag (lines) (ASCII),ag -i Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,2.5935003757476807,871,
|
||||
subtitles_en_literal_casei,1,5,ag (lines) (ASCII),ag -i Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,2.640918016433716,871,
|
||||
subtitles_en_literal_casei,1,5,ag (lines) (ASCII),ag -i Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,2.602182626724243,871,
|
||||
subtitles_en_literal_casei,1,5,ag (lines) (ASCII),ag -i Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,2.575654983520508,871,
|
||||
subtitles_en_literal_casei,1,5,ag (lines) (ASCII),ag -i Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,2.5606820583343506,871,
|
||||
subtitles_en_literal_casei,1,5,ugrep (lines),ugrep -n -i Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,1.0980546474456787,871,
|
||||
subtitles_en_literal_casei,1,5,ugrep (lines),ugrep -n -i Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,1.095038652420044,871,
|
||||
subtitles_en_literal_casei,1,5,ugrep (lines),ugrep -n -i Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,1.0974702835083008,871,
|
||||
subtitles_en_literal_casei,1,5,ugrep (lines),ugrep -n -i Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,1.113879919052124,871,
|
||||
subtitles_en_literal_casei,1,5,ugrep (lines),ugrep -n -i Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,1.1096961498260498,871,
|
||||
subtitles_en_literal_word,1,5,rg (ASCII),rg -n (?-u:\b)Sherlock Holmes(?-u:\b) /tmp/benchsuite/subtitles/en.sample.txt,0.3175060749053955,830,
|
||||
subtitles_en_literal_word,1,5,rg (ASCII),rg -n (?-u:\b)Sherlock Holmes(?-u:\b) /tmp/benchsuite/subtitles/en.sample.txt,0.321685791015625,830,
|
||||
subtitles_en_literal_word,1,5,rg (ASCII),rg -n (?-u:\b)Sherlock Holmes(?-u:\b) /tmp/benchsuite/subtitles/en.sample.txt,0.30799293518066406,830,
|
||||
subtitles_en_literal_word,1,5,rg (ASCII),rg -n (?-u:\b)Sherlock Holmes(?-u:\b) /tmp/benchsuite/subtitles/en.sample.txt,0.31140613555908203,830,
|
||||
subtitles_en_literal_word,1,5,rg (ASCII),rg -n (?-u:\b)Sherlock Holmes(?-u:\b) /tmp/benchsuite/subtitles/en.sample.txt,0.32439208030700684,830,
|
||||
subtitles_en_literal_word,1,5,ag (ASCII),ag -sw Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,2.5530965328216553,830,
|
||||
subtitles_en_literal_word,1,5,ag (ASCII),ag -sw Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,2.5833561420440674,830,
|
||||
subtitles_en_literal_word,1,5,ag (ASCII),ag -sw Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,2.5765762329101562,830,
|
||||
subtitles_en_literal_word,1,5,ag (ASCII),ag -sw Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,2.610975742340088,830,
|
||||
subtitles_en_literal_word,1,5,ag (ASCII),ag -sw Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,2.5965471267700195,830,
|
||||
subtitles_en_literal_word,1,5,grep (ASCII),grep -nw Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,1.3212966918945312,830,LC_ALL=C
|
||||
subtitles_en_literal_word,1,5,grep (ASCII),grep -nw Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,1.311401128768921,830,LC_ALL=C
|
||||
subtitles_en_literal_word,1,5,grep (ASCII),grep -nw Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,1.298889398574829,830,LC_ALL=C
|
||||
subtitles_en_literal_word,1,5,grep (ASCII),grep -nw Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,1.316542148590088,830,LC_ALL=C
|
||||
subtitles_en_literal_word,1,5,grep (ASCII),grep -nw Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,1.3483500480651855,830,LC_ALL=C
|
||||
subtitles_en_literal_word,1,5,ugrep (ASCII),ugrep -nw Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,0.4127326011657715,830,
|
||||
subtitles_en_literal_word,1,5,ugrep (ASCII),ugrep -nw Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,0.4138009548187256,830,
|
||||
subtitles_en_literal_word,1,5,ugrep (ASCII),ugrep -nw Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,0.4203319549560547,830,
|
||||
subtitles_en_literal_word,1,5,ugrep (ASCII),ugrep -nw Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,0.4127979278564453,830,
|
||||
subtitles_en_literal_word,1,5,ugrep (ASCII),ugrep -nw Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,0.41126537322998047,830,
|
||||
subtitles_en_literal_word,1,5,rg,rg -nw Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,0.3251321315765381,830,
|
||||
subtitles_en_literal_word,1,5,rg,rg -nw Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,0.31773900985717773,830,
|
||||
subtitles_en_literal_word,1,5,rg,rg -nw Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,0.32987523078918457,830,
|
||||
subtitles_en_literal_word,1,5,rg,rg -nw Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,0.32228970527648926,830,
|
||||
subtitles_en_literal_word,1,5,rg,rg -nw Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,0.3207516670227051,830,
|
||||
subtitles_en_literal_word,1,5,grep,grep -nw Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,1.2946159839630127,830,LC_ALL=en_US.UTF-8
|
||||
subtitles_en_literal_word,1,5,grep,grep -nw Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,1.333972454071045,830,LC_ALL=en_US.UTF-8
|
||||
subtitles_en_literal_word,1,5,grep,grep -nw Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,1.3002500534057617,830,LC_ALL=en_US.UTF-8
|
||||
subtitles_en_literal_word,1,5,grep,grep -nw Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,1.347550630569458,830,LC_ALL=en_US.UTF-8
|
||||
subtitles_en_literal_word,1,5,grep,grep -nw Sherlock Holmes /tmp/benchsuite/subtitles/en.sample.txt,1.306572675704956,830,LC_ALL=en_US.UTF-8
|
||||
subtitles_en_alternate,1,5,rg (lines),rg -n Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/en.sample.txt,0.4178187847137451,1094,
|
||||
subtitles_en_alternate,1,5,rg (lines),rg -n Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/en.sample.txt,0.44626832008361816,1094,
|
||||
subtitles_en_alternate,1,5,rg (lines),rg -n Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/en.sample.txt,0.44959425926208496,1094,
|
||||
subtitles_en_alternate,1,5,rg (lines),rg -n Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/en.sample.txt,0.38634324073791504,1094,
|
||||
subtitles_en_alternate,1,5,rg (lines),rg -n Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/en.sample.txt,0.4460463523864746,1094,
|
||||
subtitles_en_alternate,1,5,ag (lines),ag -s Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/en.sample.txt,3.6045682430267334,1094,
|
||||
subtitles_en_alternate,1,5,ag (lines),ag -s Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/en.sample.txt,3.6191344261169434,1094,
|
||||
subtitles_en_alternate,1,5,ag (lines),ag -s Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/en.sample.txt,3.579859972000122,1094,
|
||||
subtitles_en_alternate,1,5,ag (lines),ag -s Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/en.sample.txt,3.6637580394744873,1094,
|
||||
subtitles_en_alternate,1,5,ag (lines),ag -s Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/en.sample.txt,3.5728182792663574,1094,
|
||||
subtitles_en_alternate,1,5,grep (lines),grep -E -n Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/en.sample.txt,3.323948621749878,1094,LC_ALL=C
|
||||
subtitles_en_alternate,1,5,grep (lines),grep -E -n Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/en.sample.txt,3.3338429927825928,1094,LC_ALL=C
|
||||
subtitles_en_alternate,1,5,grep (lines),grep -E -n Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/en.sample.txt,3.34714937210083,1094,LC_ALL=C
|
||||
subtitles_en_alternate,1,5,grep (lines),grep -E -n Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/en.sample.txt,3.314117908477783,1094,LC_ALL=C
|
||||
subtitles_en_alternate,1,5,grep (lines),grep -E -n Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/en.sample.txt,3.303710699081421,1094,LC_ALL=C
|
||||
subtitles_en_alternate,1,5,ugrep (lines),ugrep -n Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/en.sample.txt,1.147033452987671,1094,
|
||||
subtitles_en_alternate,1,5,ugrep (lines),ugrep -n Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/en.sample.txt,1.2054970264434814,1094,
|
||||
subtitles_en_alternate,1,5,ugrep (lines),ugrep -n Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/en.sample.txt,1.0998892784118652,1094,
|
||||
subtitles_en_alternate,1,5,ugrep (lines),ugrep -n Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/en.sample.txt,1.101989984512329,1094,
|
||||
subtitles_en_alternate,1,5,ugrep (lines),ugrep -n Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/en.sample.txt,1.110612154006958,1094,
|
||||
subtitles_en_alternate,1,5,rg,rg Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/en.sample.txt,0.29009222984313965,1094,
|
||||
subtitles_en_alternate,1,5,rg,rg Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/en.sample.txt,0.29300451278686523,1094,
|
||||
subtitles_en_alternate,1,5,rg,rg Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/en.sample.txt,0.3199915885925293,1094,
|
||||
subtitles_en_alternate,1,5,rg,rg Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/en.sample.txt,0.3187263011932373,1094,
|
||||
subtitles_en_alternate,1,5,rg,rg Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/en.sample.txt,0.30321288108825684,1094,
|
||||
subtitles_en_alternate,1,5,grep,grep -E Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/en.sample.txt,2.813009738922119,1094,LC_ALL=C
|
||||
subtitles_en_alternate,1,5,grep,grep -E Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/en.sample.txt,2.80930757522583,1094,LC_ALL=C
|
||||
subtitles_en_alternate,1,5,grep,grep -E Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/en.sample.txt,2.814509153366089,1094,LC_ALL=C
|
||||
subtitles_en_alternate,1,5,grep,grep -E Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/en.sample.txt,2.8390560150146484,1094,LC_ALL=C
|
||||
subtitles_en_alternate,1,5,grep,grep -E Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/en.sample.txt,2.830871105194092,1094,LC_ALL=C
|
||||
subtitles_en_alternate_casei,1,5,ag (ASCII),ag -s -i Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/en.sample.txt,6.166510343551636,1136,
|
||||
subtitles_en_alternate_casei,1,5,ag (ASCII),ag -s -i Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/en.sample.txt,6.192304849624634,1136,
|
||||
subtitles_en_alternate_casei,1,5,ag (ASCII),ag -s -i Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/en.sample.txt,6.185140132904053,1136,
|
||||
subtitles_en_alternate_casei,1,5,ag (ASCII),ag -s -i Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/en.sample.txt,6.20132040977478,1136,
|
||||
subtitles_en_alternate_casei,1,5,ag (ASCII),ag -s -i Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/en.sample.txt,6.159040451049805,1136,
|
||||
subtitles_en_alternate_casei,1,5,grep (ASCII),grep -E -ni Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/en.sample.txt,5.523138999938965,1136,LC_ALL=C
|
||||
subtitles_en_alternate_casei,1,5,grep (ASCII),grep -E -ni Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/en.sample.txt,5.512346267700195,1136,LC_ALL=C
|
||||
subtitles_en_alternate_casei,1,5,grep (ASCII),grep -E -ni Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/en.sample.txt,5.562563896179199,1136,LC_ALL=C
|
||||
subtitles_en_alternate_casei,1,5,grep (ASCII),grep -E -ni Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/en.sample.txt,5.533160448074341,1136,LC_ALL=C
|
||||
subtitles_en_alternate_casei,1,5,grep (ASCII),grep -E -ni Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/en.sample.txt,5.504830837249756,1136,LC_ALL=C
|
||||
subtitles_en_alternate_casei,1,5,ugrep (ASCII),ugrep -n -i Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/en.sample.txt,1.1120033264160156,1136,
|
||||
subtitles_en_alternate_casei,1,5,ugrep (ASCII),ugrep -n -i Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/en.sample.txt,1.1150739192962646,1136,
|
||||
subtitles_en_alternate_casei,1,5,ugrep (ASCII),ugrep -n -i Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/en.sample.txt,1.1018304824829102,1136,
|
||||
subtitles_en_alternate_casei,1,5,ugrep (ASCII),ugrep -n -i Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/en.sample.txt,1.1106996536254883,1136,
|
||||
subtitles_en_alternate_casei,1,5,ugrep (ASCII),ugrep -n -i Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/en.sample.txt,1.0994808673858643,1136,
|
||||
subtitles_en_alternate_casei,1,5,rg,rg -n -i Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/en.sample.txt,0.8494291305541992,1136,
|
||||
subtitles_en_alternate_casei,1,5,rg,rg -n -i Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/en.sample.txt,0.7878148555755615,1136,
|
||||
subtitles_en_alternate_casei,1,5,rg,rg -n -i Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/en.sample.txt,0.8290884494781494,1136,
|
||||
subtitles_en_alternate_casei,1,5,rg,rg -n -i Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/en.sample.txt,0.7409803867340088,1136,
|
||||
subtitles_en_alternate_casei,1,5,rg,rg -n -i Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/en.sample.txt,0.7880558967590332,1136,
|
||||
subtitles_en_alternate_casei,1,5,grep,grep -E -ni Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/en.sample.txt,5.5523765087127686,1136,LC_ALL=en_US.UTF-8
|
||||
subtitles_en_alternate_casei,1,5,grep,grep -E -ni Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/en.sample.txt,5.527086019515991,1136,LC_ALL=en_US.UTF-8
|
||||
subtitles_en_alternate_casei,1,5,grep,grep -E -ni Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/en.sample.txt,5.740911483764648,1136,LC_ALL=en_US.UTF-8
|
||||
subtitles_en_alternate_casei,1,5,grep,grep -E -ni Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/en.sample.txt,5.520638465881348,1136,LC_ALL=en_US.UTF-8
|
||||
subtitles_en_alternate_casei,1,5,grep,grep -E -ni Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /tmp/benchsuite/subtitles/en.sample.txt,5.52523398399353,1136,LC_ALL=en_US.UTF-8
|
||||
subtitles_en_surrounding_words,1,5,rg,rg -n \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/en.sample.txt,0.3353078365325928,483,
|
||||
subtitles_en_surrounding_words,1,5,rg,rg -n \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/en.sample.txt,0.3248591423034668,483,
|
||||
subtitles_en_surrounding_words,1,5,rg,rg -n \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/en.sample.txt,0.33918261528015137,483,
|
||||
subtitles_en_surrounding_words,1,5,rg,rg -n \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/en.sample.txt,0.33177971839904785,483,
|
||||
subtitles_en_surrounding_words,1,5,rg,rg -n \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/en.sample.txt,0.34472131729125977,483,
|
||||
subtitles_en_surrounding_words,1,5,grep,grep -E -n \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/en.sample.txt,1.7516274452209473,483,LC_ALL=en_US.UTF-8
|
||||
subtitles_en_surrounding_words,1,5,grep,grep -E -n \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/en.sample.txt,1.7489221096038818,483,LC_ALL=en_US.UTF-8
|
||||
subtitles_en_surrounding_words,1,5,grep,grep -E -n \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/en.sample.txt,1.7574889659881592,483,LC_ALL=en_US.UTF-8
|
||||
subtitles_en_surrounding_words,1,5,grep,grep -E -n \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/en.sample.txt,1.813244342803955,483,LC_ALL=en_US.UTF-8
|
||||
subtitles_en_surrounding_words,1,5,grep,grep -E -n \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/en.sample.txt,1.750051498413086,483,LC_ALL=en_US.UTF-8
|
||||
subtitles_en_surrounding_words,1,5,ugrep,ugrep -n \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/en.sample.txt,70.12419986724854,489,
|
||||
subtitles_en_surrounding_words,1,5,ugrep,ugrep -n \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/en.sample.txt,70.26925611495972,489,
|
||||
subtitles_en_surrounding_words,1,5,ugrep,ugrep -n \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/en.sample.txt,70.56865787506104,489,
|
||||
subtitles_en_surrounding_words,1,5,ugrep,ugrep -n \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/en.sample.txt,70.12933135032654,489,
|
||||
subtitles_en_surrounding_words,1,5,ugrep,ugrep -n \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/en.sample.txt,70.07925295829773,489,
|
||||
subtitles_en_surrounding_words,1,5,rg (ASCII),rg -n (?-u)\w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/en.sample.txt,0.3309454917907715,483,
|
||||
subtitles_en_surrounding_words,1,5,rg (ASCII),rg -n (?-u)\w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/en.sample.txt,0.33062124252319336,483,
|
||||
subtitles_en_surrounding_words,1,5,rg (ASCII),rg -n (?-u)\w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/en.sample.txt,0.3292708396911621,483,
|
||||
subtitles_en_surrounding_words,1,5,rg (ASCII),rg -n (?-u)\w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/en.sample.txt,0.3300509452819824,483,
|
||||
subtitles_en_surrounding_words,1,5,rg (ASCII),rg -n (?-u)\w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/en.sample.txt,0.3252389430999756,483,
|
||||
subtitles_en_surrounding_words,1,5,ag (ASCII),ag -s \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/en.sample.txt,7.372813701629639,489,
|
||||
subtitles_en_surrounding_words,1,5,ag (ASCII),ag -s \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/en.sample.txt,7.338848114013672,489,
|
||||
subtitles_en_surrounding_words,1,5,ag (ASCII),ag -s \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/en.sample.txt,7.739792108535767,489,
|
||||
subtitles_en_surrounding_words,1,5,ag (ASCII),ag -s \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/en.sample.txt,7.302056074142456,489,
|
||||
subtitles_en_surrounding_words,1,5,ag (ASCII),ag -s \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/en.sample.txt,7.334207057952881,489,
|
||||
subtitles_en_surrounding_words,1,5,grep (ASCII),grep -E -n \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/en.sample.txt,1.7617950439453125,483,LC_ALL=C
|
||||
subtitles_en_surrounding_words,1,5,grep (ASCII),grep -E -n \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/en.sample.txt,1.7765378952026367,483,LC_ALL=C
|
||||
subtitles_en_surrounding_words,1,5,grep (ASCII),grep -E -n \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/en.sample.txt,1.7456245422363281,483,LC_ALL=C
|
||||
subtitles_en_surrounding_words,1,5,grep (ASCII),grep -E -n \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/en.sample.txt,1.748713731765747,483,LC_ALL=C
|
||||
subtitles_en_surrounding_words,1,5,grep (ASCII),grep -E -n \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/en.sample.txt,1.7846882343292236,483,LC_ALL=C
|
||||
subtitles_en_surrounding_words,1,5,ugrep (ASCII),ugrep -n -U \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/en.sample.txt,31.14370322227478,489,
|
||||
subtitles_en_surrounding_words,1,5,ugrep (ASCII),ugrep -n -U \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/en.sample.txt,31.543628454208374,489,
|
||||
subtitles_en_surrounding_words,1,5,ugrep (ASCII),ugrep -n -U \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/en.sample.txt,31.133421182632446,489,
|
||||
subtitles_en_surrounding_words,1,5,ugrep (ASCII),ugrep -n -U \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/en.sample.txt,31.149214506149292,489,
|
||||
subtitles_en_surrounding_words,1,5,ugrep (ASCII),ugrep -n -U \w+\s+Holmes\s+\w+ /tmp/benchsuite/subtitles/en.sample.txt,31.180144548416138,489,
|
||||
subtitles_en_no_literal,1,5,rg,rg -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/en.sample.txt,2.9173591136932373,22,
|
||||
subtitles_en_no_literal,1,5,rg,rg -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/en.sample.txt,2.867539644241333,22,
|
||||
subtitles_en_no_literal,1,5,rg,rg -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/en.sample.txt,2.9047088623046875,22,
|
||||
subtitles_en_no_literal,1,5,rg,rg -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/en.sample.txt,2.9265778064727783,22,
|
||||
subtitles_en_no_literal,1,5,rg,rg -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/en.sample.txt,2.874317169189453,22,
|
||||
subtitles_en_no_literal,1,5,ugrep,ugrep -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/en.sample.txt,24.619744777679443,309,
|
||||
subtitles_en_no_literal,1,5,ugrep,ugrep -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/en.sample.txt,24.622087240219116,309,
|
||||
subtitles_en_no_literal,1,5,ugrep,ugrep -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/en.sample.txt,24.770710468292236,309,
|
||||
subtitles_en_no_literal,1,5,ugrep,ugrep -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/en.sample.txt,24.60181713104248,309,
|
||||
subtitles_en_no_literal,1,5,ugrep,ugrep -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/en.sample.txt,24.678969383239746,309,
|
||||
subtitles_en_no_literal,1,5,rg (ASCII),rg -n (?-u)\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/en.sample.txt,2.676262140274048,22,
|
||||
subtitles_en_no_literal,1,5,rg (ASCII),rg -n (?-u)\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/en.sample.txt,2.673837184906006,22,
|
||||
subtitles_en_no_literal,1,5,rg (ASCII),rg -n (?-u)\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/en.sample.txt,2.667243003845215,22,
|
||||
subtitles_en_no_literal,1,5,rg (ASCII),rg -n (?-u)\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/en.sample.txt,2.667970657348633,22,
|
||||
subtitles_en_no_literal,1,5,rg (ASCII),rg -n (?-u)\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/en.sample.txt,2.6588196754455566,22,
|
||||
subtitles_en_no_literal,1,5,ag (ASCII),ag -s \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/en.sample.txt,10.786212682723999,302,
|
||||
subtitles_en_no_literal,1,5,ag (ASCII),ag -s \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/en.sample.txt,10.744041204452515,302,
|
||||
subtitles_en_no_literal,1,5,ag (ASCII),ag -s \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/en.sample.txt,10.74718165397644,302,
|
||||
subtitles_en_no_literal,1,5,ag (ASCII),ag -s \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/en.sample.txt,10.768681287765503,302,
|
||||
subtitles_en_no_literal,1,5,ag (ASCII),ag -s \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/en.sample.txt,10.772834777832031,302,
|
||||
subtitles_en_no_literal,1,5,grep (ASCII),grep -E -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/en.sample.txt,6.287469148635864,22,LC_ALL=C
|
||||
subtitles_en_no_literal,1,5,grep (ASCII),grep -E -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/en.sample.txt,6.243509769439697,22,LC_ALL=C
|
||||
subtitles_en_no_literal,1,5,grep (ASCII),grep -E -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/en.sample.txt,6.242478370666504,22,LC_ALL=C
|
||||
subtitles_en_no_literal,1,5,grep (ASCII),grep -E -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/en.sample.txt,6.2600791454315186,22,LC_ALL=C
|
||||
subtitles_en_no_literal,1,5,grep (ASCII),grep -E -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/en.sample.txt,6.2560741901397705,22,LC_ALL=C
|
||||
subtitles_en_no_literal,1,5,ugrep (ASCII),ugrep -n -U \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/en.sample.txt,4.670856237411499,302,
|
||||
subtitles_en_no_literal,1,5,ugrep (ASCII),ugrep -n -U \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/en.sample.txt,4.703561544418335,302,
|
||||
subtitles_en_no_literal,1,5,ugrep (ASCII),ugrep -n -U \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/en.sample.txt,4.675989627838135,302,
|
||||
subtitles_en_no_literal,1,5,ugrep (ASCII),ugrep -n -U \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/en.sample.txt,4.6688103675842285,302,
|
||||
subtitles_en_no_literal,1,5,ugrep (ASCII),ugrep -n -U \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/en.sample.txt,4.715432167053223,302,
|
||||
subtitles_ru_literal,1,5,rg,rg Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,0.20440673828125,583,
|
||||
subtitles_ru_literal,1,5,rg,rg Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,0.20561552047729492,583,
|
||||
subtitles_ru_literal,1,5,rg,rg Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,0.2381761074066162,583,
|
||||
subtitles_ru_literal,1,5,rg,rg Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,0.23102140426635742,583,
|
||||
subtitles_ru_literal,1,5,rg,rg Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,0.19649791717529297,583,
|
||||
subtitles_ru_literal,1,5,rg (no mmap),rg --no-mmap Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,0.3158297538757324,583,
|
||||
subtitles_ru_literal,1,5,rg (no mmap),rg --no-mmap Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,0.3136112689971924,583,
|
||||
subtitles_ru_literal,1,5,rg (no mmap),rg --no-mmap Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,0.32402992248535156,583,
|
||||
subtitles_ru_literal,1,5,rg (no mmap),rg --no-mmap Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,0.3248250484466553,583,
|
||||
subtitles_ru_literal,1,5,rg (no mmap),rg --no-mmap Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,0.3201103210449219,583,
|
||||
subtitles_ru_literal,1,5,grep,grep Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,0.7790360450744629,583,LC_ALL=C
|
||||
subtitles_ru_literal,1,5,grep,grep Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,0.7977695465087891,583,LC_ALL=C
|
||||
subtitles_ru_literal,1,5,grep,grep Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,0.7397308349609375,583,LC_ALL=C
|
||||
subtitles_ru_literal,1,5,grep,grep Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,0.7123947143554688,583,LC_ALL=C
|
||||
subtitles_ru_literal,1,5,grep,grep Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,0.711977481842041,583,LC_ALL=C
|
||||
subtitles_ru_literal,1,5,rg (lines),rg -n Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,0.27593088150024414,583,
|
||||
subtitles_ru_literal,1,5,rg (lines),rg -n Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,0.2842848300933838,583,
|
||||
subtitles_ru_literal,1,5,rg (lines),rg -n Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,0.28340864181518555,583,
|
||||
subtitles_ru_literal,1,5,rg (lines),rg -n Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,0.28469133377075195,583,
|
||||
subtitles_ru_literal,1,5,rg (lines),rg -n Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,0.27951884269714355,583,
|
||||
subtitles_ru_literal,1,5,ag (lines),ag -s Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,2.7401182651519775,583,
|
||||
subtitles_ru_literal,1,5,ag (lines),ag -s Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,2.658051013946533,583,
|
||||
subtitles_ru_literal,1,5,ag (lines),ag -s Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,2.666799306869507,583,
|
||||
subtitles_ru_literal,1,5,ag (lines),ag -s Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,2.7145025730133057,583,
|
||||
subtitles_ru_literal,1,5,ag (lines),ag -s Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,2.7412168979644775,583,
|
||||
subtitles_ru_literal,1,5,grep (lines),grep -n Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,1.0886235237121582,583,LC_ALL=C
|
||||
subtitles_ru_literal,1,5,grep (lines),grep -n Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,1.0896506309509277,583,LC_ALL=C
|
||||
subtitles_ru_literal,1,5,grep (lines),grep -n Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,1.1100494861602783,583,LC_ALL=C
|
||||
subtitles_ru_literal,1,5,grep (lines),grep -n Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,1.088308334350586,583,LC_ALL=C
|
||||
subtitles_ru_literal,1,5,grep (lines),grep -n Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,1.0891127586364746,583,LC_ALL=C
|
||||
subtitles_ru_literal,1,5,ugrep (lines),ugrep -n Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,1.8426175117492676,583,
|
||||
subtitles_ru_literal,1,5,ugrep (lines),ugrep -n Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,1.85064697265625,583,
|
||||
subtitles_ru_literal,1,5,ugrep (lines),ugrep -n Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,1.8356082439422607,583,
|
||||
subtitles_ru_literal,1,5,ugrep (lines),ugrep -n Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,1.8405826091766357,583,
|
||||
subtitles_ru_literal,1,5,ugrep (lines),ugrep -n Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,1.83730149269104,583,
|
||||
subtitles_ru_literal_casei,1,5,rg,rg -i Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,0.48739099502563477,604,
|
||||
subtitles_ru_literal_casei,1,5,rg,rg -i Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,0.4823324680328369,604,
|
||||
subtitles_ru_literal_casei,1,5,rg,rg -i Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,0.4832422733306885,604,
|
||||
subtitles_ru_literal_casei,1,5,rg,rg -i Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,0.4812777042388916,604,
|
||||
subtitles_ru_literal_casei,1,5,rg,rg -i Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,0.4854264259338379,604,
|
||||
subtitles_ru_literal_casei,1,5,grep,grep -i Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,6.694453477859497,604,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_literal_casei,1,5,grep,grep -i Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,6.759232044219971,604,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_literal_casei,1,5,grep,grep -i Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,6.686243534088135,604,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_literal_casei,1,5,grep,grep -i Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,6.7029454708099365,604,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_literal_casei,1,5,grep,grep -i Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,6.699738264083862,604,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_literal_casei,1,5,grep (ASCII),grep -E -i Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,0.7290260791778564,583,LC_ALL=C
|
||||
subtitles_ru_literal_casei,1,5,grep (ASCII),grep -E -i Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,0.7400493621826172,583,LC_ALL=C
|
||||
subtitles_ru_literal_casei,1,5,grep (ASCII),grep -E -i Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,0.7299001216888428,583,LC_ALL=C
|
||||
subtitles_ru_literal_casei,1,5,grep (ASCII),grep -E -i Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,0.7308380603790283,583,LC_ALL=C
|
||||
subtitles_ru_literal_casei,1,5,grep (ASCII),grep -E -i Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,0.7283904552459717,583,LC_ALL=C
|
||||
subtitles_ru_literal_casei,1,5,rg (lines),rg -n -i Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,0.5711629390716553,604,
|
||||
subtitles_ru_literal_casei,1,5,rg (lines),rg -n -i Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,0.574974536895752,604,
|
||||
subtitles_ru_literal_casei,1,5,rg (lines),rg -n -i Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,0.5820963382720947,604,
|
||||
subtitles_ru_literal_casei,1,5,rg (lines),rg -n -i Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,0.5438523292541504,604,
|
||||
subtitles_ru_literal_casei,1,5,rg (lines),rg -n -i Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,0.5054161548614502,604,
|
||||
subtitles_ru_literal_casei,1,5,ag (lines) (ASCII),ag -i Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,0.6135058403015137,,
|
||||
subtitles_ru_literal_casei,1,5,ag (lines) (ASCII),ag -i Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,0.6051545143127441,,
|
||||
subtitles_ru_literal_casei,1,5,ag (lines) (ASCII),ag -i Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,0.6032793521881104,,
|
||||
subtitles_ru_literal_casei,1,5,ag (lines) (ASCII),ag -i Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,0.6097028255462646,,
|
||||
subtitles_ru_literal_casei,1,5,ag (lines) (ASCII),ag -i Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,0.6850666999816895,,
|
||||
subtitles_ru_literal_casei,1,5,ugrep (lines) (ASCII),ugrep -n -i Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,1.833592176437378,583,
|
||||
subtitles_ru_literal_casei,1,5,ugrep (lines) (ASCII),ugrep -n -i Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,1.8357219696044922,583,
|
||||
subtitles_ru_literal_casei,1,5,ugrep (lines) (ASCII),ugrep -n -i Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,1.8394358158111572,583,
|
||||
subtitles_ru_literal_casei,1,5,ugrep (lines) (ASCII),ugrep -n -i Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,1.8334264755249023,583,
|
||||
subtitles_ru_literal_casei,1,5,ugrep (lines) (ASCII),ugrep -n -i Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,1.8304622173309326,583,
|
||||
subtitles_ru_literal_word,1,5,rg (ASCII),rg -n (?-u:^|\W)Шерлок Холмс(?-u:$|\W) /tmp/benchsuite/subtitles/ru.txt,0.2904787063598633,583,
|
||||
subtitles_ru_literal_word,1,5,rg (ASCII),rg -n (?-u:^|\W)Шерлок Холмс(?-u:$|\W) /tmp/benchsuite/subtitles/ru.txt,0.2831101417541504,583,
|
||||
subtitles_ru_literal_word,1,5,rg (ASCII),rg -n (?-u:^|\W)Шерлок Холмс(?-u:$|\W) /tmp/benchsuite/subtitles/ru.txt,0.2786984443664551,583,
|
||||
subtitles_ru_literal_word,1,5,rg (ASCII),rg -n (?-u:^|\W)Шерлок Холмс(?-u:$|\W) /tmp/benchsuite/subtitles/ru.txt,0.28719663619995117,583,
|
||||
subtitles_ru_literal_word,1,5,rg (ASCII),rg -n (?-u:^|\W)Шерлок Холмс(?-u:$|\W) /tmp/benchsuite/subtitles/ru.txt,0.27600622177124023,583,
|
||||
subtitles_ru_literal_word,1,5,ag (ASCII),ag -sw Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,0.6810102462768555,,
|
||||
subtitles_ru_literal_word,1,5,ag (ASCII),ag -sw Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,0.6855161190032959,,
|
||||
subtitles_ru_literal_word,1,5,ag (ASCII),ag -sw Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,0.6827929019927979,,
|
||||
subtitles_ru_literal_word,1,5,ag (ASCII),ag -sw Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,0.6587810516357422,,
|
||||
subtitles_ru_literal_word,1,5,ag (ASCII),ag -sw Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,0.6551673412322998,,
|
||||
subtitles_ru_literal_word,1,5,grep (ASCII),grep -nw Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,1.0948495864868164,583,LC_ALL=C
|
||||
subtitles_ru_literal_word,1,5,grep (ASCII),grep -nw Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,1.097151756286621,583,LC_ALL=C
|
||||
subtitles_ru_literal_word,1,5,grep (ASCII),grep -nw Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,1.1051688194274902,583,LC_ALL=C
|
||||
subtitles_ru_literal_word,1,5,grep (ASCII),grep -nw Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,1.1151607036590576,583,LC_ALL=C
|
||||
subtitles_ru_literal_word,1,5,grep (ASCII),grep -nw Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,1.1100919246673584,583,LC_ALL=C
|
||||
subtitles_ru_literal_word,1,5,ugrep (ASCII),ugrep -nw Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,1.84104585647583,,
|
||||
subtitles_ru_literal_word,1,5,ugrep (ASCII),ugrep -nw Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,1.9092209339141846,,
|
||||
subtitles_ru_literal_word,1,5,ugrep (ASCII),ugrep -nw Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,1.836583137512207,,
|
||||
subtitles_ru_literal_word,1,5,ugrep (ASCII),ugrep -nw Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,1.8941335678100586,,
|
||||
subtitles_ru_literal_word,1,5,ugrep (ASCII),ugrep -nw Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,1.8811957836151123,,
|
||||
subtitles_ru_literal_word,1,5,rg,rg -nw Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,0.2956504821777344,579,
|
||||
subtitles_ru_literal_word,1,5,rg,rg -nw Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,0.29023194313049316,579,
|
||||
subtitles_ru_literal_word,1,5,rg,rg -nw Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,0.3374972343444824,579,
|
||||
subtitles_ru_literal_word,1,5,rg,rg -nw Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,0.29686713218688965,579,
|
||||
subtitles_ru_literal_word,1,5,rg,rg -nw Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,0.29778003692626953,579,
|
||||
subtitles_ru_literal_word,1,5,grep,grep -nw Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,1.1042869091033936,579,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_literal_word,1,5,grep,grep -nw Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,1.1068925857543945,579,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_literal_word,1,5,grep,grep -nw Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,1.0973529815673828,579,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_literal_word,1,5,grep,grep -nw Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,1.0917479991912842,579,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_literal_word,1,5,grep,grep -nw Шерлок Холмс /tmp/benchsuite/subtitles/ru.txt,1.0987188816070557,579,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_alternate,1,5,rg (lines),rg -n Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/ru.txt,0.8945937156677246,691,
|
||||
subtitles_ru_alternate,1,5,rg (lines),rg -n Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/ru.txt,0.8919808864593506,691,
|
||||
subtitles_ru_alternate,1,5,rg (lines),rg -n Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/ru.txt,0.9041986465454102,691,
|
||||
subtitles_ru_alternate,1,5,rg (lines),rg -n Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/ru.txt,0.8838107585906982,691,
|
||||
subtitles_ru_alternate,1,5,rg (lines),rg -n Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/ru.txt,0.903540849685669,691,
|
||||
subtitles_ru_alternate,1,5,ag (lines),ag -s Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/ru.txt,3.715298652648926,691,
|
||||
subtitles_ru_alternate,1,5,ag (lines),ag -s Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/ru.txt,3.676830530166626,691,
|
||||
subtitles_ru_alternate,1,5,ag (lines),ag -s Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/ru.txt,3.721431016921997,691,
|
||||
subtitles_ru_alternate,1,5,ag (lines),ag -s Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/ru.txt,3.6990325450897217,691,
|
||||
subtitles_ru_alternate,1,5,ag (lines),ag -s Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/ru.txt,3.764216184616089,691,
|
||||
subtitles_ru_alternate,1,5,grep (lines),grep -E -n Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/ru.txt,8.519805669784546,691,LC_ALL=C
|
||||
subtitles_ru_alternate,1,5,grep (lines),grep -E -n Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/ru.txt,8.40212869644165,691,LC_ALL=C
|
||||
subtitles_ru_alternate,1,5,grep (lines),grep -E -n Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/ru.txt,8.381818294525146,691,LC_ALL=C
|
||||
subtitles_ru_alternate,1,5,grep (lines),grep -E -n Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/ru.txt,8.386401176452637,691,LC_ALL=C
|
||||
subtitles_ru_alternate,1,5,grep (lines),grep -E -n Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/ru.txt,8.425997257232666,691,LC_ALL=C
|
||||
subtitles_ru_alternate,1,5,ugrep (lines),ugrep -n Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/ru.txt,13.259684801101685,691,
|
||||
subtitles_ru_alternate,1,5,ugrep (lines),ugrep -n Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/ru.txt,13.236181735992432,691,
|
||||
subtitles_ru_alternate,1,5,ugrep (lines),ugrep -n Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/ru.txt,13.340983629226685,691,
|
||||
subtitles_ru_alternate,1,5,ugrep (lines),ugrep -n Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/ru.txt,13.21895980834961,691,
|
||||
subtitles_ru_alternate,1,5,ugrep (lines),ugrep -n Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/ru.txt,13.194425106048584,691,
|
||||
subtitles_ru_alternate,1,5,rg,rg Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/ru.txt,0.8262777328491211,691,
|
||||
subtitles_ru_alternate,1,5,rg,rg Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/ru.txt,0.8343832492828369,691,
|
||||
subtitles_ru_alternate,1,5,rg,rg Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/ru.txt,0.8675012588500977,691,
|
||||
subtitles_ru_alternate,1,5,rg,rg Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/ru.txt,0.8584244251251221,691,
|
||||
subtitles_ru_alternate,1,5,rg,rg Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/ru.txt,0.8777158260345459,691,
|
||||
subtitles_ru_alternate,1,5,grep,grep -E Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/ru.txt,8.25586986541748,691,LC_ALL=C
|
||||
subtitles_ru_alternate,1,5,grep,grep -E Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/ru.txt,8.007173538208008,691,LC_ALL=C
|
||||
subtitles_ru_alternate,1,5,grep,grep -E Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/ru.txt,8.068726301193237,691,LC_ALL=C
|
||||
subtitles_ru_alternate,1,5,grep,grep -E Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/ru.txt,8.010542631149292,691,LC_ALL=C
|
||||
subtitles_ru_alternate,1,5,grep,grep -E Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/ru.txt,8.021028280258179,691,LC_ALL=C
|
||||
subtitles_ru_alternate_casei,1,5,ag (ASCII),ag -s -i Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/ru.txt,3.7179486751556396,691,
|
||||
subtitles_ru_alternate_casei,1,5,ag (ASCII),ag -s -i Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/ru.txt,3.682896375656128,691,
|
||||
subtitles_ru_alternate_casei,1,5,ag (ASCII),ag -s -i Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/ru.txt,3.699859142303467,691,
|
||||
subtitles_ru_alternate_casei,1,5,ag (ASCII),ag -s -i Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/ru.txt,3.662733316421509,691,
|
||||
subtitles_ru_alternate_casei,1,5,ag (ASCII),ag -s -i Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/ru.txt,3.661060094833374,691,
|
||||
subtitles_ru_alternate_casei,1,5,grep (ASCII),grep -E -ni Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/ru.txt,8.434819221496582,691,LC_ALL=C
|
||||
subtitles_ru_alternate_casei,1,5,grep (ASCII),grep -E -ni Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/ru.txt,8.436205625534058,691,LC_ALL=C
|
||||
subtitles_ru_alternate_casei,1,5,grep (ASCII),grep -E -ni Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/ru.txt,8.388120412826538,691,LC_ALL=C
|
||||
subtitles_ru_alternate_casei,1,5,grep (ASCII),grep -E -ni Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/ru.txt,8.407799243927002,691,LC_ALL=C
|
||||
subtitles_ru_alternate_casei,1,5,grep (ASCII),grep -E -ni Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/ru.txt,8.44464373588562,691,LC_ALL=C
|
||||
subtitles_ru_alternate_casei,1,5,ugrep (ASCII),ugrep -n -i Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/ru.txt,13.216991662979126,691,
|
||||
subtitles_ru_alternate_casei,1,5,ugrep (ASCII),ugrep -n -i Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/ru.txt,13.470320701599121,691,
|
||||
subtitles_ru_alternate_casei,1,5,ugrep (ASCII),ugrep -n -i Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/ru.txt,13.21274471282959,691,
|
||||
subtitles_ru_alternate_casei,1,5,ugrep (ASCII),ugrep -n -i Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/ru.txt,13.38324522972107,691,
|
||||
subtitles_ru_alternate_casei,1,5,ugrep (ASCII),ugrep -n -i Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/ru.txt,13.3148832321167,691,
|
||||
subtitles_ru_alternate_casei,1,5,rg,rg -n -i Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/ru.txt,6.205031156539917,735,
|
||||
subtitles_ru_alternate_casei,1,5,rg,rg -n -i Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/ru.txt,6.1502509117126465,735,
|
||||
subtitles_ru_alternate_casei,1,5,rg,rg -n -i Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/ru.txt,6.150696516036987,735,
|
||||
subtitles_ru_alternate_casei,1,5,rg,rg -n -i Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/ru.txt,6.150148630142212,735,
|
||||
subtitles_ru_alternate_casei,1,5,rg,rg -n -i Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/ru.txt,6.153124809265137,735,
|
||||
subtitles_ru_alternate_casei,1,5,grep,grep -E -ni Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/ru.txt,7.477111339569092,735,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_alternate_casei,1,5,grep,grep -E -ni Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/ru.txt,7.483617782592773,735,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_alternate_casei,1,5,grep,grep -E -ni Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/ru.txt,7.502292156219482,735,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_alternate_casei,1,5,grep,grep -E -ni Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/ru.txt,7.528963327407837,735,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_alternate_casei,1,5,grep,grep -E -ni Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /tmp/benchsuite/subtitles/ru.txt,7.482379198074341,735,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_surrounding_words,1,5,rg,rg -n \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/ru.txt,0.3461883068084717,278,
|
||||
subtitles_ru_surrounding_words,1,5,rg,rg -n \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/ru.txt,0.30211687088012695,278,
|
||||
subtitles_ru_surrounding_words,1,5,rg,rg -n \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/ru.txt,0.30521416664123535,278,
|
||||
subtitles_ru_surrounding_words,1,5,rg,rg -n \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/ru.txt,0.2969543933868408,278,
|
||||
subtitles_ru_surrounding_words,1,5,rg,rg -n \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/ru.txt,0.3003671169281006,278,
|
||||
subtitles_ru_surrounding_words,1,5,grep,grep -E -n \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/ru.txt,1.4209251403808594,278,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_surrounding_words,1,5,grep,grep -E -n \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/ru.txt,1.4190807342529297,278,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_surrounding_words,1,5,grep,grep -E -n \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/ru.txt,1.4178283214569092,278,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_surrounding_words,1,5,grep,grep -E -n \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/ru.txt,1.4173235893249512,278,LC_ALL=en_US.UTF-8
subtitles_ru_surrounding_words,1,5,grep,grep -E -n \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/ru.txt,1.4221296310424805,278,LC_ALL=en_US.UTF-8
subtitles_ru_surrounding_words,1,5,ugrep,ugrep -n \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/ru.txt,70.6701226234436,326,
subtitles_ru_surrounding_words,1,5,ugrep,ugrep -n \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/ru.txt,71.15788650512695,326,
subtitles_ru_surrounding_words,1,5,ugrep,ugrep -n \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/ru.txt,71.07276272773743,326,
subtitles_ru_surrounding_words,1,5,ugrep,ugrep -n \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/ru.txt,70.5626060962677,326,
subtitles_ru_surrounding_words,1,5,ugrep,ugrep -n \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/ru.txt,70.54449439048767,326,
subtitles_ru_surrounding_words,1,5,ag (ASCII),ag -s \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/ru.txt,1.868441104888916,,
subtitles_ru_surrounding_words,1,5,ag (ASCII),ag -s \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/ru.txt,1.886382818222046,,
subtitles_ru_surrounding_words,1,5,ag (ASCII),ag -s \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/ru.txt,1.8685986995697021,,
subtitles_ru_surrounding_words,1,5,ag (ASCII),ag -s \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/ru.txt,1.8727426528930664,,
subtitles_ru_surrounding_words,1,5,ag (ASCII),ag -s \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/ru.txt,1.8667800426483154,,
subtitles_ru_surrounding_words,1,5,grep (ASCII),grep -E -n \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/ru.txt,1.3818490505218506,,LC_ALL=C
subtitles_ru_surrounding_words,1,5,grep (ASCII),grep -E -n \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/ru.txt,1.3709721565246582,,LC_ALL=C
subtitles_ru_surrounding_words,1,5,grep (ASCII),grep -E -n \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/ru.txt,1.3819043636322021,,LC_ALL=C
subtitles_ru_surrounding_words,1,5,grep (ASCII),grep -E -n \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/ru.txt,1.460402488708496,,LC_ALL=C
subtitles_ru_surrounding_words,1,5,grep (ASCII),grep -E -n \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/ru.txt,1.4097135066986084,,LC_ALL=C
subtitles_ru_surrounding_words,1,5,ugrep (ASCII),ugrep -n -U \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/ru.txt,1.286102294921875,,
subtitles_ru_surrounding_words,1,5,ugrep (ASCII),ugrep -n -U \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/ru.txt,1.2712647914886475,,
subtitles_ru_surrounding_words,1,5,ugrep (ASCII),ugrep -n -U \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/ru.txt,1.2950100898742676,,
subtitles_ru_surrounding_words,1,5,ugrep (ASCII),ugrep -n -U \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/ru.txt,1.264500617980957,,
subtitles_ru_surrounding_words,1,5,ugrep (ASCII),ugrep -n -U \w+\s+Холмс\s+\w+ /tmp/benchsuite/subtitles/ru.txt,1.2877566814422607,,
subtitles_ru_no_literal,1,5,rg,rg -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/ru.txt,3.1152236461639404,41,
subtitles_ru_no_literal,1,5,rg,rg -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/ru.txt,3.1311423778533936,41,
subtitles_ru_no_literal,1,5,rg,rg -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/ru.txt,3.0800061225891113,41,
subtitles_ru_no_literal,1,5,rg,rg -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/ru.txt,3.070636510848999,41,
subtitles_ru_no_literal,1,5,rg,rg -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/ru.txt,3.0940587520599365,41,
subtitles_ru_no_literal,1,5,ugrep,ugrep -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/ru.txt,50.85447072982788,86,
subtitles_ru_no_literal,1,5,ugrep,ugrep -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/ru.txt,50.832582235336304,86,
subtitles_ru_no_literal,1,5,ugrep,ugrep -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/ru.txt,50.8755087852478,86,
subtitles_ru_no_literal,1,5,ugrep,ugrep -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/ru.txt,50.79056358337402,86,
subtitles_ru_no_literal,1,5,ugrep,ugrep -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/ru.txt,50.84795618057251,86,
subtitles_ru_no_literal,1,5,rg (ASCII),rg -n (?-u)\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/ru.txt,2.716826915740967,,
subtitles_ru_no_literal,1,5,rg (ASCII),rg -n (?-u)\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/ru.txt,2.7381114959716797,,
subtitles_ru_no_literal,1,5,rg (ASCII),rg -n (?-u)\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/ru.txt,2.7545180320739746,,
subtitles_ru_no_literal,1,5,rg (ASCII),rg -n (?-u)\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/ru.txt,2.7215416431427,,
subtitles_ru_no_literal,1,5,rg (ASCII),rg -n (?-u)\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/ru.txt,2.707784414291382,,
subtitles_ru_no_literal,1,5,ag (ASCII),ag -s \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/ru.txt,1.9250116348266602,,
subtitles_ru_no_literal,1,5,ag (ASCII),ag -s \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/ru.txt,1.8956947326660156,,
subtitles_ru_no_literal,1,5,ag (ASCII),ag -s \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/ru.txt,1.8904175758361816,,
subtitles_ru_no_literal,1,5,ag (ASCII),ag -s \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/ru.txt,1.8968868255615234,,
subtitles_ru_no_literal,1,5,ag (ASCII),ag -s \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/ru.txt,1.900888204574585,,
subtitles_ru_no_literal,1,5,grep (ASCII),grep -E -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/ru.txt,1.755054235458374,,LC_ALL=C
subtitles_ru_no_literal,1,5,grep (ASCII),grep -E -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/ru.txt,1.7681376934051514,,LC_ALL=C
subtitles_ru_no_literal,1,5,grep (ASCII),grep -E -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/ru.txt,1.7654614448547363,,LC_ALL=C
subtitles_ru_no_literal,1,5,grep (ASCII),grep -E -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/ru.txt,1.75648832321167,,LC_ALL=C
subtitles_ru_no_literal,1,5,grep (ASCII),grep -E -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/ru.txt,1.7456772327423096,,LC_ALL=C
subtitles_ru_no_literal,1,5,ugrep (ASCII),ugrep -n -U \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/ru.txt,1.2170698642730713,,
subtitles_ru_no_literal,1,5,ugrep (ASCII),ugrep -n -U \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/ru.txt,1.1907124519348145,,
subtitles_ru_no_literal,1,5,ugrep (ASCII),ugrep -n -U \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/ru.txt,1.1722266674041748,,
subtitles_ru_no_literal,1,5,ugrep (ASCII),ugrep -n -U \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/ru.txt,1.191617727279663,,
subtitles_ru_no_literal,1,5,ugrep (ASCII),ugrep -n -U \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /tmp/benchsuite/subtitles/ru.txt,1.1909863948822021,,
208 benchsuite/runs/2020-10-14-archlinux-frink/summary Normal file
@@ -0,0 +1,208 @@
linux_literal_default (pattern: PM_RESUME)
------------------------------------------
rg* 0.124 +/- 0.004 (lines: 19)*
ag 0.771 +/- 0.187 (lines: 19)
git grep 0.480 +/- 0.010 (lines: 19)
ugrep 0.136 +/- 0.002 (lines: 19)
grep 1.147 +/- 0.005 (lines: 19)

linux_literal (pattern: PM_RESUME)
----------------------------------
rg* 0.130 +/- 0.008 (lines: 19)*
rg (mmap) 1.336 +/- 0.036 (lines: 19)
ag (mmap) 0.880 +/- 0.071 (lines: 19)
git grep 0.464 +/- 0.005 (lines: 19)
ugrep 0.309 +/- 0.012 (lines: 19)

linux_literal_casei (pattern: PM_RESUME)
----------------------------------------
rg* 0.131 +/- 0.005 (lines: 456)*
rg (mmap) 1.336 +/- 0.020 (lines: 456)
ag (mmap) 0.657 +/- 0.123 (lines: 456)
git grep 0.482 +/- 0.007 (lines: 456)
ugrep 0.288 +/- 0.014 (lines: 456)

linux_re_literal_suffix (pattern: [A-Z]+_RESUME)
------------------------------------------------
rg* 0.126 +/- 0.009 (lines: 1944)*
ag 1.044 +/- 0.138 (lines: 1944)
git grep 1.217 +/- 0.045 (lines: 1944)
ugrep 0.548 +/- 0.014 (lines: 1944)

linux_word (pattern: PM_RESUME)
-------------------------------
rg* 0.134 +/- 0.003 (lines: 6)*
ag 0.618 +/- 0.154 (lines: 6)
git grep 0.471 +/- 0.018 (lines: 6)
ugrep 0.306 +/- 0.018 (lines: 6)

linux_unicode_greek (pattern: \p{Greek})
----------------------------------------
rg* 0.263 +/- 0.001 (lines: 105)*
ugrep 0.273 +/- 0.007 (lines: 105)

linux_unicode_greek_casei (pattern: \p{Greek})
----------------------------------------------
rg* 0.256 +/- 0.013 (lines: 225)*
ugrep 0.271 +/- 0.004 (lines: 105)

linux_unicode_word (pattern: \wAh)
----------------------------------
rg 0.140 +/- 0.004 (lines: 229)
rg (ASCII)* 0.138 +/- 0.009 (lines: 216)*
ag (ASCII) 1.278 +/- 0.101 (lines: 216)
git grep 8.188 +/- 0.027 (lines: 229)
git grep (ASCII) 2.334 +/- 0.025 (lines: 216)
ugrep 0.276 +/- 0.002 (lines: 229)
ugrep (ASCII) 0.274 +/- 0.004 (lines: 216)

linux_no_literal (pattern: \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5})
-----------------------------------------------------------------
rg 0.402 +/- 0.008 (lines: 611)
rg (ASCII)* 0.254 +/- 0.010 (lines: 610)*
ag (ASCII) 0.934 +/- 0.008 (lines: 971)
git grep 14.591 +/- 0.077 (lines: 611)
git grep (ASCII) 3.182 +/- 0.035 (lines: 610)
ugrep 6.196 +/- 0.052 (lines: 973)
ugrep (ASCII) 0.363 +/- 0.004 (lines: 972)

linux_alternates (pattern: ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT)
-------------------------------------------------------------------------
rg* 0.142 +/- 0.010 (lines: 112)*
ag 0.991 +/- 0.019 (lines: 112)
git grep 0.571 +/- 0.011 (lines: 112)
ugrep 0.290 +/- 0.017 (lines: 112)

linux_alternates_casei (pattern: ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT)
-------------------------------------------------------------------------------
rg* 0.226 +/- 0.007 (lines: 203)*
ag 0.700 +/- 0.011 (lines: 203)
git grep 0.977 +/- 0.011 (lines: 203)
ugrep 0.275 +/- 0.005 (lines: 203)

subtitles_en_literal (pattern: Sherlock Holmes)
-----------------------------------------------
rg* 0.226 +/- 0.004 (lines: 830)*
rg (no mmap) 0.366 +/- 0.005 (lines: 830)
grep 0.800 +/- 0.006 (lines: 830)
rg (lines) 0.317 +/- 0.016 (lines: 830)
ag (lines) 2.547 +/- 0.013 (lines: 830)
grep (lines) 1.294 +/- 0.004 (lines: 830)
ugrep (lines) 0.404 +/- 0.011 (lines: 830)

subtitles_en_literal_casei (pattern: Sherlock Holmes)
-----------------------------------------------------
rg* 0.398 +/- 0.024 (lines: 871)*
grep 3.621 +/- 0.016 (lines: 871)
grep (ASCII) 0.938 +/- 0.020 (lines: 871)
rg (lines) 0.514 +/- 0.016 (lines: 871)
ag (lines) (ASCII) 2.595 +/- 0.030 (lines: 871)
ugrep (lines) 1.103 +/- 0.008 (lines: 871)

subtitles_en_literal_word (pattern: Sherlock Holmes)
----------------------------------------------------
rg (ASCII)* 0.317 +/- 0.007 (lines: 830)*
ag (ASCII) 2.584 +/- 0.022 (lines: 830)
grep (ASCII) 1.319 +/- 0.018 (lines: 830)
ugrep (ASCII) 0.414 +/- 0.004 (lines: 830)
rg 0.323 +/- 0.005 (lines: 830)
grep 1.317 +/- 0.023 (lines: 830)

subtitles_en_alternate (pattern: Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty)
---------------------------------------------------------------------------------------------------------------
rg (lines) 0.429 +/- 0.027 (lines: 1094)
ag (lines) 3.608 +/- 0.036 (lines: 1094)
grep (lines) 3.325 +/- 0.017 (lines: 1094)
ugrep (lines) 1.133 +/- 0.045 (lines: 1094)
rg* 0.305 +/- 0.014 (lines: 1094)*
grep 2.821 +/- 0.013 (lines: 1094)

subtitles_en_alternate_casei (pattern: Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty)
---------------------------------------------------------------------------------------------------------------------
ag (ASCII) 6.181 +/- 0.018 (lines: 1136)
grep (ASCII) 5.527 +/- 0.022 (lines: 1136)
ugrep (ASCII) 1.108 +/- 0.007 (lines: 1136)
rg* 0.799 +/- 0.042 (lines: 1136)*
grep 5.573 +/- 0.095 (lines: 1136)

subtitles_en_surrounding_words (pattern: \w+\s+Holmes\s+\w+)
------------------------------------------------------------
rg* 0.335 +/- 0.008 (lines: 483)
grep 1.764 +/- 0.028 (lines: 483)
ugrep 70.234 +/- 0.200 (lines: 489)
rg (ASCII) 0.329 +/- 0.002 (lines: 483)*
ag (ASCII) 7.418 +/- 0.182 (lines: 489)
grep (ASCII) 1.763 +/- 0.017 (lines: 483)
ugrep (ASCII) 31.230 +/- 0.176 (lines: 489)

subtitles_en_no_literal (pattern: \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5})
----------------------------------------------------------------------------------------
rg 2.898 +/- 0.026 (lines: 22)
ugrep 24.659 +/- 0.069 (lines: 309)
rg (ASCII)* 2.669 +/- 0.007 (lines: 22)*
ag (ASCII) 10.764 +/- 0.018 (lines: 302)
grep (ASCII) 6.258 +/- 0.018 (lines: 22)
ugrep (ASCII) 4.687 +/- 0.021 (lines: 302)

subtitles_ru_literal (pattern: Шерлок Холмс)
--------------------------------------------
rg* 0.215 +/- 0.018 (lines: 583)*
rg (no mmap) 0.320 +/- 0.005 (lines: 583)
grep 0.748 +/- 0.039 (lines: 583)
rg (lines) 0.282 +/- 0.004 (lines: 583)
ag (lines) 2.704 +/- 0.040 (lines: 583)
grep (lines) 1.093 +/- 0.009 (lines: 583)
ugrep (lines) 1.841 +/- 0.006 (lines: 583)

subtitles_ru_literal_casei (pattern: Шерлок Холмс)
--------------------------------------------------
rg* 0.484 +/- 0.002 (lines: 604)*
grep 6.709 +/- 0.029 (lines: 604)
grep (ASCII) 0.732 +/- 0.005 (lines: 583)
rg (lines) 0.556 +/- 0.032 (lines: 604)
ag (lines) (ASCII) 0.623 +/- 0.035 (lines: 0)
ugrep (lines) (ASCII) 1.835 +/- 0.003 (lines: 583)

subtitles_ru_literal_word (pattern: Шерлок Холмс)
-------------------------------------------------
rg (ASCII)* 0.283 +/- 0.006 (lines: 583)*
ag (ASCII) 0.673 +/- 0.014 (lines: 0)
grep (ASCII) 1.104 +/- 0.009 (lines: 583)
ugrep (ASCII) 1.872 +/- 0.032 (lines: 0)
rg 0.304 +/- 0.019 (lines: 579)
grep 1.100 +/- 0.006 (lines: 579)

subtitles_ru_alternate (pattern: Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти)
-----------------------------------------------------------------------------------------------------------
rg (lines) 0.896 +/- 0.009 (lines: 691)
ag (lines) 3.715 +/- 0.032 (lines: 691)
grep (lines) 8.423 +/- 0.057 (lines: 691)
ugrep (lines) 13.250 +/- 0.056 (lines: 691)
rg* 0.853 +/- 0.022 (lines: 691)*
grep 8.073 +/- 0.105 (lines: 691)

subtitles_ru_alternate_casei (pattern: Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти)
-----------------------------------------------------------------------------------------------------------------
ag (ASCII)* 3.685 +/- 0.024 (lines: 691)*
grep (ASCII) 8.422 +/- 0.024 (lines: 691)
ugrep (ASCII) 13.320 +/- 0.110 (lines: 691)
rg 6.162 +/- 0.024 (lines: 735)
grep 7.495 +/- 0.021 (lines: 735)

subtitles_ru_surrounding_words (pattern: \w+\s+Холмс\s+\w+)
-----------------------------------------------------------
rg* 0.310 +/- 0.020 (lines: 278)*
grep 1.419 +/- 0.002 (lines: 278)
ugrep 70.802 +/- 0.292 (lines: 326)
ag (ASCII) 1.873 +/- 0.008 (lines: 0)
grep (ASCII) 1.401 +/- 0.036 (lines: 0)
ugrep (ASCII) 1.281 +/- 0.013 (lines: 0)

subtitles_ru_no_literal (pattern: \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5})
----------------------------------------------------------------------------------------
rg 3.098 +/- 0.025 (lines: 41)
ugrep 50.840 +/- 0.032 (lines: 86)
rg (ASCII) 2.728 +/- 0.019 (lines: 0)
ag (ASCII) 1.902 +/- 0.014 (lines: 0)
grep (ASCII) 1.758 +/- 0.009 (lines: 0)
ugrep (ASCII)* 1.193 +/- 0.016 (lines: 0)*
38 benchsuite/runs/2022-12-16-archlinux-duff/README.md Normal file
@@ -0,0 +1,38 @@
This directory contains updated benchmarks as of 2022-12-16. They were captured
via the benchsuite script at `benchsuite/benchsuite` from the root of this
repository. The command that was run:

    $ ./benchsuite \
        --dir /dev/shm/benchsuite \
        --raw runs/2022-12-16-archlinux-duff/raw.csv \
        | tee runs/2022-12-16-archlinux-duff/summary

The versions of each tool are as follows:

    $ rg --version
    ripgrep 13.0.0 (rev 87c4a2b4b1)
    -SIMD -AVX (compiled)
    +SIMD +AVX (runtime)

    $ grep -V
    grep (GNU grep) 3.8

    $ ag -V
    ag version 2.2.0

    Features:
    +jit +lzma +zlib

    $ git --version
    git version 2.39.0

    $ ugrep --version
    ugrep 3.9.2 x86_64-pc-linux-gnu +avx2 +pcre2jit +zlib +bzip2 +lzma +lz4 +zstd
    License BSD-3-Clause: <https://opensource.org/licenses/BSD-3-Clause>
    Written by Robert van Engelen and others: <https://github.com/Genivia/ugrep>

The version of ripgrep used was compiled from source on commit 7f23cd63:

    $ cargo build --release --features 'pcre2'

This was run on a machine with an Intel i9-12900K with 128GB of memory.
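The summary files in these runs are per-benchmark aggregates of the corresponding raw.csv: for each benchmark and tool, the mean and standard deviation of the `duration` samples are reported alongside the matched line count, and the fastest tool is marked with `*`. As a rough illustration of that relationship only (a minimal sketch, not the actual benchsuite code; the CSV path at the bottom is a placeholder), the aggregation can be recomputed from a raw.csv like the one below:

    #!/usr/bin/env python3
    # Minimal sketch (not the benchsuite script itself): recompute per-tool
    # mean/stddev from a raw.csv with the columns
    #   benchmark,warmup_iter,iter,name,command,duration,lines,env
    import csv
    import statistics
    from collections import defaultdict

    def summarize(path):
        samples = defaultdict(list)  # (benchmark, tool) -> [duration, ...]
        lines = {}                   # (benchmark, tool) -> reported line count
        order = []                   # benchmarks in file order
        with open(path, newline="") as f:
            for row in csv.DictReader(f):
                key = (row["benchmark"], row["name"])
                if row["benchmark"] not in order:
                    order.append(row["benchmark"])
                samples[key].append(float(row["duration"]))
                lines[key] = row["lines"] or "0"  # empty lines field -> 0
        for bench in order:
            tools = [t for (b, t) in samples if b == bench]
            best = min(tools, key=lambda t: statistics.mean(samples[(bench, t)]))
            print(bench)
            print("-" * len(bench))
            for tool in tools:
                xs = samples[(bench, tool)]
                mean = statistics.mean(xs)
                dev = statistics.stdev(xs) if len(xs) > 1 else 0.0
                star = "*" if tool == best else ""
                print(f"{tool + star:<24}{mean:.3f} +/- {dev:.3f} "
                      f"(lines: {lines[(bench, tool)]}){star}")
            print()

    if __name__ == "__main__":
        summarize("runs/2022-12-16-archlinux-duff/raw.csv")  # placeholder path
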
400 benchsuite/runs/2022-12-16-archlinux-duff/raw.csv Normal file
@@ -0,0 +1,400 @@
benchmark,warmup_iter,iter,name,command,duration,lines,env
|
||||
linux_literal_default,1,3,rg,rg PM_RESUME,0.08678817749023438,39,
|
||||
linux_literal_default,1,3,rg,rg PM_RESUME,0.08307123184204102,39,
|
||||
linux_literal_default,1,3,rg,rg PM_RESUME,0.08347964286804199,39,
|
||||
linux_literal_default,1,3,ag,ag PM_RESUME,0.2955434322357178,39,
|
||||
linux_literal_default,1,3,ag,ag PM_RESUME,0.2954287528991699,39,
|
||||
linux_literal_default,1,3,ag,ag PM_RESUME,0.2938194274902344,39,
|
||||
linux_literal_default,1,3,git grep,git grep PM_RESUME,0.23198556900024414,39,LC_ALL=en_US.UTF-8
|
||||
linux_literal_default,1,3,git grep,git grep PM_RESUME,0.22356963157653809,39,LC_ALL=en_US.UTF-8
|
||||
linux_literal_default,1,3,git grep,git grep PM_RESUME,0.2189793586730957,39,LC_ALL=en_US.UTF-8
|
||||
linux_literal_default,1,3,ugrep,ugrep -r PM_RESUME ./,0.10710000991821289,39,
|
||||
linux_literal_default,1,3,ugrep,ugrep -r PM_RESUME ./,0.10364222526550293,39,
|
||||
linux_literal_default,1,3,ugrep,ugrep -r PM_RESUME ./,0.1052248477935791,39,
|
||||
linux_literal_default,1,3,grep,grep -r PM_RESUME ./,0.9994468688964844,39,LC_ALL=en_US.UTF-8
|
||||
linux_literal_default,1,3,grep,grep -r PM_RESUME ./,0.9939279556274414,39,LC_ALL=en_US.UTF-8
|
||||
linux_literal_default,1,3,grep,grep -r PM_RESUME ./,0.9957931041717529,39,LC_ALL=en_US.UTF-8
|
||||
linux_literal,1,3,rg,rg -n PM_RESUME,0.08603358268737793,39,
|
||||
linux_literal,1,3,rg,rg -n PM_RESUME,0.0837090015411377,39,
|
||||
linux_literal,1,3,rg,rg -n PM_RESUME,0.08435535430908203,39,
|
||||
linux_literal,1,3,rg (mmap),rg -n --mmap PM_RESUME,0.3215503692626953,39,
|
||||
linux_literal,1,3,rg (mmap),rg -n --mmap PM_RESUME,0.32426929473876953,39,
|
||||
linux_literal,1,3,rg (mmap),rg -n --mmap PM_RESUME,0.3215982913970947,39,
|
||||
linux_literal,1,3,ag (mmap),ag -s PM_RESUME,0.2894856929779053,39,
|
||||
linux_literal,1,3,ag (mmap),ag -s PM_RESUME,0.2892603874206543,39,
|
||||
linux_literal,1,3,ag (mmap),ag -s PM_RESUME,0.29217028617858887,39,
|
||||
linux_literal,1,3,git grep,git grep -I -n PM_RESUME,0.206068754196167,39,LC_ALL=C
|
||||
linux_literal,1,3,git grep,git grep -I -n PM_RESUME,0.2218036651611328,39,LC_ALL=C
|
||||
linux_literal,1,3,git grep,git grep -I -n PM_RESUME,0.20590710639953613,39,LC_ALL=C
|
||||
linux_literal,1,3,ugrep,ugrep -r --ignore-files --no-hidden -I -n PM_RESUME ./,0.18692874908447266,39,
|
||||
linux_literal,1,3,ugrep,ugrep -r --ignore-files --no-hidden -I -n PM_RESUME ./,0.19518327713012695,39,
|
||||
linux_literal,1,3,ugrep,ugrep -r --ignore-files --no-hidden -I -n PM_RESUME ./,0.18577361106872559,39,
|
||||
linux_literal_casei,1,3,rg,rg -n -i PM_RESUME,0.08709383010864258,536,
|
||||
linux_literal_casei,1,3,rg,rg -n -i PM_RESUME,0.08861064910888672,536,
|
||||
linux_literal_casei,1,3,rg,rg -n -i PM_RESUME,0.08769798278808594,536,
|
||||
linux_literal_casei,1,3,rg (mmap),rg -n -i --mmap PM_RESUME,0.3218965530395508,536,
|
||||
linux_literal_casei,1,3,rg (mmap),rg -n -i --mmap PM_RESUME,0.30869364738464355,536,
|
||||
linux_literal_casei,1,3,rg (mmap),rg -n -i --mmap PM_RESUME,0.31044936180114746,536,
|
||||
linux_literal_casei,1,3,ag (mmap),ag -i PM_RESUME,0.2989068031311035,536,
|
||||
linux_literal_casei,1,3,ag (mmap),ag -i PM_RESUME,0.2996039390563965,536,
|
||||
linux_literal_casei,1,3,ag (mmap),ag -i PM_RESUME,0.29817700386047363,536,
|
||||
linux_literal_casei,1,3,git grep,git grep -I -n -i PM_RESUME,0.2122786045074463,536,LC_ALL=C
|
||||
linux_literal_casei,1,3,git grep,git grep -I -n -i PM_RESUME,0.20763754844665527,536,LC_ALL=C
|
||||
linux_literal_casei,1,3,git grep,git grep -I -n -i PM_RESUME,0.220794677734375,536,LC_ALL=C
|
||||
linux_literal_casei,1,3,ugrep,ugrep -r --ignore-files --no-hidden -I -n -i PM_RESUME ./,0.17305850982666016,536,
|
||||
linux_literal_casei,1,3,ugrep,ugrep -r --ignore-files --no-hidden -I -n -i PM_RESUME ./,0.1745915412902832,536,
|
||||
linux_literal_casei,1,3,ugrep,ugrep -r --ignore-files --no-hidden -I -n -i PM_RESUME ./,0.17526865005493164,536,
|
||||
linux_re_literal_suffix,1,3,rg,rg -n [A-Z]+_RESUME,0.08527851104736328,2160,
|
||||
linux_re_literal_suffix,1,3,rg,rg -n [A-Z]+_RESUME,0.08487534523010254,2160,
|
||||
linux_re_literal_suffix,1,3,rg,rg -n [A-Z]+_RESUME,0.0848684310913086,2160,
|
||||
linux_re_literal_suffix,1,3,ag,ag -s [A-Z]+_RESUME,0.37945985794067383,2160,
|
||||
linux_re_literal_suffix,1,3,ag,ag -s [A-Z]+_RESUME,0.36303210258483887,2160,
|
||||
linux_re_literal_suffix,1,3,ag,ag -s [A-Z]+_RESUME,0.36359691619873047,2160,
|
||||
linux_re_literal_suffix,1,3,git grep,git grep -E -I -n [A-Z]+_RESUME,0.9589834213256836,2160,LC_ALL=C
|
||||
linux_re_literal_suffix,1,3,git grep,git grep -E -I -n [A-Z]+_RESUME,0.9206984043121338,2160,LC_ALL=C
|
||||
linux_re_literal_suffix,1,3,git grep,git grep -E -I -n [A-Z]+_RESUME,0.8642933368682861,2160,LC_ALL=C
|
||||
linux_re_literal_suffix,1,3,ugrep,ugrep -r --ignore-files --no-hidden -I -n [A-Z]+_RESUME ./,0.40503501892089844,2160,
|
||||
linux_re_literal_suffix,1,3,ugrep,ugrep -r --ignore-files --no-hidden -I -n [A-Z]+_RESUME ./,0.4531714916229248,2160,
|
||||
linux_re_literal_suffix,1,3,ugrep,ugrep -r --ignore-files --no-hidden -I -n [A-Z]+_RESUME ./,0.4397866725921631,2160,
|
||||
linux_word,1,3,rg,rg -n -w PM_RESUME,0.08639907836914062,9,
|
||||
linux_word,1,3,rg,rg -n -w PM_RESUME,0.08583569526672363,9,
|
||||
linux_word,1,3,rg,rg -n -w PM_RESUME,0.08414363861083984,9,
|
||||
linux_word,1,3,ag,ag -s -w PM_RESUME,0.2853865623474121,9,
|
||||
linux_word,1,3,ag,ag -s -w PM_RESUME,0.2871377468109131,9,
|
||||
linux_word,1,3,ag,ag -s -w PM_RESUME,0.28753662109375,9,
|
||||
linux_word,1,3,git grep,git grep -E -I -n -w PM_RESUME,0.20428204536437988,9,LC_ALL=C
|
||||
linux_word,1,3,git grep,git grep -E -I -n -w PM_RESUME,0.20490717887878418,9,LC_ALL=C
|
||||
linux_word,1,3,git grep,git grep -E -I -n -w PM_RESUME,0.20840072631835938,9,LC_ALL=C
|
||||
linux_word,1,3,ugrep,ugrep -r --ignore-files --no-hidden -I -n -w PM_RESUME ./,0.18790841102600098,9,
|
||||
linux_word,1,3,ugrep,ugrep -r --ignore-files --no-hidden -I -n -w PM_RESUME ./,0.18659543991088867,9,
|
||||
linux_word,1,3,ugrep,ugrep -r --ignore-files --no-hidden -I -n -w PM_RESUME ./,0.19104933738708496,9,
|
||||
linux_unicode_greek,1,3,rg,rg -n \p{Greek},0.19976496696472168,105,
|
||||
linux_unicode_greek,1,3,rg,rg -n \p{Greek},0.20618367195129395,105,
|
||||
linux_unicode_greek,1,3,rg,rg -n \p{Greek},0.19702935218811035,105,
|
||||
linux_unicode_greek,1,3,ugrep,ugrep -r --ignore-files --no-hidden -I -n \p{Greek} ./,0.17758727073669434,105,
|
||||
linux_unicode_greek,1,3,ugrep,ugrep -r --ignore-files --no-hidden -I -n \p{Greek} ./,0.17793798446655273,105,
|
||||
linux_unicode_greek,1,3,ugrep,ugrep -r --ignore-files --no-hidden -I -n \p{Greek} ./,0.1872577667236328,105,
|
||||
linux_unicode_greek_casei,1,3,rg,rg -n -i \p{Greek},0.19808244705200195,245,
|
||||
linux_unicode_greek_casei,1,3,rg,rg -n -i \p{Greek},0.1979837417602539,245,
|
||||
linux_unicode_greek_casei,1,3,rg,rg -n -i \p{Greek},0.1984400749206543,245,
|
||||
linux_unicode_greek_casei,1,3,ugrep,ugrep -r --ignore-files --no-hidden -I -n -i \p{Greek} ./,0.1819148063659668,105,
|
||||
linux_unicode_greek_casei,1,3,ugrep,ugrep -r --ignore-files --no-hidden -I -n -i \p{Greek} ./,0.17530512809753418,105,
|
||||
linux_unicode_greek_casei,1,3,ugrep,ugrep -r --ignore-files --no-hidden -I -n -i \p{Greek} ./,0.17999005317687988,105,
|
||||
linux_unicode_word,1,3,rg,rg -n \wAh,0.08527827262878418,247,
|
||||
linux_unicode_word,1,3,rg,rg -n \wAh,0.08541679382324219,247,
|
||||
linux_unicode_word,1,3,rg,rg -n \wAh,0.08553218841552734,247,
|
||||
linux_unicode_word,1,3,rg (ASCII),rg -n (?-u)\wAh,0.08484745025634766,233,
|
||||
linux_unicode_word,1,3,rg (ASCII),rg -n (?-u)\wAh,0.08466482162475586,233,
|
||||
linux_unicode_word,1,3,rg (ASCII),rg -n (?-u)\wAh,0.08487439155578613,233,
|
||||
linux_unicode_word,1,3,ag (ASCII),ag -s \wAh,0.3061795234680176,233,
|
||||
linux_unicode_word,1,3,ag (ASCII),ag -s \wAh,0.2993617057800293,233,
|
||||
linux_unicode_word,1,3,ag (ASCII),ag -s \wAh,0.29722046852111816,233,
|
||||
linux_unicode_word,1,3,git grep,git grep -E -I -n \wAh,4.257144451141357,247,LC_ALL=en_US.UTF-8
|
||||
linux_unicode_word,1,3,git grep,git grep -E -I -n \wAh,3.852163076400757,247,LC_ALL=en_US.UTF-8
|
||||
linux_unicode_word,1,3,git grep,git grep -E -I -n \wAh,3.8293941020965576,247,LC_ALL=en_US.UTF-8
|
||||
linux_unicode_word,1,3,git grep (ASCII),git grep -E -I -n \wAh,1.647632122039795,233,LC_ALL=C
|
||||
linux_unicode_word,1,3,git grep (ASCII),git grep -E -I -n \wAh,1.6269629001617432,233,LC_ALL=C
|
||||
linux_unicode_word,1,3,git grep (ASCII),git grep -E -I -n \wAh,1.5847914218902588,233,LC_ALL=C
|
||||
linux_unicode_word,1,3,ugrep,ugrep -r --ignore-files --no-hidden -I -n \wAh ./,0.1802208423614502,247,
|
||||
linux_unicode_word,1,3,ugrep,ugrep -r --ignore-files --no-hidden -I -n \wAh ./,0.17564702033996582,247,
|
||||
linux_unicode_word,1,3,ugrep,ugrep -r --ignore-files --no-hidden -I -n \wAh ./,0.1746981143951416,247,
|
||||
linux_unicode_word,1,3,ugrep (ASCII),ugrep -r --ignore-files --no-hidden -I -n -U \wAh ./,0.1799161434173584,233,
|
||||
linux_unicode_word,1,3,ugrep (ASCII),ugrep -r --ignore-files --no-hidden -I -n -U \wAh ./,0.18733000755310059,233,
|
||||
linux_unicode_word,1,3,ugrep (ASCII),ugrep -r --ignore-files --no-hidden -I -n -U \wAh ./,0.18859529495239258,233,
|
||||
linux_no_literal,1,3,rg,rg -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.26203155517578125,721,
|
||||
linux_no_literal,1,3,rg,rg -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.2615540027618408,721,
|
||||
linux_no_literal,1,3,rg,rg -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.2730247974395752,721,
|
||||
linux_no_literal,1,3,rg (ASCII),rg -n (?-u)\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.19902300834655762,720,
|
||||
linux_no_literal,1,3,rg (ASCII),rg -n (?-u)\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.20034146308898926,720,
|
||||
linux_no_literal,1,3,rg (ASCII),rg -n (?-u)\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.20192813873291016,720,
|
||||
linux_no_literal,1,3,ag (ASCII),ag -s \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.8269081115722656,1134,
|
||||
linux_no_literal,1,3,ag (ASCII),ag -s \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.8393104076385498,1134,
|
||||
linux_no_literal,1,3,ag (ASCII),ag -s \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},0.8293666839599609,1134,
|
||||
linux_no_literal,1,3,git grep,git grep -E -I -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},7.334395408630371,721,LC_ALL=en_US.UTF-8
|
||||
linux_no_literal,1,3,git grep,git grep -E -I -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},7.338796854019165,721,LC_ALL=en_US.UTF-8
|
||||
linux_no_literal,1,3,git grep,git grep -E -I -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},7.36545991897583,721,LC_ALL=en_US.UTF-8
|
||||
linux_no_literal,1,3,git grep (ASCII),git grep -E -I -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},2.1588926315307617,720,LC_ALL=C
|
||||
linux_no_literal,1,3,git grep (ASCII),git grep -E -I -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},2.132209062576294,720,LC_ALL=C
|
||||
linux_no_literal,1,3,git grep (ASCII),git grep -E -I -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5},2.1407439708709717,720,LC_ALL=C
|
||||
linux_no_literal,1,3,ugrep,ugrep -r --ignore-files --no-hidden -I -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} ./,3.410162925720215,723,
|
||||
linux_no_literal,1,3,ugrep,ugrep -r --ignore-files --no-hidden -I -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} ./,3.405057668685913,723,
|
||||
linux_no_literal,1,3,ugrep,ugrep -r --ignore-files --no-hidden -I -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} ./,3.3945884704589844,723,
|
||||
linux_no_literal,1,3,ugrep (ASCII),ugrep -r --ignore-files --no-hidden -I -n -U \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} ./,0.23865604400634766,722,
|
||||
linux_no_literal,1,3,ugrep (ASCII),ugrep -r --ignore-files --no-hidden -I -n -U \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} ./,0.23371148109436035,722,
|
||||
linux_no_literal,1,3,ugrep (ASCII),ugrep -r --ignore-files --no-hidden -I -n -U \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} ./,0.2343149185180664,722,
|
||||
linux_alternates,1,3,rg,rg -n ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.08691263198852539,140,
|
||||
linux_alternates,1,3,rg,rg -n ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.08707070350646973,140,
|
||||
linux_alternates,1,3,rg,rg -n ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.08713960647583008,140,
|
||||
linux_alternates,1,3,ag,ag -s ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.32947278022766113,140,
|
||||
linux_alternates,1,3,ag,ag -s ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.33203840255737305,140,
|
||||
linux_alternates,1,3,ag,ag -s ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.3292670249938965,140,
|
||||
linux_alternates,1,3,git grep,git grep -E -I -n ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.4576725959777832,140,LC_ALL=C
|
||||
linux_alternates,1,3,git grep,git grep -E -I -n ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.41936421394348145,140,LC_ALL=C
|
||||
linux_alternates,1,3,git grep,git grep -E -I -n ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.3639688491821289,140,LC_ALL=C
|
||||
linux_alternates,1,3,ugrep,ugrep -r --ignore-files --no-hidden -I -n ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT ./,0.17806458473205566,140,
|
||||
linux_alternates,1,3,ugrep,ugrep -r --ignore-files --no-hidden -I -n ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT ./,0.18224716186523438,140,
|
||||
linux_alternates,1,3,ugrep,ugrep -r --ignore-files --no-hidden -I -n ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT ./,0.17795038223266602,140,
|
||||
linux_alternates_casei,1,3,rg,rg -n -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.12421393394470215,241,
|
||||
linux_alternates_casei,1,3,rg,rg -n -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.12235784530639648,241,
|
||||
linux_alternates_casei,1,3,rg,rg -n -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.12151455879211426,241,
|
||||
linux_alternates_casei,1,3,ag,ag -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.529585599899292,241,
|
||||
linux_alternates_casei,1,3,ag,ag -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.5305526256561279,241,
|
||||
linux_alternates_casei,1,3,ag,ag -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.5311264991760254,241,
|
||||
linux_alternates_casei,1,3,git grep,git grep -E -I -n -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.7589735984802246,241,LC_ALL=C
|
||||
linux_alternates_casei,1,3,git grep,git grep -E -I -n -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.7852108478546143,241,LC_ALL=C
|
||||
linux_alternates_casei,1,3,git grep,git grep -E -I -n -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT,0.8308050632476807,241,LC_ALL=C
|
||||
linux_alternates_casei,1,3,ugrep,ugrep -r --ignore-files --no-hidden -I -n -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT ./,0.17955923080444336,241,
|
||||
linux_alternates_casei,1,3,ugrep,ugrep -r --ignore-files --no-hidden -I -n -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT ./,0.1745290756225586,241,
|
||||
linux_alternates_casei,1,3,ugrep,ugrep -r --ignore-files --no-hidden -I -n -i ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT ./,0.1773686408996582,241,
|
||||
subtitles_en_literal,1,3,rg,rg Sherlock Holmes /dev/shm/benchsuite/subtitles/en.sample.txt,0.1213979721069336,830,
|
||||
subtitles_en_literal,1,3,rg,rg Sherlock Holmes /dev/shm/benchsuite/subtitles/en.sample.txt,0.1213991641998291,830,
|
||||
subtitles_en_literal,1,3,rg,rg Sherlock Holmes /dev/shm/benchsuite/subtitles/en.sample.txt,0.12620782852172852,830,
|
||||
subtitles_en_literal,1,3,rg (no mmap),rg --no-mmap Sherlock Holmes /dev/shm/benchsuite/subtitles/en.sample.txt,0.18207263946533203,830,
|
||||
subtitles_en_literal,1,3,rg (no mmap),rg --no-mmap Sherlock Holmes /dev/shm/benchsuite/subtitles/en.sample.txt,0.17281484603881836,830,
|
||||
subtitles_en_literal,1,3,rg (no mmap),rg --no-mmap Sherlock Holmes /dev/shm/benchsuite/subtitles/en.sample.txt,0.17368507385253906,830,
|
||||
subtitles_en_literal,1,3,grep,grep Sherlock Holmes /dev/shm/benchsuite/subtitles/en.sample.txt,0.560560941696167,830,LC_ALL=C
|
||||
subtitles_en_literal,1,3,grep,grep Sherlock Holmes /dev/shm/benchsuite/subtitles/en.sample.txt,0.563499927520752,830,LC_ALL=C
|
||||
subtitles_en_literal,1,3,grep,grep Sherlock Holmes /dev/shm/benchsuite/subtitles/en.sample.txt,0.5916609764099121,830,LC_ALL=C
|
||||
subtitles_en_literal,1,3,rg (lines),rg -n Sherlock Holmes /dev/shm/benchsuite/subtitles/en.sample.txt,0.19600844383239746,830,
|
||||
subtitles_en_literal,1,3,rg (lines),rg -n Sherlock Holmes /dev/shm/benchsuite/subtitles/en.sample.txt,0.18436980247497559,830,
|
||||
subtitles_en_literal,1,3,rg (lines),rg -n Sherlock Holmes /dev/shm/benchsuite/subtitles/en.sample.txt,0.18594050407409668,830,
|
||||
subtitles_en_literal,1,3,ag (lines),ag -s Sherlock Holmes /dev/shm/benchsuite/subtitles/en.sample.txt,1.871025562286377,830,
|
||||
subtitles_en_literal,1,3,ag (lines),ag -s Sherlock Holmes /dev/shm/benchsuite/subtitles/en.sample.txt,1.8636960983276367,830,
|
||||
subtitles_en_literal,1,3,ag (lines),ag -s Sherlock Holmes /dev/shm/benchsuite/subtitles/en.sample.txt,1.8680994510650635,830,
|
||||
subtitles_en_literal,1,3,grep (lines),grep -n Sherlock Holmes /dev/shm/benchsuite/subtitles/en.sample.txt,0.9978001117706299,830,LC_ALL=C
|
||||
subtitles_en_literal,1,3,grep (lines),grep -n Sherlock Holmes /dev/shm/benchsuite/subtitles/en.sample.txt,0.9385361671447754,830,LC_ALL=C
|
||||
subtitles_en_literal,1,3,grep (lines),grep -n Sherlock Holmes /dev/shm/benchsuite/subtitles/en.sample.txt,1.0036489963531494,830,LC_ALL=C
|
||||
subtitles_en_literal,1,3,ugrep (lines),ugrep -n Sherlock Holmes /dev/shm/benchsuite/subtitles/en.sample.txt,0.18918490409851074,830,
|
||||
subtitles_en_literal,1,3,ugrep (lines),ugrep -n Sherlock Holmes /dev/shm/benchsuite/subtitles/en.sample.txt,0.1769108772277832,830,
|
||||
subtitles_en_literal,1,3,ugrep (lines),ugrep -n Sherlock Holmes /dev/shm/benchsuite/subtitles/en.sample.txt,0.18808293342590332,830,
|
||||
subtitles_en_literal_casei,1,3,rg,rg -i Sherlock Holmes /dev/shm/benchsuite/subtitles/en.sample.txt,0.21876287460327148,871,
|
||||
subtitles_en_literal_casei,1,3,rg,rg -i Sherlock Holmes /dev/shm/benchsuite/subtitles/en.sample.txt,0.2044692039489746,871,
|
||||
subtitles_en_literal_casei,1,3,rg,rg -i Sherlock Holmes /dev/shm/benchsuite/subtitles/en.sample.txt,0.2184743881225586,871,
|
||||
subtitles_en_literal_casei,1,3,grep,grep -i Sherlock Holmes /dev/shm/benchsuite/subtitles/en.sample.txt,2.224027156829834,871,LC_ALL=en_US.UTF-8
|
||||
subtitles_en_literal_casei,1,3,grep,grep -i Sherlock Holmes /dev/shm/benchsuite/subtitles/en.sample.txt,2.223188877105713,871,LC_ALL=en_US.UTF-8
|
||||
subtitles_en_literal_casei,1,3,grep,grep -i Sherlock Holmes /dev/shm/benchsuite/subtitles/en.sample.txt,2.223966598510742,871,LC_ALL=en_US.UTF-8
|
||||
subtitles_en_literal_casei,1,3,grep (ASCII),grep -E -i Sherlock Holmes /dev/shm/benchsuite/subtitles/en.sample.txt,0.671149492263794,871,LC_ALL=C
|
||||
subtitles_en_literal_casei,1,3,grep (ASCII),grep -E -i Sherlock Holmes /dev/shm/benchsuite/subtitles/en.sample.txt,0.6705749034881592,871,LC_ALL=C
|
||||
subtitles_en_literal_casei,1,3,grep (ASCII),grep -E -i Sherlock Holmes /dev/shm/benchsuite/subtitles/en.sample.txt,0.6700258255004883,871,LC_ALL=C
|
||||
subtitles_en_literal_casei,1,3,rg (lines),rg -n -i Sherlock Holmes /dev/shm/benchsuite/subtitles/en.sample.txt,0.2624058723449707,871,
|
||||
subtitles_en_literal_casei,1,3,rg (lines),rg -n -i Sherlock Holmes /dev/shm/benchsuite/subtitles/en.sample.txt,0.25513339042663574,871,
|
||||
subtitles_en_literal_casei,1,3,rg (lines),rg -n -i Sherlock Holmes /dev/shm/benchsuite/subtitles/en.sample.txt,0.26088857650756836,871,
|
||||
subtitles_en_literal_casei,1,3,ag (lines) (ASCII),ag -i Sherlock Holmes /dev/shm/benchsuite/subtitles/en.sample.txt,1.9144322872161865,871,
|
||||
subtitles_en_literal_casei,1,3,ag (lines) (ASCII),ag -i Sherlock Holmes /dev/shm/benchsuite/subtitles/en.sample.txt,1.866628885269165,871,
|
||||
subtitles_en_literal_casei,1,3,ag (lines) (ASCII),ag -i Sherlock Holmes /dev/shm/benchsuite/subtitles/en.sample.txt,1.9098389148712158,871,
|
||||
subtitles_en_literal_casei,1,3,ugrep (lines),ugrep -n -i Sherlock Holmes /dev/shm/benchsuite/subtitles/en.sample.txt,0.7860472202301025,871,
|
||||
subtitles_en_literal_casei,1,3,ugrep (lines),ugrep -n -i Sherlock Holmes /dev/shm/benchsuite/subtitles/en.sample.txt,0.7858343124389648,871,
|
||||
subtitles_en_literal_casei,1,3,ugrep (lines),ugrep -n -i Sherlock Holmes /dev/shm/benchsuite/subtitles/en.sample.txt,0.782252311706543,871,
|
||||
subtitles_en_literal_word,1,3,rg (ASCII),rg -n (?-u:\b)Sherlock Holmes(?-u:\b) /dev/shm/benchsuite/subtitles/en.sample.txt,0.18424677848815918,830,
|
||||
subtitles_en_literal_word,1,3,rg (ASCII),rg -n (?-u:\b)Sherlock Holmes(?-u:\b) /dev/shm/benchsuite/subtitles/en.sample.txt,0.19610810279846191,830,
|
||||
subtitles_en_literal_word,1,3,rg (ASCII),rg -n (?-u:\b)Sherlock Holmes(?-u:\b) /dev/shm/benchsuite/subtitles/en.sample.txt,0.18711471557617188,830,
|
||||
subtitles_en_literal_word,1,3,ag (ASCII),ag -sw Sherlock Holmes /dev/shm/benchsuite/subtitles/en.sample.txt,1.8301315307617188,830,
|
||||
subtitles_en_literal_word,1,3,ag (ASCII),ag -sw Sherlock Holmes /dev/shm/benchsuite/subtitles/en.sample.txt,1.8689801692962646,830,
|
||||
subtitles_en_literal_word,1,3,ag (ASCII),ag -sw Sherlock Holmes /dev/shm/benchsuite/subtitles/en.sample.txt,1.8279321193695068,830,
|
||||
subtitles_en_literal_word,1,3,grep (ASCII),grep -nw Sherlock Holmes /dev/shm/benchsuite/subtitles/en.sample.txt,1.0036842823028564,830,LC_ALL=C
|
||||
subtitles_en_literal_word,1,3,grep (ASCII),grep -nw Sherlock Holmes /dev/shm/benchsuite/subtitles/en.sample.txt,1.002833604812622,830,LC_ALL=C
|
||||
subtitles_en_literal_word,1,3,grep (ASCII),grep -nw Sherlock Holmes /dev/shm/benchsuite/subtitles/en.sample.txt,0.9236147403717041,830,LC_ALL=C
|
||||
subtitles_en_literal_word,1,3,ugrep (ASCII),ugrep -nw Sherlock Holmes /dev/shm/benchsuite/subtitles/en.sample.txt,0.17717313766479492,830,
|
||||
subtitles_en_literal_word,1,3,ugrep (ASCII),ugrep -nw Sherlock Holmes /dev/shm/benchsuite/subtitles/en.sample.txt,0.18994617462158203,830,
|
||||
subtitles_en_literal_word,1,3,ugrep (ASCII),ugrep -nw Sherlock Holmes /dev/shm/benchsuite/subtitles/en.sample.txt,0.17972850799560547,830,
|
||||
subtitles_en_literal_word,1,3,rg,rg -nw Sherlock Holmes /dev/shm/benchsuite/subtitles/en.sample.txt,0.18804550170898438,830,
|
||||
subtitles_en_literal_word,1,3,rg,rg -nw Sherlock Holmes /dev/shm/benchsuite/subtitles/en.sample.txt,0.18867778778076172,830,
|
||||
subtitles_en_literal_word,1,3,rg,rg -nw Sherlock Holmes /dev/shm/benchsuite/subtitles/en.sample.txt,0.19913530349731445,830,
|
||||
subtitles_en_literal_word,1,3,grep,grep -nw Sherlock Holmes /dev/shm/benchsuite/subtitles/en.sample.txt,1.0044364929199219,830,LC_ALL=en_US.UTF-8
|
||||
subtitles_en_literal_word,1,3,grep,grep -nw Sherlock Holmes /dev/shm/benchsuite/subtitles/en.sample.txt,1.0040032863616943,830,LC_ALL=en_US.UTF-8
|
||||
subtitles_en_literal_word,1,3,grep,grep -nw Sherlock Holmes /dev/shm/benchsuite/subtitles/en.sample.txt,0.9627983570098877,830,LC_ALL=en_US.UTF-8
|
||||
subtitles_en_alternate,1,3,rg (lines),rg -n Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /dev/shm/benchsuite/subtitles/en.sample.txt,0.24848055839538574,1094,
|
||||
subtitles_en_alternate,1,3,rg (lines),rg -n Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /dev/shm/benchsuite/subtitles/en.sample.txt,0.24738383293151855,1094,
|
||||
subtitles_en_alternate,1,3,rg (lines),rg -n Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /dev/shm/benchsuite/subtitles/en.sample.txt,0.24789118766784668,1094,
|
||||
subtitles_en_alternate,1,3,ag (lines),ag -s Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /dev/shm/benchsuite/subtitles/en.sample.txt,2.668708562850952,1094,
|
||||
subtitles_en_alternate,1,3,ag (lines),ag -s Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /dev/shm/benchsuite/subtitles/en.sample.txt,2.57511305809021,1094,
|
||||
subtitles_en_alternate,1,3,ag (lines),ag -s Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /dev/shm/benchsuite/subtitles/en.sample.txt,2.6714110374450684,1094,
|
||||
subtitles_en_alternate,1,3,grep (lines),grep -E -n Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /dev/shm/benchsuite/subtitles/en.sample.txt,2.0586187839508057,1094,LC_ALL=C
|
||||
subtitles_en_alternate,1,3,grep (lines),grep -E -n Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /dev/shm/benchsuite/subtitles/en.sample.txt,2.0227150917053223,1094,LC_ALL=C
|
||||
subtitles_en_alternate,1,3,grep (lines),grep -E -n Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /dev/shm/benchsuite/subtitles/en.sample.txt,2.075378179550171,1094,LC_ALL=C
|
||||
subtitles_en_alternate,1,3,ugrep (lines),ugrep -n Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /dev/shm/benchsuite/subtitles/en.sample.txt,0.7863781452178955,1094,
|
||||
subtitles_en_alternate,1,3,ugrep (lines),ugrep -n Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /dev/shm/benchsuite/subtitles/en.sample.txt,0.7874250411987305,1094,
|
||||
subtitles_en_alternate,1,3,ugrep (lines),ugrep -n Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /dev/shm/benchsuite/subtitles/en.sample.txt,0.7867889404296875,1094,
|
||||
subtitles_en_alternate,1,3,rg,rg Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /dev/shm/benchsuite/subtitles/en.sample.txt,0.18195557594299316,1094,
|
||||
subtitles_en_alternate,1,3,rg,rg Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /dev/shm/benchsuite/subtitles/en.sample.txt,0.18239641189575195,1094,
|
||||
subtitles_en_alternate,1,3,rg,rg Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /dev/shm/benchsuite/subtitles/en.sample.txt,0.1625690460205078,1094,
|
||||
subtitles_en_alternate,1,3,grep,grep -E Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /dev/shm/benchsuite/subtitles/en.sample.txt,1.6601614952087402,1094,LC_ALL=C
|
||||
subtitles_en_alternate,1,3,grep,grep -E Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /dev/shm/benchsuite/subtitles/en.sample.txt,1.6617567539215088,1094,LC_ALL=C
|
||||
subtitles_en_alternate,1,3,grep,grep -E Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /dev/shm/benchsuite/subtitles/en.sample.txt,1.6584677696228027,1094,LC_ALL=C
|
||||
subtitles_en_alternate_casei,1,3,ag (ASCII),ag -s -i Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /dev/shm/benchsuite/subtitles/en.sample.txt,4.0028722286224365,1136,
|
||||
subtitles_en_alternate_casei,1,3,ag (ASCII),ag -s -i Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /dev/shm/benchsuite/subtitles/en.sample.txt,3.991217851638794,1136,
|
||||
subtitles_en_alternate_casei,1,3,ag (ASCII),ag -s -i Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /dev/shm/benchsuite/subtitles/en.sample.txt,4.00272274017334,1136,
|
||||
subtitles_en_alternate_casei,1,3,grep (ASCII),grep -E -ni Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /dev/shm/benchsuite/subtitles/en.sample.txt,3.549154758453369,1136,LC_ALL=C
|
||||
subtitles_en_alternate_casei,1,3,grep (ASCII),grep -E -ni Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /dev/shm/benchsuite/subtitles/en.sample.txt,3.5468921661376953,1136,LC_ALL=C
|
||||
subtitles_en_alternate_casei,1,3,grep (ASCII),grep -E -ni Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /dev/shm/benchsuite/subtitles/en.sample.txt,3.5873491764068604,1136,LC_ALL=C
|
||||
subtitles_en_alternate_casei,1,3,ugrep (ASCII),ugrep -n -i Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /dev/shm/benchsuite/subtitles/en.sample.txt,0.7872169017791748,1136,
|
||||
subtitles_en_alternate_casei,1,3,ugrep (ASCII),ugrep -n -i Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /dev/shm/benchsuite/subtitles/en.sample.txt,0.784674882888794,1136,
|
||||
subtitles_en_alternate_casei,1,3,ugrep (ASCII),ugrep -n -i Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /dev/shm/benchsuite/subtitles/en.sample.txt,0.7882401943206787,1136,
|
||||
subtitles_en_alternate_casei,1,3,rg,rg -n -i Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /dev/shm/benchsuite/subtitles/en.sample.txt,0.4785435199737549,1136,
|
||||
subtitles_en_alternate_casei,1,3,rg,rg -n -i Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /dev/shm/benchsuite/subtitles/en.sample.txt,0.4940922260284424,1136,
|
||||
subtitles_en_alternate_casei,1,3,rg,rg -n -i Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /dev/shm/benchsuite/subtitles/en.sample.txt,0.4774627685546875,1136,
|
||||
subtitles_en_alternate_casei,1,3,grep,grep -E -ni Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /dev/shm/benchsuite/subtitles/en.sample.txt,3.5677175521850586,1136,LC_ALL=en_US.UTF-8
|
||||
subtitles_en_alternate_casei,1,3,grep,grep -E -ni Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /dev/shm/benchsuite/subtitles/en.sample.txt,3.603273391723633,1136,LC_ALL=en_US.UTF-8
|
||||
subtitles_en_alternate_casei,1,3,grep,grep -E -ni Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty /dev/shm/benchsuite/subtitles/en.sample.txt,3.5834741592407227,1136,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_surrounding_words,1,3,rg,rg -n \w+\s+Холмс\s+\w+ /dev/shm/benchsuite/subtitles/ru.txt,0.20238041877746582,278,
|
||||
subtitles_ru_surrounding_words,1,3,rg,rg -n \w+\s+Холмс\s+\w+ /dev/shm/benchsuite/subtitles/ru.txt,0.2031264305114746,278,
|
||||
subtitles_ru_surrounding_words,1,3,rg,rg -n \w+\s+Холмс\s+\w+ /dev/shm/benchsuite/subtitles/ru.txt,0.20475172996520996,278,
|
||||
subtitles_ru_surrounding_words,1,3,grep,grep -E -n \w+\s+Холмс\s+\w+ /dev/shm/benchsuite/subtitles/ru.txt,1.0288453102111816,278,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_surrounding_words,1,3,grep,grep -E -n \w+\s+Холмс\s+\w+ /dev/shm/benchsuite/subtitles/ru.txt,1.044802188873291,278,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_surrounding_words,1,3,grep,grep -E -n \w+\s+Холмс\s+\w+ /dev/shm/benchsuite/subtitles/ru.txt,1.0432109832763672,278,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_surrounding_words,1,3,ugrep,ugrep -an \w+\s+Холмс\s+\w+ /dev/shm/benchsuite/subtitles/ru.txt,43.00765633583069,278,
|
||||
subtitles_ru_surrounding_words,1,3,ugrep,ugrep -an \w+\s+Холмс\s+\w+ /dev/shm/benchsuite/subtitles/ru.txt,42.832849740982056,278,
|
||||
subtitles_ru_surrounding_words,1,3,ugrep,ugrep -an \w+\s+Холмс\s+\w+ /dev/shm/benchsuite/subtitles/ru.txt,42.915205240249634,278,
|
||||
subtitles_ru_surrounding_words,1,3,ag (ASCII),ag -s \w+\s+Холмс\s+\w+ /dev/shm/benchsuite/subtitles/ru.txt,1.083683967590332,,
|
||||
subtitles_ru_surrounding_words,1,3,ag (ASCII),ag -s \w+\s+Холмс\s+\w+ /dev/shm/benchsuite/subtitles/ru.txt,1.0841526985168457,,
|
||||
subtitles_ru_surrounding_words,1,3,ag (ASCII),ag -s \w+\s+Холмс\s+\w+ /dev/shm/benchsuite/subtitles/ru.txt,1.0850934982299805,,
|
||||
subtitles_ru_surrounding_words,1,3,grep (ASCII),grep -E -n \w+\s+Холмс\s+\w+ /dev/shm/benchsuite/subtitles/ru.txt,1.0116353034973145,,LC_ALL=C
|
||||
subtitles_ru_surrounding_words,1,3,grep (ASCII),grep -E -n \w+\s+Холмс\s+\w+ /dev/shm/benchsuite/subtitles/ru.txt,0.9868073463439941,,LC_ALL=C
|
||||
subtitles_ru_surrounding_words,1,3,grep (ASCII),grep -E -n \w+\s+Холмс\s+\w+ /dev/shm/benchsuite/subtitles/ru.txt,1.0224814414978027,,LC_ALL=C
|
||||
subtitles_ru_surrounding_words,1,3,ugrep (ASCII),ugrep -a -n -U \w+\s+Холмс\s+\w+ /dev/shm/benchsuite/subtitles/ru.txt,0.8892502784729004,,
|
||||
subtitles_ru_surrounding_words,1,3,ugrep (ASCII),ugrep -a -n -U \w+\s+Холмс\s+\w+ /dev/shm/benchsuite/subtitles/ru.txt,0.8910088539123535,,
|
||||
subtitles_ru_surrounding_words,1,3,ugrep (ASCII),ugrep -a -n -U \w+\s+Холмс\s+\w+ /dev/shm/benchsuite/subtitles/ru.txt,0.8897674083709717,,
|
||||
subtitles_en_no_literal,1,3,rg,rg -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /dev/shm/benchsuite/subtitles/en.sample.txt,2.11850643157959,22,
|
||||
subtitles_en_no_literal,1,3,rg,rg -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /dev/shm/benchsuite/subtitles/en.sample.txt,2.1359670162200928,22,
|
||||
subtitles_en_no_literal,1,3,rg,rg -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /dev/shm/benchsuite/subtitles/en.sample.txt,2.103114128112793,22,
|
||||
subtitles_en_no_literal,1,3,ugrep,ugrep -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /dev/shm/benchsuite/subtitles/en.sample.txt,13.050881385803223,22,
|
||||
subtitles_en_no_literal,1,3,ugrep,ugrep -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /dev/shm/benchsuite/subtitles/en.sample.txt,13.050772190093994,22,
|
||||
subtitles_en_no_literal,1,3,ugrep,ugrep -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /dev/shm/benchsuite/subtitles/en.sample.txt,13.05719804763794,22,
|
||||
subtitles_en_no_literal,1,3,rg (ASCII),rg -n (?-u)\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /dev/shm/benchsuite/subtitles/en.sample.txt,1.9961926937103271,22,
|
||||
subtitles_en_no_literal,1,3,rg (ASCII),rg -n (?-u)\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /dev/shm/benchsuite/subtitles/en.sample.txt,2.019721508026123,22,
|
||||
subtitles_en_no_literal,1,3,rg (ASCII),rg -n (?-u)\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /dev/shm/benchsuite/subtitles/en.sample.txt,1.9965126514434814,22,
|
||||
subtitles_en_no_literal,1,3,ag (ASCII),ag -s \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /dev/shm/benchsuite/subtitles/en.sample.txt,6.849602222442627,302,
|
||||
subtitles_en_no_literal,1,3,ag (ASCII),ag -s \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /dev/shm/benchsuite/subtitles/en.sample.txt,6.813834190368652,302,
|
||||
subtitles_en_no_literal,1,3,ag (ASCII),ag -s \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /dev/shm/benchsuite/subtitles/en.sample.txt,6.8263633251190186,302,
|
||||
subtitles_en_no_literal,1,3,grep (ASCII),grep -E -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /dev/shm/benchsuite/subtitles/en.sample.txt,4.42924165725708,22,LC_ALL=C
|
||||
subtitles_en_no_literal,1,3,grep (ASCII),grep -E -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /dev/shm/benchsuite/subtitles/en.sample.txt,4.378557205200195,22,LC_ALL=C
|
||||
subtitles_en_no_literal,1,3,grep (ASCII),grep -E -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /dev/shm/benchsuite/subtitles/en.sample.txt,4.376646518707275,22,LC_ALL=C
|
||||
subtitles_en_no_literal,1,3,ugrep (ASCII),ugrep -n -U \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /dev/shm/benchsuite/subtitles/en.sample.txt,3.5110037326812744,22,
|
||||
subtitles_en_no_literal,1,3,ugrep (ASCII),ugrep -n -U \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /dev/shm/benchsuite/subtitles/en.sample.txt,3.5137360095977783,22,
|
||||
subtitles_en_no_literal,1,3,ugrep (ASCII),ugrep -n -U \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /dev/shm/benchsuite/subtitles/en.sample.txt,3.5051844120025635,22,
|
||||
subtitles_ru_literal,1,3,rg,rg Шерлок Холмс /dev/shm/benchsuite/subtitles/ru.txt,0.13207745552062988,583,
|
||||
subtitles_ru_literal,1,3,rg,rg Шерлок Холмс /dev/shm/benchsuite/subtitles/ru.txt,0.13084721565246582,583,
|
||||
subtitles_ru_literal,1,3,rg,rg Шерлок Холмс /dev/shm/benchsuite/subtitles/ru.txt,0.13469862937927246,583,
|
||||
subtitles_ru_literal,1,3,rg (no mmap),rg --no-mmap Шерлок Холмс /dev/shm/benchsuite/subtitles/ru.txt,0.18022370338439941,583,
|
||||
subtitles_ru_literal,1,3,rg (no mmap),rg --no-mmap Шерлок Холмс /dev/shm/benchsuite/subtitles/ru.txt,0.1801767349243164,583,
|
||||
subtitles_ru_literal,1,3,rg (no mmap),rg --no-mmap Шерлок Холмс /dev/shm/benchsuite/subtitles/ru.txt,0.17995166778564453,583,
|
||||
subtitles_ru_literal,1,3,grep,grep Шерлок Холмс /dev/shm/benchsuite/subtitles/ru.txt,0.5151040554046631,583,LC_ALL=C
|
||||
subtitles_ru_literal,1,3,grep,grep Шерлок Холмс /dev/shm/benchsuite/subtitles/ru.txt,0.5154542922973633,583,LC_ALL=C
|
||||
subtitles_ru_literal,1,3,grep,grep Шерлок Холмс /dev/shm/benchsuite/subtitles/ru.txt,0.49927639961242676,583,LC_ALL=C
|
||||
subtitles_ru_literal,1,3,rg (lines),rg -n Шерлок Холмс /dev/shm/benchsuite/subtitles/ru.txt,0.19464492797851562,583,
|
||||
subtitles_ru_literal,1,3,rg (lines),rg -n Шерлок Холмс /dev/shm/benchsuite/subtitles/ru.txt,0.18920588493347168,583,
|
||||
subtitles_ru_literal,1,3,rg (lines),rg -n Шерлок Холмс /dev/shm/benchsuite/subtitles/ru.txt,0.19465351104736328,583,
|
||||
subtitles_ru_literal,1,3,ag (lines),ag -s Шерлок Холмс /dev/shm/benchsuite/subtitles/ru.txt,1.9595966339111328,583,
|
||||
subtitles_ru_literal,1,3,ag (lines),ag -s Шерлок Холмс /dev/shm/benchsuite/subtitles/ru.txt,2.0014493465423584,583,
|
||||
subtitles_ru_literal,1,3,ag (lines),ag -s Шерлок Холмс /dev/shm/benchsuite/subtitles/ru.txt,1.9567768573760986,583,
|
||||
subtitles_ru_literal,1,3,grep (lines),grep -n Шерлок Холмс /dev/shm/benchsuite/subtitles/ru.txt,0.8119180202484131,583,LC_ALL=C
|
||||
subtitles_ru_literal,1,3,grep (lines),grep -n Шерлок Холмс /dev/shm/benchsuite/subtitles/ru.txt,0.8111097812652588,583,LC_ALL=C
|
||||
subtitles_ru_literal,1,3,grep (lines),grep -n Шерлок Холмс /dev/shm/benchsuite/subtitles/ru.txt,0.8006868362426758,583,LC_ALL=C
|
||||
subtitles_ru_literal,1,3,ugrep (lines),ugrep -a -n Шерлок Холмс /dev/shm/benchsuite/subtitles/ru.txt,0.70003342628479,583,
|
||||
subtitles_ru_literal,1,3,ugrep (lines),ugrep -a -n Шерлок Холмс /dev/shm/benchsuite/subtitles/ru.txt,0.650275468826294,583,
|
||||
subtitles_ru_literal,1,3,ugrep (lines),ugrep -a -n Шерлок Холмс /dev/shm/benchsuite/subtitles/ru.txt,0.689772367477417,583,
|
||||
subtitles_ru_literal_casei,1,3,rg,rg -i Шерлок Холмс /dev/shm/benchsuite/subtitles/ru.txt,0.267578125,604,
|
||||
subtitles_ru_literal_casei,1,3,rg,rg -i Шерлок Холмс /dev/shm/benchsuite/subtitles/ru.txt,0.2665982246398926,604,
|
||||
subtitles_ru_literal_casei,1,3,rg,rg -i Шерлок Холмс /dev/shm/benchsuite/subtitles/ru.txt,0.26861572265625,604,
|
||||
subtitles_ru_literal_casei,1,3,grep,grep -i Шерлок Холмс /dev/shm/benchsuite/subtitles/ru.txt,4.764627456665039,604,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_literal_casei,1,3,grep,grep -i Шерлок Холмс /dev/shm/benchsuite/subtitles/ru.txt,4.767015695571899,604,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_literal_casei,1,3,grep,grep -i Шерлок Холмс /dev/shm/benchsuite/subtitles/ru.txt,4.7688889503479,604,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_literal_casei,1,3,grep (ASCII),grep -E -i Шерлок Холмс /dev/shm/benchsuite/subtitles/ru.txt,0.5046737194061279,583,LC_ALL=C
|
||||
subtitles_ru_literal_casei,1,3,grep (ASCII),grep -E -i Шерлок Холмс /dev/shm/benchsuite/subtitles/ru.txt,0.5139875411987305,583,LC_ALL=C
|
||||
subtitles_ru_literal_casei,1,3,grep (ASCII),grep -E -i Шерлок Холмс /dev/shm/benchsuite/subtitles/ru.txt,0.4993159770965576,583,LC_ALL=C
|
||||
subtitles_ru_literal_casei,1,3,rg (lines),rg -n -i Шерлок Холмс /dev/shm/benchsuite/subtitles/ru.txt,0.33438658714294434,604,
|
||||
subtitles_ru_literal_casei,1,3,rg (lines),rg -n -i Шерлок Холмс /dev/shm/benchsuite/subtitles/ru.txt,0.3398289680480957,604,
|
||||
subtitles_ru_literal_casei,1,3,rg (lines),rg -n -i Шерлок Холмс /dev/shm/benchsuite/subtitles/ru.txt,0.3298227787017822,604,
|
||||
subtitles_ru_literal_casei,1,3,ag (lines) (ASCII),ag -i Шерлок Холмс /dev/shm/benchsuite/subtitles/ru.txt,0.4468214511871338,,
|
||||
subtitles_ru_literal_casei,1,3,ag (lines) (ASCII),ag -i Шерлок Холмс /dev/shm/benchsuite/subtitles/ru.txt,0.44559574127197266,,
|
||||
subtitles_ru_literal_casei,1,3,ag (lines) (ASCII),ag -i Шерлок Холмс /dev/shm/benchsuite/subtitles/ru.txt,0.47882938385009766,,
|
||||
subtitles_ru_literal_casei,1,3,ugrep (lines) (ASCII),ugrep -a -n -i Шерлок Холмс /dev/shm/benchsuite/subtitles/ru.txt,0.7039575576782227,583,
|
||||
subtitles_ru_literal_casei,1,3,ugrep (lines) (ASCII),ugrep -a -n -i Шерлок Холмс /dev/shm/benchsuite/subtitles/ru.txt,0.6490752696990967,583,
|
||||
subtitles_ru_literal_casei,1,3,ugrep (lines) (ASCII),ugrep -a -n -i Шерлок Холмс /dev/shm/benchsuite/subtitles/ru.txt,0.8081104755401611,583,
|
||||
subtitles_ru_literal_word,1,3,rg (ASCII),rg -n (?-u:^|\W)Шерлок Холмс(?-u:$|\W) /dev/shm/benchsuite/subtitles/ru.txt,0.20162224769592285,583,
|
||||
subtitles_ru_literal_word,1,3,rg (ASCII),rg -n (?-u:^|\W)Шерлок Холмс(?-u:$|\W) /dev/shm/benchsuite/subtitles/ru.txt,0.18215250968933105,583,
|
||||
subtitles_ru_literal_word,1,3,rg (ASCII),rg -n (?-u:^|\W)Шерлок Холмс(?-u:$|\W) /dev/shm/benchsuite/subtitles/ru.txt,0.20087671279907227,583,
|
||||
subtitles_ru_literal_word,1,3,ag (ASCII),ag -sw Шерлок Холмс /dev/shm/benchsuite/subtitles/ru.txt,0.48624587059020996,,
|
||||
subtitles_ru_literal_word,1,3,ag (ASCII),ag -sw Шерлок Холмс /dev/shm/benchsuite/subtitles/ru.txt,0.5212516784667969,,
|
||||
subtitles_ru_literal_word,1,3,ag (ASCII),ag -sw Шерлок Холмс /dev/shm/benchsuite/subtitles/ru.txt,0.520557165145874,,
|
||||
subtitles_ru_literal_word,1,3,grep (ASCII),grep -nw Шерлок Холмс /dev/shm/benchsuite/subtitles/ru.txt,0.8108196258544922,583,LC_ALL=C
|
||||
subtitles_ru_literal_word,1,3,grep (ASCII),grep -nw Шерлок Холмс /dev/shm/benchsuite/subtitles/ru.txt,0.8121066093444824,583,LC_ALL=C
|
||||
subtitles_ru_literal_word,1,3,grep (ASCII),grep -nw Шерлок Холмс /dev/shm/benchsuite/subtitles/ru.txt,0.7784581184387207,583,LC_ALL=C
|
||||
subtitles_ru_literal_word,1,3,ugrep (ASCII),ugrep -anw Шерлок Холмс /dev/shm/benchsuite/subtitles/ru.txt,0.7469344139099121,583,
|
||||
subtitles_ru_literal_word,1,3,ugrep (ASCII),ugrep -anw Шерлок Холмс /dev/shm/benchsuite/subtitles/ru.txt,0.6838233470916748,583,
|
||||
subtitles_ru_literal_word,1,3,ugrep (ASCII),ugrep -anw Шерлок Холмс /dev/shm/benchsuite/subtitles/ru.txt,0.6921679973602295,583,
|
||||
subtitles_ru_literal_word,1,3,rg,rg -nw Шерлок Холмс /dev/shm/benchsuite/subtitles/ru.txt,0.19918251037597656,579,
|
||||
subtitles_ru_literal_word,1,3,rg,rg -nw Шерлок Холмс /dev/shm/benchsuite/subtitles/ru.txt,0.2046656608581543,579,
|
||||
subtitles_ru_literal_word,1,3,rg,rg -nw Шерлок Холмс /dev/shm/benchsuite/subtitles/ru.txt,0.1984848976135254,579,
|
||||
subtitles_ru_literal_word,1,3,grep,grep -nw Шерлок Холмс /dev/shm/benchsuite/subtitles/ru.txt,0.794173002243042,579,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_literal_word,1,3,grep,grep -nw Шерлок Холмс /dev/shm/benchsuite/subtitles/ru.txt,0.7715346813201904,579,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_literal_word,1,3,grep,grep -nw Шерлок Холмс /dev/shm/benchsuite/subtitles/ru.txt,0.8116705417633057,579,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_alternate,1,3,rg (lines),rg -n Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /dev/shm/benchsuite/subtitles/ru.txt,0.6730976104736328,691,
|
||||
subtitles_ru_alternate,1,3,rg (lines),rg -n Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /dev/shm/benchsuite/subtitles/ru.txt,0.7020411491394043,691,
|
||||
subtitles_ru_alternate,1,3,rg (lines),rg -n Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /dev/shm/benchsuite/subtitles/ru.txt,0.6693949699401855,691,
|
||||
subtitles_ru_alternate,1,3,ag (lines),ag -s Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /dev/shm/benchsuite/subtitles/ru.txt,2.7100515365600586,691,
|
||||
subtitles_ru_alternate,1,3,ag (lines),ag -s Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /dev/shm/benchsuite/subtitles/ru.txt,2.7458419799804688,691,
|
||||
subtitles_ru_alternate,1,3,ag (lines),ag -s Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /dev/shm/benchsuite/subtitles/ru.txt,2.7115116119384766,691,
|
||||
subtitles_ru_alternate,1,3,grep (lines),grep -E -n Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /dev/shm/benchsuite/subtitles/ru.txt,5.703738451004028,691,LC_ALL=C
|
||||
subtitles_ru_alternate,1,3,grep (lines),grep -E -n Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /dev/shm/benchsuite/subtitles/ru.txt,5.715883731842041,691,LC_ALL=C
|
||||
subtitles_ru_alternate,1,3,grep (lines),grep -E -n Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /dev/shm/benchsuite/subtitles/ru.txt,5.712724924087524,691,LC_ALL=C
|
||||
subtitles_ru_alternate,1,3,ugrep (lines),ugrep -an Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /dev/shm/benchsuite/subtitles/ru.txt,8.276995420455933,691,
|
||||
subtitles_ru_alternate,1,3,ugrep (lines),ugrep -an Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /dev/shm/benchsuite/subtitles/ru.txt,8.304608345031738,691,
|
||||
subtitles_ru_alternate,1,3,ugrep (lines),ugrep -an Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /dev/shm/benchsuite/subtitles/ru.txt,8.322760820388794,691,
|
||||
subtitles_ru_alternate,1,3,rg,rg Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /dev/shm/benchsuite/subtitles/ru.txt,0.6119842529296875,691,
|
||||
subtitles_ru_alternate,1,3,rg,rg Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /dev/shm/benchsuite/subtitles/ru.txt,0.6368775367736816,691,
|
||||
subtitles_ru_alternate,1,3,rg,rg Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /dev/shm/benchsuite/subtitles/ru.txt,0.6258070468902588,691,
|
||||
subtitles_ru_alternate,1,3,grep,grep -E Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /dev/shm/benchsuite/subtitles/ru.txt,5.4300291538238525,691,LC_ALL=C
|
||||
subtitles_ru_alternate,1,3,grep,grep -E Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /dev/shm/benchsuite/subtitles/ru.txt,5.418199300765991,691,LC_ALL=C
|
||||
subtitles_ru_alternate,1,3,grep,grep -E Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /dev/shm/benchsuite/subtitles/ru.txt,5.425868511199951,691,LC_ALL=C
|
||||
subtitles_ru_alternate_casei,1,3,ag (ASCII),ag -s -i Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /dev/shm/benchsuite/subtitles/ru.txt,2.7216460704803467,691,
|
||||
subtitles_ru_alternate_casei,1,3,ag (ASCII),ag -s -i Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /dev/shm/benchsuite/subtitles/ru.txt,2.7108607292175293,691,
|
||||
subtitles_ru_alternate_casei,1,3,ag (ASCII),ag -s -i Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /dev/shm/benchsuite/subtitles/ru.txt,2.747138500213623,691,
|
||||
subtitles_ru_alternate_casei,1,3,grep (ASCII),grep -E -ni Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /dev/shm/benchsuite/subtitles/ru.txt,5.711230039596558,691,LC_ALL=C
|
||||
subtitles_ru_alternate_casei,1,3,grep (ASCII),grep -E -ni Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /dev/shm/benchsuite/subtitles/ru.txt,5.709407329559326,691,LC_ALL=C
|
||||
subtitles_ru_alternate_casei,1,3,grep (ASCII),grep -E -ni Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /dev/shm/benchsuite/subtitles/ru.txt,5.714034557342529,691,LC_ALL=C
|
||||
subtitles_ru_alternate_casei,1,3,ugrep (ASCII),ugrep -ani Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /dev/shm/benchsuite/subtitles/ru.txt,8.305904626846313,691,
|
||||
subtitles_ru_alternate_casei,1,3,ugrep (ASCII),ugrep -ani Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /dev/shm/benchsuite/subtitles/ru.txt,8.307406187057495,691,
|
||||
subtitles_ru_alternate_casei,1,3,ugrep (ASCII),ugrep -ani Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /dev/shm/benchsuite/subtitles/ru.txt,8.288233995437622,691,
|
||||
subtitles_ru_alternate_casei,1,3,rg,rg -n -i Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /dev/shm/benchsuite/subtitles/ru.txt,3.673624277114868,735,
|
||||
subtitles_ru_alternate_casei,1,3,rg,rg -n -i Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /dev/shm/benchsuite/subtitles/ru.txt,3.6759188175201416,735,
|
||||
subtitles_ru_alternate_casei,1,3,rg,rg -n -i Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /dev/shm/benchsuite/subtitles/ru.txt,3.66877818107605,735,
|
||||
subtitles_ru_alternate_casei,1,3,grep,grep -E -ni Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /dev/shm/benchsuite/subtitles/ru.txt,5.366282224655151,735,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_alternate_casei,1,3,grep,grep -E -ni Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /dev/shm/benchsuite/subtitles/ru.txt,5.370524883270264,735,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_alternate_casei,1,3,grep,grep -E -ni Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти /dev/shm/benchsuite/subtitles/ru.txt,5.342163324356079,735,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_surrounding_words,1,3,rg,rg -n \w+\s+Холмс\s+\w+ /dev/shm/benchsuite/subtitles/ru.txt,0.20331382751464844,278,
|
||||
subtitles_ru_surrounding_words,1,3,rg,rg -n \w+\s+Холмс\s+\w+ /dev/shm/benchsuite/subtitles/ru.txt,0.2034592628479004,278,
|
||||
subtitles_ru_surrounding_words,1,3,rg,rg -n \w+\s+Холмс\s+\w+ /dev/shm/benchsuite/subtitles/ru.txt,0.20407724380493164,278,
|
||||
subtitles_ru_surrounding_words,1,3,grep,grep -E -n \w+\s+Холмс\s+\w+ /dev/shm/benchsuite/subtitles/ru.txt,1.0436389446258545,278,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_surrounding_words,1,3,grep,grep -E -n \w+\s+Холмс\s+\w+ /dev/shm/benchsuite/subtitles/ru.txt,1.0388383865356445,278,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_surrounding_words,1,3,grep,grep -E -n \w+\s+Холмс\s+\w+ /dev/shm/benchsuite/subtitles/ru.txt,1.0446207523345947,278,LC_ALL=en_US.UTF-8
|
||||
subtitles_ru_surrounding_words,1,3,ugrep,ugrep -n \w+\s+Холмс\s+\w+ /dev/shm/benchsuite/subtitles/ru.txt,0.29245424270629883,1,
|
||||
subtitles_ru_surrounding_words,1,3,ugrep,ugrep -n \w+\s+Холмс\s+\w+ /dev/shm/benchsuite/subtitles/ru.txt,0.29168128967285156,1,
|
||||
subtitles_ru_surrounding_words,1,3,ugrep,ugrep -n \w+\s+Холмс\s+\w+ /dev/shm/benchsuite/subtitles/ru.txt,0.29593825340270996,1,
|
||||
subtitles_ru_surrounding_words,1,3,ag (ASCII),ag -s \w+\s+Холмс\s+\w+ /dev/shm/benchsuite/subtitles/ru.txt,1.085604190826416,,
|
||||
subtitles_ru_surrounding_words,1,3,ag (ASCII),ag -s \w+\s+Холмс\s+\w+ /dev/shm/benchsuite/subtitles/ru.txt,1.083526372909546,,
|
||||
subtitles_ru_surrounding_words,1,3,ag (ASCII),ag -s \w+\s+Холмс\s+\w+ /dev/shm/benchsuite/subtitles/ru.txt,1.1223819255828857,,
|
||||
subtitles_ru_surrounding_words,1,3,grep (ASCII),grep -E -n \w+\s+Холмс\s+\w+ /dev/shm/benchsuite/subtitles/ru.txt,0.9905192852020264,,LC_ALL=C
|
||||
subtitles_ru_surrounding_words,1,3,grep (ASCII),grep -E -n \w+\s+Холмс\s+\w+ /dev/shm/benchsuite/subtitles/ru.txt,1.0222513675689697,,LC_ALL=C
|
||||
subtitles_ru_surrounding_words,1,3,grep (ASCII),grep -E -n \w+\s+Холмс\s+\w+ /dev/shm/benchsuite/subtitles/ru.txt,1.0216262340545654,,LC_ALL=C
|
||||
subtitles_ru_surrounding_words,1,3,ugrep (ASCII),ugrep -a -n -U \w+\s+Холмс\s+\w+ /dev/shm/benchsuite/subtitles/ru.txt,0.8875806331634521,,
|
||||
subtitles_ru_surrounding_words,1,3,ugrep (ASCII),ugrep -a -n -U \w+\s+Холмс\s+\w+ /dev/shm/benchsuite/subtitles/ru.txt,0.8861405849456787,,
|
||||
subtitles_ru_surrounding_words,1,3,ugrep (ASCII),ugrep -a -n -U \w+\s+Холмс\s+\w+ /dev/shm/benchsuite/subtitles/ru.txt,0.8898241519927979,,
|
||||
subtitles_ru_no_literal,1,3,rg,rg -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /dev/shm/benchsuite/subtitles/ru.txt,2.237398147583008,41,
|
||||
subtitles_ru_no_literal,1,3,rg,rg -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /dev/shm/benchsuite/subtitles/ru.txt,2.253706693649292,41,
|
||||
subtitles_ru_no_literal,1,3,rg,rg -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /dev/shm/benchsuite/subtitles/ru.txt,2.2161178588867188,41,
|
||||
subtitles_ru_no_literal,1,3,ugrep,ugrep -an \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /dev/shm/benchsuite/subtitles/ru.txt,28.85959553718567,41,
|
||||
subtitles_ru_no_literal,1,3,ugrep,ugrep -an \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /dev/shm/benchsuite/subtitles/ru.txt,28.666419982910156,41,
|
||||
subtitles_ru_no_literal,1,3,ugrep,ugrep -an \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /dev/shm/benchsuite/subtitles/ru.txt,28.90555214881897,41,
|
||||
subtitles_ru_no_literal,1,3,rg (ASCII),rg -n (?-u)\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /dev/shm/benchsuite/subtitles/ru.txt,2.051813840866089,,
|
||||
subtitles_ru_no_literal,1,3,rg (ASCII),rg -n (?-u)\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /dev/shm/benchsuite/subtitles/ru.txt,2.026675224304199,,
|
||||
subtitles_ru_no_literal,1,3,rg (ASCII),rg -n (?-u)\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /dev/shm/benchsuite/subtitles/ru.txt,2.027498245239258,,
|
||||
subtitles_ru_no_literal,1,3,ag (ASCII),ag -s \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /dev/shm/benchsuite/subtitles/ru.txt,1.0998010635375977,,
|
||||
subtitles_ru_no_literal,1,3,ag (ASCII),ag -s \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /dev/shm/benchsuite/subtitles/ru.txt,1.0900018215179443,,
|
||||
subtitles_ru_no_literal,1,3,ag (ASCII),ag -s \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /dev/shm/benchsuite/subtitles/ru.txt,1.0901548862457275,,
|
||||
subtitles_ru_no_literal,1,3,grep (ASCII),grep -E -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /dev/shm/benchsuite/subtitles/ru.txt,1.0691263675689697,,LC_ALL=C
|
||||
subtitles_ru_no_literal,1,3,grep (ASCII),grep -E -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /dev/shm/benchsuite/subtitles/ru.txt,1.0875153541564941,,LC_ALL=C
|
||||
subtitles_ru_no_literal,1,3,grep (ASCII),grep -E -n \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /dev/shm/benchsuite/subtitles/ru.txt,1.0997354984283447,,LC_ALL=C
|
||||
subtitles_ru_no_literal,1,3,ugrep (ASCII),ugrep -anU \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /dev/shm/benchsuite/subtitles/ru.txt,0.8329172134399414,,
|
||||
subtitles_ru_no_literal,1,3,ugrep (ASCII),ugrep -anU \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /dev/shm/benchsuite/subtitles/ru.txt,0.8292679786682129,,
|
||||
subtitles_ru_no_literal,1,3,ugrep (ASCII),ugrep -anU \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5} /dev/shm/benchsuite/subtitles/ru.txt,0.8326950073242188,,
|
||||
|
208
benchsuite/runs/2022-12-16-archlinux-duff/summary
Normal file
@@ -0,0 +1,208 @@
|
||||
linux_literal_default (pattern: PM_RESUME)
|
||||
------------------------------------------
|
||||
rg* 0.084 +/- 0.002 (lines: 39)*
|
||||
ag 0.295 +/- 0.001 (lines: 39)
|
||||
git grep 0.225 +/- 0.007 (lines: 39)
|
||||
ugrep 0.105 +/- 0.002 (lines: 39)
|
||||
grep 0.996 +/- 0.003 (lines: 39)
|
||||
|
||||
linux_literal (pattern: PM_RESUME)
|
||||
----------------------------------
|
||||
rg* 0.085 +/- 0.001 (lines: 39)*
|
||||
rg (mmap) 0.322 +/- 0.002 (lines: 39)
|
||||
ag (mmap) 0.290 +/- 0.002 (lines: 39)
|
||||
git grep 0.211 +/- 0.009 (lines: 39)
|
||||
ugrep 0.189 +/- 0.005 (lines: 39)
|
||||
|
||||
linux_literal_casei (pattern: PM_RESUME)
|
||||
----------------------------------------
|
||||
rg* 0.088 +/- 0.001 (lines: 536)*
|
||||
rg (mmap) 0.314 +/- 0.007 (lines: 536)
|
||||
ag (mmap) 0.299 +/- 0.001 (lines: 536)
|
||||
git grep 0.214 +/- 0.007 (lines: 536)
|
||||
ugrep 0.174 +/- 0.001 (lines: 536)
|
||||
|
||||
linux_re_literal_suffix (pattern: [A-Z]+_RESUME)
|
||||
------------------------------------------------
|
||||
rg* 0.085 +/- 0.000 (lines: 2160)*
|
||||
ag 0.369 +/- 0.009 (lines: 2160)
|
||||
git grep 0.915 +/- 0.048 (lines: 2160)
|
||||
ugrep 0.433 +/- 0.025 (lines: 2160)
|
||||
|
||||
linux_word (pattern: PM_RESUME)
|
||||
-------------------------------
|
||||
rg* 0.085 +/- 0.001 (lines: 9)*
|
||||
ag 0.287 +/- 0.001 (lines: 9)
|
||||
git grep 0.206 +/- 0.002 (lines: 9)
|
||||
ugrep 0.189 +/- 0.002 (lines: 9)
|
||||
|
||||
linux_unicode_greek (pattern: \p{Greek})
|
||||
----------------------------------------
|
||||
rg 0.201 +/- 0.005 (lines: 105)
|
||||
ugrep* 0.181 +/- 0.005 (lines: 105)*
|
||||
|
||||
linux_unicode_greek_casei (pattern: \p{Greek})
|
||||
----------------------------------------------
|
||||
rg 0.198 +/- 0.000 (lines: 245)
|
||||
ugrep* 0.179 +/- 0.003 (lines: 105)*
|
||||
|
||||
linux_unicode_word (pattern: \wAh)
|
||||
----------------------------------
|
||||
rg 0.085 +/- 0.000 (lines: 247)
|
||||
rg (ASCII)* 0.085 +/- 0.000 (lines: 233)*
|
||||
ag (ASCII) 0.301 +/- 0.005 (lines: 233)
|
||||
git grep 3.980 +/- 0.241 (lines: 247)
|
||||
git grep (ASCII) 1.620 +/- 0.032 (lines: 233)
|
||||
ugrep 0.177 +/- 0.003 (lines: 247)
|
||||
ugrep (ASCII) 0.185 +/- 0.005 (lines: 233)
|
||||
|
||||
linux_no_literal (pattern: \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5})
|
||||
-----------------------------------------------------------------
|
||||
rg 0.266 +/- 0.006 (lines: 721)
|
||||
rg (ASCII)* 0.200 +/- 0.001 (lines: 720)*
|
||||
ag (ASCII) 0.832 +/- 0.007 (lines: 1134)
|
||||
git grep 7.346 +/- 0.017 (lines: 721)
|
||||
git grep (ASCII) 2.144 +/- 0.014 (lines: 720)
|
||||
ugrep 3.403 +/- 0.008 (lines: 723)
|
||||
ugrep (ASCII) 0.236 +/- 0.003 (lines: 722)
|
||||
|
||||
linux_alternates (pattern: ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT)
|
||||
-------------------------------------------------------------------------
|
||||
rg* 0.087 +/- 0.000 (lines: 140)*
|
||||
ag 0.330 +/- 0.002 (lines: 140)
|
||||
git grep 0.414 +/- 0.047 (lines: 140)
|
||||
ugrep 0.179 +/- 0.002 (lines: 140)
|
||||
|
||||
linux_alternates_casei (pattern: ERR_SYS|PME_TURN_OFF|LINK_REQ_RST|CFG_BME_EVT)
|
||||
-------------------------------------------------------------------------------
|
||||
rg* 0.123 +/- 0.001 (lines: 241)*
|
||||
ag 0.530 +/- 0.001 (lines: 241)
|
||||
git grep 0.792 +/- 0.036 (lines: 241)
|
||||
ugrep 0.177 +/- 0.003 (lines: 241)
|
||||
|
||||
subtitles_en_literal (pattern: Sherlock Holmes)
|
||||
-----------------------------------------------
|
||||
rg* 0.123 +/- 0.003 (lines: 830)*
|
||||
rg (no mmap) 0.176 +/- 0.005 (lines: 830)
|
||||
grep 0.572 +/- 0.017 (lines: 830)
|
||||
rg (lines) 0.189 +/- 0.006 (lines: 830)
|
||||
ag (lines) 1.868 +/- 0.004 (lines: 830)
|
||||
grep (lines) 0.980 +/- 0.036 (lines: 830)
|
||||
ugrep (lines) 0.185 +/- 0.007 (lines: 830)
|
||||
|
||||
subtitles_en_literal_casei (pattern: Sherlock Holmes)
|
||||
-----------------------------------------------------
|
||||
rg* 0.214 +/- 0.008 (lines: 871)*
|
||||
grep 2.224 +/- 0.000 (lines: 871)
|
||||
grep (ASCII) 0.671 +/- 0.001 (lines: 871)
|
||||
rg (lines) 0.259 +/- 0.004 (lines: 871)
|
||||
ag (lines) (ASCII) 1.897 +/- 0.026 (lines: 871)
|
||||
ugrep (lines) 0.785 +/- 0.002 (lines: 871)
|
||||
|
||||
subtitles_en_literal_word (pattern: Sherlock Holmes)
|
||||
----------------------------------------------------
|
||||
rg (ASCII) 0.189 +/- 0.006 (lines: 830)
|
||||
ag (ASCII) 1.842 +/- 0.023 (lines: 830)
|
||||
grep (ASCII) 0.977 +/- 0.046 (lines: 830)
|
||||
ugrep (ASCII)* 0.182 +/- 0.007 (lines: 830)*
|
||||
rg 0.192 +/- 0.006 (lines: 830)
|
||||
grep 0.990 +/- 0.024 (lines: 830)
|
||||
|
||||
subtitles_en_alternate (pattern: Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty)
|
||||
---------------------------------------------------------------------------------------------------------------
|
||||
rg (lines) 0.248 +/- 0.001 (lines: 1094)
|
||||
ag (lines) 2.638 +/- 0.055 (lines: 1094)
|
||||
grep (lines) 2.052 +/- 0.027 (lines: 1094)
|
||||
ugrep (lines) 0.787 +/- 0.001 (lines: 1094)
|
||||
rg* 0.176 +/- 0.011 (lines: 1094)*
|
||||
grep 1.660 +/- 0.002 (lines: 1094)
|
||||
|
||||
subtitles_en_alternate_casei (pattern: Sherlock Holmes|John Watson|Irene Adler|Inspector Lestrade|Professor Moriarty)
|
||||
---------------------------------------------------------------------------------------------------------------------
|
||||
ag (ASCII) 3.999 +/- 0.007 (lines: 1136)
|
||||
grep (ASCII) 3.561 +/- 0.023 (lines: 1136)
|
||||
ugrep (ASCII) 0.787 +/- 0.002 (lines: 1136)
|
||||
rg* 0.483 +/- 0.009 (lines: 1136)*
|
||||
grep 3.585 +/- 0.018 (lines: 1136)
|
||||
|
||||
subtitles_en_surrounding_words (pattern: \w+\s+Holmes\s+\w+)
|
||||
------------------------------------------------------------
|
||||
rg 0.200 +/- 0.001 (lines: 483)
|
||||
grep 1.303 +/- 0.040 (lines: 483)
|
||||
ugrep 43.220 +/- 0.047 (lines: 483)
|
||||
rg (ASCII)* 0.197 +/- 0.000 (lines: 483)*
|
||||
ag (ASCII) 5.223 +/- 0.056 (lines: 489)
|
||||
grep (ASCII) 1.316 +/- 0.043 (lines: 483)
|
||||
ugrep (ASCII) 17.647 +/- 0.219 (lines: 483)
|
||||
|
||||
subtitles_en_no_literal (pattern: \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5})
|
||||
----------------------------------------------------------------------------------------
|
||||
rg 2.119 +/- 0.016 (lines: 22)
|
||||
ugrep 13.053 +/- 0.004 (lines: 22)
|
||||
rg (ASCII)* 2.004 +/- 0.013 (lines: 22)*
|
||||
ag (ASCII) 6.830 +/- 0.018 (lines: 302)
|
||||
grep (ASCII) 4.395 +/- 0.030 (lines: 22)
|
||||
ugrep (ASCII) 3.510 +/- 0.004 (lines: 22)
|
||||
|
||||
subtitles_ru_literal (pattern: Шерлок Холмс)
|
||||
--------------------------------------------
|
||||
rg* 0.133 +/- 0.002 (lines: 583)*
|
||||
rg (no mmap) 0.180 +/- 0.000 (lines: 583)
|
||||
grep 0.510 +/- 0.009 (lines: 583)
|
||||
rg (lines) 0.193 +/- 0.003 (lines: 583)
|
||||
ag (lines) 1.973 +/- 0.025 (lines: 583)
|
||||
grep (lines) 0.808 +/- 0.006 (lines: 583)
|
||||
ugrep (lines) 0.680 +/- 0.026 (lines: 583)
|
||||
|
||||
subtitles_ru_literal_casei (pattern: Шерлок Холмс)
|
||||
--------------------------------------------------
|
||||
rg* 0.268 +/- 0.001 (lines: 604)*
|
||||
grep 4.767 +/- 0.002 (lines: 604)
|
||||
grep (ASCII) 0.506 +/- 0.007 (lines: 583)
|
||||
rg (lines) 0.335 +/- 0.005 (lines: 604)
|
||||
ag (lines) (ASCII) 0.457 +/- 0.019 (lines: 0)
|
||||
ugrep (lines) (ASCII) 0.720 +/- 0.081 (lines: 583)
|
||||
|
||||
subtitles_ru_literal_word (pattern: Шерлок Холмс)
|
||||
-------------------------------------------------
|
||||
rg (ASCII)* 0.195 +/- 0.011 (lines: 583)*
|
||||
ag (ASCII) 0.509 +/- 0.020 (lines: 0)
|
||||
grep (ASCII) 0.800 +/- 0.019 (lines: 583)
|
||||
ugrep (ASCII) 0.708 +/- 0.034 (lines: 583)
|
||||
rg 0.201 +/- 0.003 (lines: 579)
|
||||
grep 0.792 +/- 0.020 (lines: 579)
|
||||
|
||||
subtitles_ru_alternate (pattern: Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти)
|
||||
-----------------------------------------------------------------------------------------------------------
|
||||
rg (lines) 0.682 +/- 0.018 (lines: 691)
|
||||
ag (lines) 2.722 +/- 0.020 (lines: 691)
|
||||
grep (lines) 5.711 +/- 0.006 (lines: 691)
|
||||
ugrep (lines) 8.301 +/- 0.023 (lines: 691)
|
||||
rg* 0.625 +/- 0.012 (lines: 691)*
|
||||
grep 5.425 +/- 0.006 (lines: 691)
|
||||
|
||||
subtitles_ru_alternate_casei (pattern: Шерлок Холмс|Джон Уотсон|Ирен Адлер|инспектор Лестрейд|профессор Мориарти)
|
||||
-----------------------------------------------------------------------------------------------------------------
|
||||
ag (ASCII)* 2.727 +/- 0.019 (lines: 691)*
|
||||
grep (ASCII) 5.712 +/- 0.002 (lines: 691)
|
||||
ugrep (ASCII) 8.301 +/- 0.011 (lines: 691)
|
||||
rg 3.673 +/- 0.004 (lines: 735)
|
||||
grep 5.360 +/- 0.015 (lines: 735)
|
||||
|
||||
subtitles_ru_surrounding_words (pattern: \w+\s+Холмс\s+\w+)
|
||||
-----------------------------------------------------------
|
||||
rg* 0.203 +/- 0.001 (lines: 278)*
|
||||
grep 1.039 +/- 0.009 (lines: 278)
|
||||
ugrep 42.919 +/- 0.087 (lines: 278)
|
||||
ag (ASCII) 1.084 +/- 0.001 (lines: 0)
|
||||
grep (ASCII) 1.007 +/- 0.018 (lines: 0)
|
||||
ugrep (ASCII) 0.890 +/- 0.001 (lines: 0)
|
||||
|
||||
subtitles_ru_no_literal (pattern: \w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5}\s+\w{5})
|
||||
----------------------------------------------------------------------------------------
|
||||
rg 2.236 +/- 0.019 (lines: 41)
|
||||
ugrep 28.811 +/- 0.127 (lines: 41)
|
||||
rg (ASCII) 2.035 +/- 0.014 (lines: 0)
|
||||
ag (ASCII) 1.093 +/- 0.006 (lines: 0)
|
||||
grep (ASCII) 1.085 +/- 0.015 (lines: 0)
|
||||
ugrep (ASCII)* 0.832 +/- 0.002 (lines: 0)*
|
||||
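As a rough cross-check, each row in this summary is the mean of the three corresponding raw samples recorded above, and the +/- column is consistent with a standard deviation over those runs (an assumption; the benchsuite may use a slightly different estimator). For example, the three rg samples for subtitles_ru_literal_casei (about 0.2676s, 0.2666s and 0.2686s) reduce to the reported 0.268 +/- 0.001:

$ printf '0.2676\n0.2666\n0.2686\n' |
    awk '{ s += $1; ss += $1 * $1; n += 1 }
         END { m = s / n; printf "%.3f +/- %.3f\n", m, sqrt((ss - n * m * m) / (n - 1)) }'
0.268 +/- 0.001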
90
build.rs
@@ -1,8 +1,3 @@
|
||||
#[macro_use]
|
||||
extern crate clap;
|
||||
#[macro_use]
|
||||
extern crate lazy_static;
|
||||
|
||||
use std::env;
|
||||
use std::fs::{self, File};
|
||||
use std::io::{self, Read, Write};
|
||||
@@ -14,7 +9,7 @@ use clap::Shell;
|
||||
use app::{RGArg, RGArgKind};
|
||||
|
||||
#[allow(dead_code)]
|
||||
#[path = "src/app.rs"]
|
||||
#[path = "crates/core/app.rs"]
|
||||
mod app;
|
||||
|
||||
fn main() {
|
||||
@@ -26,7 +21,8 @@ fn main() {
|
||||
eprintln!(
|
||||
"OUT_DIR environment variable not defined. \
|
||||
Please file a bug: \
|
||||
https://github.com/BurntSushi/ripgrep/issues/new");
|
||||
https://github.com/BurntSushi/ripgrep/issues/new"
|
||||
);
|
||||
process::exit(1);
|
||||
}
|
||||
};
|
||||
@@ -58,12 +54,62 @@ fn git_revision_hash() -> Option<String> {
|
||||
let result = process::Command::new("git")
|
||||
.args(&["rev-parse", "--short=10", "HEAD"])
|
||||
.output();
|
||||
result.ok().map(|output| {
|
||||
String::from_utf8_lossy(&output.stdout).trim().to_string()
|
||||
result.ok().and_then(|output| {
|
||||
let v = String::from_utf8_lossy(&output.stdout).trim().to_string();
|
||||
if v.is_empty() {
|
||||
None
|
||||
} else {
|
||||
Some(v)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
fn generate_man_page<P: AsRef<Path>>(outdir: P) -> io::Result<()> {
|
||||
// If asciidoctor isn't installed, fallback to asciidoc.
|
||||
if let Err(err) = process::Command::new("asciidoctor").output() {
|
||||
eprintln!(
|
||||
"Could not run 'asciidoctor' binary, falling back to 'a2x'."
|
||||
);
|
||||
eprintln!("Error from running 'asciidoctor': {}", err);
|
||||
return legacy_generate_man_page::<P>(outdir);
|
||||
}
|
||||
// 1. Read asciidoctor template.
|
||||
// 2. Interpolate template with auto-generated docs.
|
||||
// 3. Save interpolation to disk.
|
||||
// 4. Use asciidoctor to convert to man page.
|
||||
let outdir = outdir.as_ref();
|
||||
let cwd = env::current_dir()?;
|
||||
let tpl_path = cwd.join("doc").join("rg.1.txt.tpl");
|
||||
let txt_path = outdir.join("rg.1.txt");
|
||||
|
||||
let mut tpl = String::new();
|
||||
File::open(&tpl_path)?.read_to_string(&mut tpl)?;
|
||||
let options =
|
||||
formatted_options()?.replace("{", "&#123;").replace("}", "&#125;");
|
||||
tpl = tpl.replace("{OPTIONS}", &options);
|
||||
|
||||
let githash = git_revision_hash();
|
||||
let githash = githash.as_ref().map(|x| &**x);
|
||||
tpl = tpl.replace("{VERSION}", &app::long_version(githash, false));
|
||||
|
||||
File::create(&txt_path)?.write_all(tpl.as_bytes())?;
|
||||
let result = process::Command::new("asciidoctor")
|
||||
.arg("--doctype")
|
||||
.arg("manpage")
|
||||
.arg("--backend")
|
||||
.arg("manpage")
|
||||
.arg(&txt_path)
|
||||
.spawn()?
|
||||
.wait()?;
|
||||
if !result.success() {
|
||||
let msg =
|
||||
format!("'asciidoctor' failed with exit code {:?}", result.code());
|
||||
return Err(ioerr(msg));
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn legacy_generate_man_page<P: AsRef<Path>>(outdir: P) -> io::Result<()> {
|
||||
// If asciidoc isn't installed, then don't do anything.
|
||||
if let Err(err) = process::Command::new("a2x").output() {
|
||||
eprintln!("Could not run 'a2x' binary, skipping man page generation.");
|
||||
@@ -85,13 +131,15 @@ fn generate_man_page<P: AsRef<Path>>(outdir: P) -> io::Result<()> {
|
||||
|
||||
let githash = git_revision_hash();
|
||||
let githash = githash.as_ref().map(|x| &**x);
|
||||
tpl = tpl.replace("{VERSION}", &app::long_version(githash));
|
||||
tpl = tpl.replace("{VERSION}", &app::long_version(githash, false));
|
||||
|
||||
File::create(&txt_path)?.write_all(tpl.as_bytes())?;
|
||||
let result = process::Command::new("a2x")
|
||||
.arg("--no-xmllint")
|
||||
.arg("--doctype").arg("manpage")
|
||||
.arg("--format").arg("manpage")
|
||||
.arg("--doctype")
|
||||
.arg("manpage")
|
||||
.arg("--format")
|
||||
.arg("manpage")
|
||||
.arg(&txt_path)
|
||||
.spawn()?
|
||||
.wait()?;
|
||||
@@ -114,7 +162,7 @@ fn formatted_options() -> io::Result<String> {
|
||||
// ripgrep only has two positional arguments, and probably will only
|
||||
// ever have two positional arguments, so we just hardcode them into
|
||||
// the template.
|
||||
if let app::RGArgKind::Positional{..} = arg.kind {
|
||||
if let app::RGArgKind::Positional { .. } = arg.kind {
|
||||
continue;
|
||||
}
|
||||
formatted.push(formatted_arg(&arg)?);
|
||||
@@ -124,7 +172,9 @@ fn formatted_options() -> io::Result<String> {
|
||||
|
||||
fn formatted_arg(arg: &RGArg) -> io::Result<String> {
|
||||
match arg.kind {
|
||||
RGArgKind::Positional{..} => panic!("unexpected positional argument"),
|
||||
RGArgKind::Positional { .. } => {
|
||||
panic!("unexpected positional argument")
|
||||
}
|
||||
RGArgKind::Switch { long, short, multiple } => {
|
||||
let mut out = vec![];
|
||||
|
||||
@@ -163,7 +213,17 @@ fn formatted_arg(arg: &RGArg) -> io::Result<String> {
|
||||
}
|
||||
|
||||
fn formatted_doc_txt(arg: &RGArg) -> io::Result<String> {
|
||||
let paragraphs: Vec<&str> = arg.doc_long.split("\n\n").collect();
|
||||
let paragraphs: Vec<String> = arg
|
||||
.doc_long
|
||||
.replace("{", "{")
|
||||
.replace("}", r"}")
|
||||
// Hack to render ** literally in man page correctly. We can't put
|
||||
// these crazy +++ in the help text directly, since that shows
|
||||
// literally in --help output.
|
||||
.replace("*-g 'foo/**'*", "*-g +++'foo/**'+++*")
|
||||
.split("\n\n")
|
||||
.map(|s| s.to_string())
|
||||
.collect();
|
||||
if paragraphs.is_empty() {
|
||||
return Err(ioerr(format!("missing docs for --{}", arg.name)));
|
||||
}
|
||||
|
||||
@@ -1,60 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
# package the build artifacts
|
||||
|
||||
set -ex
|
||||
|
||||
. "$(dirname $0)/utils.sh"
|
||||
|
||||
# Generate artifacts for release
|
||||
mk_artifacts() {
|
||||
if is_ssse3_target; then
|
||||
RUSTFLAGS="-C target-feature=+ssse3" \
|
||||
cargo build --target "$TARGET" --release --features simd-accel
|
||||
else
|
||||
cargo build --target "$TARGET" --release
|
||||
fi
|
||||
}
|
||||
|
||||
mk_tarball() {
|
||||
# When cross-compiling, use the right `strip` tool on the binary.
|
||||
local gcc_prefix="$(gcc_prefix)"
|
||||
# Create a temporary dir that contains our staging area.
|
||||
# $tmpdir/$name is what eventually ends up as the deployed archive.
|
||||
local tmpdir="$(mktemp -d)"
|
||||
local name="${PROJECT_NAME}-${TRAVIS_TAG}-${TARGET}"
|
||||
local staging="$tmpdir/$name"
|
||||
mkdir -p "$staging"/{complete,doc}
|
||||
# The deployment directory is where the final archive will reside.
|
||||
# This path is known by the .travis.yml configuration.
|
||||
local out_dir="$(pwd)/deployment"
|
||||
mkdir -p "$out_dir"
|
||||
# Find the correct (most recent) Cargo "out" directory. The out directory
|
||||
# contains shell completion files and the man page.
|
||||
local cargo_out_dir="$(cargo_out_dir "target/$TARGET")"
|
||||
|
||||
# Copy the ripgrep binary and strip it.
|
||||
cp "target/$TARGET/release/rg" "$staging/rg"
|
||||
"${gcc_prefix}strip" "$staging/rg"
|
||||
# Copy the licenses and README.
|
||||
cp {README.md,UNLICENSE,COPYING,LICENSE-MIT} "$staging/"
|
||||
# Copy documentation and man page.
|
||||
cp {CHANGELOG.md,FAQ.md,GUIDE.md} "$staging/doc/"
|
||||
if command -V a2x 2>&1 > /dev/null; then
|
||||
# The man page should only exist if we have asciidoc installed.
|
||||
cp "$cargo_out_dir/rg.1" "$staging/doc/"
|
||||
fi
|
||||
# Copy shell completion files.
|
||||
cp "$cargo_out_dir"/{rg.bash,rg.fish,_rg.ps1} "$staging/complete/"
|
||||
cp complete/_rg "$staging/complete/"
|
||||
|
||||
(cd "$tmpdir" && tar czf "$out_dir/$name.tar.gz" "$name")
|
||||
rm -rf "$tmpdir"
|
||||
}
|
||||
|
||||
main() {
|
||||
mk_artifacts
|
||||
mk_tarball
|
||||
}
|
||||
|
||||
main
|
||||
42
ci/build-deb
Executable file
@@ -0,0 +1,42 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -e
|
||||
D="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
|
||||
|
||||
# This script builds a binary dpkg for Debian based distros. It does not
|
||||
# currently run in CI, and is instead run manually and the resulting dpkg is
|
||||
# uploaded to GitHub via the web UI.
|
||||
#
|
||||
# Note that this requires 'cargo deb', which can be installed with
|
||||
# 'cargo install cargo-deb'.
|
||||
#
|
||||
# This should be run from the root of the ripgrep repo.
|
||||
|
||||
if ! command -V cargo-deb > /dev/null 2>&1; then
|
||||
echo "cargo-deb command missing" >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if ! command -V asciidoctor > /dev/null 2>&1; then
|
||||
echo "asciidoctor command missing" >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# 'cargo deb' does not seem to provide a way to specify an asset that is
|
||||
# created at build time, such as ripgrep's man page. To work around this,
|
||||
# we force a debug build, copy out the man page (and shell completions)
|
||||
# produced from that build, put it into a predictable location and then build
|
||||
# the deb, which knows where to look.
|
||||
cargo build
|
||||
|
||||
DEPLOY_DIR=deployment/deb
|
||||
OUT_DIR="$("$D"/cargo-out-dir target/debug/)"
|
||||
mkdir -p "$DEPLOY_DIR"
|
||||
|
||||
# Copy man page and shell completions.
|
||||
cp "$OUT_DIR"/{rg.1,rg.bash,rg.fish} "$DEPLOY_DIR/"
|
||||
cp complete/_rg "$DEPLOY_DIR/"
|
||||
|
||||
# Since we're distributing the dpkg, we don't know whether the user will have
|
||||
# PCRE2 installed, so just do a static build.
|
||||
PCRE2_SYS_STATIC=1 cargo deb --target x86_64-unknown-linux-musl
|
||||
19
ci/cargo-out-dir
Executable file
@@ -0,0 +1,19 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Finds Cargo's `OUT_DIR` directory from the most recent build.
|
||||
#
|
||||
# This requires one parameter corresponding to the target directory
|
||||
# to search for the build output.
|
||||
|
||||
if [ $# != 1 ]; then
|
||||
echo "Usage: $(basename "$0") <target-dir>" >&2
|
||||
exit 2
|
||||
fi
|
||||
|
||||
# This works by finding the most recent stamp file, which is produced by
|
||||
# every ripgrep build.
|
||||
target_dir="$1"
|
||||
find "$target_dir" -name ripgrep-stamp -print0 \
|
||||
| xargs -0 ls -t \
|
||||
| head -n1 \
|
||||
| xargs dirname
|
||||
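This is the helper that ci/build-deb above uses to find the generated man page and shell completions; a minimal usage sketch, assuming a debug build has already been run from the repository root:

$ cargo build
$ outdir="$(ci/cargo-out-dir target/debug/)"
$ ls "$outdir"/rg.1 "$outdir"/rg.bash "$outdir"/rg.fish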
24
ci/docker/README.md
Normal file
@@ -0,0 +1,24 @@
|
||||
These are Docker images used for cross compilation in CI builds (or locally)
|
||||
via the [Cross](https://github.com/rust-embedded/cross) tool.
|
||||
|
||||
The Cross tool actually provides its own Docker images, and all Docker images
|
||||
in this directory are derived from one of them. We provide our own in order
|
||||
to customize the environment. For example, we need to install some things like
|
||||
`asciidoctor` in order to generate man pages. We also install compression tools
|
||||
like `xz` so that tests for the `-z/--search-zip` flag are run.
|
||||
|
||||
If you make a change to a Docker image, then you can re-build it. `cd` into the
|
||||
directory containing the `Dockerfile` and run:
|
||||
|
||||
$ cd x86_64-unknown-linux-musl
|
||||
$ ./build
|
||||
|
||||
At this point, subsequent uses of `cross` will now use your built image since
|
||||
Docker prefers local images over remote images. In order to make these changes
|
||||
stick, they need to be pushed to Docker Hub:
|
||||
|
||||
$ docker push burntsushi/cross:x86_64-unknown-linux-musl
|
||||
|
||||
Of course, only I (BurntSushi) can push to that location. To make `cross` use
|
||||
a different location, then edit `Cross.toml` in the root of this repo to use
|
||||
a different image name for the desired target.
|
||||
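The Cross.toml edit mentioned at the end of the README is not part of this diff; a minimal sketch of such an override, with an illustrative (not actual) image name, written in the same heredoc style the CI scripts use:

$ cat >> Cross.toml <<EOF
[target.x86_64-unknown-linux-musl]
image = "yourname/cross:x86_64-unknown-linux-musl"
EOF

If a [target.x86_64-unknown-linux-musl] section already exists in Cross.toml, edit its image value instead of appending a duplicate section.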
4
ci/docker/arm-unknown-linux-gnueabihf/Dockerfile
Normal file
@@ -0,0 +1,4 @@
|
||||
FROM rustembedded/cross:arm-unknown-linux-gnueabihf
|
||||
|
||||
COPY stage/ubuntu-install-packages /
|
||||
RUN /ubuntu-install-packages
|
||||
5
ci/docker/arm-unknown-linux-gnueabihf/build
Executable file
@@ -0,0 +1,5 @@
|
||||
#!/bin/sh
|
||||
|
||||
mkdir -p stage
|
||||
cp ../../ubuntu-install-packages ./stage/
|
||||
docker build -t burntsushi/cross:arm-unknown-linux-gnueabihf .
|
||||
4
ci/docker/i686-unknown-linux-gnu/Dockerfile
Normal file
@@ -0,0 +1,4 @@
|
||||
FROM rustembedded/cross:i686-unknown-linux-gnu
|
||||
|
||||
COPY stage/ubuntu-install-packages /
|
||||
RUN /ubuntu-install-packages
|
||||
5
ci/docker/i686-unknown-linux-gnu/build
Executable file
@@ -0,0 +1,5 @@
|
||||
#!/bin/sh
|
||||
|
||||
mkdir -p stage
|
||||
cp ../../ubuntu-install-packages ./stage/
|
||||
docker build -t burntsushi/cross:i686-unknown-linux-gnu .
|
||||
4
ci/docker/mips64-unknown-linux-gnuabi64/Dockerfile
Normal file
@@ -0,0 +1,4 @@
|
||||
FROM rustembedded/cross:mips64-unknown-linux-gnuabi64
|
||||
|
||||
COPY stage/ubuntu-install-packages /
|
||||
RUN /ubuntu-install-packages
|
||||
5
ci/docker/mips64-unknown-linux-gnuabi64/build
Executable file
@@ -0,0 +1,5 @@
|
||||
#!/bin/sh
|
||||
|
||||
mkdir -p stage
|
||||
cp ../../ubuntu-install-packages ./stage/
|
||||
docker build -t burntsushi/cross:mips64-unknown-linux-gnuabi64 .
|
||||
4
ci/docker/x86_64-unknown-linux-musl/Dockerfile
Normal file
@@ -0,0 +1,4 @@
|
||||
FROM rustembedded/cross:x86_64-unknown-linux-musl
|
||||
|
||||
COPY stage/ubuntu-install-packages /
|
||||
RUN /ubuntu-install-packages
|
||||
5
ci/docker/x86_64-unknown-linux-musl/build
Executable file
@@ -0,0 +1,5 @@
|
||||
#!/bin/sh
|
||||
|
||||
mkdir -p stage
|
||||
cp ../../ubuntu-install-packages ./stage/
|
||||
docker build -t burntsushi/cross:x86_64-unknown-linux-musl .
|
||||
@@ -1,61 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
# install stuff needed for the `script` phase
|
||||
|
||||
# Where rustup gets installed.
|
||||
export PATH="$PATH:$HOME/.cargo/bin"
|
||||
|
||||
set -ex
|
||||
|
||||
. "$(dirname $0)/utils.sh"
|
||||
|
||||
install_rustup() {
|
||||
curl https://sh.rustup.rs -sSf \
|
||||
| sh -s -- -y --default-toolchain="$TRAVIS_RUST_VERSION"
|
||||
rustc -V
|
||||
cargo -V
|
||||
}
|
||||
|
||||
install_targets() {
|
||||
if [ $(host) != "$TARGET" ]; then
|
||||
rustup target add $TARGET
|
||||
fi
|
||||
}
|
||||
|
||||
install_osx_dependencies() {
|
||||
if ! is_osx; then
|
||||
return
|
||||
fi
|
||||
|
||||
brew install asciidoc
|
||||
}
|
||||
|
||||
configure_cargo() {
|
||||
local prefix=$(gcc_prefix)
|
||||
if [ -n "${prefix}" ]; then
|
||||
local gcc_suffix=
|
||||
if [ -n "$GCC_VERSION" ]; then
|
||||
gcc_suffix="-$GCC_VERSION"
|
||||
fi
|
||||
local gcc="${prefix}gcc${gcc_suffix}"
|
||||
|
||||
# information about the cross compiler
|
||||
"${gcc}" -v
|
||||
|
||||
# tell cargo which linker to use for cross compilation
|
||||
mkdir -p .cargo
|
||||
cat >>.cargo/config <<EOF
|
||||
[target.$TARGET]
|
||||
linker = "${gcc}"
|
||||
EOF
|
||||
fi
|
||||
}
|
||||
|
||||
main() {
|
||||
install_osx_dependencies
|
||||
install_rustup
|
||||
install_targets
|
||||
configure_cargo
|
||||
}
|
||||
|
||||
main
|
||||
3
ci/macos-install-packages
Executable file
@@ -0,0 +1,3 @@
|
||||
#!/bin/sh
|
||||
|
||||
brew install asciidoctor
|
||||
48
ci/script.sh
@@ -1,48 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
# build, test and generate docs in this phase
|
||||
|
||||
set -ex
|
||||
|
||||
. "$(dirname $0)/utils.sh"
|
||||
|
||||
main() {
|
||||
# Test a normal debug build.
|
||||
cargo build --target "$TARGET" --verbose --all
|
||||
|
||||
# Show the output of the most recent build.rs stderr.
|
||||
set +x
|
||||
stderr="$(find "target/$TARGET/debug" -name stderr -print0 | xargs -0 ls -t | head -n1)"
|
||||
if [ -s "$stderr" ]; then
|
||||
echo "===== $stderr ====="
|
||||
cat "$stderr"
|
||||
echo "====="
|
||||
fi
|
||||
set -x
|
||||
|
||||
# sanity check the file type
|
||||
file target/"$TARGET"/debug/rg
|
||||
|
||||
# Apparently tests don't work on arm, so just bail now. I guess we provide
|
||||
# ARM releases on a best effort basis?
|
||||
if is_arm; then
|
||||
return 0
|
||||
fi
|
||||
|
||||
# Test that zsh completions are in sync with ripgrep's actual args.
|
||||
"$(dirname "${0}")/test_complete.sh"
|
||||
|
||||
# Check that we've generated man page and other shell completions.
|
||||
outdir="$(cargo_out_dir "target/$TARGET/debug")"
|
||||
file "$outdir/rg.bash"
|
||||
file "$outdir/rg.fish"
|
||||
file "$outdir/_rg.ps1"
|
||||
# N.B. man page isn't generated on ARM cross-compile, but we gave up
|
||||
# long before this anyway.
|
||||
file "$outdir/rg.1"
|
||||
|
||||
# Run tests for ripgrep and all sub-crates.
|
||||
cargo test --target "$TARGET" --verbose --all
|
||||
}
|
||||
|
||||
main
|
||||
@@ -1,70 +1,73 @@
|
||||
#!/usr/bin/env zsh
|
||||
|
||||
emulate zsh -o extended_glob -o no_function_argzero -o no_unset
|
||||
|
||||
##
|
||||
# Compares options in `rg --help` output to options in zsh completion function
|
||||
|
||||
emulate -R zsh
|
||||
setopt extended_glob
|
||||
setopt no_function_argzero
|
||||
setopt no_unset
|
||||
|
||||
get_comp_args() {
|
||||
# Technically there are many options that the completion system sets that
|
||||
# our function may rely on, but we'll trust that we've got it mostly right
|
||||
setopt local_options unset
|
||||
|
||||
# Our completion function recognises a special variable which tells it to
|
||||
# dump the _arguments specs and then just return. But do this in a sub-shell
|
||||
# anyway to avoid any weirdness
|
||||
( _RG_COMPLETE_LIST_ARGS=1 source $1 )
|
||||
return $?
|
||||
}
|
||||
|
||||
main() {
|
||||
local diff
|
||||
local rg="${${0:a}:h}/../target/${TARGET:-}/release/rg"
|
||||
local _rg="${${0:a}:h}/../complete/_rg"
|
||||
local rg="${0:a:h}/../${TARGET_DIR:-target}/release/rg"
|
||||
local _rg="${0:a:h}/../complete/_rg"
|
||||
local -a help_args comp_args
|
||||
|
||||
[[ -e $rg ]] || rg=${rg/%\/release\/rg/\/debug\/rg}
|
||||
|
||||
rg=${rg:a}
|
||||
_rg=${_rg:a}
|
||||
|
||||
[[ -e $rg ]] || {
|
||||
printf >&2 'File not found: %s\n' $rg
|
||||
print -r >&2 "File not found: $rg"
|
||||
return 1
|
||||
}
|
||||
[[ -e $_rg ]] || {
|
||||
printf >&2 'File not found: %s\n' $_rg
|
||||
print -r >&2 "File not found: $_rg"
|
||||
return 1
|
||||
}
|
||||
|
||||
printf 'Comparing options:\n-%s\n+%s\n' $rg $_rg
|
||||
print -rl - 'Comparing options:' "-$rg" "+$_rg"
|
||||
|
||||
# 'Parse' options out of the `--help` output. To prevent false positives we
|
||||
# only look at lines where the first non-white-space character is `-`
|
||||
# only look at lines where the first non-white-space character is `-`, or
|
||||
# where a long option starting with certain letters (see `_rg`) is found.
|
||||
# Occasionally we may have to handle some manually, however
|
||||
help_args=( ${(f)"$(
|
||||
$rg --help |
|
||||
$rg -- '^\s*-' |
|
||||
$rg -io -- '[\t ,](-[a-z0-9]|--[a-z0-9-]+)\b' |
|
||||
tr -d '\t ,' |
|
||||
$rg -i -- '^\s+--?[a-z0-9.]|--[a-z]' |
|
||||
$rg -ior '$1' -- $'[\t /\"\'`.,](-[a-z0-9.]|--[a-z0-9-]+)(,|\\b)' |
|
||||
$rg -v -- --print0 | # False positives
|
||||
sort -u
|
||||
)"} )
|
||||
|
||||
# 'Parse' options out of the completion function
|
||||
comp_args=( ${(f)"$( get_comp_args $_rg )"} )
|
||||
|
||||
# Note that we currently exclude hidden (!...) options; matching these
|
||||
# properly against the `--help` output could be irritating
|
||||
comp_args=( ${comp_args#\(*\)} ) # Strip excluded options
|
||||
comp_args=( ${comp_args#\*} ) # Strip repetition indicator
|
||||
comp_args=( ${comp_args%%-[:[]*} ) # Strip everything after -optname-
|
||||
comp_args=( ${comp_args%%[:+=[]*} ) # Strip everything after other optspecs
|
||||
comp_args=( ${comp_args##[^-]*} ) # Remove non-options
|
||||
|
||||
# This probably isn't necessary, but we should ensure the same order
|
||||
comp_args=( ${(f)"$( printf '%s\n' $comp_args | sort -u )"} )
|
||||
comp_args=( ${(f)"$( print -rl - $comp_args | sort -u )"} )
|
||||
|
||||
(( $#help_args )) || {
|
||||
printf >&2 'Failed to get help_args\n'
|
||||
print -r >&2 'Failed to get help_args'
|
||||
return 1
|
||||
}
|
||||
(( $#comp_args )) || {
|
||||
printf >&2 'Failed to get comp_args\n'
|
||||
print -r >&2 'Failed to get comp_args'
|
||||
return 1
|
||||
}
|
||||
|
||||
@@ -73,12 +76,12 @@ main() {
|
||||
diff -U2 \
|
||||
--label '`rg --help`' \
|
||||
--label '`_rg`' \
|
||||
=( printf '%s\n' $help_args ) =( printf '%s\n' $comp_args )
|
||||
=( print -rl - $help_args ) =( print -rl - $comp_args )
|
||||
else
|
||||
diff -U2 \
|
||||
-L '`rg --help`' \
|
||||
-L '`_rg`' \
|
||||
=( printf '%s\n' $help_args ) =( printf '%s\n' $comp_args )
|
||||
=( print -rl - $help_args ) =( print -rl - $comp_args )
|
||||
fi
|
||||
)"
|
||||
|
||||
@@ -91,4 +94,4 @@ main() {
|
||||
return 0
|
||||
}
|
||||
|
||||
main "${@}"
|
||||
main "$@"
|
||||
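Assuming the script above is the ci/test-complete referenced by the completion function further below, a usage sketch is simply building ripgrep and then invoking it; the script falls back from release/ to debug/ on its own, and TARGET_DIR only needs to be set for non-default target directories:

$ cargo build --release
$ ci/test-complete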
16
ci/ubuntu-install-packages
Executable file
@@ -0,0 +1,16 @@
|
||||
#!/bin/sh
|
||||
|
||||
# This script gets run in weird environments that have been stripped of just
|
||||
# about every inessential thing. In order to keep this script versatile, we
|
||||
# just install 'sudo' and use it like normal if it doesn't exist. If it doesn't
|
||||
# exist, we assume we're root. (Otherwise we ain't doing much of anything
|
||||
# anyway.)
|
||||
if ! command -V sudo; then
|
||||
apt-get update
|
||||
apt-get install -y --no-install-recommends sudo
|
||||
fi
|
||||
sudo apt-get update
|
||||
sudo apt-get install -y --no-install-recommends \
|
||||
asciidoctor \
|
||||
zsh xz-utils liblz4-tool musl-tools \
|
||||
brotli zstd
|
||||
22
ci/utils.sh
@@ -55,9 +55,9 @@ gcc_prefix() {
|
||||
esac
|
||||
}
|
||||
|
||||
is_ssse3_target() {
|
||||
case "$(architecture)" in
|
||||
amd64) return 0 ;;
|
||||
is_musl() {
|
||||
case "$TARGET" in
|
||||
*-musl) return 0 ;;
|
||||
*) return 1 ;;
|
||||
esac
|
||||
}
|
||||
@@ -69,6 +69,13 @@ is_x86() {
|
||||
esac
|
||||
}
|
||||
|
||||
is_x86_64() {
|
||||
case "$(architecture)" in
|
||||
amd64) return 0 ;;
|
||||
*) return 1 ;;
|
||||
esac
|
||||
}
|
||||
|
||||
is_arm() {
|
||||
case "$(architecture)" in
|
||||
armhf) return 0 ;;
|
||||
@@ -89,3 +96,12 @@ is_osx() {
|
||||
*) return 1 ;;
|
||||
esac
|
||||
}
|
||||
|
||||
builder() {
|
||||
if is_musl && is_x86_64; then
|
||||
cargo install cross
|
||||
echo "cross"
|
||||
else
|
||||
echo "cargo"
|
||||
fi
|
||||
}
|
||||
|
||||
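The new builder helper only prints the name of the build tool to use (cross for musl on x86_64, cargo otherwise); a hypothetical call site, not shown in this diff, would capture and invoke it like so:

CARGO="$(builder)"
"$CARGO" build --target "$TARGET" --release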
640
complete/_rg
@@ -3,163 +3,392 @@
|
||||
##
|
||||
# zsh completion function for ripgrep
|
||||
#
|
||||
# Run ci/test_complete.sh after building to ensure that the options supported by
|
||||
# Run ci/test-complete after building to ensure that the options supported by
|
||||
# this function stay in synch with the `rg` binary.
|
||||
#
|
||||
# @see https://github.com/zsh-users/zsh/blob/master/Etc/completion-style-guide
|
||||
# For convenience, a completion reference guide is included at the bottom of
|
||||
# this file.
|
||||
#
|
||||
# Based on code from the zsh-users project — see copyright notice below.
|
||||
# Originally based on code from the zsh-users project — see copyright notice
|
||||
# below.
|
||||
|
||||
_rg() {
|
||||
local state_descr ret curcontext="${curcontext:-}"
|
||||
local -a context line state
|
||||
local -A opt_args val_args
|
||||
local -a rg_args
|
||||
local curcontext=$curcontext no='!' descr ret=1
|
||||
local -a context line state state_descr args tmp suf
|
||||
local -A opt_args
|
||||
|
||||
# Sort by long option name to match `rg --help`
|
||||
rg_args=(
|
||||
'(-A -C --after-context --context)'{-A+,--after-context=}'[specify lines to show after each match]:number of lines'
|
||||
'(-B -C --before-context --context)'{-B+,--before-context=}'[specify lines to show before each match]:number of lines'
|
||||
'(-i -s -S --ignore-case --case-sensitive --smart-case)'{-s,--case-sensitive}'[search case-sensitively]'
|
||||
'--color=[specify when to use colors in output]:when:( never auto always ansi )'
|
||||
'*--colors=[specify color settings and styles]: :->colorspec'
|
||||
'--column[show column numbers]'
|
||||
'(-A -B -C --after-context --before-context --context)'{-C+,--context=}'[specify lines to show before and after each match]:number of lines'
|
||||
'--context-separator=[specify string used to separate non-continuous context lines in output]:separator'
|
||||
'(-c --count --passthrough --passthru)'{-c,--count}'[only show count of matches for each file]'
|
||||
'--debug[show debug messages]'
|
||||
'--dfa-size-limit=[specify upper size limit of generated DFA]:DFA size'
|
||||
'(-E --encoding)'{-E+,--encoding=}'[specify text encoding of files to search]: :_rg_encodings'
|
||||
'*'{-f+,--file=}'[specify file containing patterns to search for]:file:_files'
|
||||
"(1)--files[show each file that would be searched (but don't search)]"
|
||||
'(-l --files-with-matches --files-without-match)'{-l,--files-with-matches}'[only show names of files with matches]'
|
||||
'(-l --files-with-matches --files-without-match)--files-without-match[only show names of files without matches]'
|
||||
'(-F --fixed-strings)'{-F,--fixed-strings}'[treat pattern as literal string instead of regular expression]'
|
||||
'(-L --follow)'{-L,--follow}'[follow symlinks]'
|
||||
'*'{-g+,--glob=}'[include or exclude files for searching that match the specified glob]:glob'
|
||||
'(: -)'{-h,--help}'[display help information]'
|
||||
'(-p --no-heading --pretty --vimgrep)--heading[show matches grouped by file name]'
|
||||
'--hidden[search hidden files and directories]'
|
||||
'*--iglob=[include or exclude files for searching that match the specified case-insensitive glob]:glob'
|
||||
'(-i -s -S --case-sensitive --ignore-case --smart-case)'{-i,--ignore-case}'[search case-insensitively]'
|
||||
'--ignore-file=[specify additional ignore file]:file:_files'
|
||||
'(-v --invert-match)'{-v,--invert-match}'[invert matching]'
|
||||
'(-n -N --line-number --no-line-number)'{-n,--line-number}'[show line numbers]'
|
||||
'(-N --no-line-number)--line-number-width=[specify width of displayed line number]:number of columns'
|
||||
'(-w -x --line-regexp --word-regexp)'{-x,--line-regexp}'[only show matches surrounded by line boundaries]'
|
||||
'(-M --max-columns)'{-M+,--max-columns=}'[specify max length of lines to print]:number of bytes'
|
||||
'(-m --max-count)'{-m+,--max-count=}'[specify max number of matches per file]:number of matches'
|
||||
'--max-filesize=[specify size above which files should be ignored]:file size'
|
||||
'--maxdepth=[specify max number of directories to descend]:number of directories'
|
||||
'(--mmap --no-mmap)--mmap[search using memory maps when possible]'
|
||||
'(-H --with-filename --no-filename)--no-filename[suppress all file names]'
|
||||
"(-p --heading --pretty --vimgrep)--no-heading[don't group matches by file name]"
|
||||
"--no-config[don't load configuration files]"
|
||||
"(--no-ignore-parent)--no-ignore[don't respect ignore files]"
|
||||
# ripgrep has many options which negate the effect of a more common one — for
|
||||
# example, `--no-column` to negate `--column`, and `--messages` to negate
|
||||
# `--no-messages`. There are so many of these, and they're so infrequently
|
||||
# used, that some users will probably find it irritating if they're completed
|
||||
# indiscriminately, so let's not do that unless either the current prefix
|
||||
# matches one of those negation options or the user has the `complete-all`
|
||||
# style set. Note that this prefix check has to be updated manually to account
|
||||
# for all of the potential negation options listed below!
|
||||
if
|
||||
# We also want to list all of these options during testing
|
||||
[[ $_RG_COMPLETE_LIST_ARGS == (1|t*|y*) ]] ||
|
||||
# (--[imnp]* => --ignore*, --messages, --no-*, --pcre2-unicode)
|
||||
[[ $PREFIX$SUFFIX == --[imnp]* ]] ||
|
||||
zstyle -t ":complete:$curcontext:*" complete-all
|
||||
then
|
||||
no=
|
||||
fi
|
||||
|
||||
# We make heavy use of argument groups here to prevent the option specs from
|
||||
# growing unwieldy. These aren't supported in zsh <5.4, though, so we'll strip
|
||||
# them out below if necessary. This makes the exclusions inaccurate on those
|
||||
# older versions, but oh well — it's not that big a deal
|
||||
args=(
|
||||
+ '(exclusive)' # Misc. fully exclusive options
|
||||
'(: * -)'{-h,--help}'[display help information]'
|
||||
'(: * -)'{-V,--version}'[display version information]'
|
||||
'(: * -)'--pcre2-version'[print the version of PCRE2 used by ripgrep, if available]'
|
||||
|
||||
+ '(buffered)' # buffering options
|
||||
'--line-buffered[force line buffering]'
|
||||
$no"--no-line-buffered[don't force line buffering]"
|
||||
'--block-buffered[force block buffering]'
|
||||
$no"--no-block-buffered[don't force block buffering]"
|
||||
|
||||
+ '(case)' # Case-sensitivity options
|
||||
{-i,--ignore-case}'[search case-insensitively]'
|
||||
{-s,--case-sensitive}'[search case-sensitively]'
|
||||
{-S,--smart-case}'[search case-insensitively if pattern is all lowercase]'
|
||||
|
||||
+ '(context-a)' # Context (after) options
|
||||
'(context-c)'{-A+,--after-context=}'[specify lines to show after each match]:number of lines'
|
||||
|
||||
+ '(context-b)' # Context (before) options
|
||||
'(context-c)'{-B+,--before-context=}'[specify lines to show before each match]:number of lines'
|
||||
|
||||
+ '(context-c)' # Context (combined) options
|
||||
'(context-a context-b)'{-C+,--context=}'[specify lines to show before and after each match]:number of lines'
|
||||
|
||||
+ '(column)' # Column options
|
||||
'--column[show column numbers for matches]'
|
||||
$no"--no-column[don't show column numbers for matches]"
|
||||
|
||||
+ '(count)' # Counting options
|
||||
{-c,--count}'[only show count of matching lines for each file]'
|
||||
'--count-matches[only show count of individual matches for each file]'
|
||||
'--include-zero[include files with zero matches in summary]'
|
||||
|
||||
+ '(encoding)' # Encoding options
|
||||
{-E+,--encoding=}'[specify text encoding of files to search]: :_rg_encodings'
|
||||
$no'--no-encoding[use default text encoding]'
|
||||
|
||||
+ '(engine)' # Engine choice options
|
||||
'--engine=[select which regex engine to use]:when:((
|
||||
default\:"use default engine"
|
||||
pcre2\:"identical to --pcre2"
|
||||
auto\:"identical to --auto-hybrid-regex"
|
||||
))'
|
||||
|
||||
+ file # File-input options
|
||||
'(1)*'{-f+,--file=}'[specify file containing patterns to search for]: :_files'
|
||||
|
||||
+ '(file-match)' # Files with/without match options
|
||||
'(stats)'{-l,--files-with-matches}'[only show names of files with matches]'
|
||||
'(stats)--files-without-match[only show names of files without matches]'
|
||||
|
||||
+ '(file-name)' # File-name options
|
||||
{-H,--with-filename}'[show file name for matches]'
|
||||
{-I,--no-filename}"[don't show file name for matches]"
|
||||
|
||||
+ '(file-system)' # File system options
|
||||
"--one-file-system[don't descend into directories on other file systems]"
|
||||
$no'--no-one-file-system[descend into directories on other file systems]'
|
||||
|
||||
+ '(fixed)' # Fixed-string options
|
||||
{-F,--fixed-strings}'[treat pattern as literal string instead of regular expression]'
|
||||
$no"--no-fixed-strings[don't treat pattern as literal string]"
|
||||
|
||||
+ '(follow)' # Symlink-following options
|
||||
{-L,--follow}'[follow symlinks]'
|
||||
$no"--no-follow[don't follow symlinks]"
|
||||
|
||||
+ glob # File-glob options
|
||||
'*'{-g+,--glob=}'[include/exclude files matching specified glob]:glob'
|
||||
'*--iglob=[include/exclude files matching specified case-insensitive glob]:glob'
|
||||
|
||||
+ '(glob-case-insensitive)' # File-glob case sensitivity options
|
||||
'--glob-case-insensitive[treat -g/--glob patterns case insensitively]'
|
||||
$no'--no-glob-case-insensitive[treat -g/--glob patterns case sensitively]'
|
||||
|
||||
+ '(heading)' # Heading options
|
||||
'(pretty-vimgrep)--heading[show matches grouped by file name]'
|
||||
"(pretty-vimgrep)--no-heading[don't show matches grouped by file name]"
|
||||
|
||||
+ '(hidden)' # Hidden-file options
|
||||
{-.,--hidden}'[search hidden files and directories]'
|
||||
$no"--no-hidden[don't search hidden files and directories]"
|
||||
|
||||
+ '(hybrid)' # hybrid regex options
|
||||
'--auto-hybrid-regex[dynamically use PCRE2 if necessary]'
|
||||
$no"--no-auto-hybrid-regex[don't dynamically use PCRE2 if necessary]"
|
||||
|
||||
+ '(ignore)' # Ignore-file options
|
||||
"(--no-ignore-global --no-ignore-parent --no-ignore-vcs --no-ignore-dot)--no-ignore[don't respect ignore files]"
|
||||
$no'(--ignore-global --ignore-parent --ignore-vcs --ignore-dot)--ignore[respect ignore files]'
|
||||
|
||||
+ '(ignore-file-case-insensitive)' # Ignore-file case sensitivity options
|
||||
'--ignore-file-case-insensitive[process ignore files case insensitively]'
|
||||
$no'--no-ignore-file-case-insensitive[process ignore files case sensitively]'
|
||||
|
||||
+ '(ignore-exclude)' # Local exclude (ignore)-file options
|
||||
"--no-ignore-exclude[don't respect local exclude (ignore) files]"
|
||||
$no'--ignore-exclude[respect local exclude (ignore) files]'
|
||||
|
||||
+ '(ignore-global)' # Global ignore-file options
|
||||
"--no-ignore-global[don't respect global ignore files]"
|
||||
$no'--ignore-global[respect global ignore files]'
|
||||
|
||||
+ '(ignore-parent)' # Parent ignore-file options
|
||||
"--no-ignore-parent[don't respect ignore files in parent directories]"
|
||||
$no'--ignore-parent[respect ignore files in parent directories]'
|
||||
|
||||
+ '(ignore-vcs)' # VCS ignore-file options
|
||||
"--no-ignore-vcs[don't respect version control ignore files]"
|
||||
'(-n -N --line-number --no-line-number)'{-N,--no-line-number}'[suppress line numbers]'
|
||||
'--no-messages[suppress all error messages]'
|
||||
"(--mmap --no-mmap)--no-mmap[don't search using memory maps]"
|
||||
'(-0 --null)'{-0,--null}'[print NUL byte after file names]'
|
||||
'(-o -r --only-matching --passthrough --passthru --replace)'{-o,--only-matching}'[show only matching part of each line]'
|
||||
'(-c -o -r --count --only-matching --passthrough --replace)--passthru[show both matching and non-matching lines]'
|
||||
'!(-c -o -r --count --only-matching --passthru --replace)--passthrough'
|
||||
'--path-separator=[specify path separator to use when printing file names]:separator'
|
||||
'(-p --heading --no-heading --pretty --vimgrep)'{-p,--pretty}'[alias for --color=always --heading -n]'
|
||||
'(-q --quiet)'{-q,--quiet}'[suppress normal output]'
|
||||
'--regex-size-limit=[specify upper size limit of compiled regex]:regex size'
|
||||
'(1 -f --file)*'{-e+,--regexp=}'[specify pattern]:pattern'
|
||||
'(-c -o -r --count --only-matching --passthrough --passthru --replace)'{-r+,--replace=}'[specify string used to replace matches]:replace string'
|
||||
'(-i -s -S --ignore-case --case-sensitive --smart-case)'{-S,--smart-case}'[search case-insensitively if the pattern is all lowercase]'
|
||||
'(-j --threads)--sort-files[sort results by file path (disables parallelism)]'
|
||||
'(-a --text)'{-a,--text}'[search binary files as if they were text]'
|
||||
'(-j --sort-files --threads)'{-j+,--threads=}'[specify approximate number of threads to use]:number of threads'
|
||||
$no'--ignore-vcs[respect version control ignore files]'
|
||||
|
||||
+ '(require-git)' # git specific settings
|
||||
"--no-require-git[don't require git repository to respect gitignore rules]"
|
||||
$no'--require-git[require git repository to respect gitignore rules]'
|
||||
|
||||
+ '(ignore-dot)' # .ignore options
|
||||
"--no-ignore-dot[don't respect .ignore files]"
|
||||
$no'--ignore-dot[respect .ignore files]'
|
||||
|
||||
+ '(ignore-files)' # custom global ignore file options
|
||||
"--no-ignore-files[don't respect --ignore-file flags]"
|
||||
$no'--ignore-files[respect --ignore-file files]'
|
||||
|
||||
+ '(json)' # JSON options
|
||||
'--json[output results in JSON Lines format]'
|
||||
$no"--no-json[don't output results in JSON Lines format]"
|
||||
|
||||
+ '(line-number)' # Line-number options
|
||||
{-n,--line-number}'[show line numbers for matches]'
|
||||
{-N,--no-line-number}"[don't show line numbers for matches]"
|
||||
|
||||
+ '(line-terminator)' # Line-terminator options
|
||||
'--crlf[use CRLF as line terminator]'
|
||||
$no"--no-crlf[don't use CRLF as line terminator]"
|
||||
'(text)--null-data[use NUL as line terminator]'
|
||||
|
||||
+ '(max-columns-preview)' # max column preview options
|
||||
'--max-columns-preview[show preview for long lines (with -M)]'
|
||||
$no"--no-max-columns-preview[don't show preview for long lines (with -M)]"
|
||||
|
||||
+ '(max-depth)' # Directory-depth options
|
||||
'--max-depth=[specify max number of directories to descend]:number of directories'
|
||||
'!--maxdepth=:number of directories'
|
||||
|
||||
+ '(messages)' # Error-message options
|
||||
'(--no-ignore-messages)--no-messages[suppress some error messages]'
|
||||
$no"--messages[don't suppress error messages affected by --no-messages]"
|
||||
|
||||
+ '(messages-ignore)' # Ignore-error message options
|
||||
"--no-ignore-messages[don't show ignore-file parse error messages]"
|
||||
$no'--ignore-messages[show ignore-file parse error messages]'
|
||||
|
||||
+ '(mmap)' # mmap options
|
||||
'--mmap[search using memory maps when possible]'
|
||||
"--no-mmap[don't search using memory maps]"
|
||||
|
||||
+ '(multiline)' # Multiline options
|
||||
{-U,--multiline}'[permit matching across multiple lines]'
|
||||
$no'(multiline-dotall)--no-multiline[restrict matches to at most one line each]'
|
||||
|
||||
+ '(multiline-dotall)' # Multiline DOTALL options
|
||||
'(--no-multiline)--multiline-dotall[allow "." to match newline (with -U)]'
|
||||
$no"(--no-multiline)--no-multiline-dotall[don't allow \".\" to match newline (with -U)]"
|
||||
|
||||
+ '(only)' # Only-match options
|
||||
{-o,--only-matching}'[show only matching part of each line]'
|
||||
|
||||
+ '(passthru)' # Pass-through options
|
||||
'(--vimgrep)--passthru[show both matching and non-matching lines]'
|
||||
'!(--vimgrep)--passthrough'
|
||||
|
||||
+ '(pcre2)' # PCRE2 options
|
||||
{-P,--pcre2}'[enable matching with PCRE2]'
|
||||
$no'(pcre2-unicode)--no-pcre2[disable matching with PCRE2]'
|
||||
|
||||
+ '(pcre2-unicode)' # PCRE2 Unicode options
|
||||
$no'(--no-pcre2 --no-pcre2-unicode)--pcre2-unicode[enable PCRE2 Unicode mode (with -P)]'
|
||||
'(--no-pcre2 --pcre2-unicode)--no-pcre2-unicode[disable PCRE2 Unicode mode (with -P)]'
|
||||
|
||||
+ '(pre)' # Preprocessing options
|
||||
'(-z --search-zip)--pre=[specify preprocessor utility]:preprocessor utility:_command_names -e'
|
||||
$no'--no-pre[disable preprocessor utility]'
|
||||
|
||||
+ pre-glob # Preprocessing glob options
|
||||
'*--pre-glob[include/exclude files for preprocessing with --pre]'
|
||||
|
||||
+ '(pretty-vimgrep)' # Pretty/vimgrep display options
|
||||
'(heading)'{-p,--pretty}'[alias for --color=always --heading -n]'
|
||||
'(heading passthru)--vimgrep[show results in vim-compatible format]'
|
||||
|
||||
+ regexp # Explicit pattern options
|
||||
'(1 file)*'{-e+,--regexp=}'[specify pattern]:pattern'
|
||||
|
||||
+ '(replace)' # Replacement options
|
||||
{-r+,--replace=}'[specify string used to replace matches]:replace string'
|
||||
|
||||
+ '(sort)' # File-sorting options
|
||||
'(threads)--sort=[sort results in ascending order (disables parallelism)]:sort method:((
|
||||
none\:"no sorting"
|
||||
path\:"sort by file path"
|
||||
modified\:"sort by last modified time"
|
||||
accessed\:"sort by last accessed time"
|
||||
created\:"sort by creation time"
|
||||
))'
|
||||
'(threads)--sortr=[sort results in descending order (disables parallelism)]:sort method:((
|
||||
none\:"no sorting"
|
||||
path\:"sort by file path"
|
||||
modified\:"sort by last modified time"
|
||||
accessed\:"sort by last accessed time"
|
||||
created\:"sort by creation time"
|
||||
))'
|
||||
'!(threads)--sort-files[sort results by file path (disables parallelism)]'
|
||||
|
||||
+ '(stats)' # Statistics options
|
||||
'(--files file-match)--stats[show search statistics]'
|
||||
$no"--no-stats[don't show search statistics]"
|
||||
|
||||
+ '(text)' # Binary-search options
|
||||
{-a,--text}'[search binary files as if they were text]'
|
||||
"--binary[search binary files, don't print binary data]"
|
||||
$no"--no-binary[don't search binary files]"
|
||||
$no"(--null-data)--no-text[don't search binary files as if they were text]"
|
||||
|
||||
+ '(threads)' # Thread-count options
|
||||
'(sort)'{-j+,--threads=}'[specify approximate number of threads to use]:number of threads'
|
||||
|
||||
+ '(trim)' # Trim options
|
||||
'--trim[trim any ASCII whitespace prefix from each line]'
|
||||
$no"--no-trim[don't trim ASCII whitespace prefix from each line]"
|
||||
|
||||
+ type # Type options
|
||||
'*'{-t+,--type=}'[only search files matching specified type]: :_rg_types'
|
||||
'*--type-add=[add new glob for file type]: :->typespec'
|
||||
'*--type-add=[add new glob for specified file type]: :->typespec'
|
||||
'*--type-clear=[clear globs previously defined for specified file type]: :_rg_types'
|
||||
# This should actually be exclusive with everything but other type options
|
||||
'(:)--type-list[show all supported file types and their associated globs]'
|
||||
'*'{-T+,--type-not=}"[don't search files matching specified type]: :_rg_types"
|
||||
'(: *)--type-list[show all supported file types and their associated globs]'
|
||||
'*'{-T+,--type-not=}"[don't search files matching specified file type]: :_rg_types"
|
||||
|
||||
+ '(word-line)' # Whole-word/line match options
|
||||
{-w,--word-regexp}'[only show matches surrounded by word boundaries]'
|
||||
{-x,--line-regexp}'[only show matches surrounded by line boundaries]'
|
||||
|
||||
+ '(unicode)' # Unicode options
|
||||
$no'--unicode[enable Unicode mode]'
|
||||
'--no-unicode[disable Unicode mode]'
|
||||
|
||||
+ '(zip)' # Compression options
|
||||
'(--pre)'{-z,--search-zip}'[search in compressed files]'
|
||||
$no"--no-search-zip[don't search in compressed files]"
|
||||
|
||||
+ misc # Other options — no need to separate these at the moment
|
||||
'(-b --byte-offset)'{-b,--byte-offset}'[show 0-based byte offset for each matching line]'
|
||||
'--color=[specify when to use colors in output]:when:((
|
||||
never\:"never use colors"
|
||||
auto\:"use colors or not based on stdout, TERM, etc."
|
||||
always\:"always use colors"
|
||||
ansi\:"always use ANSI colors (even on Windows)"
|
||||
))'
|
||||
'*--colors=[specify color and style settings]: :->colorspec'
|
||||
'--context-separator=[specify string used to separate non-continuous context lines in output]:separator'
|
||||
$no"--no-context-separator[don't print context separators]"
|
||||
'--debug[show debug messages]'
|
||||
'--field-context-separator[set string to delimit fields in context lines]'
|
||||
'--field-match-separator[set string to delimit fields in matching lines]'
|
||||
'--trace[show more verbose debug messages]'
|
||||
'--dfa-size-limit=[specify upper size limit of generated DFA]:DFA size (bytes)'
|
||||
"(1 stats)--files[show each file that would be searched (but don't search)]"
|
||||
'*--ignore-file=[specify additional ignore file]:ignore file:_files'
|
||||
'(-v --invert-match)'{-v,--invert-match}'[invert matching]'
|
||||
'(-M --max-columns)'{-M+,--max-columns=}'[specify max length of lines to print]:number of bytes'
|
||||
'(-m --max-count)'{-m+,--max-count=}'[specify max number of matches per file]:number of matches'
|
||||
'--max-filesize=[specify size above which files should be ignored]:file size (bytes)'
|
||||
"--no-config[don't load configuration files]"
|
||||
'(-0 --null)'{-0,--null}'[print NUL byte after file names]'
|
||||
'--path-separator=[specify path separator to use when printing file names]:separator'
|
||||
'(-q --quiet)'{-q,--quiet}'[suppress normal output]'
|
||||
'--regex-size-limit=[specify upper size limit of compiled regex]:regex size (bytes)'
|
||||
'*'{-u,--unrestricted}'[reduce level of "smart" searching]'
|
||||
'(: -)'{-V,--version}'[display version information]'
|
||||
'(-p --heading --no-heading --pretty)--vimgrep[show results in vim-compatible format]'
|
||||
'(-H --no-filename --with-filename)'{-H,--with-filename}'[display the file name for matches]'
|
||||
'(-w -x --line-regexp --word-regexp)'{-w,--word-regexp}'[only show matches surrounded by word boundaries]'
|
||||
'(-e -f --file --files --regexp --type-list)1: :_rg_pattern'
|
||||
'(--type-list)*:file:_files'
|
||||
'(-z --search-zip)'{-z,--search-zip}'[search in compressed files]'
|
||||
|
||||
+ operand # Operands
|
||||
'(--files --type-list file regexp)1: :_guard "^-*" pattern'
|
||||
'(--type-list)*: :_files'
|
||||
)
|
||||
|
||||
[[ ${_RG_COMPLETE_LIST_ARGS:-} == (1|t*|y*) ]] && {
|
||||
printf '%s\n' "${rg_args[@]}"
|
||||
# This is used with test-complete to verify that there are no options
|
||||
# listed in the help output that aren't also defined here
|
||||
[[ $_RG_COMPLETE_LIST_ARGS == (1|t*|y*) ]] && {
|
||||
print -rl - $args
|
||||
return 0
|
||||
}
|
||||
|
||||
_arguments -s -S : "${rg_args[@]}" && return 0
|
||||
# Strip out argument groups where unsupported (see above)
|
||||
[[ $ZSH_VERSION == (4|5.<0-3>)(.*)# ]] &&
|
||||
args=( ${(@)args:#(#i)(+|[a-z0-9][a-z0-9_-]#|\([a-z0-9][a-z0-9_-]#\))} )
|
||||
|
||||
while (( $#state )); do
|
||||
case "${state[1]}" in
|
||||
_arguments -C -s -S : $args && ret=0
|
||||
|
||||
case $state in
|
||||
colorspec)
|
||||
# @todo I don't like this because it allows you to do weird things like
|
||||
# `line:line:bg:`. Also, i would like the `compadd -q` behaviour
|
||||
[[ -prefix *:none: ]] && return 1
|
||||
[[ -prefix *:*:*:* ]] && return 1
|
||||
if [[ ${IPREFIX#--*=}$PREFIX == [^:]# ]]; then
|
||||
suf=( -qS: )
|
||||
tmp=(
|
||||
'column:specify coloring for column numbers'
|
||||
'line:specify coloring for line numbers'
|
||||
'match:specify coloring for match text'
|
||||
'path:specify coloring for file names'
|
||||
)
|
||||
descr='color/style type'
|
||||
elif [[ ${IPREFIX#--*=}$PREFIX == (column|line|match|path):[^:]# ]]; then
|
||||
suf=( -qS: )
|
||||
tmp=(
|
||||
'none:clear color/style for type'
|
||||
'bg:specify background color'
|
||||
'fg:specify foreground color'
|
||||
'style:specify text style'
|
||||
)
|
||||
descr='color/style attribute'
|
||||
elif [[ ${IPREFIX#--*=}$PREFIX == [^:]##:(bg|fg):[^:]# ]]; then
|
||||
tmp=( black blue green red cyan magenta yellow white )
|
||||
descr='color name or r,g,b'
|
||||
elif [[ ${IPREFIX#--*=}$PREFIX == [^:]##:style:[^:]# ]]; then
|
||||
tmp=( {,no}bold {,no}intense {,no}underline )
|
||||
descr='style name'
|
||||
else
|
||||
_message -e colorspec 'no more arguments'
|
||||
fi
|
||||
|
||||
_values -S ':' 'color/style type' \
|
||||
'column[specify coloring for column numbers]: :->attribute' \
|
||||
'line[specify coloring for line numbers]: :->attribute' \
|
||||
'match[specify coloring for match text]: :->attribute' \
|
||||
'path[specify color for file names]: :->attribute' && return 0
|
||||
|
||||
[[ "${state}" == 'attribute' ]] &&
|
||||
_values -S ':' 'color/style attribute' \
|
||||
'none[clear color/style for type]' \
|
||||
'bg[specify background color]: :->color' \
|
||||
'fg[specify foreground color]: :->color' \
|
||||
'style[specify text style]: :->style' && return 0
|
||||
|
||||
[[ "${state}" == 'color' ]] &&
|
||||
_values -S ':' 'color value' \
|
||||
black blue green red cyan magenta yellow white && return 0
|
||||
|
||||
[[ "${state}" == 'style' ]] &&
|
||||
_values -S ':' 'style value' \
|
||||
bold nobold intense nointense && return 0
|
||||
(( $#tmp )) && {
|
||||
compset -P '*:'
|
||||
_describe -t colorspec $descr tmp $suf && ret=0
|
||||
}
|
||||
;;
|
||||
|
||||
typespec)
|
||||
if compset -P '[^:]##:include:'; then
|
||||
_sequence -s ',' _rg_types && return 0
|
||||
_sequence -s , _rg_types && ret=0
|
||||
# @todo This bit in particular could be better, but it's a little
|
||||
# complex, and attempting to solve it seems to run us up against a crash
|
||||
# bug — zsh # 40362
|
||||
elif compset -P '[^:]##:'; then
|
||||
_message 'glob or include directive' && return 1
|
||||
_message 'glob or include directive' && ret=1
|
||||
elif [[ ! -prefix *:* ]]; then
|
||||
_rg_types -qS ':' && return 0
|
||||
_rg_types -qS : && ret=0
|
||||
fi
|
||||
;;
|
||||
esac
|
||||
shift state
|
||||
done
|
||||
|
||||
return 1
|
||||
}
|
||||
|
||||
# zsh 5.1 refuses to complete options if a 'match-less' operand like our pattern
|
||||
# could be 'completed' instead. We can use _guard() to avoid this problem, but
|
||||
# it introduces another one: zsh won't print the message if we try to complete
|
||||
# the pattern after having passed `--`. To work around *that* problem, we can
|
||||
# use this function to bypass the _guard() when `--` is on the command line.
|
||||
# This is inaccurate (it'd get confused by e.g. `rg -e --`), but zsh's handling
|
||||
# of `--` isn't accurate anyway
|
||||
_rg_pattern() {
|
||||
if (( ${words[(I)--]} )); then
|
||||
_message 'pattern'
|
||||
else
|
||||
_guard '^-*' 'pattern'
|
||||
fi
|
||||
return ret
|
||||
}
|
||||
|
||||
# Complete encodings
|
||||
@@ -192,10 +421,10 @@ _rg_encodings() {
|
||||
shift{-,_}jis csshiftjis {,x-}sjis ms_kanji ms932
|
||||
utf{,-}8 utf-16{,be,le} unicode-1-1-utf-8
|
||||
windows-{31j,874,949,125{0..8}} dos-874 tis-620 ansi_x3.4-1968
|
||||
x-user-defined auto
|
||||
x-user-defined auto none
|
||||
)
|
||||
|
||||
_wanted rg-encodings expl 'encoding' compadd -a "${@}" - _encodings
|
||||
_wanted encodings expl encoding compadd -a "$@" - _encodings
|
||||
}
|
||||
|
||||
# Complete file types
|
||||
@@ -203,12 +432,163 @@ _rg_types() {
|
||||
local -a expl
|
||||
local -aU _types
|
||||
|
||||
_types=( ${${(f)"$( _call_program rg-types rg --type-list )"}%%:*} )
|
||||
_types=( ${(@)${(f)"$( _call_program types rg --type-list )"}%%:*} )
|
||||
|
||||
_wanted rg-types expl 'file type' compadd -a "${@}" - _types
|
||||
_wanted types expl 'file type' compadd -a "$@" - _types
|
||||
}
|
||||
|
||||
_rg "${@}"
|
||||
_rg "$@"
|
||||
|
||||
################################################################################
|
||||
# ZSH COMPLETION REFERENCE
|
||||
#
|
||||
# For the convenience of developers who aren't especially familiar with zsh
|
||||
# completion functions, a brief reference guide follows. This is in no way
|
||||
# comprehensive; it covers just enough of the basic structure, syntax, and
|
||||
# conventions to help someone make simple changes like adding new options. For
|
||||
# more complete documentation regarding zsh completion functions, please see the
|
||||
# following:
|
||||
#
|
||||
# * http://zsh.sourceforge.net/Doc/Release/Completion-System.html
|
||||
# * https://github.com/zsh-users/zsh/blob/master/Etc/completion-style-guide
|
||||
#
|
||||
# OVERVIEW
|
||||
#
|
||||
# Most zsh completion functions are defined in terms of `_arguments`, which is a
|
||||
# shell function that takes a series of argument specifications. The specs for
|
||||
# `rg` are stored in an array, which is common for more complex functions; the
|
||||
# elements of the array are passed to `_arguments` on invocation.
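#
# In skeletal form (a simplified sketch, not the exact invocation used in
# `_rg` above), that pattern looks like:
#
#   local -a args
#   args=( ... argument specs ... )
#   _arguments -s -S : $args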
|
||||
#
|
||||
# ARGUMENT-SPECIFICATION SYNTAX
|
||||
#
|
||||
# The following is a contrived example of the argument specs for a simple tool:
|
||||
#
|
||||
# '(: * -)'{-h,--help}'[display help information]'
|
||||
# '(-q -v --quiet --verbose)'{-q,--quiet}'[decrease output verbosity]'
|
||||
# '!(-q -v --quiet --verbose)--silent'
|
||||
# '(-q -v --quiet --verbose)'{-v,--verbose}'[increase output verbosity]'
|
||||
# '--color=[specify when to use colors]:when:(always never auto)'
|
||||
# '*:example file:_files'
|
||||
#
|
||||
# Although there may appear to be six specs here, there are actually nine; we
|
||||
# use brace expansion to combine specs for options that go by multiple names,
|
||||
# like `-q` and `--quiet`. This is customary, and ties in with the fact that zsh
|
||||
# merges completion possibilities together when they have the same description.
|
||||
#
|
||||
# The first line defines the option `-h`/`--help`. With most tools, it isn't
|
||||
# useful to complete anything after `--help` because it effectively overrides
|
||||
# all others; the `(: * -)` at the beginning of the spec tells zsh not to
|
||||
# complete any other operands (`:` and `*`) or options (`-`) after this one has
|
||||
# been used. The `[...]` at the end associates a description with `-h`/`--help`;
|
||||
# as mentioned, zsh will see the identical descriptions and merge these options
|
||||
# together when offering completion possibilities.
|
||||
#
|
||||
# The next line defines `-q`/`--quiet`. Here we don't want to suppress further
|
||||
# completions entirely, but we don't want to offer `-q` if `--quiet` has been
|
||||
# given (since they do the same thing), nor do we want to offer `-v` (since it
|
||||
# doesn't make sense to be quiet and verbose at the same time). We don't need to
|
||||
# tell zsh not to offer `--quiet` a second time, since that's the default
|
||||
# behaviour, but since this line expands to two specs describing `-q` *and*
|
||||
# `--quiet` we do need to explicitly list all of them here.
|
||||
#
|
||||
# The next line defines a hidden option `--silent` — maybe it's a deprecated
|
||||
# synonym for `--quiet`. The leading `!` indicates that zsh shouldn't offer this
|
||||
# option during completion. The benefit of providing a spec for an option that
|
||||
# shouldn't be completed is that, if someone *does* use it, we can correctly
|
||||
# suppress completion of other options afterwards.
|
||||
#
|
||||
# The next line defines `-v`/`--verbose`; this works just like `-q`/`--quiet`.
|
||||
#
|
||||
# The next line defines `--color`. In this example, `--color` doesn't have a
|
||||
# corresponding short option, so we don't need to use brace expansion. Further,
|
||||
# there are no other options it's exclusive with (just itself), so we don't need
|
||||
# to define those at the beginning. However, it does take a mandatory argument.
|
||||
# The `=` at the end of `--color=` indicates that the argument may appear either
|
||||
# like `--color always` or like `--color=always`; this is how most GNU-style
|
||||
# command-line tools work. The corresponding short option would normally use `+`
|
||||
# — for example, `-c+` would allow either `-c always` or `-calways`. For this
|
||||
# option, the arguments are known ahead of time, so we can simply list them in
|
||||
# parentheses at the end (`when` is used as the description for the argument).
|
||||
#
|
||||
# The last line defines an operand (a non-option argument). In this example, the
|
||||
# operand can be used any number of times (the leading `*`), and it should be a
|
||||
# file path, so we tell zsh to call the `_files` function to complete it. The
|
||||
# `example file` in the middle is the description to use for this operand; we
|
||||
# could use a space instead to accept the default provided by `_files`.
|
||||
#
|
||||
# GROUPING ARGUMENT SPECIFICATIONS
|
||||
#
|
||||
# Newer versions of zsh support grouping argument specs together. All specs
|
||||
# following a `+` and then a group name are considered to be members of the
|
||||
# named group. Grouping is useful mostly for organisational purposes; it makes
|
||||
# the relationship between different options more obvious, and makes it easier
|
||||
# to specify exclusions.
|
||||
#
|
||||
# We could rewrite our example above using grouping as follows:
|
||||
#
|
||||
# '(: * -)'{-h,--help}'[display help information]'
|
||||
# '--color=[specify when to use colors]:when:(always never auto)'
|
||||
# '*:example file:_files'
|
||||
# + '(verbosity)'
|
||||
# {-q,--quiet}'[decrease output verbosity]'
|
||||
# '!--silent'
|
||||
# {-v,--verbose}'[increase output verbosity]'
|
||||
#
|
||||
# Here we take advantage of a useful feature of spec grouping — when the group
|
||||
# name is surrounded by parentheses, as in `(verbosity)`, it tells zsh that all
|
||||
# of the options in that group are exclusive with each other. As a result, we
|
||||
# don't need to manually list out the exclusions at the beginning of each
|
||||
# option.
|
||||
#
|
||||
# Groups can also be referred to by name in other argument specs; for example:
|
||||
#
|
||||
# '(xyz)--aaa' '*: :_files'
|
||||
# + xyz --xxx --yyy --zzz
|
||||
#
|
||||
# Here we use the group name `xyz` to tell zsh that `--xxx`, `--yyy`, and
|
||||
# `--zzz` are not to be completed after `--aaa`. This makes the exclusion list
|
||||
# much more compact and reusable.
|
||||
#
|
||||
# CONVENTIONS
|
||||
#
|
||||
# zsh completion functions generally adhere to the following conventions:
|
||||
#
|
||||
# * Use two spaces for indentation
|
||||
# * Combine specs for options with different names using brace expansion
|
||||
# * In combined specs, list the short option first (as in `{-a,--text}`)
|
||||
# * Use `+` or `=` as described above for options that take arguments
|
||||
# * Provide a description for all options, option-arguments, and operands
|
||||
# * Capitalise/punctuate argument descriptions as phrases, not complete
|
||||
# sentences — 'display help information', never 'Display help information.'
|
||||
# (but still capitalise acronyms and proper names)
|
||||
# * Write argument descriptions as verb phrases — 'display x', 'enable y',
|
||||
# 'use z'
|
||||
# * Word descriptions to make it clear when an option expects an argument;
|
||||
# usually this is done with the word 'specify', as in 'specify x' or
|
||||
# 'use specified x'
|
||||
# * Write argument descriptions as tersely as possible — for example, articles
|
||||
# like 'a' and 'the' should be omitted unless it would be confusing
|
||||
#
|
||||
# Other conventions currently used by this function:
|
||||
#
|
||||
# * Order argument specs alphabetically by group name, then option name
|
||||
# * Group options that are directly related, mutually exclusive, or frequently
|
||||
# referenced by other argument specs
|
||||
# * Use only characters in the set [a-z0-9_-] in group names
|
||||
# * Order exclusion lists as follows: short options, long options, groups
|
||||
# * Use American English in descriptions
|
||||
# * Use 'don't' in descriptions instead of 'do not'
|
||||
# * Word descriptions for related options as similarly as possible. For example,
|
||||
# `--foo[enable foo]` and `--no-foo[disable foo]`, or `--foo[use foo]` and
|
||||
# `--no-foo[don't use foo]`
|
||||
# * Word descriptions to make it clear when an option only makes sense with
|
||||
# another option, usually by adding '(with -x)' to the end
|
||||
# * Don't quote strings or variables unnecessarily. When quotes are required,
|
||||
# prefer single-quotes to double-quotes
|
||||
# * Prefix option specs with `$no` when the option serves only to negate the
|
||||
# behaviour of another option that must be provided explicitly by the user.
|
||||
# This prevents rarely used options from cluttering up the completion menu
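#
# As a concrete illustration of the last convention, a hypothetical option
# pair following these conventions might be specified as:
#
#   '--frobnicate[enable frobnication]'
#   $no'--no-frobnicate[disable frobnication]'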
|
||||
################################################################################
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Copyright (c) 2011 Github zsh-users - http://github.com/zsh-users
|
||||
|
||||
27
crates/cli/Cargo.toml
Normal file
@@ -0,0 +1,27 @@
|
||||
[package]
|
||||
name = "grep-cli"
|
||||
version = "0.1.7" #:version
|
||||
authors = ["Andrew Gallant <jamslam@gmail.com>"]
|
||||
description = """
|
||||
Utilities for search oriented command line applications.
|
||||
"""
|
||||
documentation = "https://docs.rs/grep-cli"
|
||||
homepage = "https://github.com/BurntSushi/ripgrep/tree/master/crates/cli"
|
||||
repository = "https://github.com/BurntSushi/ripgrep/tree/master/crates/cli"
|
||||
readme = "README.md"
|
||||
keywords = ["regex", "grep", "cli", "utility", "util"]
|
||||
license = "Unlicense OR MIT"
|
||||
edition = "2018"
|
||||
|
||||
[dependencies]
|
||||
atty = "0.2.11"
|
||||
bstr = "1.1.0"
|
||||
globset = { version = "0.4.10", path = "../globset" }
|
||||
lazy_static = "1.1.0"
|
||||
log = "0.4.5"
|
||||
regex = "1.1"
|
||||
same-file = "1.0.4"
|
||||
termcolor = "1.0.4"
|
||||
|
||||
[target.'cfg(windows)'.dependencies.winapi-util]
|
||||
version = "0.1.1"
|
||||
31
crates/cli/README.md
Normal file
@@ -0,0 +1,31 @@
|
||||
grep-cli
|
||||
--------
|
||||
A utility library that provides common routines desired in search oriented
|
||||
command line applications. This includes, but is not limited to, parsing hex
|
||||
escapes, detecting whether stdin is readable and more. To the extent possible,
|
||||
this crate strives for compatibility across Windows, macOS and Linux.
|
||||
|
||||
[](https://github.com/BurntSushi/ripgrep/actions)
|
||||
[](https://crates.io/crates/grep-cli)
|
||||
|
||||
Dual-licensed under MIT or the [UNLICENSE](https://unlicense.org/).
|
||||
|
||||
|
||||
### Documentation
|
||||
|
||||
[https://docs.rs/grep-cli](https://docs.rs/grep-cli)
|
||||
|
||||
**NOTE:** You probably don't want to use this crate directly. Instead, you
|
||||
should prefer the facade defined in the
|
||||
[`grep`](https://docs.rs/grep)
|
||||
crate.
|
||||
|
||||
|
||||
### Usage
|
||||
|
||||
Add this to your `Cargo.toml`:
|
||||
|
||||
```toml
|
||||
[dependencies]
|
||||
grep-cli = "0.1"
|
||||
```
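
A minimal example of calling into the crate (this assumes the `escape`
routine from `src/escape.rs` is re-exported at the crate root, as it is in
the published crate):

```rust
fn main() {
    // Escape a byte string containing invalid UTF-8 into a printable form.
    println!("{}", grep_cli::escape(b"foo\xFFbar"));
}
```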
|
||||
510
crates/cli/src/decompress.rs
Normal file
@@ -0,0 +1,510 @@
|
||||
use std::ffi::{OsStr, OsString};
|
||||
use std::fs::File;
|
||||
use std::io;
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::process::Command;
|
||||
|
||||
use globset::{Glob, GlobSet, GlobSetBuilder};
|
||||
|
||||
use crate::process::{CommandError, CommandReader, CommandReaderBuilder};
|
||||
|
||||
/// A builder for a matcher that determines which files get decompressed.
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct DecompressionMatcherBuilder {
|
||||
/// The commands for each matching glob.
|
||||
commands: Vec<DecompressionCommand>,
|
||||
/// Whether to include the default matching rules.
|
||||
defaults: bool,
|
||||
}
|
||||
|
||||
/// A representation of a single command for decompressing data
|
||||
/// out-of-process.
|
||||
#[derive(Clone, Debug)]
|
||||
struct DecompressionCommand {
|
||||
/// The glob that matches this command.
|
||||
glob: String,
|
||||
/// The command or binary name.
|
||||
bin: PathBuf,
|
||||
/// The arguments to invoke with the command.
|
||||
args: Vec<OsString>,
|
||||
}
|
||||
|
||||
impl Default for DecompressionMatcherBuilder {
|
||||
fn default() -> DecompressionMatcherBuilder {
|
||||
DecompressionMatcherBuilder::new()
|
||||
}
|
||||
}
|
||||
|
||||
impl DecompressionMatcherBuilder {
|
||||
/// Create a new builder for configuring a decompression matcher.
|
||||
pub fn new() -> DecompressionMatcherBuilder {
|
||||
DecompressionMatcherBuilder { commands: vec![], defaults: true }
|
||||
}
|
||||
|
||||
/// Build a matcher for determining how to decompress files.
|
||||
///
|
||||
/// If there was a problem compiling the matcher, then an error is
|
||||
/// returned.
|
||||
pub fn build(&self) -> Result<DecompressionMatcher, CommandError> {
|
||||
let defaults = if !self.defaults {
|
||||
vec![]
|
||||
} else {
|
||||
default_decompression_commands()
|
||||
};
|
||||
let mut glob_builder = GlobSetBuilder::new();
|
||||
let mut commands = vec![];
|
||||
for decomp_cmd in defaults.iter().chain(&self.commands) {
|
||||
let glob = Glob::new(&decomp_cmd.glob).map_err(|err| {
|
||||
CommandError::io(io::Error::new(io::ErrorKind::Other, err))
|
||||
})?;
|
||||
glob_builder.add(glob);
|
||||
commands.push(decomp_cmd.clone());
|
||||
}
|
||||
let globs = glob_builder.build().map_err(|err| {
|
||||
CommandError::io(io::Error::new(io::ErrorKind::Other, err))
|
||||
})?;
|
||||
Ok(DecompressionMatcher { globs, commands })
|
||||
}
|
||||
|
||||
/// When enabled, the default matching rules will be compiled into this
|
||||
/// matcher before any other associations. When disabled, only the
|
||||
/// rules explicitly given to this builder will be used.
|
||||
///
|
||||
/// This is enabled by default.
|
||||
pub fn defaults(&mut self, yes: bool) -> &mut DecompressionMatcherBuilder {
|
||||
self.defaults = yes;
|
||||
self
|
||||
}
|
||||
|
||||
/// Associates a glob with a command to decompress files matching the glob.
|
||||
///
|
||||
/// If multiple globs match the same file, then the most recently added
|
||||
/// glob takes precedence.
|
||||
///
|
||||
/// The syntax for the glob is documented in the
|
||||
/// [`globset` crate](https://docs.rs/globset/#syntax).
|
||||
///
|
||||
/// The `program` given is resolved with respect to `PATH` and turned
|
||||
/// into an absolute path internally before being executed by the current
|
||||
/// platform. Notably, on Windows, this avoids a security problem where
|
||||
/// passing a relative path to `CreateProcess` will automatically search
|
||||
/// the current directory for a matching program. If the program could
|
||||
/// not be resolved, then it is silently ignored and the association is
|
||||
/// dropped. For this reason, callers should prefer `try_associate`.
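///
/// # Example
///
/// A minimal usage sketch. The `lz4` program and `*.lz4` glob here are
/// illustrative only; any glob/command pair works the same way.
///
/// ```no_run
/// use grep_cli::DecompressionMatcherBuilder;
///
/// # fn example() -> Result<(), Box<::std::error::Error>> {
/// let mut builder = DecompressionMatcherBuilder::new();
/// // Decompress `*.lz4` files by piping them through `lz4 -d -c`.
/// builder.associate("*.lz4", "lz4", &["-d", "-c"]);
/// let matcher = builder.build()?;
/// assert!(matcher.has_command("data.lz4"));
/// # Ok(()) }
/// ```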
|
||||
pub fn associate<P, I, A>(
|
||||
&mut self,
|
||||
glob: &str,
|
||||
program: P,
|
||||
args: I,
|
||||
) -> &mut DecompressionMatcherBuilder
|
||||
where
|
||||
P: AsRef<OsStr>,
|
||||
I: IntoIterator<Item = A>,
|
||||
A: AsRef<OsStr>,
|
||||
{
|
||||
let _ = self.try_associate(glob, program, args);
|
||||
self
|
||||
}
|
||||
|
||||
/// Associates a glob with a command to decompress files matching the glob.
|
||||
///
|
||||
/// If multiple globs match the same file, then the most recently added
|
||||
/// glob takes precedence.
|
||||
///
|
||||
/// The syntax for the glob is documented in the
|
||||
/// [`globset` crate](https://docs.rs/globset/#syntax).
|
||||
///
|
||||
/// The `program` given is resolved with respect to `PATH` and turned
|
||||
/// into an absolute path internally before being executed by the current
|
||||
/// platform. Notably, on Windows, this avoids a security problem where
|
||||
/// passing a relative path to `CreateProcess` will automatically search
|
||||
/// the current directory for a matching program. If the program could not
|
||||
/// be resolved, then an error is returned.
|
||||
pub fn try_associate<P, I, A>(
|
||||
&mut self,
|
||||
glob: &str,
|
||||
program: P,
|
||||
args: I,
|
||||
) -> Result<&mut DecompressionMatcherBuilder, CommandError>
|
||||
where
|
||||
P: AsRef<OsStr>,
|
||||
I: IntoIterator<Item = A>,
|
||||
A: AsRef<OsStr>,
|
||||
{
|
||||
let glob = glob.to_string();
|
||||
let bin = resolve_binary(Path::new(program.as_ref()))?;
|
||||
let args =
|
||||
args.into_iter().map(|a| a.as_ref().to_os_string()).collect();
|
||||
self.commands.push(DecompressionCommand { glob, bin, args });
|
||||
Ok(self)
|
||||
}
|
||||
}
|
||||
|
||||
/// A matcher for determining how to decompress files.
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct DecompressionMatcher {
|
||||
/// The set of globs to match. Each glob has a corresponding entry in
|
||||
/// `commands`. When a glob matches, the corresponding command should be
|
||||
/// used to perform out-of-process decompression.
|
||||
globs: GlobSet,
|
||||
/// The commands for each matching glob.
|
||||
commands: Vec<DecompressionCommand>,
|
||||
}
|
||||
|
||||
impl Default for DecompressionMatcher {
|
||||
fn default() -> DecompressionMatcher {
|
||||
DecompressionMatcher::new()
|
||||
}
|
||||
}
|
||||
|
||||
impl DecompressionMatcher {
|
||||
/// Create a new matcher with default rules.
|
||||
///
|
||||
/// To add more matching rules, build a matcher with
|
||||
/// [`DecompressionMatcherBuilder`](struct.DecompressionMatcherBuilder.html).
|
||||
pub fn new() -> DecompressionMatcher {
|
||||
DecompressionMatcherBuilder::new()
|
||||
.build()
|
||||
.expect("built-in matching rules should always compile")
|
||||
}
|
||||
|
||||
/// Return a pre-built command based on the given file path that can
|
||||
/// decompress its contents. If no such decompressor is known, then this
|
||||
/// returns `None`.
|
||||
///
|
||||
/// If there are multiple possible commands matching the given path, then
|
||||
/// the command added last takes precedence.
|
||||
pub fn command<P: AsRef<Path>>(&self, path: P) -> Option<Command> {
|
||||
for i in self.globs.matches(path).into_iter().rev() {
|
||||
let decomp_cmd = &self.commands[i];
|
||||
let mut cmd = Command::new(&decomp_cmd.bin);
|
||||
cmd.args(&decomp_cmd.args);
|
||||
return Some(cmd);
|
||||
}
|
||||
None
|
||||
}
|
||||
|
||||
/// Returns true if and only if the given file path has at least one
|
||||
/// matching command to perform decompression on.
|
||||
pub fn has_command<P: AsRef<Path>>(&self, path: P) -> bool {
|
||||
self.globs.is_match(path)
|
||||
}
|
||||
}
|
||||
|
||||
/// Configures and builds a streaming reader for decompressing data.
|
||||
#[derive(Clone, Debug, Default)]
|
||||
pub struct DecompressionReaderBuilder {
|
||||
matcher: DecompressionMatcher,
|
||||
command_builder: CommandReaderBuilder,
|
||||
}
|
||||
|
||||
impl DecompressionReaderBuilder {
|
||||
/// Create a new builder with the default configuration.
|
||||
pub fn new() -> DecompressionReaderBuilder {
|
||||
DecompressionReaderBuilder::default()
|
||||
}
|
||||
|
||||
/// Build a new streaming reader for decompressing data.
|
||||
///
|
||||
/// If decompression is done out-of-process and if there was a problem
|
||||
/// spawning the process, then its error is logged at the debug level and a
|
||||
/// passthru reader is returned that does no decompression. This behavior
|
||||
/// typically occurs when the given file path matches a decompression
|
||||
/// command, but is executing in an environment where the decompression
|
||||
/// command is not available.
|
||||
///
|
||||
/// If the given file path could not be matched with a decompression
|
||||
/// strategy, then a passthru reader is returned that does no
|
||||
/// decompression.
|
||||
pub fn build<P: AsRef<Path>>(
|
||||
&self,
|
||||
path: P,
|
||||
) -> Result<DecompressionReader, CommandError> {
|
||||
let path = path.as_ref();
|
||||
let mut cmd = match self.matcher.command(path) {
|
||||
None => return DecompressionReader::new_passthru(path),
|
||||
Some(cmd) => cmd,
|
||||
};
|
||||
cmd.arg(path);
|
||||
|
||||
match self.command_builder.build(&mut cmd) {
|
||||
Ok(cmd_reader) => Ok(DecompressionReader { rdr: Ok(cmd_reader) }),
|
||||
Err(err) => {
|
||||
log::debug!(
|
||||
"{}: error spawning command '{:?}': {} \
|
||||
(falling back to uncompressed reader)",
|
||||
path.display(),
|
||||
cmd,
|
||||
err,
|
||||
);
|
||||
DecompressionReader::new_passthru(path)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Set the matcher to use to look up the decompression command for each
|
||||
/// file path.
|
||||
///
|
||||
/// A set of sensible rules is enabled by default. Setting this will
|
||||
/// completely replace the current rules.
|
||||
pub fn matcher(
|
||||
&mut self,
|
||||
matcher: DecompressionMatcher,
|
||||
) -> &mut DecompressionReaderBuilder {
|
||||
self.matcher = matcher;
|
||||
self
|
||||
}
|
||||
|
||||
/// Get the underlying matcher currently used by this builder.
|
||||
pub fn get_matcher(&self) -> &DecompressionMatcher {
|
||||
&self.matcher
|
||||
}
|
||||
|
||||
/// When enabled, the reader will asynchronously read the contents of the
|
||||
/// command's stderr output. When disabled, stderr is only read after the
|
||||
/// stdout stream has been exhausted (or if the process quits with an error
|
||||
/// code).
|
||||
///
|
||||
/// Note that when enabled, this may require launching an additional
|
||||
/// thread in order to read stderr. This is done so that the process being
|
||||
/// executed is never blocked from writing to stdout or stderr. If this is
|
||||
/// disabled, then it is possible for the process to fill up the stderr
|
||||
/// buffer and deadlock.
|
||||
///
|
||||
/// This is enabled by default.
|
||||
pub fn async_stderr(
|
||||
&mut self,
|
||||
yes: bool,
|
||||
) -> &mut DecompressionReaderBuilder {
|
||||
self.command_builder.async_stderr(yes);
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
/// A streaming reader for decompressing the contents of a file.
|
||||
///
|
||||
/// The purpose of this reader is to provide a seamless way to decompress the
|
||||
/// contents of a file using existing tools in the current environment. This is
|
||||
/// meant to be an alternative to using decompression libraries in favor of the
|
||||
/// simplicity and portability of using external commands such as `gzip` and
|
||||
/// `xz`. This does impose the overhead of spawning a process, so other means
|
||||
/// for performing decompression should be sought if this overhead isn't
|
||||
/// acceptable.
|
||||
///
|
||||
/// A decompression reader comes with a default set of matching rules that are
|
||||
/// meant to associate file paths with the corresponding command to use to
|
||||
/// decompress them. For example, a glob like `*.gz` matches gzip compressed
|
||||
/// files with the command `gzip -d -c`. If a file path does not match any
|
||||
/// existing rules, or if it matches a rule whose command does not exist in the
|
||||
/// current environment, then the decompression reader passes through the
|
||||
/// contents of the underlying file without doing any decompression.
|
||||
///
|
||||
/// The default matching rules are probably good enough for most cases, and if
|
||||
/// they require revision, pull requests are welcome. In cases where they must
|
||||
/// be changed or extended, they can be customized through the use of
|
||||
/// [`DecompressionMatcherBuilder`](struct.DecompressionMatcherBuilder.html)
|
||||
/// and
|
||||
/// [`DecompressionReaderBuilder`](struct.DecompressionReaderBuilder.html).
|
||||
///
|
||||
/// By default, this reader will asynchronously read the process's stderr.
|
||||
/// This prevents subtle deadlocking bugs for noisy processes that write a lot
|
||||
/// to stderr. Currently, the entire contents of stderr are read onto the heap.
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
/// This example shows how to read the decompressed contents of a file without
|
||||
/// needing to explicitly choose the decompression command to run.
|
||||
///
|
||||
/// Note that if you need to decompress multiple files, it is better to use
|
||||
/// `DecompressionReaderBuilder`, which will amortize the cost of compiling the
|
||||
/// matcher.
|
||||
///
|
||||
/// ```no_run
|
||||
/// use std::io::Read;
|
||||
/// use std::process::Command;
|
||||
/// use grep_cli::DecompressionReader;
|
||||
///
|
||||
/// # fn example() -> Result<(), Box<::std::error::Error>> {
|
||||
/// let mut rdr = DecompressionReader::new("/usr/share/man/man1/ls.1.gz")?;
|
||||
/// let mut contents = vec![];
|
||||
/// rdr.read_to_end(&mut contents)?;
|
||||
/// # Ok(()) }
|
||||
/// ```
|
||||
#[derive(Debug)]
|
||||
pub struct DecompressionReader {
|
||||
rdr: Result<CommandReader, File>,
|
||||
}
|
||||
|
||||
impl DecompressionReader {
|
||||
/// Build a new streaming reader for decompressing data.
|
||||
///
|
||||
/// If decompression is done out-of-process and if there was a problem
|
||||
/// spawning the process, then its error is returned.
|
||||
///
|
||||
/// If the given file path could not be matched with a decompression
|
||||
/// strategy, then a passthru reader is returned that does no
|
||||
/// decompression.
|
||||
///
|
||||
/// This uses the default matching rules for determining how to decompress
|
||||
/// the given file. To change those matching rules, use
|
||||
/// [`DecompressionReaderBuilder`](struct.DecompressionReaderBuilder.html)
|
||||
/// and
|
||||
/// [`DecompressionMatcherBuilder`](struct.DecompressionMatcherBuilder.html).
|
||||
///
|
||||
/// When creating readers for many paths, it is better to use the builder
|
||||
/// since it will amortize the cost of constructing the matcher.
|
||||
pub fn new<P: AsRef<Path>>(
|
||||
path: P,
|
||||
) -> Result<DecompressionReader, CommandError> {
|
||||
DecompressionReaderBuilder::new().build(path)
|
||||
}
|
||||
|
||||
/// Creates a new "passthru" decompression reader that reads from the file
|
||||
/// corresponding to the given path without doing decompression and without
|
||||
/// executing another process.
|
||||
fn new_passthru(path: &Path) -> Result<DecompressionReader, CommandError> {
|
||||
let file = File::open(path)?;
|
||||
Ok(DecompressionReader { rdr: Err(file) })
|
||||
}
|
||||
|
||||
/// Closes this reader, freeing any resources used by its underlying child
|
||||
/// process, if one was used. If the child process exits with a nonzero
|
||||
/// exit code, the returned Err value will include its stderr.
|
||||
///
|
||||
/// `close` is idempotent, meaning it can be safely called multiple times.
|
||||
/// The first call closes the CommandReader and any subsequent calls do
|
||||
/// nothing.
|
||||
///
|
||||
/// This method should be called after partially reading a file to prevent
|
||||
/// resource leakage. However there is no need to call `close` explicitly
|
||||
/// if your code always calls `read` to EOF, as `read` takes care of
|
||||
/// calling `close` in this case.
|
||||
///
|
||||
/// `close` is also called in `drop` as a last line of defense against
|
||||
/// resource leakage. Any error from the child process is then printed as a
|
||||
/// warning to stderr. This can be avoided by explicitly calling `close`
|
||||
/// before the CommandReader is dropped.
|
||||
pub fn close(&mut self) -> io::Result<()> {
|
||||
match self.rdr {
|
||||
Ok(ref mut rdr) => rdr.close(),
|
||||
Err(_) => Ok(()),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl io::Read for DecompressionReader {
|
||||
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
|
||||
match self.rdr {
|
||||
Ok(ref mut rdr) => rdr.read(buf),
|
||||
Err(ref mut rdr) => rdr.read(buf),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Resolves the path to a program by searching for the program in
|
||||
/// `PATH`.
|
||||
///
|
||||
/// If the program could not be resolved, then an error is returned.
|
||||
///
|
||||
/// The purpose of doing this instead of passing the path to the program
|
||||
/// directly to Command::new is that Command::new will hand relative paths
|
||||
/// to CreateProcess on Windows, which will implicitly search the current
|
||||
/// working directory for the executable. This could be undesirable for
|
||||
/// security reasons. e.g., running ripgrep with the -z/--search-zip flag on an
|
||||
/// untrusted directory tree could result in arbitrary programs executing on
|
||||
/// Windows.
|
||||
///
|
||||
/// Note that this could still return a relative path if PATH contains a
|
||||
/// relative path. We permit this since it is assumed that the user has set
|
||||
/// this explicitly, and thus, desires this behavior.
|
||||
///
|
||||
/// On non-Windows, this is a no-op.
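///
/// # Example
///
/// A small sketch; it assumes `resolve_binary` is re-exported at the crate
/// root (as in the published crate) and that a `gzip` binary is available.
///
/// ```no_run
/// use std::path::Path;
///
/// // On Windows this searches PATH for `gzip`; elsewhere the path is
/// // returned unchanged.
/// let gzip = grep_cli::resolve_binary(Path::new("gzip")).unwrap();
/// println!("{}", gzip.display());
/// ```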
|
||||
pub fn resolve_binary<P: AsRef<Path>>(
|
||||
prog: P,
|
||||
) -> Result<PathBuf, CommandError> {
|
||||
use std::env;
|
||||
|
||||
fn is_exe(path: &Path) -> bool {
|
||||
let md = match path.metadata() {
|
||||
Err(_) => return false,
|
||||
Ok(md) => md,
|
||||
};
|
||||
!md.is_dir()
|
||||
}
|
||||
|
||||
let prog = prog.as_ref();
|
||||
if !cfg!(windows) || prog.is_absolute() {
|
||||
return Ok(prog.to_path_buf());
|
||||
}
|
||||
let syspaths = match env::var_os("PATH") {
|
||||
Some(syspaths) => syspaths,
|
||||
None => {
|
||||
let msg = "system PATH environment variable not found";
|
||||
return Err(CommandError::io(io::Error::new(
|
||||
io::ErrorKind::Other,
|
||||
msg,
|
||||
)));
|
||||
}
|
||||
};
|
||||
for syspath in env::split_paths(&syspaths) {
|
||||
if syspath.as_os_str().is_empty() {
|
||||
continue;
|
||||
}
|
||||
let abs_prog = syspath.join(prog);
|
||||
if is_exe(&abs_prog) {
|
||||
return Ok(abs_prog.to_path_buf());
|
||||
}
|
||||
if abs_prog.extension().is_none() {
|
||||
let abs_prog = abs_prog.with_extension("exe");
|
||||
if is_exe(&abs_prog) {
|
||||
return Ok(abs_prog.to_path_buf());
|
||||
}
|
||||
}
|
||||
}
|
||||
let msg = format!("{}: could not find executable in PATH", prog.display());
|
||||
return Err(CommandError::io(io::Error::new(io::ErrorKind::Other, msg)));
|
||||
}
|
||||
|
||||
fn default_decompression_commands() -> Vec<DecompressionCommand> {
|
||||
const ARGS_GZIP: &[&str] = &["gzip", "-d", "-c"];
|
||||
const ARGS_BZIP: &[&str] = &["bzip2", "-d", "-c"];
|
||||
const ARGS_XZ: &[&str] = &["xz", "-d", "-c"];
|
||||
const ARGS_LZ4: &[&str] = &["lz4", "-d", "-c"];
|
||||
const ARGS_LZMA: &[&str] = &["xz", "--format=lzma", "-d", "-c"];
|
||||
const ARGS_BROTLI: &[&str] = &["brotli", "-d", "-c"];
|
||||
const ARGS_ZSTD: &[&str] = &["zstd", "-q", "-d", "-c"];
|
||||
const ARGS_UNCOMPRESS: &[&str] = &["uncompress", "-c"];
|
||||
|
||||
fn add(glob: &str, args: &[&str], cmds: &mut Vec<DecompressionCommand>) {
|
||||
let bin = match resolve_binary(Path::new(args[0])) {
|
||||
Ok(bin) => bin,
|
||||
Err(err) => {
|
||||
log::debug!("{}", err);
|
||||
return;
|
||||
}
|
||||
};
|
||||
cmds.push(DecompressionCommand {
|
||||
glob: glob.to_string(),
|
||||
bin,
|
||||
args: args
|
||||
.iter()
|
||||
.skip(1)
|
||||
.map(|s| OsStr::new(s).to_os_string())
|
||||
.collect(),
|
||||
});
|
||||
}
|
||||
let mut cmds = vec![];
|
||||
add("*.gz", ARGS_GZIP, &mut cmds);
|
||||
add("*.tgz", ARGS_GZIP, &mut cmds);
|
||||
add("*.bz2", ARGS_BZIP, &mut cmds);
|
||||
add("*.tbz2", ARGS_BZIP, &mut cmds);
|
||||
add("*.xz", ARGS_XZ, &mut cmds);
|
||||
add("*.txz", ARGS_XZ, &mut cmds);
|
||||
add("*.lz4", ARGS_LZ4, &mut cmds);
|
||||
add("*.lzma", ARGS_LZMA, &mut cmds);
|
||||
add("*.br", ARGS_BROTLI, &mut cmds);
|
||||
add("*.zst", ARGS_ZSTD, &mut cmds);
|
||||
add("*.zstd", ARGS_ZSTD, &mut cmds);
|
||||
add("*.Z", ARGS_UNCOMPRESS, &mut cmds);
|
||||
cmds
|
||||
}
|
||||
272
crates/cli/src/escape.rs
Normal file
@@ -0,0 +1,272 @@
|
||||
use std::ffi::OsStr;
|
||||
use std::str;
|
||||
|
||||
use bstr::{ByteSlice, ByteVec};
|
||||
|
||||
/// A single state in the state machine used by `unescape`.
|
||||
#[derive(Clone, Copy, Eq, PartialEq)]
|
||||
enum State {
|
||||
/// The state after seeing a `\`.
|
||||
Escape,
|
||||
/// The state after seeing a `\x`.
|
||||
HexFirst,
|
||||
/// The state after seeing a `\x[0-9A-Fa-f]`.
|
||||
HexSecond(char),
|
||||
/// Default state.
|
||||
Literal,
|
||||
}
|
||||
|
||||
/// Escapes arbitrary bytes into a human readable string.
|
||||
///
|
||||
/// This converts `\t`, `\r` and `\n` into their escaped forms. It also
|
||||
/// converts the non-printable subset of ASCII in addition to invalid UTF-8
|
||||
/// bytes to hexadecimal escape sequences. Everything else is left as is.
|
||||
///
|
||||
/// The dual of this routine is [`unescape`](fn.unescape.html).
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
/// This example shows how to convert a byte string that contains a `\n` and
|
||||
/// invalid UTF-8 bytes into a `String`.
|
||||
///
|
||||
/// Pay special attention to the use of raw strings. That is, `r"\n"` is
|
||||
/// equivalent to `"\\n"`.
|
||||
///
|
||||
/// ```
|
||||
/// use grep_cli::escape;
|
||||
///
|
||||
/// assert_eq!(r"foo\nbar\xFFbaz", escape(b"foo\nbar\xFFbaz"));
|
||||
/// ```
|
||||
pub fn escape(bytes: &[u8]) -> String {
|
||||
let mut escaped = String::new();
|
||||
for (s, e, ch) in bytes.char_indices() {
|
||||
if ch == '\u{FFFD}' {
|
||||
for b in bytes[s..e].bytes() {
|
||||
escape_byte(b, &mut escaped);
|
||||
}
|
||||
} else {
|
||||
escape_char(ch, &mut escaped);
|
||||
}
|
||||
}
|
||||
escaped
|
||||
}
|
||||
|
||||
/// Escapes an OS string into a human readable string.
|
||||
///
|
||||
/// This is like [`escape`](fn.escape.html), but accepts an OS string.
|
||||
pub fn escape_os(string: &OsStr) -> String {
|
||||
escape(Vec::from_os_str_lossy(string).as_bytes())
|
||||
}
|
||||
|
||||
/// Unescapes a string.
|
||||
///
|
||||
/// It supports a limited set of escape sequences:
|
||||
///
|
||||
/// * `\t`, `\r` and `\n` are mapped to their corresponding ASCII bytes.
|
||||
/// * `\xZZ` hexadecimal escapes are mapped to their byte.
|
||||
///
|
||||
/// Everything else is left as is, including non-hexadecimal escapes like
|
||||
/// `\xGG`.
|
||||
///
|
||||
/// This is useful when it is desirable for a command line argument to be
|
||||
/// capable of specifying arbitrary bytes or otherwise make it easier to
|
||||
/// specify non-printable characters.
|
||||
///
|
||||
/// The dual of this routine is [`escape`](fn.escape.html).
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
/// This example shows how to convert an escaped string (which is valid UTF-8)
|
||||
/// into a corresponding sequence of bytes. Each escape sequence is mapped to
|
||||
/// its bytes, which may include invalid UTF-8.
|
||||
///
|
||||
/// Pay special attention to the use of raw strings. That is, `r"\n"` is
|
||||
/// equivalent to `"\\n"`.
|
||||
///
|
||||
/// ```
|
||||
/// use grep_cli::unescape;
|
||||
///
|
||||
/// assert_eq!(&b"foo\nbar\xFFbaz"[..], &*unescape(r"foo\nbar\xFFbaz"));
|
||||
/// ```
|
||||
pub fn unescape(s: &str) -> Vec<u8> {
|
||||
use self::State::*;
|
||||
|
||||
let mut bytes = vec![];
|
||||
let mut state = Literal;
|
||||
for c in s.chars() {
|
||||
match state {
|
||||
Escape => match c {
|
||||
'\\' => {
|
||||
bytes.push(b'\\');
|
||||
state = Literal;
|
||||
}
|
||||
'n' => {
|
||||
bytes.push(b'\n');
|
||||
state = Literal;
|
||||
}
|
||||
'r' => {
|
||||
bytes.push(b'\r');
|
||||
state = Literal;
|
||||
}
|
||||
't' => {
|
||||
bytes.push(b'\t');
|
||||
state = Literal;
|
||||
}
|
||||
'x' => {
|
||||
state = HexFirst;
|
||||
}
|
||||
c => {
|
||||
bytes.extend(format!(r"\{}", c).into_bytes());
|
||||
state = Literal;
|
||||
}
|
||||
},
|
||||
HexFirst => match c {
|
||||
'0'..='9' | 'A'..='F' | 'a'..='f' => {
|
||||
state = HexSecond(c);
|
||||
}
|
||||
c => {
|
||||
bytes.extend(format!(r"\x{}", c).into_bytes());
|
||||
state = Literal;
|
||||
}
|
||||
},
|
||||
HexSecond(first) => match c {
|
||||
'0'..='9' | 'A'..='F' | 'a'..='f' => {
|
||||
let ordinal = format!("{}{}", first, c);
|
||||
let byte = u8::from_str_radix(&ordinal, 16).unwrap();
|
||||
bytes.push(byte);
|
||||
state = Literal;
|
||||
}
|
||||
c => {
|
||||
let original = format!(r"\x{}{}", first, c);
|
||||
bytes.extend(original.into_bytes());
|
||||
state = Literal;
|
||||
}
|
||||
},
|
||||
Literal => match c {
|
||||
'\\' => {
|
||||
state = Escape;
|
||||
}
|
||||
c => {
|
||||
bytes.extend(c.to_string().as_bytes());
|
||||
}
|
||||
},
|
||||
}
|
||||
}
|
||||
match state {
|
||||
Escape => bytes.push(b'\\'),
|
||||
HexFirst => bytes.extend(b"\\x"),
|
||||
HexSecond(c) => bytes.extend(format!("\\x{}", c).into_bytes()),
|
||||
Literal => {}
|
||||
}
|
||||
bytes
|
||||
}
|
||||
|
||||
/// Unescapes an OS string.
|
||||
///
|
||||
/// This is like [`unescape`](fn.unescape.html), but accepts an OS string.
|
||||
///
|
||||
/// Note that this first lossily decodes the given OS string as UTF-8. That
|
||||
/// is, an escaped string (the thing given) should be valid UTF-8.
|
||||
pub fn unescape_os(string: &OsStr) -> Vec<u8> {
|
||||
unescape(&string.to_string_lossy())
|
||||
}
|
||||
|
||||
/// Adds the given codepoint to the given string, escaping it if necessary.
|
||||
fn escape_char(cp: char, into: &mut String) {
|
||||
if cp.is_ascii() {
|
||||
escape_byte(cp as u8, into);
|
||||
} else {
|
||||
into.push(cp);
|
||||
}
|
||||
}
|
||||
|
||||
/// Adds the given byte to the given string, escaping it if necessary.
|
||||
fn escape_byte(byte: u8, into: &mut String) {
|
||||
match byte {
|
||||
0x21..=0x5B | 0x5D..=0x7D => into.push(byte as char),
|
||||
b'\n' => into.push_str(r"\n"),
|
||||
b'\r' => into.push_str(r"\r"),
|
||||
b'\t' => into.push_str(r"\t"),
|
||||
b'\\' => into.push_str(r"\\"),
|
||||
_ => into.push_str(&format!(r"\x{:02X}", byte)),
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::{escape, unescape};
|
||||
|
||||
fn b(bytes: &'static [u8]) -> Vec<u8> {
|
||||
bytes.to_vec()
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn empty() {
|
||||
assert_eq!(b(b""), unescape(r""));
|
||||
assert_eq!(r"", escape(b""));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn backslash() {
|
||||
assert_eq!(b(b"\\"), unescape(r"\\"));
|
||||
assert_eq!(r"\\", escape(b"\\"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn nul() {
|
||||
assert_eq!(b(b"\x00"), unescape(r"\x00"));
|
||||
assert_eq!(r"\x00", escape(b"\x00"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn nl() {
|
||||
assert_eq!(b(b"\n"), unescape(r"\n"));
|
||||
assert_eq!(r"\n", escape(b"\n"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn tab() {
|
||||
assert_eq!(b(b"\t"), unescape(r"\t"));
|
||||
assert_eq!(r"\t", escape(b"\t"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn carriage() {
|
||||
assert_eq!(b(b"\r"), unescape(r"\r"));
|
||||
assert_eq!(r"\r", escape(b"\r"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn nothing_simple() {
|
||||
assert_eq!(b(b"\\a"), unescape(r"\a"));
|
||||
assert_eq!(b(b"\\a"), unescape(r"\\a"));
|
||||
assert_eq!(r"\\a", escape(b"\\a"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn nothing_hex0() {
|
||||
assert_eq!(b(b"\\x"), unescape(r"\x"));
|
||||
assert_eq!(b(b"\\x"), unescape(r"\\x"));
|
||||
assert_eq!(r"\\x", escape(b"\\x"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn nothing_hex1() {
|
||||
assert_eq!(b(b"\\xz"), unescape(r"\xz"));
|
||||
assert_eq!(b(b"\\xz"), unescape(r"\\xz"));
|
||||
assert_eq!(r"\\xz", escape(b"\\xz"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn nothing_hex2() {
|
||||
assert_eq!(b(b"\\xzz"), unescape(r"\xzz"));
|
||||
assert_eq!(b(b"\\xzz"), unescape(r"\\xzz"));
|
||||
assert_eq!(r"\\xzz", escape(b"\\xzz"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn invalid_utf8() {
|
||||
assert_eq!(r"\xFF", escape(b"\xFF"));
|
||||
assert_eq!(r"a\xFFb", escape(b"a\xFFb"));
|
||||
}
|
||||
}
|
||||
crates/cli/src/human.rs (new file, 165 lines)
@@ -0,0 +1,165 @@
|
||||
use std::error;
|
||||
use std::fmt;
|
||||
use std::io;
|
||||
use std::num::ParseIntError;
|
||||
|
||||
use regex::Regex;
|
||||
|
||||
/// An error that occurs when parsing a human readable size description.
|
||||
///
|
||||
/// This error provides an end user friendly message describing why the
|
||||
/// description couldn't be parsed and what the expected format is.
|
||||
#[derive(Clone, Debug, Eq, PartialEq)]
|
||||
pub struct ParseSizeError {
|
||||
original: String,
|
||||
kind: ParseSizeErrorKind,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, Eq, PartialEq)]
|
||||
enum ParseSizeErrorKind {
|
||||
InvalidFormat,
|
||||
InvalidInt(ParseIntError),
|
||||
Overflow,
|
||||
}
|
||||
|
||||
impl ParseSizeError {
|
||||
fn format(original: &str) -> ParseSizeError {
|
||||
ParseSizeError {
|
||||
original: original.to_string(),
|
||||
kind: ParseSizeErrorKind::InvalidFormat,
|
||||
}
|
||||
}
|
||||
|
||||
fn int(original: &str, err: ParseIntError) -> ParseSizeError {
|
||||
ParseSizeError {
|
||||
original: original.to_string(),
|
||||
kind: ParseSizeErrorKind::InvalidInt(err),
|
||||
}
|
||||
}
|
||||
|
||||
fn overflow(original: &str) -> ParseSizeError {
|
||||
ParseSizeError {
|
||||
original: original.to_string(),
|
||||
kind: ParseSizeErrorKind::Overflow,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl error::Error for ParseSizeError {
|
||||
fn description(&self) -> &str {
|
||||
"invalid size"
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Display for ParseSizeError {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
use self::ParseSizeErrorKind::*;
|
||||
|
||||
match self.kind {
|
||||
InvalidFormat => write!(
|
||||
f,
|
||||
"invalid format for size '{}', which should be a sequence \
|
||||
of digits followed by an optional 'K', 'M' or 'G' \
|
||||
suffix",
|
||||
self.original
|
||||
),
|
||||
InvalidInt(ref err) => write!(
|
||||
f,
|
||||
"invalid integer found in size '{}': {}",
|
||||
self.original, err
|
||||
),
|
||||
Overflow => write!(f, "size too big in '{}'", self.original),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<ParseSizeError> for io::Error {
|
||||
fn from(size_err: ParseSizeError) -> io::Error {
|
||||
io::Error::new(io::ErrorKind::Other, size_err)
|
||||
}
|
||||
}
|
||||
|
||||
/// Parse a human readable size like `2M` into a corresponding number of bytes.
|
||||
///
|
||||
/// Supported size suffixes are `K` (for kilobyte), `M` (for megabyte) and `G`
|
||||
/// (for gigabyte). If a size suffix is missing, then the size is interpreted
|
||||
/// as bytes. If the size is too big to fit into a `u64`, then this returns an
|
||||
/// error.
|
||||
///
|
||||
/// Additional suffixes may be added over time.
|
||||
pub fn parse_human_readable_size(size: &str) -> Result<u64, ParseSizeError> {
|
||||
lazy_static::lazy_static! {
|
||||
// Normally I'd just parse something this simple by hand to avoid the
|
||||
// regex dep, but we bring regex in anyway for glob matching, so might
|
||||
// as well use it.
|
||||
static ref RE: Regex = Regex::new(r"^([0-9]+)([KMG])?$").unwrap();
|
||||
}
|
||||
|
||||
let caps = match RE.captures(size) {
|
||||
Some(caps) => caps,
|
||||
None => return Err(ParseSizeError::format(size)),
|
||||
};
|
||||
let value: u64 =
|
||||
caps[1].parse().map_err(|err| ParseSizeError::int(size, err))?;
|
||||
let suffix = match caps.get(2) {
|
||||
None => return Ok(value),
|
||||
Some(cap) => cap.as_str(),
|
||||
};
|
||||
let bytes = match suffix {
|
||||
"K" => value.checked_mul(1 << 10),
|
||||
"M" => value.checked_mul(1 << 20),
|
||||
"G" => value.checked_mul(1 << 30),
|
||||
// Because if the regex matches this group, it must be [KMG].
|
||||
_ => unreachable!(),
|
||||
};
|
||||
bytes.ok_or_else(|| ParseSizeError::overflow(size))
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn suffix_none() {
|
||||
let x = parse_human_readable_size("123").unwrap();
|
||||
assert_eq!(123, x);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn suffix_k() {
|
||||
let x = parse_human_readable_size("123K").unwrap();
|
||||
assert_eq!(123 * (1 << 10), x);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn suffix_m() {
|
||||
let x = parse_human_readable_size("123M").unwrap();
|
||||
assert_eq!(123 * (1 << 20), x);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn suffix_g() {
|
||||
let x = parse_human_readable_size("123G").unwrap();
|
||||
assert_eq!(123 * (1 << 30), x);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn invalid_empty() {
|
||||
assert!(parse_human_readable_size("").is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn invalid_non_digit() {
|
||||
assert!(parse_human_readable_size("a").is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn invalid_overflow() {
|
||||
assert!(parse_human_readable_size("9999999999999999G").is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn invalid_suffix() {
|
||||
assert!(parse_human_readable_size("123T").is_err());
|
||||
}
|
||||
}
|
||||
crates/cli/src/lib.rs (new file, 237 lines)
@@ -0,0 +1,237 @@
|
||||
/*!
|
||||
This crate provides common routines used in command line applications, with a
|
||||
focus on routines useful for search oriented applications. As a utility
|
||||
library, there is no central type or function. However, a key focus of this
|
||||
crate is to improve failure modes and provide user friendly error messages
|
||||
when things go wrong.
|
||||
|
||||
To the best extent possible, everything in this crate works on Windows, macOS
|
||||
and Linux.
|
||||
|
||||
|
||||
# Standard I/O
|
||||
|
||||
The
|
||||
[`is_readable_stdin`](fn.is_readable_stdin.html),
|
||||
[`is_tty_stderr`](fn.is_tty_stderr.html),
|
||||
[`is_tty_stdin`](fn.is_tty_stdin.html)
|
||||
and
|
||||
[`is_tty_stdout`](fn.is_tty_stdout.html)
|
||||
routines query aspects of standard I/O. `is_readable_stdin` determines whether
|
||||
stdin can be usefully read from, while the `tty` methods determine whether a
|
||||
tty is attached to stdin/stdout/stderr.
|
||||
|
||||
`is_readable_stdin` is useful when writing an application that changes behavior
|
||||
based on whether the application was invoked with data on stdin. For example,
|
||||
`rg foo` might recursively search the current working directory for
|
||||
occurrences of `foo`, but `rg foo < file` might only search the contents of
|
||||
`file`.
|
||||
|
||||
The `tty` methods are useful for similar reasons. Namely, commands like `ls`
|
||||
will change their output depending on whether they are printing to a terminal
|
||||
or not. For example, `ls` shows a file on each line when stdout is redirected
|
||||
to a file or a pipe, but condenses the output to show possibly many files on
|
||||
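As a rough sketch, a program might branch on `is_readable_stdin` like so (the
messages printed below are purely illustrative):

```
use grep_cli::is_readable_stdin;

fn main() {
    if is_readable_stdin() {
        // Data was piped or redirected in, e.g., `rg foo < file`, so a
        // search tool would read from stdin here.
        println!("reading from stdin");
    } else {
        // Nothing useful on stdin, e.g., plain `rg foo`, so fall back to
        // searching the current working directory.
        println!("searching the current directory");
    }
}
```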
each line when stdout is connected to a tty.
|
||||
|
||||
|
||||
# Coloring and buffering
|
||||
|
||||
The
|
||||
[`stdout`](fn.stdout.html),
|
||||
[`stdout_buffered_block`](fn.stdout_buffered_block.html)
|
||||
and
|
||||
[`stdout_buffered_line`](fn.stdout_buffered_line.html)
|
||||
routines are alternative constructors for
|
||||
[`StandardStream`](struct.StandardStream.html).
|
||||
A `StandardStream` implements `termcolor::WriteColor`, which provides a way
|
||||
to emit colors to terminals. Its key use is the encapsulation of buffering
|
||||
style. Namely, `stdout` will return a line buffered `StandardStream` if and
|
||||
only if stdout is connected to a tty, and will otherwise return a block
|
||||
buffered `StandardStream`. Line buffering is important for use with a tty
|
||||
because it typically decreases the latency at which the end user sees output.
|
||||
Block buffering is used otherwise because it is faster, and redirecting stdout
|
||||
to a file typically doesn't benefit from the decreased latency that line
|
||||
buffering provides.
|
||||
|
||||
The `stdout_buffered_block` and `stdout_buffered_line` can be used to
|
||||
explicitly set the buffering strategy regardless of whether stdout is connected
|
||||
to a tty or not.
|
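For instance, a minimal sketch that picks a writer this way (the color choice
and the text written are only illustrative):

```
use std::io::Write;

use grep_cli::stdout;
use termcolor::ColorChoice;

fn main() -> std::io::Result<()> {
    // Line buffered when stdout is attached to a tty, block buffered
    // otherwise.
    let mut wtr = stdout(ColorChoice::Auto);
    writeln!(wtr, "hello")?;
    wtr.flush()
}
```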
||||
|
||||
|
||||
# Escaping
|
||||
|
||||
The
|
||||
[`escape`](fn.escape.html),
|
||||
[`escape_os`](fn.escape_os.html),
|
||||
[`unescape`](fn.unescape.html)
|
||||
and
|
||||
[`unescape_os`](fn.unescape_os.html)
|
||||
routines provide a user friendly way of dealing with UTF-8 encoded strings that
|
||||
can express arbitrary bytes. For example, you might want to accept a string
|
||||
containing arbitrary bytes as a command line argument, but most interactive
|
||||
shells make such strings difficult to type. Instead, we can ask users to use
|
||||
escape sequences.
|
||||
|
||||
For example, `a\xFFz` is itself a valid UTF-8 string corresponding to the
|
||||
following bytes:
|
||||
|
||||
```ignore
|
||||
[b'a', b'\\', b'x', b'F', b'F', b'z']
|
||||
```
|
||||
|
||||
However, we can
|
||||
interpret `\xFF` as an escape sequence with the `unescape`/`unescape_os`
|
||||
routines, which will yield
|
||||
|
||||
```ignore
|
||||
[b'a', b'\xFF', b'z']
|
||||
```
|
||||
|
||||
instead. For example:
|
||||
|
||||
```
|
||||
use grep_cli::unescape;
|
||||
|
||||
// Note the use of a raw string!
|
||||
assert_eq!(vec![b'a', b'\xFF', b'z'], unescape(r"a\xFFz"));
|
||||
```
|
||||
|
||||
The `escape`/`escape_os` routines provide the reverse transformation, which
|
||||
makes it easy to show user friendly error messages involving arbitrary bytes.
|
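A minimal sketch of the reverse direction, reusing the `a\xFFz` bytes from
above:

```
use grep_cli::{escape, unescape};

fn main() {
    // Bytes that aren't valid UTF-8 are rendered with hex escapes so they
    // can be shown safely in error messages...
    assert_eq!(r"a\xFFz", escape(b"a\xFFz"));
    // ...and the two routines round trip for this input.
    assert_eq!(vec![b'a', b'\xFF', b'z'], unescape(&escape(b"a\xFFz")));
}
```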
||||
|
||||
|
||||
# Building patterns
|
||||
|
||||
Typically, regular expression patterns must be valid UTF-8. However, command
|
||||
line arguments aren't guaranteed to be valid UTF-8. Unfortunately, the
|
||||
standard library's UTF-8 conversion functions from `OsStr`s do not provide
|
||||
good error messages. However, the
|
||||
[`pattern_from_bytes`](fn.pattern_from_bytes.html)
|
||||
and
|
||||
[`pattern_from_os`](fn.pattern_from_os.html) routines
|
||||
do, including reporting exactly where the first invalid UTF-8 byte is seen.
|
||||
|
||||
Additionally, it can be useful to read patterns from a file while reporting
|
||||
good error messages that include line numbers. The
|
||||
[`patterns_from_path`](fn.patterns_from_path.html),
|
||||
[`patterns_from_reader`](fn.patterns_from_reader.html)
|
||||
and
|
||||
[`patterns_from_stdin`](fn.patterns_from_stdin.html)
|
||||
routines do just that. If any pattern is found that is invalid UTF-8, then the
|
||||
error includes the file path (if available) along with the line number and the
|
||||
byte offset at which the first invalid UTF-8 byte was observed.
|
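As an illustrative sketch, converting raw bytes into a pattern and inspecting
the reported offset:

```
use grep_cli::pattern_from_bytes;

fn main() {
    // Valid UTF-8 passes through as a borrowed pattern string.
    assert_eq!("foo", pattern_from_bytes(b"foo").unwrap());

    // Invalid UTF-8 produces an error pointing at the offending offset.
    let err = pattern_from_bytes(b"abc\xFFxyz").unwrap_err();
    assert_eq!(3, err.valid_up_to());
}
```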
||||
|
||||
|
||||
# Read process output
|
||||
|
||||
Sometimes a command line application needs to execute other processes and read
|
||||
their stdout in a streaming fashion. The
|
||||
[`CommandReader`](struct.CommandReader.html)
|
||||
provides this functionality with an explicit goal of improving failure modes.
|
||||
In particular, if the process exits with an error code, then stderr is read
|
||||
and converted into a normal Rust error to show to end users. This makes the
|
||||
underlying failure modes explicit and gives more information to end users for
|
||||
debugging the problem.
|
||||
|
||||
As a special case,
|
||||
[`DecompressionReader`](struct.DecompressionReader.html)
|
||||
provides a way to decompress arbitrary files by matching their file extensions
|
||||
up with corresponding decompression programs (such as `gzip` and `xz`). This
|
||||
is useful as a means of performing simplistic decompression in a portable
|
||||
manner without binding to specific compression libraries. This does come with
|
||||
some overhead though, so if you need to decompress lots of small files, this
|
||||
may not be an appropriate convenience to use.
|
||||
|
||||
Each reader has a corresponding builder for additional configuration, such as
|
||||
whether to read stderr asynchronously in order to avoid deadlock (which is
|
||||
enabled by default).
|
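As a rough sketch of the builder in use (the `gzip` invocation mirrors the
example on `CommandReader`, and the helper's name is hypothetical):

```
use std::io::Read;
use std::process::Command;

use grep_cli::CommandReaderBuilder;

fn decompress_man_page() -> Result<Vec<u8>, Box<dyn std::error::Error>> {
    let mut cmd = Command::new("gzip");
    cmd.arg("-d").arg("-c").arg("/usr/share/man/man1/ls.1.gz");

    // Read stderr on a separate thread (this is the default) so a noisy
    // child process can never deadlock while its stdout is being streamed.
    let mut rdr =
        CommandReaderBuilder::new().async_stderr(true).build(&mut cmd)?;
    let mut contents = vec![];
    rdr.read_to_end(&mut contents)?;
    Ok(contents)
}
```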
||||
|
||||
|
||||
# Miscellaneous parsing
|
||||
|
||||
The
|
||||
[`parse_human_readable_size`](fn.parse_human_readable_size.html)
|
||||
routine parses strings like `2M` and converts them to the corresponding number
|
||||
of bytes (`2 * 1<<20` in this case). If an invalid size is found, then a good
|
||||
error message is crafted that typically tells the user how to fix the problem.
|
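A minimal sketch of that routine in use:

```
use grep_cli::parse_human_readable_size;

fn main() {
    // `2M` is 2 * (1 << 20) bytes.
    assert_eq!(2 * (1 << 20), parse_human_readable_size("2M").unwrap());
    // A bare number is a byte count, and bad input is a readable error.
    assert_eq!(123, parse_human_readable_size("123").unwrap());
    assert!(parse_human_readable_size("123T").is_err());
}
```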
||||
*/
|
||||
|
||||
#![deny(missing_docs)]
|
||||
|
||||
mod decompress;
|
||||
mod escape;
|
||||
mod human;
|
||||
mod pattern;
|
||||
mod process;
|
||||
mod wtr;
|
||||
|
||||
pub use crate::decompress::{
|
||||
resolve_binary, DecompressionMatcher, DecompressionMatcherBuilder,
|
||||
DecompressionReader, DecompressionReaderBuilder,
|
||||
};
|
||||
pub use crate::escape::{escape, escape_os, unescape, unescape_os};
|
||||
pub use crate::human::{parse_human_readable_size, ParseSizeError};
|
||||
pub use crate::pattern::{
|
||||
pattern_from_bytes, pattern_from_os, patterns_from_path,
|
||||
patterns_from_reader, patterns_from_stdin, InvalidPatternError,
|
||||
};
|
||||
pub use crate::process::{CommandError, CommandReader, CommandReaderBuilder};
|
||||
pub use crate::wtr::{
|
||||
stdout, stdout_buffered_block, stdout_buffered_line, StandardStream,
|
||||
};
|
||||
|
||||
/// Returns true if and only if stdin is believed to be readable.
|
||||
///
|
||||
/// When stdin is readable, command line programs may choose to behave
|
||||
/// differently than when stdin is not readable. For example, `command foo`
|
||||
/// might search the current directory for occurrences of `foo`, whereas
|
||||
/// `command foo < some-file` or `cat some-file | command foo` might instead
|
||||
/// only search stdin for occurrences of `foo`.
|
||||
pub fn is_readable_stdin() -> bool {
|
||||
#[cfg(unix)]
|
||||
fn imp() -> bool {
|
||||
use same_file::Handle;
|
||||
use std::os::unix::fs::FileTypeExt;
|
||||
|
||||
let ft = match Handle::stdin().and_then(|h| h.as_file().metadata()) {
|
||||
Err(_) => return false,
|
||||
Ok(md) => md.file_type(),
|
||||
};
|
||||
ft.is_file() || ft.is_fifo() || ft.is_socket()
|
||||
}
|
||||
|
||||
#[cfg(windows)]
|
||||
fn imp() -> bool {
|
||||
use winapi_util as winutil;
|
||||
|
||||
winutil::file::typ(winutil::HandleRef::stdin())
|
||||
.map(|t| t.is_disk() || t.is_pipe())
|
||||
.unwrap_or(false)
|
||||
}
|
||||
|
||||
!is_tty_stdin() && imp()
|
||||
}
|
||||
|
||||
/// Returns true if and only if stdin is believed to be connected to a tty
|
||||
/// or a console.
|
||||
pub fn is_tty_stdin() -> bool {
|
||||
atty::is(atty::Stream::Stdin)
|
||||
}
|
||||
|
||||
/// Returns true if and only if stdout is believed to be connected to a tty
|
||||
/// or a console.
|
||||
///
|
||||
/// This is useful for when you want your command line program to produce
|
||||
/// different output depending on whether it's printing directly to a user's
|
||||
/// terminal or whether it's being redirected somewhere else. For example,
|
||||
/// implementations of `ls` will often show one item per line when stdout is
|
||||
/// redirected, but will show condensed output when printing to a tty.
|
||||
pub fn is_tty_stdout() -> bool {
|
||||
atty::is(atty::Stream::Stdout)
|
||||
}
|
||||
|
||||
/// Returns true if and only if stderr is believed to be connected to a tty
|
||||
/// or a console.
|
||||
pub fn is_tty_stderr() -> bool {
|
||||
atty::is(atty::Stream::Stderr)
|
||||
}
|
||||
crates/cli/src/pattern.rs (new file, 192 lines)
@@ -0,0 +1,192 @@
|
||||
use std::error;
|
||||
use std::ffi::OsStr;
|
||||
use std::fmt;
|
||||
use std::fs::File;
|
||||
use std::io;
|
||||
use std::path::Path;
|
||||
use std::str;
|
||||
|
||||
use bstr::io::BufReadExt;
|
||||
|
||||
use crate::escape::{escape, escape_os};
|
||||
|
||||
/// An error that occurs when a pattern could not be converted to valid UTF-8.
|
||||
///
|
||||
/// The purpose of this error is to give a more targeted failure mode for
|
||||
/// patterns written by end users that are not valid UTF-8.
|
||||
#[derive(Clone, Debug, Eq, PartialEq)]
|
||||
pub struct InvalidPatternError {
|
||||
original: String,
|
||||
valid_up_to: usize,
|
||||
}
|
||||
|
||||
impl InvalidPatternError {
|
||||
/// Returns the index in the given string up to which valid UTF-8 was
|
||||
/// verified.
|
||||
pub fn valid_up_to(&self) -> usize {
|
||||
self.valid_up_to
|
||||
}
|
||||
}
|
||||
|
||||
impl error::Error for InvalidPatternError {
|
||||
fn description(&self) -> &str {
|
||||
"invalid pattern"
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Display for InvalidPatternError {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
write!(
|
||||
f,
|
||||
"found invalid UTF-8 in pattern at byte offset {}: {} \
|
||||
(disable Unicode mode and use hex escape sequences to match \
|
||||
arbitrary bytes in a pattern, e.g., '(?-u)\\xFF')",
|
||||
self.valid_up_to, self.original,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<InvalidPatternError> for io::Error {
|
||||
fn from(paterr: InvalidPatternError) -> io::Error {
|
||||
io::Error::new(io::ErrorKind::Other, paterr)
|
||||
}
|
||||
}
|
||||
|
||||
/// Convert an OS string into a regular expression pattern.
|
||||
///
|
||||
/// This conversion fails if the given pattern is not valid UTF-8, in which
|
||||
/// case, a targeted error with more information about where the invalid UTF-8
|
||||
/// occurs is given. The error also suggests the use of hex escape sequences,
|
||||
/// which are supported by many regex engines.
|
||||
pub fn pattern_from_os(pattern: &OsStr) -> Result<&str, InvalidPatternError> {
|
||||
pattern.to_str().ok_or_else(|| {
|
||||
let valid_up_to = pattern
|
||||
.to_string_lossy()
|
||||
.find('\u{FFFD}')
|
||||
.expect("a Unicode replacement codepoint for invalid UTF-8");
|
||||
InvalidPatternError { original: escape_os(pattern), valid_up_to }
|
||||
})
|
||||
}
|
||||
|
||||
/// Convert arbitrary bytes into a regular expression pattern.
|
||||
///
|
||||
/// This conversion fails if the given pattern is not valid UTF-8, in which
|
||||
/// case, a targeted error with more information about where the invalid UTF-8
|
||||
/// occurs is given. The error also suggests the use of hex escape sequences,
|
||||
/// which are supported by many regex engines.
|
||||
pub fn pattern_from_bytes(
|
||||
pattern: &[u8],
|
||||
) -> Result<&str, InvalidPatternError> {
|
||||
str::from_utf8(pattern).map_err(|err| InvalidPatternError {
|
||||
original: escape(pattern),
|
||||
valid_up_to: err.valid_up_to(),
|
||||
})
|
||||
}
|
||||
|
||||
/// Read patterns from a file path, one per line.
|
||||
///
|
||||
/// If there was a problem reading or if any of the patterns contain invalid
|
||||
/// UTF-8, then an error is returned. If there was a problem with a specific
|
||||
/// pattern, then the error message will include the line number and the file
|
||||
/// path.
|
||||
pub fn patterns_from_path<P: AsRef<Path>>(path: P) -> io::Result<Vec<String>> {
|
||||
let path = path.as_ref();
|
||||
let file = File::open(path).map_err(|err| {
|
||||
io::Error::new(
|
||||
io::ErrorKind::Other,
|
||||
format!("{}: {}", path.display(), err),
|
||||
)
|
||||
})?;
|
||||
patterns_from_reader(file).map_err(|err| {
|
||||
io::Error::new(
|
||||
io::ErrorKind::Other,
|
||||
format!("{}:{}", path.display(), err),
|
||||
)
|
||||
})
|
||||
}
|
||||
|
||||
/// Read patterns from stdin, one per line.
|
||||
///
|
||||
/// If there was a problem reading or if any of the patterns contain invalid
|
||||
/// UTF-8, then an error is returned. If there was a problem with a specific
|
||||
/// pattern, then the error message will include the line number and the fact
|
||||
/// that it came from stdin.
|
||||
pub fn patterns_from_stdin() -> io::Result<Vec<String>> {
|
||||
let stdin = io::stdin();
|
||||
let locked = stdin.lock();
|
||||
patterns_from_reader(locked).map_err(|err| {
|
||||
io::Error::new(io::ErrorKind::Other, format!("<stdin>:{}", err))
|
||||
})
|
||||
}
|
||||
|
||||
/// Read patterns from any reader, one per line.
|
||||
///
|
||||
/// If there was a problem reading or if any of the patterns contain invalid
|
||||
/// UTF-8, then an error is returned. If there was a problem with a specific
|
||||
/// pattern, then the error message will include the line number.
|
||||
///
|
||||
/// Note that this routine uses its own internal buffer, so the caller should
|
||||
/// not provide their own buffered reader if possible.
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
/// This shows how to parse patterns, one per line.
|
||||
///
|
||||
/// ```
|
||||
/// use grep_cli::patterns_from_reader;
|
||||
///
|
||||
/// # fn example() -> Result<(), Box<::std::error::Error>> {
|
||||
/// let patterns = "\
|
||||
/// foo
|
||||
/// bar\\s+foo
|
||||
/// [a-z]{3}
|
||||
/// ";
|
||||
///
|
||||
/// assert_eq!(patterns_from_reader(patterns.as_bytes())?, vec![
|
||||
/// r"foo",
|
||||
/// r"bar\s+foo",
|
||||
/// r"[a-z]{3}",
|
||||
/// ]);
|
||||
/// # Ok(()) }
|
||||
/// ```
|
||||
pub fn patterns_from_reader<R: io::Read>(rdr: R) -> io::Result<Vec<String>> {
|
||||
let mut patterns = vec![];
|
||||
let mut line_number = 0;
|
||||
io::BufReader::new(rdr).for_byte_line(|line| {
|
||||
line_number += 1;
|
||||
match pattern_from_bytes(line) {
|
||||
Ok(pattern) => {
|
||||
patterns.push(pattern.to_string());
|
||||
Ok(true)
|
||||
}
|
||||
Err(err) => Err(io::Error::new(
|
||||
io::ErrorKind::Other,
|
||||
format!("{}: {}", line_number, err),
|
||||
)),
|
||||
}
|
||||
})?;
|
||||
Ok(patterns)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn bytes() {
|
||||
let pat = b"abc\xFFxyz";
|
||||
let err = pattern_from_bytes(pat).unwrap_err();
|
||||
assert_eq!(3, err.valid_up_to());
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[cfg(unix)]
|
||||
fn os() {
|
||||
use std::ffi::OsStr;
|
||||
use std::os::unix::ffi::OsStrExt;
|
||||
|
||||
let pat = OsStr::from_bytes(b"abc\xFFxyz");
|
||||
let err = pattern_from_os(pat).unwrap_err();
|
||||
assert_eq!(3, err.valid_up_to());
|
||||
}
|
||||
}
|
||||
crates/cli/src/process.rs (new file, 324 lines)
@@ -0,0 +1,324 @@
|
||||
use std::error;
|
||||
use std::fmt;
|
||||
use std::io::{self, Read};
|
||||
use std::iter;
|
||||
use std::process;
|
||||
use std::thread::{self, JoinHandle};
|
||||
|
||||
/// An error that can occur while running a command and reading its output.
|
||||
///
|
||||
/// This error can be seamlessly converted to an `io::Error` via a `From`
|
||||
/// implementation.
|
||||
#[derive(Debug)]
|
||||
pub struct CommandError {
|
||||
kind: CommandErrorKind,
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
enum CommandErrorKind {
|
||||
Io(io::Error),
|
||||
Stderr(Vec<u8>),
|
||||
}
|
||||
|
||||
impl CommandError {
|
||||
/// Create an error from an I/O error.
|
||||
pub(crate) fn io(ioerr: io::Error) -> CommandError {
|
||||
CommandError { kind: CommandErrorKind::Io(ioerr) }
|
||||
}
|
||||
|
||||
/// Create an error from the contents of stderr (which may be empty).
|
||||
pub(crate) fn stderr(bytes: Vec<u8>) -> CommandError {
|
||||
CommandError { kind: CommandErrorKind::Stderr(bytes) }
|
||||
}
|
||||
|
||||
/// Returns true if and only if this error has empty data from stderr.
|
||||
pub(crate) fn is_empty(&self) -> bool {
|
||||
match self.kind {
|
||||
CommandErrorKind::Stderr(ref bytes) => bytes.is_empty(),
|
||||
_ => false,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl error::Error for CommandError {
|
||||
fn description(&self) -> &str {
|
||||
"command error"
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Display for CommandError {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
match self.kind {
|
||||
CommandErrorKind::Io(ref e) => e.fmt(f),
|
||||
CommandErrorKind::Stderr(ref bytes) => {
|
||||
let msg = String::from_utf8_lossy(bytes);
|
||||
if msg.trim().is_empty() {
|
||||
write!(f, "<stderr is empty>")
|
||||
} else {
|
||||
let div = iter::repeat('-').take(79).collect::<String>();
|
||||
write!(
|
||||
f,
|
||||
"\n{div}\n{msg}\n{div}",
|
||||
div = div,
|
||||
msg = msg.trim()
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<io::Error> for CommandError {
|
||||
fn from(ioerr: io::Error) -> CommandError {
|
||||
CommandError { kind: CommandErrorKind::Io(ioerr) }
|
||||
}
|
||||
}
|
||||
|
||||
impl From<CommandError> for io::Error {
|
||||
fn from(cmderr: CommandError) -> io::Error {
|
||||
match cmderr.kind {
|
||||
CommandErrorKind::Io(ioerr) => ioerr,
|
||||
CommandErrorKind::Stderr(_) => {
|
||||
io::Error::new(io::ErrorKind::Other, cmderr)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Configures and builds a streaming reader for process output.
|
||||
#[derive(Clone, Debug, Default)]
|
||||
pub struct CommandReaderBuilder {
|
||||
async_stderr: bool,
|
||||
}
|
||||
|
||||
impl CommandReaderBuilder {
|
||||
/// Create a new builder with the default configuration.
|
||||
pub fn new() -> CommandReaderBuilder {
|
||||
CommandReaderBuilder::default()
|
||||
}
|
||||
|
||||
/// Build a new streaming reader for the given command's output.
|
||||
///
|
||||
/// The caller should set everything that's required on the given command
|
||||
/// before building a reader, such as its arguments, environment and
|
||||
/// current working directory. Settings such as the stdout and stderr (but
|
||||
/// not stdin) pipes will be overridden so that they can be controlled by
|
||||
/// the reader.
|
||||
///
|
||||
/// If there was a problem spawning the given command, then its error is
|
||||
/// returned.
|
||||
pub fn build(
|
||||
&self,
|
||||
command: &mut process::Command,
|
||||
) -> Result<CommandReader, CommandError> {
|
||||
let mut child = command
|
||||
.stdout(process::Stdio::piped())
|
||||
.stderr(process::Stdio::piped())
|
||||
.spawn()?;
|
||||
let stderr = if self.async_stderr {
|
||||
StderrReader::r#async(child.stderr.take().unwrap())
|
||||
} else {
|
||||
StderrReader::sync(child.stderr.take().unwrap())
|
||||
};
|
||||
Ok(CommandReader { child, stderr, eof: false })
|
||||
}
|
||||
|
||||
/// When enabled, the reader will asynchronously read the contents of the
|
||||
/// command's stderr output. When disabled, stderr is only read after the
|
||||
/// stdout stream has been exhausted (or if the process quits with an error
|
||||
/// code).
|
||||
///
|
||||
/// Note that when enabled, this may require launching an additional
|
||||
/// thread in order to read stderr. This is done so that the process being
|
||||
/// executed is never blocked from writing to stdout or stderr. If this is
|
||||
/// disabled, then it is possible for the process to fill up the stderr
|
||||
/// buffer and deadlock.
|
||||
///
|
||||
/// This is enabled by default.
|
||||
pub fn async_stderr(&mut self, yes: bool) -> &mut CommandReaderBuilder {
|
||||
self.async_stderr = yes;
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
/// A streaming reader for a command's output.
|
||||
///
|
||||
/// The purpose of this reader is to provide an easy way to execute processes
|
||||
/// whose stdout is read in a streaming way while also making the processes'
|
||||
/// stderr available when the process fails with an exit code. This makes it
|
||||
/// possible to execute processes while surfacing the underlying failure mode
|
||||
/// in the case of an error.
|
||||
///
|
||||
/// Moreover, by default, this reader will asynchronously read the processes'
|
||||
/// stderr. This prevents subtle deadlocking bugs for noisy processes that
|
||||
/// write a lot to stderr. Currently, the entire contents of stderr are read
|
||||
/// onto the heap.
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
/// This example shows how to invoke `gzip` to decompress the contents of a
|
||||
/// file. If the `gzip` command reports a failing exit status, then its stderr
|
||||
/// is returned as an error.
|
||||
///
|
||||
/// ```no_run
|
||||
/// use std::io::Read;
|
||||
/// use std::process::Command;
|
||||
/// use grep_cli::CommandReader;
|
||||
///
|
||||
/// # fn example() -> Result<(), Box<::std::error::Error>> {
|
||||
/// let mut cmd = Command::new("gzip");
|
||||
/// cmd.arg("-d").arg("-c").arg("/usr/share/man/man1/ls.1.gz");
|
||||
///
|
||||
/// let mut rdr = CommandReader::new(&mut cmd)?;
|
||||
/// let mut contents = vec![];
|
||||
/// rdr.read_to_end(&mut contents)?;
|
||||
/// # Ok(()) }
|
||||
/// ```
|
||||
#[derive(Debug)]
|
||||
pub struct CommandReader {
|
||||
child: process::Child,
|
||||
stderr: StderrReader,
|
||||
/// This is set to true once 'read' returns zero bytes. When this isn't
|
||||
/// set and we close the reader, then we anticipate a pipe error when
|
||||
/// reaping the child process and silence it.
|
||||
eof: bool,
|
||||
}
|
||||
|
||||
impl CommandReader {
|
||||
/// Create a new streaming reader for the given command using the default
|
||||
/// configuration.
|
||||
///
|
||||
/// The caller should set everything that's required on the given command
|
||||
/// before building a reader, such as its arguments, environment and
|
||||
/// current working directory. Settings such as the stdout and stderr (but
|
||||
/// not stdin) pipes will be overridden so that they can be controlled by
|
||||
/// the reader.
|
||||
///
|
||||
/// If there was a problem spawning the given command, then its error is
|
||||
/// returned.
|
||||
///
|
||||
/// If the caller requires additional configuration for the reader
|
||||
/// returned, then use
|
||||
/// [`CommandReaderBuilder`](struct.CommandReaderBuilder.html).
|
||||
pub fn new(
|
||||
cmd: &mut process::Command,
|
||||
) -> Result<CommandReader, CommandError> {
|
||||
CommandReaderBuilder::new().build(cmd)
|
||||
}
|
||||
|
||||
/// Closes the CommandReader, freeing any resources used by its underlying
|
||||
/// child process. If the child process exits with a nonzero exit code, the
|
||||
/// returned Err value will include its stderr.
|
||||
///
|
||||
/// `close` is idempotent, meaning it can be safely called multiple times.
|
||||
/// The first call closes the CommandReader and any subsequent calls do
|
||||
/// nothing.
|
||||
///
|
||||
/// This method should be called after partially reading a file to prevent
|
||||
/// resource leakage. However there is no need to call `close` explicitly
|
||||
/// if your code always calls `read` to EOF, as `read` takes care of
|
||||
/// calling `close` in this case.
|
||||
///
|
||||
/// `close` is also called in `drop` as a last line of defense against
|
||||
/// resource leakage. Any error from the child process is then printed as a
|
||||
/// warning to stderr. This can be avoided by explicitly calling `close`
|
||||
/// before the CommandReader is dropped.
|
||||
pub fn close(&mut self) -> io::Result<()> {
|
||||
// Dropping stdout closes the underlying file descriptor, which should
|
||||
// cause a well-behaved child process to exit. If child.stdout is None
|
||||
// we assume that close() has already been called and do nothing.
|
||||
let stdout = match self.child.stdout.take() {
|
||||
None => return Ok(()),
|
||||
Some(stdout) => stdout,
|
||||
};
|
||||
drop(stdout);
|
||||
if self.child.wait()?.success() {
|
||||
Ok(())
|
||||
} else {
|
||||
let err = self.stderr.read_to_end();
|
||||
// In the specific case where we haven't consumed the full data
|
||||
// from the child process, then closing stdout above results in
|
||||
// a pipe signal being thrown in most cases. But I don't think
|
||||
// there is any reliable and portable way of detecting it. Instead,
|
||||
// if we know we haven't hit EOF (so we anticipate a broken pipe
|
||||
// error) and if stderr otherwise doesn't have anything on it, then
|
||||
// we assume total success.
|
||||
if !self.eof && err.is_empty() {
|
||||
return Ok(());
|
||||
}
|
||||
Err(io::Error::from(err))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Drop for CommandReader {
|
||||
fn drop(&mut self) {
|
||||
if let Err(error) = self.close() {
|
||||
log::warn!("{}", error);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl io::Read for CommandReader {
|
||||
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
|
||||
let stdout = match self.child.stdout {
|
||||
None => return Ok(0),
|
||||
Some(ref mut stdout) => stdout,
|
||||
};
|
||||
let nread = stdout.read(buf)?;
|
||||
if nread == 0 {
|
||||
self.eof = true;
|
||||
self.close().map(|_| 0)
|
||||
} else {
|
||||
Ok(nread)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// A reader that encapsulates the asynchronous or synchronous reading of
|
||||
/// stderr.
|
||||
#[derive(Debug)]
|
||||
enum StderrReader {
|
||||
Async(Option<JoinHandle<CommandError>>),
|
||||
Sync(process::ChildStderr),
|
||||
}
|
||||
|
||||
impl StderrReader {
|
||||
/// Create a reader for stderr that reads contents asynchronously.
|
||||
fn r#async(mut stderr: process::ChildStderr) -> StderrReader {
|
||||
let handle =
|
||||
thread::spawn(move || stderr_to_command_error(&mut stderr));
|
||||
StderrReader::Async(Some(handle))
|
||||
}
|
||||
|
||||
/// Create a reader for stderr that reads contents synchronously.
|
||||
fn sync(stderr: process::ChildStderr) -> StderrReader {
|
||||
StderrReader::Sync(stderr)
|
||||
}
|
||||
|
||||
/// Consumes all of stderr on to the heap and returns it as an error.
|
||||
///
|
||||
/// If there was a problem reading stderr itself, then this returns an I/O
|
||||
/// command error.
|
||||
fn read_to_end(&mut self) -> CommandError {
|
||||
match *self {
|
||||
StderrReader::Async(ref mut handle) => {
|
||||
let handle = handle
|
||||
.take()
|
||||
.expect("read_to_end cannot be called more than once");
|
||||
handle.join().expect("stderr reading thread does not panic")
|
||||
}
|
||||
StderrReader::Sync(ref mut stderr) => {
|
||||
stderr_to_command_error(stderr)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn stderr_to_command_error(stderr: &mut process::ChildStderr) -> CommandError {
|
||||
let mut bytes = vec![];
|
||||
match stderr.read_to_end(&mut bytes) {
|
||||
Ok(_) => CommandError::stderr(bytes),
|
||||
Err(err) => CommandError::io(err),
|
||||
}
|
||||
}
|
||||
crates/cli/src/wtr.rs (new file, 133 lines)
@@ -0,0 +1,133 @@
|
||||
use std::io;
|
||||
|
||||
use termcolor;
|
||||
|
||||
use crate::is_tty_stdout;
|
||||
|
||||
/// A writer that supports coloring with either line or block buffering.
|
||||
pub struct StandardStream(StandardStreamKind);
|
||||
|
||||
/// Returns a possibly buffered writer to stdout for the given color choice.
|
||||
///
|
||||
/// The writer returned is either line buffered or block buffered. The decision
|
||||
/// between these two is made automatically based on whether a tty is attached
|
||||
/// to stdout or not. If a tty is attached, then line buffering is used.
|
||||
/// Otherwise, block buffering is used. In general, block buffering is more
|
||||
/// efficient, but may increase the time it takes for the end user to see the
|
||||
/// first bits of output.
|
||||
///
|
||||
/// If you need more fine grained control over the buffering mode, then use one
|
||||
/// of `stdout_buffered_line` or `stdout_buffered_block`.
|
||||
///
|
||||
/// The color choice given is passed along to the underlying writer. To
|
||||
/// completely disable colors in all cases, use `ColorChoice::Never`.
|
||||
pub fn stdout(color_choice: termcolor::ColorChoice) -> StandardStream {
|
||||
if is_tty_stdout() {
|
||||
stdout_buffered_line(color_choice)
|
||||
} else {
|
||||
stdout_buffered_block(color_choice)
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns a line buffered writer to stdout for the given color choice.
|
||||
///
|
||||
/// This writer is useful when printing results directly to a tty such that
|
||||
/// users see output as soon as it's written. The downside of this approach
|
||||
/// is that it can be slower, especially when there is a lot of output.
|
||||
///
|
||||
/// You might consider using
|
||||
/// [`stdout`](fn.stdout.html)
|
||||
/// instead, which chooses the buffering strategy automatically based on
|
||||
/// whether stdout is connected to a tty.
|
||||
pub fn stdout_buffered_line(
|
||||
color_choice: termcolor::ColorChoice,
|
||||
) -> StandardStream {
|
||||
let out = termcolor::StandardStream::stdout(color_choice);
|
||||
StandardStream(StandardStreamKind::LineBuffered(out))
|
||||
}
|
||||
|
||||
/// Returns a block buffered writer to stdout for the given color choice.
|
||||
///
|
||||
/// This writer is useful when printing results to a file since it amortizes
|
||||
/// the cost of writing data. The downside of this approach is that it can
|
||||
/// increase the latency of display output when writing to a tty.
|
||||
///
|
||||
/// You might consider using
|
||||
/// [`stdout`](fn.stdout.html)
|
||||
/// instead, which chooses the buffering strategy automatically based on
|
||||
/// whether stdout is connected to a tty.
|
||||
pub fn stdout_buffered_block(
|
||||
color_choice: termcolor::ColorChoice,
|
||||
) -> StandardStream {
|
||||
let out = termcolor::BufferedStandardStream::stdout(color_choice);
|
||||
StandardStream(StandardStreamKind::BlockBuffered(out))
|
||||
}
|
||||
|
||||
enum StandardStreamKind {
|
||||
LineBuffered(termcolor::StandardStream),
|
||||
BlockBuffered(termcolor::BufferedStandardStream),
|
||||
}
|
||||
|
||||
impl io::Write for StandardStream {
|
||||
#[inline]
|
||||
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
|
||||
use self::StandardStreamKind::*;
|
||||
|
||||
match self.0 {
|
||||
LineBuffered(ref mut w) => w.write(buf),
|
||||
BlockBuffered(ref mut w) => w.write(buf),
|
||||
}
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn flush(&mut self) -> io::Result<()> {
|
||||
use self::StandardStreamKind::*;
|
||||
|
||||
match self.0 {
|
||||
LineBuffered(ref mut w) => w.flush(),
|
||||
BlockBuffered(ref mut w) => w.flush(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl termcolor::WriteColor for StandardStream {
|
||||
#[inline]
|
||||
fn supports_color(&self) -> bool {
|
||||
use self::StandardStreamKind::*;
|
||||
|
||||
match self.0 {
|
||||
LineBuffered(ref w) => w.supports_color(),
|
||||
BlockBuffered(ref w) => w.supports_color(),
|
||||
}
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn set_color(&mut self, spec: &termcolor::ColorSpec) -> io::Result<()> {
|
||||
use self::StandardStreamKind::*;
|
||||
|
||||
match self.0 {
|
||||
LineBuffered(ref mut w) => w.set_color(spec),
|
||||
BlockBuffered(ref mut w) => w.set_color(spec),
|
||||
}
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn reset(&mut self) -> io::Result<()> {
|
||||
use self::StandardStreamKind::*;
|
||||
|
||||
match self.0 {
|
||||
LineBuffered(ref mut w) => w.reset(),
|
||||
BlockBuffered(ref mut w) => w.reset(),
|
||||
}
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn is_synchronous(&self) -> bool {
|
||||
use self::StandardStreamKind::*;
|
||||
|
||||
match self.0 {
|
||||
LineBuffered(ref w) => w.is_synchronous(),
|
||||
BlockBuffered(ref w) => w.is_synchronous(),
|
||||
}
|
||||
}
|
||||
}
|
||||
crates/core/README.md (new file, 15 lines)
@@ -0,0 +1,15 @@
|
||||
ripgrep core
|
||||
------------
|
||||
This is the core ripgrep crate. In particular, `main.rs` is where the `main`
|
||||
function lives.
|
||||
|
||||
Most of ripgrep core consists of two things:
|
||||
|
||||
* The definition of the CLI interface, including docs for every flag.
|
||||
* Glue code that brings the `grep-matcher`, `grep-regex`, `grep-searcher` and
|
||||
`grep-printer` crates together to actually execute the search.
|
||||
|
||||
Currently, there are no plans to make ripgrep core available as an independent
|
||||
library. However, much of the heavy lifting of ripgrep is done via its
|
||||
constituent crates, which can be reused independent of ripgrep. Unfortunately,
|
||||
there is no guide or tutorial to teach folks how to do this yet.
|
||||
crates/core/app.rs (new file, 3109 lines; diff suppressed because it is too large)
crates/core/args.rs (new file, 1888 lines; diff suppressed because it is too large)
@@ -4,18 +4,18 @@
|
||||
|
||||
use std::env;
|
||||
use std::error::Error;
|
||||
use std::fs::File;
|
||||
use std::io::{self, BufRead};
|
||||
use std::ffi::OsString;
|
||||
use std::fs::File;
|
||||
use std::io;
|
||||
use std::path::{Path, PathBuf};
|
||||
|
||||
use Result;
|
||||
use bstr::{io::BufReadExt, ByteSlice};
|
||||
use log;
|
||||
|
||||
use crate::Result;
|
||||
|
||||
/// Return a sequence of arguments derived from ripgrep rc configuration files.
|
||||
///
|
||||
/// If no_messages is false and there was a problem reading a config file,
|
||||
/// then errors are printed to stderr.
|
||||
pub fn args(no_messages: bool) -> Vec<OsString> {
|
||||
pub fn args() -> Vec<OsString> {
|
||||
let config_path = match env::var_os("RIPGREP_CONFIG_PATH") {
|
||||
None => return vec![],
|
||||
Some(config_path) => {
|
||||
@@ -28,20 +28,23 @@ pub fn args(no_messages: bool) -> Vec<OsString> {
|
||||
let (args, errs) = match parse(&config_path) {
|
||||
Ok((args, errs)) => (args, errs),
|
||||
Err(err) => {
|
||||
if !no_messages {
|
||||
eprintln!("{}", err);
|
||||
}
|
||||
message!(
|
||||
"failed to read the file specified in RIPGREP_CONFIG_PATH: {}",
|
||||
err
|
||||
);
|
||||
return vec![];
|
||||
}
|
||||
};
|
||||
if !no_messages && !errs.is_empty() {
|
||||
if !errs.is_empty() {
|
||||
for err in errs {
|
||||
eprintln!("{}:{}", config_path.display(), err);
|
||||
message!("{}:{}", config_path.display(), err);
|
||||
}
|
||||
}
|
||||
debug!(
|
||||
log::debug!(
|
||||
"{}: arguments loaded from config file: {:?}",
|
||||
config_path.display(), args);
|
||||
config_path.display(),
|
||||
args
|
||||
);
|
||||
args
|
||||
}
|
||||
|
||||
@@ -55,11 +58,11 @@ pub fn args(no_messages: bool) -> Vec<OsString> {
|
||||
/// for each line in addition to successfully parsed arguments.
|
||||
fn parse<P: AsRef<Path>>(
|
||||
path: P,
|
||||
) -> Result<(Vec<OsString>, Vec<Box<Error>>)> {
|
||||
) -> Result<(Vec<OsString>, Vec<Box<dyn Error>>)> {
|
||||
let path = path.as_ref();
|
||||
match File::open(&path) {
|
||||
Ok(file) => parse_reader(file),
|
||||
Err(err) => errored!("{}: {}", path.display(), err),
|
||||
Err(err) => Err(From::from(format!("{}: {}", path.display(), err))),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -76,71 +79,39 @@ fn parse<P: AsRef<Path>>(
|
||||
/// in addition to successfully parsed arguments.
|
||||
fn parse_reader<R: io::Read>(
|
||||
rdr: R,
|
||||
) -> Result<(Vec<OsString>, Vec<Box<Error>>)> {
|
||||
) -> Result<(Vec<OsString>, Vec<Box<dyn Error>>)> {
|
||||
let mut bufrdr = io::BufReader::new(rdr);
|
||||
let (mut args, mut errs) = (vec![], vec![]);
|
||||
let mut line = vec![];
|
||||
let mut line_number = 0;
|
||||
while {
|
||||
line.clear();
|
||||
bufrdr.for_byte_line_with_terminator(|line| {
|
||||
line_number += 1;
|
||||
bufrdr.read_until(b'\n', &mut line)? > 0
|
||||
} {
|
||||
trim(&mut line);
|
||||
|
||||
let line = line.trim();
|
||||
if line.is_empty() || line[0] == b'#' {
|
||||
continue;
|
||||
return Ok(true);
|
||||
}
|
||||
match bytes_to_os_string(&line) {
|
||||
match line.to_os_str() {
|
||||
Ok(osstr) => {
|
||||
args.push(osstr);
|
||||
args.push(osstr.to_os_string());
|
||||
}
|
||||
Err(err) => {
|
||||
errs.push(format!("{}: {}", line_number, err).into());
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(true)
|
||||
})?;
|
||||
Ok((args, errs))
|
||||
}
|
||||
|
||||
/// Trim the given bytes of whitespace according to the ASCII definition.
|
||||
fn trim(x: &mut Vec<u8>) {
|
||||
let upto = x.iter().take_while(|b| is_space(**b)).count();
|
||||
x.drain(..upto);
|
||||
let revto = x.len() - x.iter().rev().take_while(|b| is_space(**b)).count();
|
||||
x.drain(revto..);
|
||||
}
|
||||
|
||||
/// Returns true if and only if the given byte is an ASCII space character.
|
||||
fn is_space(b: u8) -> bool {
|
||||
b == b'\t'
|
||||
|| b == b'\n'
|
||||
|| b == b'\x0B'
|
||||
|| b == b'\x0C'
|
||||
|| b == b'\r'
|
||||
|| b == b' '
|
||||
}
|
||||
|
||||
/// On Unix, get an OsString from raw bytes.
|
||||
#[cfg(unix)]
|
||||
fn bytes_to_os_string(bytes: &[u8]) -> Result<OsString> {
|
||||
use std::os::unix::ffi::OsStringExt;
|
||||
Ok(OsString::from_vec(bytes.to_vec()))
|
||||
}
|
||||
|
||||
/// On non-Unix (like Windows), require UTF-8.
|
||||
#[cfg(not(unix))]
|
||||
fn bytes_to_os_string(bytes: &[u8]) -> Result<OsString> {
|
||||
String::from_utf8(bytes.to_vec()).map(OsString::from).map_err(From::from)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::ffi::OsString;
|
||||
use super::parse_reader;
|
||||
use std::ffi::OsString;
|
||||
|
||||
#[test]
|
||||
fn basic() {
|
||||
let (args, errs) = parse_reader(&b"\
|
||||
let (args, errs) = parse_reader(
|
||||
&b"\
|
||||
# Test
|
||||
--context=0
|
||||
--smart-case
|
||||
@@ -149,13 +120,13 @@ mod tests {
|
||||
|
||||
# --bar
|
||||
--foo
|
||||
"[..]).unwrap();
|
||||
"[..],
|
||||
)
|
||||
.unwrap();
|
||||
assert!(errs.is_empty());
|
||||
let args: Vec<String> =
|
||||
args.into_iter().map(|s| s.into_string().unwrap()).collect();
|
||||
assert_eq!(args, vec![
|
||||
"--context=0", "--smart-case", "-u", "--foo",
|
||||
]);
|
||||
assert_eq!(args, vec!["--context=0", "--smart-case", "-u", "--foo",]);
|
||||
}
|
||||
|
||||
// We test that we can handle invalid UTF-8 on Unix-like systems.
|
||||
@@ -164,32 +135,38 @@ mod tests {
|
||||
fn error() {
|
||||
use std::os::unix::ffi::OsStringExt;
|
||||
|
||||
let (args, errs) = parse_reader(&b"\
|
||||
let (args, errs) = parse_reader(
|
||||
&b"\
|
||||
quux
|
||||
foo\xFFbar
|
||||
baz
|
||||
"[..]).unwrap();
|
||||
"[..],
|
||||
)
|
||||
.unwrap();
|
||||
assert!(errs.is_empty());
|
||||
assert_eq!(args, vec![
|
||||
assert_eq!(
|
||||
args,
|
||||
vec![
|
||||
OsString::from("quux"),
|
||||
OsString::from_vec(b"foo\xFFbar".to_vec()),
|
||||
OsString::from("baz"),
|
||||
]);
|
||||
]
|
||||
);
|
||||
}
|
||||
|
||||
// ... but test that invalid UTF-8 fails on Windows.
|
||||
#[test]
|
||||
#[cfg(not(unix))]
|
||||
fn error() {
|
||||
let (args, errs) = parse_reader(&b"\
|
||||
let (args, errs) = parse_reader(
|
||||
&b"\
|
||||
quux
|
||||
foo\xFFbar
|
||||
baz
|
||||
"[..]).unwrap();
|
||||
"[..],
|
||||
)
|
||||
.unwrap();
|
||||
assert_eq!(errs.len(), 1);
|
||||
assert_eq!(args, vec![
|
||||
OsString::from("quux"),
|
||||
OsString::from("baz"),
|
||||
]);
|
||||
assert_eq!(args, vec![OsString::from("quux"), OsString::from("baz"),]);
|
||||
}
|
||||
}
|
||||
@@ -24,29 +24,40 @@ impl Logger {
|
||||
}
|
||||
|
||||
impl Log for Logger {
|
||||
fn enabled(&self, _: &log::Metadata) -> bool {
|
||||
fn enabled(&self, _: &log::Metadata<'_>) -> bool {
|
||||
// We set the log level via log::set_max_level, so we don't need to
|
||||
// implement filtering here.
|
||||
true
|
||||
}
|
||||
|
||||
fn log(&self, record: &log::Record) {
|
||||
fn log(&self, record: &log::Record<'_>) {
|
||||
match (record.file(), record.line()) {
|
||||
(Some(file), Some(line)) => {
|
||||
eprintln!(
|
||||
"{}/{}/{}:{}: {}",
|
||||
record.level(), record.target(),
|
||||
file, line, record.args());
|
||||
"{}|{}|{}:{}: {}",
|
||||
record.level(),
|
||||
record.target(),
|
||||
file,
|
||||
line,
|
||||
record.args()
|
||||
);
|
||||
}
|
||||
(Some(file), None) => {
|
||||
eprintln!(
|
||||
"{}/{}/{}: {}",
|
||||
record.level(), record.target(), file, record.args());
|
||||
"{}|{}|{}: {}",
|
||||
record.level(),
|
||||
record.target(),
|
||||
file,
|
||||
record.args()
|
||||
);
|
||||
}
|
||||
_ => {
|
||||
eprintln!(
|
||||
"{}/{}: {}",
|
||||
record.level(), record.target(), record.args());
|
||||
"{}|{}: {}",
|
||||
record.level(),
|
||||
record.target(),
|
||||
record.args()
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
crates/core/main.rs (new file, 344 lines)
@@ -0,0 +1,344 @@
|
||||
use std::error;
|
||||
use std::io::{self, Write};
|
||||
use std::process;
|
||||
use std::sync::Mutex;
|
||||
use std::time::Instant;
|
||||
|
||||
use ignore::WalkState;
|
||||
|
||||
use args::Args;
|
||||
use subject::Subject;
|
||||
|
||||
#[macro_use]
|
||||
mod messages;
|
||||
|
||||
mod app;
|
||||
mod args;
|
||||
mod config;
|
||||
mod logger;
|
||||
mod path_printer;
|
||||
mod search;
|
||||
mod subject;
|
||||
|
||||
// Since Rust no longer uses jemalloc by default, ripgrep will, by default,
|
||||
// use the system allocator. On Linux, this would normally be glibc's
|
||||
// allocator, which is pretty good. In particular, ripgrep does not have a
|
||||
// particularly allocation heavy workload, so there really isn't much
|
||||
// difference (for ripgrep's purposes) between glibc's allocator and jemalloc.
|
||||
//
|
||||
// However, when ripgrep is built with musl, this means ripgrep will use musl's
|
||||
// allocator, which appears to be substantially worse. (musl's goal is not to
|
||||
// have the fastest version of everything. Its goal is to be small and amenable
|
||||
// to static compilation.) Even though ripgrep isn't particularly allocation
|
||||
// heavy, musl's allocator appears to slow down ripgrep quite a bit. Therefore,
|
||||
// when building with musl, we use jemalloc.
|
||||
//
|
||||
// We don't unconditionally use jemalloc because it can be nice to use the
|
||||
// system's default allocator by default. Moreover, jemalloc seems to increase
|
||||
// compilation times by a bit.
|
||||
//
|
||||
// Moreover, we only do this on 64-bit systems since jemalloc doesn't support
|
||||
// i686.
|
||||
#[cfg(all(target_env = "musl", target_pointer_width = "64"))]
|
||||
#[global_allocator]
|
||||
static ALLOC: jemallocator::Jemalloc = jemallocator::Jemalloc;
|
||||
|
||||
type Result<T> = ::std::result::Result<T, Box<dyn error::Error>>;
|
||||
|
||||
fn main() {
|
||||
if let Err(err) = Args::parse().and_then(try_main) {
|
||||
eprintln!("{}", err);
|
||||
process::exit(2);
|
||||
}
|
||||
}
|
||||
|
||||
fn try_main(args: Args) -> Result<()> {
|
||||
use args::Command::*;
|
||||
|
||||
let matched = match args.command() {
|
||||
Search => search(&args),
|
||||
SearchParallel => search_parallel(&args),
|
||||
SearchNever => Ok(false),
|
||||
Files => files(&args),
|
||||
FilesParallel => files_parallel(&args),
|
||||
Types => types(&args),
|
||||
PCRE2Version => pcre2_version(&args),
|
||||
}?;
|
||||
if matched && (args.quiet() || !messages::errored()) {
|
||||
process::exit(0)
|
||||
} else if messages::errored() {
|
||||
process::exit(2)
|
||||
} else {
|
||||
process::exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
/// The top-level entry point for single-threaded search. This recursively
|
||||
/// steps through the file list (current directory by default) and searches
|
||||
/// each file sequentially.
|
||||
fn search(args: &Args) -> Result<bool> {
|
||||
let started_at = Instant::now();
|
||||
let quit_after_match = args.quit_after_match()?;
|
||||
let subject_builder = args.subject_builder();
|
||||
let mut stats = args.stats()?;
|
||||
let mut searcher = args.search_worker(args.stdout())?;
|
||||
let mut matched = false;
|
||||
let mut searched = false;
|
||||
|
||||
for result in args.walker()? {
|
||||
let subject = match subject_builder.build_from_result(result) {
|
||||
Some(subject) => subject,
|
||||
None => continue,
|
||||
};
|
||||
searched = true;
|
||||
let search_result = match searcher.search(&subject) {
|
||||
Ok(search_result) => search_result,
|
||||
Err(err) => {
|
||||
// A broken pipe means graceful termination.
|
||||
if err.kind() == io::ErrorKind::BrokenPipe {
|
||||
break;
|
||||
}
|
||||
err_message!("{}: {}", subject.path().display(), err);
|
||||
continue;
|
||||
}
|
||||
};
|
||||
matched = matched || search_result.has_match();
|
||||
if let Some(ref mut stats) = stats {
|
||||
*stats += search_result.stats().unwrap();
|
||||
}
|
||||
if matched && quit_after_match {
|
||||
break;
|
||||
}
|
||||
}
|
||||
if args.using_default_path() && !searched {
|
||||
eprint_nothing_searched();
|
||||
}
|
||||
if let Some(ref stats) = stats {
|
||||
let elapsed = Instant::now().duration_since(started_at);
|
||||
// We don't care if we couldn't print this successfully.
|
||||
let _ = searcher.print_stats(elapsed, stats);
|
||||
}
|
||||
Ok(matched)
|
||||
}
|
||||
|
||||
/// The top-level entry point for multi-threaded search. The parallelism is
|
||||
/// itself achieved by the recursive directory traversal. All we need to do is
|
||||
/// feed it a worker for performing a search on each file.
|
||||
fn search_parallel(args: &Args) -> Result<bool> {
|
||||
use std::sync::atomic::AtomicBool;
|
||||
use std::sync::atomic::Ordering::SeqCst;
|
||||
|
||||
let quit_after_match = args.quit_after_match()?;
|
||||
let started_at = Instant::now();
|
||||
let subject_builder = args.subject_builder();
|
||||
let bufwtr = args.buffer_writer()?;
|
||||
let stats = args.stats()?.map(Mutex::new);
|
||||
let matched = AtomicBool::new(false);
|
||||
let searched = AtomicBool::new(false);
|
||||
let mut searcher_err = None;
|
||||
args.walker_parallel()?.run(|| {
|
||||
let bufwtr = &bufwtr;
|
||||
let stats = &stats;
|
||||
let matched = &matched;
|
||||
let searched = &searched;
|
||||
let subject_builder = &subject_builder;
|
||||
let mut searcher = match args.search_worker(bufwtr.buffer()) {
|
||||
Ok(searcher) => searcher,
|
||||
Err(err) => {
|
||||
searcher_err = Some(err);
|
||||
return Box::new(move |_| WalkState::Quit);
|
||||
}
|
||||
};
|
||||
|
||||
Box::new(move |result| {
|
||||
let subject = match subject_builder.build_from_result(result) {
|
||||
Some(subject) => subject,
|
||||
None => return WalkState::Continue,
|
||||
};
|
||||
searched.store(true, SeqCst);
|
||||
searcher.printer().get_mut().clear();
|
||||
let search_result = match searcher.search(&subject) {
|
||||
Ok(search_result) => search_result,
|
||||
Err(err) => {
|
||||
err_message!("{}: {}", subject.path().display(), err);
|
||||
return WalkState::Continue;
|
||||
}
|
||||
};
|
||||
if search_result.has_match() {
|
||||
matched.store(true, SeqCst);
|
||||
}
|
||||
if let Some(ref locked_stats) = *stats {
|
||||
let mut stats = locked_stats.lock().unwrap();
|
||||
*stats += search_result.stats().unwrap();
|
||||
}
|
||||
if let Err(err) = bufwtr.print(searcher.printer().get_mut()) {
|
||||
// A broken pipe means graceful termination.
|
||||
if err.kind() == io::ErrorKind::BrokenPipe {
|
||||
return WalkState::Quit;
|
||||
}
|
||||
// Otherwise, we continue on our merry way.
|
||||
err_message!("{}: {}", subject.path().display(), err);
|
||||
}
|
||||
if matched.load(SeqCst) && quit_after_match {
|
||||
WalkState::Quit
|
||||
} else {
|
||||
WalkState::Continue
|
||||
}
|
||||
})
|
||||
});
|
||||
if let Some(err) = searcher_err.take() {
|
||||
return Err(err);
|
||||
}
|
||||
if args.using_default_path() && !searched.load(SeqCst) {
|
||||
eprint_nothing_searched();
|
||||
}
|
||||
if let Some(ref locked_stats) = stats {
|
||||
let elapsed = Instant::now().duration_since(started_at);
|
||||
let stats = locked_stats.lock().unwrap();
|
||||
let mut searcher = args.search_worker(args.stdout())?;
|
||||
// We don't care if we couldn't print this successfully.
|
||||
let _ = searcher.print_stats(elapsed, &stats);
|
||||
}
|
||||
Ok(matched.load(SeqCst))
|
||||
}
|
||||
|
||||
fn eprint_nothing_searched() {
|
||||
err_message!(
|
||||
"No files were searched, which means ripgrep probably \
|
||||
applied a filter you didn't expect.\n\
|
||||
Running with --debug will show why files are being skipped."
|
||||
);
|
||||
}
|
||||
|
||||
/// The top-level entry point for listing files without searching them. This
|
||||
/// recursively steps through the file list (current directory by default) and
|
||||
/// prints each path sequentially using a single thread.
|
||||
fn files(args: &Args) -> Result<bool> {
|
||||
let quit_after_match = args.quit_after_match()?;
|
||||
let subject_builder = args.subject_builder();
|
||||
let mut matched = false;
|
||||
let mut path_printer = args.path_printer(args.stdout())?;
|
||||
for result in args.walker()? {
|
||||
let subject = match subject_builder.build_from_result(result) {
|
||||
Some(subject) => subject,
|
||||
None => continue,
|
||||
};
|
||||
matched = true;
|
||||
if quit_after_match {
|
||||
break;
|
||||
}
|
||||
if let Err(err) = path_printer.write_path(subject.path()) {
|
||||
// A broken pipe means graceful termination.
|
||||
if err.kind() == io::ErrorKind::BrokenPipe {
|
||||
break;
|
||||
}
|
||||
// Otherwise, we have some other error that's preventing us from
|
||||
// writing to stdout, so we should bubble it up.
|
||||
return Err(err.into());
|
||||
}
|
||||
}
|
||||
Ok(matched)
|
||||
}
|
||||
|
||||
/// The top-level entry point for listing files without searching them. This
|
||||
/// recursively steps through the file list (current directory by default) and
|
||||
/// prints each path sequentially using multiple threads.
|
||||
fn files_parallel(args: &Args) -> Result<bool> {
|
||||
use std::sync::atomic::AtomicBool;
|
||||
use std::sync::atomic::Ordering::SeqCst;
|
||||
use std::sync::mpsc;
|
||||
use std::thread;
|
||||
|
||||
let quit_after_match = args.quit_after_match()?;
|
||||
let subject_builder = args.subject_builder();
|
||||
let mut path_printer = args.path_printer(args.stdout())?;
|
||||
let matched = AtomicBool::new(false);
|
||||
let (tx, rx) = mpsc::channel::<Subject>();
|
||||
|
||||
let print_thread = thread::spawn(move || -> io::Result<()> {
|
||||
for subject in rx.iter() {
|
||||
path_printer.write_path(subject.path())?;
|
||||
}
|
||||
Ok(())
|
||||
});
|
||||
args.walker_parallel()?.run(|| {
|
||||
let subject_builder = &subject_builder;
|
||||
let matched = &matched;
|
||||
let tx = tx.clone();
|
||||
|
||||
Box::new(move |result| {
|
||||
let subject = match subject_builder.build_from_result(result) {
|
||||
Some(subject) => subject,
|
||||
None => return WalkState::Continue,
|
||||
};
|
||||
matched.store(true, SeqCst);
|
||||
if quit_after_match {
|
||||
WalkState::Quit
|
||||
} else {
|
||||
match tx.send(subject) {
|
||||
Ok(_) => WalkState::Continue,
|
||||
Err(_) => WalkState::Quit,
|
||||
}
|
||||
}
|
||||
})
|
||||
});
|
||||
drop(tx);
|
||||
if let Err(err) = print_thread.join().unwrap() {
|
||||
// A broken pipe means graceful termination, so fall through.
|
||||
// Otherwise, something bad happened while writing to stdout, so bubble
|
||||
// it up.
|
||||
if err.kind() != io::ErrorKind::BrokenPipe {
|
||||
return Err(err.into());
|
||||
}
|
||||
}
|
||||
Ok(matched.load(SeqCst))
|
||||
}
|
||||
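The pattern here, parallel producers feeding a single consumer that owns stdout, is what keeps printed paths from interleaving. Reduced to its essentials with plain `std` types (paths stand in for ripgrep's `Subject`):

```rust
use std::path::PathBuf;
use std::sync::mpsc;
use std::thread;

fn main() {
    let (tx, rx) = mpsc::channel::<PathBuf>();

    // A single consumer owns stdout and prints whatever the workers send.
    let printer = thread::spawn(move || {
        for path in rx.iter() {
            println!("{}", path.display());
        }
    });

    // Stand-in for the parallel walker: a few producer threads.
    let mut workers = Vec::new();
    for i in 0..4 {
        let tx = tx.clone();
        workers.push(thread::spawn(move || {
            tx.send(PathBuf::from(format!("file-{}.txt", i))).unwrap();
        }));
    }
    for w in workers {
        w.join().unwrap();
    }

    // Dropping the last sender closes the channel and ends the printer loop.
    drop(tx);
    printer.join().unwrap();
}
```

Dropping the last `Sender` is what terminates the `rx.iter()` loop, which is why the function above drops `tx` before joining the print thread.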
|
||||
/// The top-level entry point for --type-list.
|
||||
fn types(args: &Args) -> Result<bool> {
|
||||
let mut count = 0;
|
||||
let mut stdout = args.stdout();
|
||||
for def in args.type_defs()? {
|
||||
count += 1;
|
||||
stdout.write_all(def.name().as_bytes())?;
|
||||
stdout.write_all(b": ")?;
|
||||
|
||||
let mut first = true;
|
||||
for glob in def.globs() {
|
||||
if !first {
|
||||
stdout.write_all(b", ")?;
|
||||
}
|
||||
stdout.write_all(glob.as_bytes())?;
|
||||
first = false;
|
||||
}
|
||||
stdout.write_all(b"\n")?;
|
||||
}
|
||||
Ok(count > 0)
|
||||
}
|
||||
|
||||
/// The top-level entry point for --pcre2-version.
|
||||
fn pcre2_version(args: &Args) -> Result<bool> {
|
||||
#[cfg(feature = "pcre2")]
|
||||
fn imp(args: &Args) -> Result<bool> {
|
||||
use grep::pcre2;
|
||||
|
||||
let mut stdout = args.stdout();
|
||||
|
||||
let (major, minor) = pcre2::version();
|
||||
writeln!(stdout, "PCRE2 {}.{} is available", major, minor)?;
|
||||
|
||||
if cfg!(target_pointer_width = "64") && pcre2::is_jit_available() {
|
||||
writeln!(stdout, "JIT is available")?;
|
||||
}
|
||||
Ok(true)
|
||||
}
|
||||
|
||||
#[cfg(not(feature = "pcre2"))]
|
||||
fn imp(args: &Args) -> Result<bool> {
|
||||
let mut stdout = args.stdout();
|
||||
writeln!(stdout, "PCRE2 is not available in this build of ripgrep.")?;
|
||||
Ok(false)
|
||||
}
|
||||
|
||||
imp(args)
|
||||
}
|
||||
crates/core/messages.rs (new file, 74 lines)
@@ -0,0 +1,74 @@
|
||||
use std::sync::atomic::{AtomicBool, Ordering};
|
||||
|
||||
static MESSAGES: AtomicBool = AtomicBool::new(false);
|
||||
static IGNORE_MESSAGES: AtomicBool = AtomicBool::new(false);
|
||||
static ERRORED: AtomicBool = AtomicBool::new(false);
|
||||
|
||||
/// Emit a non-fatal error message, unless messages were disabled.
|
||||
#[macro_export]
|
||||
macro_rules! message {
|
||||
($($tt:tt)*) => {
|
||||
if crate::messages::messages() {
|
||||
eprintln!($($tt)*);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Like message, but sets ripgrep's "errored" flag, which controls the exit
|
||||
/// status.
|
||||
#[macro_export]
|
||||
macro_rules! err_message {
|
||||
($($tt:tt)*) => {
|
||||
crate::messages::set_errored();
|
||||
message!($($tt)*);
|
||||
}
|
||||
}
|
||||
|
||||
/// Emit a non-fatal ignore-related error message (like a parse error), unless
|
||||
/// ignore-messages were disabled.
|
||||
#[macro_export]
|
||||
macro_rules! ignore_message {
|
||||
($($tt:tt)*) => {
|
||||
if crate::messages::messages() && crate::messages::ignore_messages() {
|
||||
eprintln!($($tt)*);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns true if and only if messages should be shown.
|
||||
pub fn messages() -> bool {
|
||||
MESSAGES.load(Ordering::SeqCst)
|
||||
}
|
||||
|
||||
/// Set whether messages should be shown or not.
|
||||
///
|
||||
/// By default, they are not shown.
|
||||
pub fn set_messages(yes: bool) {
|
||||
MESSAGES.store(yes, Ordering::SeqCst)
|
||||
}
|
||||
|
||||
/// Returns true if and only if "ignore" related messages should be shown.
|
||||
pub fn ignore_messages() -> bool {
|
||||
IGNORE_MESSAGES.load(Ordering::SeqCst)
|
||||
}
|
||||
|
||||
/// Set whether "ignore" related messages should be shown or not.
|
||||
///
|
||||
/// By default, they are not shown.
|
||||
///
|
||||
/// Note that this is overridden if `messages` is disabled. Namely, if
|
||||
/// `messages` is disabled, then "ignore" messages are never shown, regardless
|
||||
/// of this setting.
|
||||
pub fn set_ignore_messages(yes: bool) {
|
||||
IGNORE_MESSAGES.store(yes, Ordering::SeqCst)
|
||||
}
|
||||
|
||||
/// Returns true if and only if ripgrep came across a non-fatal error.
|
||||
pub fn errored() -> bool {
|
||||
ERRORED.load(Ordering::SeqCst)
|
||||
}
|
||||
|
||||
/// Indicate that ripgrep has come across a non-fatal error.
|
||||
pub fn set_errored() {
|
||||
ERRORED.store(true, Ordering::SeqCst);
|
||||
}
|
||||
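These flags are plain process-global atomics, so the intent is that argument parsing calls the setters once up front and everything else just uses the macros. A hedged sketch of a caller follows; it is not a standalone program, since it assumes it lives in this same crate so the `crate::messages` paths and the exported macros resolve, and the flag names mentioned in the comments are only indicative:

```rust
fn report_problems(suppress: bool) {
    // Typically driven by flags such as --no-messages / --no-ignore-messages.
    crate::messages::set_messages(!suppress);
    crate::messages::set_ignore_messages(!suppress);

    // A non-fatal problem is reported but does not abort the run...
    message!("skipping config entry: {}", "unexpected key");
    // ...while err_message! additionally flips the "errored" flag so that
    // main() can choose a non-zero exit status at the end.
    err_message!("{}: permission denied", "/some/unreadable/file");

    if crate::messages::errored() {
        eprintln!("will exit with an error status");
    }
}
```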
crates/core/path_printer.rs (new file, 98 lines)
@@ -0,0 +1,98 @@
|
||||
use std::io;
|
||||
use std::path::Path;
|
||||
|
||||
use grep::printer::{ColorSpecs, PrinterPath};
|
||||
use termcolor::WriteColor;
|
||||
|
||||
/// A configuration for describing how paths should be written.
|
||||
#[derive(Clone, Debug)]
|
||||
struct Config {
|
||||
colors: ColorSpecs,
|
||||
separator: Option<u8>,
|
||||
terminator: u8,
|
||||
}
|
||||
|
||||
impl Default for Config {
|
||||
fn default() -> Config {
|
||||
Config {
|
||||
colors: ColorSpecs::default(),
|
||||
separator: None,
|
||||
terminator: b'\n',
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// A builder for constructing a path printer.
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct PathPrinterBuilder {
|
||||
config: Config,
|
||||
}
|
||||
|
||||
impl PathPrinterBuilder {
|
||||
/// Return a new path printer builder with a default configuration.
|
||||
pub fn new() -> PathPrinterBuilder {
|
||||
PathPrinterBuilder { config: Config::default() }
|
||||
}
|
||||
|
||||
/// Create a new path printer with the current configuration that writes
|
||||
/// paths to the given writer.
|
||||
pub fn build<W: WriteColor>(&self, wtr: W) -> PathPrinter<W> {
|
||||
PathPrinter { config: self.config.clone(), wtr }
|
||||
}
|
||||
|
||||
/// Set the color specification for this printer.
|
||||
///
|
||||
/// Currently, only the `path` component of the given specification is
|
||||
/// used.
|
||||
pub fn color_specs(
|
||||
&mut self,
|
||||
specs: ColorSpecs,
|
||||
) -> &mut PathPrinterBuilder {
|
||||
self.config.colors = specs;
|
||||
self
|
||||
}
|
||||
|
||||
/// A path separator.
|
||||
///
|
||||
/// When provided, the path's default separator will be replaced with
|
||||
/// the given separator.
|
||||
///
|
||||
/// This is not set by default, and the system's default path separator
|
||||
/// will be used.
|
||||
pub fn separator(&mut self, sep: Option<u8>) -> &mut PathPrinterBuilder {
|
||||
self.config.separator = sep;
|
||||
self
|
||||
}
|
||||
|
||||
/// A path terminator.
|
||||
///
|
||||
/// When printing a path, it will be terminated by the given byte.
|
||||
///
|
||||
/// This is set to `\n` by default.
|
||||
pub fn terminator(&mut self, terminator: u8) -> &mut PathPrinterBuilder {
|
||||
self.config.terminator = terminator;
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
/// A printer for emitting paths to a writer, with optional color support.
|
||||
#[derive(Debug)]
|
||||
pub struct PathPrinter<W> {
|
||||
config: Config,
|
||||
wtr: W,
|
||||
}
|
||||
|
||||
impl<W: WriteColor> PathPrinter<W> {
|
||||
/// Write the given path to the underlying writer.
|
||||
pub fn write_path(&mut self, path: &Path) -> io::Result<()> {
|
||||
let ppath = PrinterPath::with_separator(path, self.config.separator);
|
||||
if !self.wtr.supports_color() {
|
||||
self.wtr.write_all(ppath.as_bytes())?;
|
||||
} else {
|
||||
self.wtr.set_color(self.config.colors.path())?;
|
||||
self.wtr.write_all(ppath.as_bytes())?;
|
||||
self.wtr.reset()?;
|
||||
}
|
||||
self.wtr.write_all(&[self.config.terminator])
|
||||
}
|
||||
}
|
||||
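As a small usage sketch, the builder above can target any `termcolor::WriteColor` writer; a `NoColor` wrapper around a locked stdout keeps the example simple (the separator and terminator choices are illustrative, roughly what `--path-separator` and `--null` style output would ask for):

```rust
use std::io;
use std::path::Path;

use termcolor::NoColor;

// Assumes this sketch lives next to the module above so that
// PathPrinterBuilder is in scope.
fn print_one_path() -> io::Result<()> {
    let stdout = io::stdout();
    let mut printer = PathPrinterBuilder::new()
        .separator(Some(b'/')) // rewrite the platform separator to '/'
        .terminator(b'\0') // NUL-terminate instead of newline-terminate
        .build(NoColor::new(stdout.lock()));
    printer.write_path(Path::new("src/main.rs"))
}
```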
crates/core/search.rs (new file, 548 lines)
@@ -0,0 +1,548 @@
|
||||
use std::fs::File;
|
||||
use std::io;
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::process::{Command, Stdio};
|
||||
use std::time::Duration;
|
||||
|
||||
use grep::cli;
|
||||
use grep::matcher::Matcher;
|
||||
#[cfg(feature = "pcre2")]
|
||||
use grep::pcre2::RegexMatcher as PCRE2RegexMatcher;
|
||||
use grep::printer::{Standard, Stats, Summary, JSON};
|
||||
use grep::regex::RegexMatcher as RustRegexMatcher;
|
||||
use grep::searcher::{BinaryDetection, Searcher};
|
||||
use ignore::overrides::Override;
|
||||
use serde_json as json;
|
||||
use serde_json::json;
|
||||
use termcolor::WriteColor;
|
||||
|
||||
use crate::subject::Subject;
|
||||
|
||||
/// The configuration for the search worker. Among a few other things, the
|
||||
/// configuration primarily controls the way we show search results to users
|
||||
/// at a very high level.
|
||||
#[derive(Clone, Debug)]
|
||||
struct Config {
|
||||
json_stats: bool,
|
||||
preprocessor: Option<PathBuf>,
|
||||
preprocessor_globs: Override,
|
||||
search_zip: bool,
|
||||
binary_implicit: BinaryDetection,
|
||||
binary_explicit: BinaryDetection,
|
||||
}
|
||||
|
||||
impl Default for Config {
|
||||
fn default() -> Config {
|
||||
Config {
|
||||
json_stats: false,
|
||||
preprocessor: None,
|
||||
preprocessor_globs: Override::empty(),
|
||||
search_zip: false,
|
||||
binary_implicit: BinaryDetection::none(),
|
||||
binary_explicit: BinaryDetection::none(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// A builder for configuring and constructing a search worker.
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct SearchWorkerBuilder {
|
||||
config: Config,
|
||||
command_builder: cli::CommandReaderBuilder,
|
||||
decomp_builder: cli::DecompressionReaderBuilder,
|
||||
}
|
||||
|
||||
impl Default for SearchWorkerBuilder {
|
||||
fn default() -> SearchWorkerBuilder {
|
||||
SearchWorkerBuilder::new()
|
||||
}
|
||||
}
|
||||
|
||||
impl SearchWorkerBuilder {
|
||||
/// Create a new builder for configuring and constructing a search worker.
|
||||
pub fn new() -> SearchWorkerBuilder {
|
||||
let mut cmd_builder = cli::CommandReaderBuilder::new();
|
||||
cmd_builder.async_stderr(true);
|
||||
|
||||
let mut decomp_builder = cli::DecompressionReaderBuilder::new();
|
||||
decomp_builder.async_stderr(true);
|
||||
|
||||
SearchWorkerBuilder {
|
||||
config: Config::default(),
|
||||
command_builder: cmd_builder,
|
||||
decomp_builder,
|
||||
}
|
||||
}
|
||||
|
||||
/// Create a new search worker using the given searcher, matcher and
|
||||
/// printer.
|
||||
pub fn build<W: WriteColor>(
|
||||
&self,
|
||||
matcher: PatternMatcher,
|
||||
searcher: Searcher,
|
||||
printer: Printer<W>,
|
||||
) -> SearchWorker<W> {
|
||||
let config = self.config.clone();
|
||||
let command_builder = self.command_builder.clone();
|
||||
let decomp_builder = self.decomp_builder.clone();
|
||||
SearchWorker {
|
||||
config,
|
||||
command_builder,
|
||||
decomp_builder,
|
||||
matcher,
|
||||
searcher,
|
||||
printer,
|
||||
}
|
||||
}
|
||||
|
||||
/// Forcefully use JSON to emit statistics, even if the underlying printer
|
||||
/// is not the JSON printer.
|
||||
///
|
||||
/// This is useful for implementing flag combinations like
|
||||
/// `--json --quiet`, which uses the summary printer for implementing
|
||||
/// `--quiet` but still wants to emit summary statistics, which should
|
||||
/// be JSON formatted because of the `--json` flag.
|
||||
pub fn json_stats(&mut self, yes: bool) -> &mut SearchWorkerBuilder {
|
||||
self.config.json_stats = yes;
|
||||
self
|
||||
}
|
||||
|
||||
/// Set the path to a preprocessor command.
|
||||
///
|
||||
/// When this is set, instead of searching files directly, the given
|
||||
/// command will be run with the file path as the first argument, and the
|
||||
/// output of that command will be searched instead.
|
||||
pub fn preprocessor(
|
||||
&mut self,
|
||||
cmd: Option<PathBuf>,
|
||||
) -> crate::Result<&mut SearchWorkerBuilder> {
|
||||
if let Some(ref prog) = cmd {
|
||||
let bin = cli::resolve_binary(prog)?;
|
||||
self.config.preprocessor = Some(bin);
|
||||
} else {
|
||||
self.config.preprocessor = None;
|
||||
}
|
||||
Ok(self)
|
||||
}
|
||||
|
||||
/// Set the globs for determining which files should be run through the
|
||||
/// preprocessor. By default, with no globs and a preprocessor specified,
|
||||
/// every file is run through the preprocessor.
|
||||
pub fn preprocessor_globs(
|
||||
&mut self,
|
||||
globs: Override,
|
||||
) -> &mut SearchWorkerBuilder {
|
||||
self.config.preprocessor_globs = globs;
|
||||
self
|
||||
}
|
||||
|
||||
/// Enable the decompression and searching of common compressed files.
|
||||
///
|
||||
/// When enabled, if a particular file path is recognized as a compressed
|
||||
/// file, then it is decompressed before searching.
|
||||
///
|
||||
/// Note that if a preprocessor command is set, then it overrides this
|
||||
/// setting.
|
||||
pub fn search_zip(&mut self, yes: bool) -> &mut SearchWorkerBuilder {
|
||||
self.config.search_zip = yes;
|
||||
self
|
||||
}
|
||||
|
||||
/// Set the binary detection that should be used when searching files
|
||||
/// found via a recursive directory search.
|
||||
///
|
||||
/// Generally, this binary detection may be `BinaryDetection::quit` if
|
||||
/// we want to skip binary files completely.
|
||||
///
|
||||
/// By default, no binary detection is performed.
|
||||
pub fn binary_detection_implicit(
|
||||
&mut self,
|
||||
detection: BinaryDetection,
|
||||
) -> &mut SearchWorkerBuilder {
|
||||
self.config.binary_implicit = detection;
|
||||
self
|
||||
}
|
||||
|
||||
/// Set the binary detection that should be used when searching files
|
||||
/// explicitly supplied by an end user.
|
||||
///
|
||||
/// Generally, this binary detection should NOT be `BinaryDetection::quit`,
|
||||
/// since we never want to automatically filter files supplied by the end
|
||||
/// user.
|
||||
///
|
||||
/// By default, no binary detection is performed.
|
||||
pub fn binary_detection_explicit(
|
||||
&mut self,
|
||||
detection: BinaryDetection,
|
||||
) -> &mut SearchWorkerBuilder {
|
||||
self.config.binary_explicit = detection;
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
/// The result of executing a search.
|
||||
///
|
||||
/// Generally speaking, the "result" of a search is sent to a printer, which
|
||||
/// writes results to an underlying writer such as stdout or a file. However,
|
||||
/// every search also has some aggregate statistics or meta data that may be
|
||||
/// useful to higher level routines.
|
||||
#[derive(Clone, Debug, Default)]
|
||||
pub struct SearchResult {
|
||||
has_match: bool,
|
||||
stats: Option<Stats>,
|
||||
}
|
||||
|
||||
impl SearchResult {
|
||||
/// Whether the search found a match or not.
|
||||
pub fn has_match(&self) -> bool {
|
||||
self.has_match
|
||||
}
|
||||
|
||||
/// Return aggregate search statistics for a single search, if available.
|
||||
///
|
||||
/// It can be expensive to compute statistics, so these are only present
|
||||
/// if explicitly enabled in the printer provided by the caller.
|
||||
pub fn stats(&self) -> Option<&Stats> {
|
||||
self.stats.as_ref()
|
||||
}
|
||||
}
|
||||
|
||||
/// The pattern matcher used by a search worker.
|
||||
#[derive(Clone, Debug)]
|
||||
pub enum PatternMatcher {
|
||||
RustRegex(RustRegexMatcher),
|
||||
#[cfg(feature = "pcre2")]
|
||||
PCRE2(PCRE2RegexMatcher),
|
||||
}
|
||||
|
||||
/// The printer used by a search worker.
|
||||
///
|
||||
/// The `W` type parameter refers to the type of the underlying writer.
|
||||
#[derive(Debug)]
|
||||
pub enum Printer<W> {
|
||||
/// Use the standard printer, which supports the classic grep-like format.
|
||||
Standard(Standard<W>),
|
||||
/// Use the summary printer, which supports aggregate displays of search
|
||||
/// results.
|
||||
Summary(Summary<W>),
|
||||
/// A JSON printer, which emits results in the JSON Lines format.
|
||||
JSON(JSON<W>),
|
||||
}
|
||||
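Each variant wraps a printer from the `grep-printer` crate, exposed here through the `grep` facade. A minimal sketch of constructing the standard variant over an in-memory buffer follows; ripgrep itself wires these up from CLI flags elsewhere, so the default builder settings here are only illustrative, and `Printer` refers to the enum above:

```rust
use grep::printer::StandardBuilder;
use termcolor::NoColor;

// Build the classic grep-like printer over an in-memory buffer; ripgrep
// points this at stdout or a per-thread buffer instead.
fn default_standard_printer() -> Printer<NoColor<Vec<u8>>> {
    let standard = StandardBuilder::new().build(NoColor::new(Vec::new()));
    Printer::Standard(standard)
}
```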
|
||||
impl<W: WriteColor> Printer<W> {
|
||||
fn print_stats(
|
||||
&mut self,
|
||||
total_duration: Duration,
|
||||
stats: &Stats,
|
||||
) -> io::Result<()> {
|
||||
match *self {
|
||||
Printer::JSON(_) => self.print_stats_json(total_duration, stats),
|
||||
Printer::Standard(_) | Printer::Summary(_) => {
|
||||
self.print_stats_human(total_duration, stats)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn print_stats_human(
|
||||
&mut self,
|
||||
total_duration: Duration,
|
||||
stats: &Stats,
|
||||
) -> io::Result<()> {
|
||||
write!(
|
||||
self.get_mut(),
|
||||
"
|
||||
{matches} matches
|
||||
{lines} matched lines
|
||||
{searches_with_match} files contained matches
|
||||
{searches} files searched
|
||||
{bytes_printed} bytes printed
|
||||
{bytes_searched} bytes searched
|
||||
{search_time:0.6} seconds spent searching
|
||||
{process_time:0.6} seconds
|
||||
",
|
||||
matches = stats.matches(),
|
||||
lines = stats.matched_lines(),
|
||||
searches_with_match = stats.searches_with_match(),
|
||||
searches = stats.searches(),
|
||||
bytes_printed = stats.bytes_printed(),
|
||||
bytes_searched = stats.bytes_searched(),
|
||||
search_time = fractional_seconds(stats.elapsed()),
|
||||
process_time = fractional_seconds(total_duration)
|
||||
)
|
||||
}
|
||||
|
||||
fn print_stats_json(
|
||||
&mut self,
|
||||
total_duration: Duration,
|
||||
stats: &Stats,
|
||||
) -> io::Result<()> {
|
||||
// We specifically match the format laid out by the JSON printer in
|
||||
// the grep-printer crate. We simply "extend" it with the 'summary'
|
||||
// message type.
|
||||
let fractional = fractional_seconds(total_duration);
|
||||
json::to_writer(
|
||||
self.get_mut(),
|
||||
&json!({
|
||||
"type": "summary",
|
||||
"data": {
|
||||
"stats": stats,
|
||||
"elapsed_total": {
|
||||
"secs": total_duration.as_secs(),
|
||||
"nanos": total_duration.subsec_nanos(),
|
||||
"human": format!("{:0.6}s", fractional),
|
||||
},
|
||||
}
|
||||
}),
|
||||
)?;
|
||||
write!(self.get_mut(), "\n")
|
||||
}
|
||||
|
||||
/// Return a mutable reference to the underlying printer's writer.
|
||||
pub fn get_mut(&mut self) -> &mut W {
|
||||
match *self {
|
||||
Printer::Standard(ref mut p) => p.get_mut(),
|
||||
Printer::Summary(ref mut p) => p.get_mut(),
|
||||
Printer::JSON(ref mut p) => p.get_mut(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// A worker for executing searches.
|
||||
///
|
||||
/// It is intended for a single worker to execute many searches, and is
|
||||
/// generally intended to be used from a single thread. When searching using
|
||||
/// multiple threads, it is better to create a new worker for each thread.
|
||||
#[derive(Debug)]
|
||||
pub struct SearchWorker<W> {
|
||||
config: Config,
|
||||
command_builder: cli::CommandReaderBuilder,
|
||||
decomp_builder: cli::DecompressionReaderBuilder,
|
||||
matcher: PatternMatcher,
|
||||
searcher: Searcher,
|
||||
printer: Printer<W>,
|
||||
}
|
||||
|
||||
impl<W: WriteColor> SearchWorker<W> {
|
||||
/// Execute a search over the given subject.
|
||||
pub fn search(&mut self, subject: &Subject) -> io::Result<SearchResult> {
|
||||
let bin = if subject.is_explicit() {
|
||||
self.config.binary_explicit.clone()
|
||||
} else {
|
||||
self.config.binary_implicit.clone()
|
||||
};
|
||||
let path = subject.path();
|
||||
log::trace!("{}: binary detection: {:?}", path.display(), bin);
|
||||
|
||||
self.searcher.set_binary_detection(bin);
|
||||
if subject.is_stdin() {
|
||||
self.search_reader(path, &mut io::stdin().lock())
|
||||
} else if self.should_preprocess(path) {
|
||||
self.search_preprocessor(path)
|
||||
} else if self.should_decompress(path) {
|
||||
self.search_decompress(path)
|
||||
} else {
|
||||
self.search_path(path)
|
||||
}
|
||||
}
|
||||
|
||||
/// Return a mutable reference to the underlying printer.
|
||||
pub fn printer(&mut self) -> &mut Printer<W> {
|
||||
&mut self.printer
|
||||
}
|
||||
|
||||
/// Print the given statistics to the underlying writer in a way that is
|
||||
/// consistent with this searcher's printer's format.
|
||||
///
|
||||
/// While `Stats` contains a duration itself, this only corresponds to the
|
||||
/// time spent searching, whereas `total_duration` should roughly
|
||||
/// approximate the lifespan of the ripgrep process itself.
|
||||
pub fn print_stats(
|
||||
&mut self,
|
||||
total_duration: Duration,
|
||||
stats: &Stats,
|
||||
) -> io::Result<()> {
|
||||
if self.config.json_stats {
|
||||
self.printer().print_stats_json(total_duration, stats)
|
||||
} else {
|
||||
self.printer().print_stats(total_duration, stats)
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns true if and only if the given file path should be
|
||||
/// decompressed before searching.
|
||||
fn should_decompress(&self, path: &Path) -> bool {
|
||||
if !self.config.search_zip {
|
||||
return false;
|
||||
}
|
||||
self.decomp_builder.get_matcher().has_command(path)
|
||||
}
|
||||
|
||||
/// Returns true if and only if the given file path should be run through
|
||||
/// the preprocessor.
|
||||
fn should_preprocess(&self, path: &Path) -> bool {
|
||||
if self.config.preprocessor.is_none() {
|
||||
return false;
|
||||
}
|
||||
if self.config.preprocessor_globs.is_empty() {
|
||||
return true;
|
||||
}
|
||||
!self.config.preprocessor_globs.matched(path, false).is_ignore()
|
||||
}
|
||||
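The double negation in `should_preprocess` is easy to misread: when `--pre-glob` patterns are present, a file is preprocessed only if the override set does not ignore it, that is, only if some glob whitelists it. A small sketch with the `ignore` crate shows the same check in isolation (the glob and file names are made up):

```rust
use std::path::Path;

use ignore::overrides::OverrideBuilder;

fn main() -> Result<(), ignore::Error> {
    // Roughly equivalent to passing `--pre-glob '*.pdf'` on the command line.
    let overrides = OverrideBuilder::new(".").add("*.pdf")?.build()?;

    let run_pre = |path: &Path| {
        // Mirrors should_preprocess: preprocess unless the globs ignore it.
        !overrides.matched(path, false).is_ignore()
    };

    assert!(run_pre(Path::new("report.pdf")));
    assert!(!run_pre(Path::new("notes.txt")));
    Ok(())
}
```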
|
||||
/// Search the given file path by first asking the preprocessor for the
|
||||
/// data to search instead of opening the path directly.
|
||||
fn search_preprocessor(
|
||||
&mut self,
|
||||
path: &Path,
|
||||
) -> io::Result<SearchResult> {
|
||||
let bin = self.config.preprocessor.as_ref().unwrap();
|
||||
let mut cmd = Command::new(bin);
|
||||
cmd.arg(path).stdin(Stdio::from(File::open(path)?));
|
||||
|
||||
let mut rdr = self.command_builder.build(&mut cmd).map_err(|err| {
|
||||
io::Error::new(
|
||||
io::ErrorKind::Other,
|
||||
format!(
|
||||
"preprocessor command could not start: '{:?}': {}",
|
||||
cmd, err,
|
||||
),
|
||||
)
|
||||
})?;
|
||||
let result = self.search_reader(path, &mut rdr).map_err(|err| {
|
||||
io::Error::new(
|
||||
io::ErrorKind::Other,
|
||||
format!("preprocessor command failed: '{:?}': {}", cmd, err),
|
||||
)
|
||||
});
|
||||
let close_result = rdr.close();
|
||||
let search_result = result?;
|
||||
close_result?;
|
||||
Ok(search_result)
|
||||
}
|
||||
|
||||
/// Attempt to decompress the data at the given file path and search the
|
||||
/// result. If the given file path isn't recognized as a compressed file,
|
||||
/// then search it without doing any decompression.
|
||||
fn search_decompress(&mut self, path: &Path) -> io::Result<SearchResult> {
|
||||
let mut rdr = self.decomp_builder.build(path)?;
|
||||
let result = self.search_reader(path, &mut rdr);
|
||||
let close_result = rdr.close();
|
||||
let search_result = result?;
|
||||
close_result?;
|
||||
Ok(search_result)
|
||||
}
|
||||
|
||||
/// Search the contents of the given file path.
|
||||
fn search_path(&mut self, path: &Path) -> io::Result<SearchResult> {
|
||||
use self::PatternMatcher::*;
|
||||
|
||||
let (searcher, printer) = (&mut self.searcher, &mut self.printer);
|
||||
match self.matcher {
|
||||
RustRegex(ref m) => search_path(m, searcher, printer, path),
|
||||
#[cfg(feature = "pcre2")]
|
||||
PCRE2(ref m) => search_path(m, searcher, printer, path),
|
||||
}
|
||||
}
|
||||
|
||||
/// Executes a search on the given reader, which may or may not correspond
|
||||
/// directly to the contents of the given file path. Instead, the reader
|
||||
/// may actually cause something else to be searched (for example, when
|
||||
/// a preprocessor is set or when decompression is enabled). In those
|
||||
/// cases, the file path is used for visual purposes only.
|
||||
///
|
||||
/// Generally speaking, this method should only be used when there is no
|
||||
/// other choice. Searching via `search_path` provides more opportunities
|
||||
/// for optimizations (such as memory maps).
|
||||
fn search_reader<R: io::Read>(
|
||||
&mut self,
|
||||
path: &Path,
|
||||
rdr: &mut R,
|
||||
) -> io::Result<SearchResult> {
|
||||
use self::PatternMatcher::*;
|
||||
|
||||
let (searcher, printer) = (&mut self.searcher, &mut self.printer);
|
||||
match self.matcher {
|
||||
RustRegex(ref m) => search_reader(m, searcher, printer, path, rdr),
|
||||
#[cfg(feature = "pcre2")]
|
||||
PCRE2(ref m) => search_reader(m, searcher, printer, path, rdr),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Search the contents of the given file path using the given matcher,
|
||||
/// searcher and printer.
|
||||
fn search_path<M: Matcher, W: WriteColor>(
|
||||
matcher: M,
|
||||
searcher: &mut Searcher,
|
||||
printer: &mut Printer<W>,
|
||||
path: &Path,
|
||||
) -> io::Result<SearchResult> {
|
||||
match *printer {
|
||||
Printer::Standard(ref mut p) => {
|
||||
let mut sink = p.sink_with_path(&matcher, path);
|
||||
searcher.search_path(&matcher, path, &mut sink)?;
|
||||
Ok(SearchResult {
|
||||
has_match: sink.has_match(),
|
||||
stats: sink.stats().map(|s| s.clone()),
|
||||
})
|
||||
}
|
||||
Printer::Summary(ref mut p) => {
|
||||
let mut sink = p.sink_with_path(&matcher, path);
|
||||
searcher.search_path(&matcher, path, &mut sink)?;
|
||||
Ok(SearchResult {
|
||||
has_match: sink.has_match(),
|
||||
stats: sink.stats().map(|s| s.clone()),
|
||||
})
|
||||
}
|
||||
Printer::JSON(ref mut p) => {
|
||||
let mut sink = p.sink_with_path(&matcher, path);
|
||||
searcher.search_path(&matcher, path, &mut sink)?;
|
||||
Ok(SearchResult {
|
||||
has_match: sink.has_match(),
|
||||
stats: Some(sink.stats().clone()),
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Search the contents of the given reader using the given matcher, searcher
|
||||
/// and printer.
|
||||
fn search_reader<M: Matcher, R: io::Read, W: WriteColor>(
|
||||
matcher: M,
|
||||
searcher: &mut Searcher,
|
||||
printer: &mut Printer<W>,
|
||||
path: &Path,
|
||||
mut rdr: R,
|
||||
) -> io::Result<SearchResult> {
|
||||
match *printer {
|
||||
Printer::Standard(ref mut p) => {
|
||||
let mut sink = p.sink_with_path(&matcher, path);
|
||||
searcher.search_reader(&matcher, &mut rdr, &mut sink)?;
|
||||
Ok(SearchResult {
|
||||
has_match: sink.has_match(),
|
||||
stats: sink.stats().map(|s| s.clone()),
|
||||
})
|
||||
}
|
||||
Printer::Summary(ref mut p) => {
|
||||
let mut sink = p.sink_with_path(&matcher, path);
|
||||
searcher.search_reader(&matcher, &mut rdr, &mut sink)?;
|
||||
Ok(SearchResult {
|
||||
has_match: sink.has_match(),
|
||||
stats: sink.stats().map(|s| s.clone()),
|
||||
})
|
||||
}
|
||||
Printer::JSON(ref mut p) => {
|
||||
let mut sink = p.sink_with_path(&matcher, path);
|
||||
searcher.search_reader(&matcher, &mut rdr, &mut sink)?;
|
||||
Ok(SearchResult {
|
||||
has_match: sink.has_match(),
|
||||
stats: Some(sink.stats().clone()),
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Return the given duration as fractional seconds.
|
||||
fn fractional_seconds(duration: Duration) -> f64 {
|
||||
(duration.as_secs() as f64) + (duration.subsec_nanos() as f64 * 1e-9)
|
||||
}
|
||||
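For instance, one second plus 250 milliseconds becomes `1.25`, which the `{search_time:0.6}` and `"human"` fields above then render with six decimal places. A copy of the helper for a quick check:

```rust
use std::time::Duration;

fn fractional_seconds(duration: Duration) -> f64 {
    (duration.as_secs() as f64) + (duration.subsec_nanos() as f64 * 1e-9)
}

fn main() {
    let secs = fractional_seconds(Duration::new(1, 250_000_000));
    assert!((secs - 1.25).abs() < 1e-9);
    println!("{:0.6}s", secs); // prints "1.250000s"
}
```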
crates/core/subject.rs (new file, 160 lines)
@@ -0,0 +1,160 @@
|
||||
use std::path::Path;
|
||||
|
||||
use ignore::{self, DirEntry};
|
||||
use log;
|
||||
|
||||
/// A configuration for describing how subjects should be built.
|
||||
#[derive(Clone, Debug)]
|
||||
struct Config {
|
||||
strip_dot_prefix: bool,
|
||||
}
|
||||
|
||||
impl Default for Config {
|
||||
fn default() -> Config {
|
||||
Config { strip_dot_prefix: false }
|
||||
}
|
||||
}
|
||||
|
||||
/// A builder for constructing things to search over.
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct SubjectBuilder {
|
||||
config: Config,
|
||||
}
|
||||
|
||||
impl SubjectBuilder {
|
||||
/// Return a new subject builder with a default configuration.
|
||||
pub fn new() -> SubjectBuilder {
|
||||
SubjectBuilder { config: Config::default() }
|
||||
}
|
||||
|
||||
/// Create a new subject from a possibly missing directory entry.
|
||||
///
|
||||
/// If the directory entry isn't present, then the corresponding error is
|
||||
/// logged if messages have been configured. Otherwise, if the subject is
|
||||
/// deemed searchable, then it is returned.
|
||||
pub fn build_from_result(
|
||||
&self,
|
||||
result: Result<DirEntry, ignore::Error>,
|
||||
) -> Option<Subject> {
|
||||
match result {
|
||||
Ok(dent) => self.build(dent),
|
||||
Err(err) => {
|
||||
err_message!("{}", err);
|
||||
None
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Create a new subject using this builder's configuration.
|
||||
///
|
||||
/// If a subject could not be created or should otherwise not be searched,
|
||||
/// then this returns `None` after emitting any relevant log messages.
|
||||
pub fn build(&self, dent: DirEntry) -> Option<Subject> {
|
||||
let subj =
|
||||
Subject { dent, strip_dot_prefix: self.config.strip_dot_prefix };
|
||||
if let Some(ignore_err) = subj.dent.error() {
|
||||
ignore_message!("{}", ignore_err);
|
||||
}
|
||||
// If this entry was explicitly provided by an end user, then we always
|
||||
// want to search it.
|
||||
if subj.is_explicit() {
|
||||
return Some(subj);
|
||||
}
|
||||
// At this point, we only want to search something if it's explicitly a
|
||||
// file. This omits symlinks. (If ripgrep was configured to follow
|
||||
// symlinks, then they have already been followed by the directory
|
||||
// traversal.)
|
||||
if subj.is_file() {
|
||||
return Some(subj);
|
||||
}
|
||||
// We got nothing. Emit a debug message, but only if this isn't a
|
||||
// directory. Otherwise, emitting messages for directories is just
|
||||
// noisy.
|
||||
if !subj.is_dir() {
|
||||
log::debug!(
|
||||
"ignoring {}: failed to pass subject filter: \
|
||||
file type: {:?}, metadata: {:?}",
|
||||
subj.dent.path().display(),
|
||||
subj.dent.file_type(),
|
||||
subj.dent.metadata()
|
||||
);
|
||||
}
|
||||
None
|
||||
}
|
||||
|
||||
/// When enabled, if the subject's file path starts with `./` then it is
|
||||
/// stripped.
|
||||
///
|
||||
/// This is useful when implicitly searching the current working directory.
|
||||
pub fn strip_dot_prefix(&mut self, yes: bool) -> &mut SubjectBuilder {
|
||||
self.config.strip_dot_prefix = yes;
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
/// A subject is a thing we want to search. Generally, a subject is either a
|
||||
/// file or stdin.
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct Subject {
|
||||
dent: DirEntry,
|
||||
strip_dot_prefix: bool,
|
||||
}
|
||||
|
||||
impl Subject {
|
||||
/// Return the file path corresponding to this subject.
|
||||
///
|
||||
/// If this subject corresponds to stdin, then a special `<stdin>` path
|
||||
/// is returned instead.
|
||||
pub fn path(&self) -> &Path {
|
||||
if self.strip_dot_prefix && self.dent.path().starts_with("./") {
|
||||
self.dent.path().strip_prefix("./").unwrap()
|
||||
} else {
|
||||
self.dent.path()
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns true if and only if this entry corresponds to stdin.
|
||||
pub fn is_stdin(&self) -> bool {
|
||||
self.dent.is_stdin()
|
||||
}
|
||||
|
||||
/// Returns true if and only if this entry corresponds to a subject to
|
||||
/// search that was explicitly supplied by an end user.
|
||||
///
|
||||
/// Generally, this corresponds to either stdin or an explicit file path
|
||||
/// argument. e.g., in `rg foo some-file ./some-dir/`, `some-file` is
|
||||
/// an explicit subject, but, e.g., `./some-dir/some-other-file` is not.
|
||||
///
|
||||
/// However, note that ripgrep does not see through shell globbing. e.g.,
|
||||
/// in `rg foo ./some-dir/*`, `./some-dir/some-other-file` will be treated
|
||||
/// as an explicit subject.
|
||||
pub fn is_explicit(&self) -> bool {
|
||||
// stdin is obvious. When an entry has a depth of 0, that means it
|
||||
// was explicitly provided to our directory iterator, which means it
|
||||
// was in turn explicitly provided by the end user. The !is_dir check
|
||||
// means that we want to search files even if they're symlinks, again,
|
||||
// because they were explicitly provided. (And we never want to try
|
||||
// to search a directory.)
|
||||
self.is_stdin() || (self.dent.depth() == 0 && !self.is_dir())
|
||||
}
|
||||
|
||||
/// Returns true if and only if this subject points to a directory after
|
||||
/// following symbolic links.
|
||||
fn is_dir(&self) -> bool {
|
||||
let ft = match self.dent.file_type() {
|
||||
None => return false,
|
||||
Some(ft) => ft,
|
||||
};
|
||||
if ft.is_dir() {
|
||||
return true;
|
||||
}
|
||||
// If this is a symlink, then we want to follow it to determine
|
||||
// whether it's a directory or not.
|
||||
self.dent.path_is_symlink() && self.dent.path().is_dir()
|
||||
}
|
||||
|
||||
/// Returns true if and only if this subject points to a file.
|
||||
fn is_file(&self) -> bool {
|
||||
self.dent.file_type().map_or(false, |ft| ft.is_file())
|
||||
}
|
||||
}
|
||||
@@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "globset"
|
||||
version = "0.3.0" #:version
|
||||
version = "0.4.10" #:version
|
||||
authors = ["Andrew Gallant <jamslam@gmail.com>"]
|
||||
description = """
|
||||
Cross platform single glob and glob set matching. Glob set matching is the
|
||||
@@ -8,25 +8,31 @@ process of matching one or more glob patterns against a single candidate path
|
||||
simultaneously, and returning all of the globs that matched.
|
||||
"""
|
||||
documentation = "https://docs.rs/globset"
|
||||
homepage = "https://github.com/BurntSushi/ripgrep/tree/master/globset"
|
||||
repository = "https://github.com/BurntSushi/ripgrep/tree/master/globset"
|
||||
homepage = "https://github.com/BurntSushi/ripgrep/tree/master/crates/globset"
|
||||
repository = "https://github.com/BurntSushi/ripgrep/tree/master/crates/globset"
|
||||
readme = "README.md"
|
||||
keywords = ["regex", "glob", "multiple", "set", "pattern"]
|
||||
license = "Unlicense/MIT"
|
||||
license = "Unlicense OR MIT"
|
||||
edition = "2018"
|
||||
|
||||
[lib]
|
||||
name = "globset"
|
||||
bench = false
|
||||
|
||||
[dependencies]
|
||||
aho-corasick = "0.6.0"
|
||||
fnv = "1.0"
|
||||
log = "0.4"
|
||||
memchr = "2"
|
||||
regex = "0.2.1"
|
||||
aho-corasick = "0.7.3"
|
||||
bstr = { version = "1.1.0", default-features = false, features = ["std"] }
|
||||
fnv = "1.0.6"
|
||||
log = { version = "0.4.5", optional = true }
|
||||
regex = { version = "1.1.5", default-features = false, features = ["perf", "std"] }
|
||||
serde = { version = "1.0.104", optional = true }
|
||||
|
||||
[dev-dependencies]
|
||||
glob = "0.2"
|
||||
glob = "0.3.0"
|
||||
lazy_static = "1"
|
||||
serde_json = "1.0.45"
|
||||
|
||||
[features]
|
||||
simd-accel = ["regex/simd-accel"]
|
||||
default = ["log"]
|
||||
simd-accel = []
|
||||
serde1 = ["serde"]
|
||||
@@ -4,11 +4,10 @@ Cross platform single glob and glob set matching. Glob set matching is the
|
||||
process of matching one or more glob patterns against a single candidate path
|
||||
simultaneously, and returning all of the globs that matched.
|
||||
|
||||
[](https://travis-ci.org/BurntSushi/ripgrep)
|
||||
[](https://ci.appveyor.com/project/BurntSushi/ripgrep)
|
||||
[](https://github.com/BurntSushi/ripgrep/actions)
|
||||
[](https://crates.io/crates/globset)
|
||||
|
||||
Dual-licensed under MIT or the [UNLICENSE](http://unlicense.org).
|
||||
Dual-licensed under MIT or the [UNLICENSE](https://unlicense.org/).
|
||||
|
||||
### Documentation
|
||||
|
||||
@@ -20,14 +19,12 @@ Add this to your `Cargo.toml`:
|
||||
|
||||
```toml
|
||||
[dependencies]
|
||||
globset = "0.2"
|
||||
globset = "0.4"
|
||||
```
|
||||
|
||||
and this to your crate root:
|
||||
### Features
|
||||
|
||||
```rust
|
||||
extern crate globset;
|
||||
```
|
||||
* `serde1`: Enables implementing Serde traits on the `Glob` type.
|
||||
|
||||
### Example: one glob
|
||||
|
||||
@@ -81,12 +78,12 @@ assert_eq!(set.matches("src/bar/baz/foo.rs"), vec![0, 2]);
|
||||
|
||||
This crate implements globs by converting them to regular expressions, and
|
||||
executing them with the
|
||||
[`regex`](https://github.com/rust-lang-nursery/regex)
|
||||
[`regex`](https://github.com/rust-lang/regex)
|
||||
crate.
|
||||
|
||||
For single glob matching, performance of this crate should be roughly on par
|
||||
with the performance of the
|
||||
[`glob`](https://github.com/rust-lang-nursery/glob)
|
||||
[`glob`](https://github.com/rust-lang/glob)
|
||||
crate. (`*_regex` correspond to benchmarks for this library while `*_glob`
|
||||
correspond to benchmarks for the `glob` library.)
|
||||
Optimizations in the `regex` crate may propel this library past `glob`,
|
||||
@@ -111,7 +108,7 @@ test many_short_glob ... bench: 1,063 ns/iter (+/- 47)
|
||||
test many_short_regex_set ... bench: 186 ns/iter (+/- 11)
|
||||
```
|
||||
|
||||
### Comparison with the [`glob`](https://github.com/rust-lang-nursery/glob) crate
|
||||
### Comparison with the [`glob`](https://github.com/rust-lang/glob) crate
|
||||
|
||||
* Supports alternate "or" globs, e.g., `*.{foo,bar}`.
|
||||
* Can match non-UTF-8 file paths correctly.
|
||||
@@ -4,16 +4,8 @@ tool itself, see the benchsuite directory.
|
||||
*/
|
||||
#![feature(test)]
|
||||
|
||||
extern crate glob;
|
||||
extern crate globset;
|
||||
#[macro_use]
|
||||
extern crate lazy_static;
|
||||
extern crate regex;
|
||||
extern crate test;
|
||||
|
||||
use std::ffi::OsStr;
|
||||
use std::path::Path;
|
||||
|
||||
use globset::{Candidate, Glob, GlobMatcher, GlobSet, GlobSetBuilder};
|
||||
|
||||
const EXT: &'static str = "some/a/bigger/path/to/the/crazy/needle.txt";
|
||||
@@ -2,13 +2,13 @@ use std::fmt;
|
||||
use std::hash;
|
||||
use std::iter;
|
||||
use std::ops::{Deref, DerefMut};
|
||||
use std::path::{Path, is_separator};
|
||||
use std::path::{is_separator, Path};
|
||||
use std::str;
|
||||
|
||||
use regex;
|
||||
use regex::bytes::Regex;
|
||||
|
||||
use {Candidate, Error, ErrorKind, new_regex};
|
||||
use crate::{new_regex, Candidate, Error, ErrorKind};
|
||||
|
||||
/// Describes a matching strategy for a particular pattern.
|
||||
///
|
||||
@@ -98,11 +98,19 @@ impl hash::Hash for Glob {
|
||||
}
|
||||
|
||||
impl fmt::Display for Glob {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
self.glob.fmt(f)
|
||||
}
|
||||
}
|
||||
|
||||
impl str::FromStr for Glob {
|
||||
type Err = Error;
|
||||
|
||||
fn from_str(glob: &str) -> Result<Self, Self::Err> {
|
||||
Self::new(glob)
|
||||
}
|
||||
}
|
||||
|
||||
/// A matcher for a single pattern.
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct GlobMatcher {
|
||||
@@ -119,9 +127,14 @@ impl GlobMatcher {
|
||||
}
|
||||
|
||||
/// Tests whether the given path matches this pattern or not.
|
||||
pub fn is_match_candidate(&self, path: &Candidate) -> bool {
|
||||
pub fn is_match_candidate(&self, path: &Candidate<'_>) -> bool {
|
||||
self.re.is_match(&path.path)
|
||||
}
|
||||
|
||||
/// Returns the `Glob` used to compile this matcher.
|
||||
pub fn glob(&self) -> &Glob {
|
||||
&self.pat
|
||||
}
|
||||
}
|
||||
|
||||
/// A strategic matcher for a single pattern.
|
||||
@@ -130,8 +143,6 @@ impl GlobMatcher {
|
||||
struct GlobStrategic {
|
||||
/// The match strategy to use.
|
||||
strategy: MatchStrategy,
|
||||
/// The underlying pattern.
|
||||
pat: Glob,
|
||||
/// The pattern, as a compiled regex.
|
||||
re: Regex,
|
||||
}
|
||||
@@ -144,7 +155,7 @@ impl GlobStrategic {
|
||||
}
|
||||
|
||||
/// Tests whether the given path matches this pattern or not.
|
||||
fn is_match_candidate(&self, candidate: &Candidate) -> bool {
|
||||
fn is_match_candidate(&self, candidate: &Candidate<'_>) -> bool {
|
||||
let byte_path = &*candidate.path;
|
||||
|
||||
match self.strategy {
|
||||
@@ -187,13 +198,26 @@ pub struct GlobBuilder<'a> {
|
||||
opts: GlobOptions,
|
||||
}
|
||||
|
||||
#[derive(Clone, Copy, Debug, Default, Eq, Hash, PartialEq)]
|
||||
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
|
||||
struct GlobOptions {
|
||||
/// Whether to match case insensitively.
|
||||
case_insensitive: bool,
|
||||
/// Whether to require a literal separator to match a separator in a file
|
||||
/// path. e.g., when enabled, `*` won't match `/`.
|
||||
literal_separator: bool,
|
||||
/// Whether or not to use `\` to escape special characters.
|
||||
/// e.g., when enabled, `\*` will match a literal `*`.
|
||||
backslash_escape: bool,
|
||||
}
|
||||
|
||||
impl GlobOptions {
|
||||
fn default() -> GlobOptions {
|
||||
GlobOptions {
|
||||
case_insensitive: false,
|
||||
literal_separator: false,
|
||||
backslash_escape: !is_separator('\\'),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, Default, Eq, PartialEq)]
|
||||
@@ -201,11 +225,15 @@ struct Tokens(Vec<Token>);
|
||||
|
||||
impl Deref for Tokens {
|
||||
type Target = Vec<Token>;
|
||||
fn deref(&self) -> &Vec<Token> { &self.0 }
|
||||
fn deref(&self) -> &Vec<Token> {
|
||||
&self.0
|
||||
}
|
||||
}
|
||||
|
||||
impl DerefMut for Tokens {
|
||||
fn deref_mut(&mut self) -> &mut Vec<Token> { &mut self.0 }
|
||||
fn deref_mut(&mut self) -> &mut Vec<Token> {
|
||||
&mut self.0
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, Eq, PartialEq)]
|
||||
@@ -216,10 +244,7 @@ enum Token {
|
||||
RecursivePrefix,
|
||||
RecursiveSuffix,
|
||||
RecursiveZeroOrMore,
|
||||
Class {
|
||||
negated: bool,
|
||||
ranges: Vec<(char, char)>,
|
||||
},
|
||||
Class { negated: bool, ranges: Vec<(char, char)> },
|
||||
Alternates(Vec<Tokens>),
|
||||
}
|
||||
|
||||
@@ -231,12 +256,9 @@ impl Glob {
|
||||
|
||||
/// Returns a matcher for this pattern.
|
||||
pub fn compile_matcher(&self) -> GlobMatcher {
|
||||
let re = new_regex(&self.re)
|
||||
.expect("regex compilation shouldn't fail");
|
||||
GlobMatcher {
|
||||
pat: self.clone(),
|
||||
re: re,
|
||||
}
|
||||
let re =
|
||||
new_regex(&self.re).expect("regex compilation shouldn't fail");
|
||||
GlobMatcher { pat: self.clone(), re: re }
|
||||
}
|
||||
|
||||
/// Returns a strategic matcher.
|
||||
@@ -247,13 +269,9 @@ impl Glob {
|
||||
#[cfg(test)]
|
||||
fn compile_strategic_matcher(&self) -> GlobStrategic {
|
||||
let strategy = MatchStrategy::new(self);
|
||||
let re = new_regex(&self.re)
|
||||
.expect("regex compilation shouldn't fail");
|
||||
GlobStrategic {
|
||||
strategy: strategy,
|
||||
pat: self.clone(),
|
||||
re: re,
|
||||
}
|
||||
let re =
|
||||
new_regex(&self.re).expect("regex compilation shouldn't fail");
|
||||
GlobStrategic { strategy: strategy, re: re }
|
||||
}
|
||||
|
||||
/// Returns the original glob pattern used to build this pattern.
|
||||
@@ -262,6 +280,19 @@ impl Glob {
|
||||
}
|
||||
|
||||
/// Returns the regular expression string for this glob.
|
||||
///
|
||||
/// Note that regular expressions for globs are intended to be matched on
|
||||
/// arbitrary bytes (`&[u8]`) instead of Unicode strings (`&str`). In
|
||||
/// particular, globs are frequently used on file paths, where there is no
|
||||
/// general guarantee that file paths are themselves valid UTF-8. As a
|
||||
/// result, callers will need to ensure that they are using a regex API
|
||||
/// that can match on arbitrary bytes. For example, the
|
||||
/// [`regex`](https://crates.io/crates/regex)
|
||||
/// crate's
|
||||
/// [`Regex`](https://docs.rs/regex/*/regex/struct.Regex.html)
|
||||
/// API is not suitable for this since it matches on `&str`, but its
|
||||
/// [`bytes::Regex`](https://docs.rs/regex/*/regex/bytes/struct.Regex.html)
|
||||
/// API is suitable for this.
|
||||
pub fn regex(&self) -> &str {
|
||||
&self.re
|
||||
}
|
||||
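Since the produced pattern is meant for byte-oriented matching, a caller who wants the raw regex rather than a `GlobMatcher` would pair it with `regex::bytes::Regex`. A short sketch (the glob and paths are illustrative; note that with default options `*` may also match `/`):

```rust
use globset::Glob;
use regex::bytes::Regex;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let glob = Glob::new("*.rs")?;
    // The glob's regex form starts with (?-u) and matches on bytes, so
    // non-UTF-8 paths can be tested as raw byte strings.
    let re = Regex::new(glob.regex())?;
    assert!(re.is_match(b"src/main.rs")); // default `*` crosses `/`
    assert!(!re.is_match(b"src/main.c"));
    Ok(())
}
```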
@@ -334,7 +365,7 @@ impl Glob {
|
||||
}
|
||||
}
|
||||
|
||||
/// This is like `ext`, but returns an extension even if it isn't sufficent
|
||||
/// This is like `ext`, but returns an extension even if it isn't sufficient
|
||||
/// to imply a match. Namely, if an extension is returned, then it is
|
||||
/// necessary but not sufficient for a match.
|
||||
fn required_ext(&self) -> Option<String> {
|
||||
@@ -370,7 +401,7 @@ impl Glob {
|
||||
if self.opts.case_insensitive {
|
||||
return None;
|
||||
}
|
||||
let end = match self.tokens.last() {
|
||||
let (end, need_sep) = match self.tokens.last() {
|
||||
Some(&Token::ZeroOrMore) => {
|
||||
if self.opts.literal_separator {
|
||||
// If a trailing `*` can't match a `/`, then we can't
|
||||
@@ -381,9 +412,10 @@ impl Glob {
|
||||
// literal prefix.
|
||||
return None;
|
||||
}
|
||||
self.tokens.len() - 1
|
||||
(self.tokens.len() - 1, false)
|
||||
}
|
||||
_ => self.tokens.len(),
|
||||
Some(&Token::RecursiveSuffix) => (self.tokens.len() - 1, true),
|
||||
_ => (self.tokens.len(), false),
|
||||
};
|
||||
let mut lit = String::new();
|
||||
for t in &self.tokens[0..end] {
|
||||
@@ -392,6 +424,9 @@ impl Glob {
|
||||
_ => return None,
|
||||
}
|
||||
}
|
||||
if need_sep {
|
||||
lit.push('/');
|
||||
}
|
||||
if lit.is_empty() {
|
||||
None
|
||||
} else {
|
||||
@@ -498,7 +533,7 @@ impl Glob {
|
||||
| Token::RecursiveZeroOrMore => {
|
||||
return None;
|
||||
}
|
||||
Token::Class{..} | Token::Alternates(..) => {
|
||||
Token::Class { .. } | Token::Alternates(..) => {
|
||||
// We *could* be a little smarter here, but either one
|
||||
// of these is going to prevent our literal optimizations
|
||||
// anyway, so give up.
|
||||
@@ -535,10 +570,7 @@ impl<'a> GlobBuilder<'a> {
|
||||
///
|
||||
/// The pattern is not compiled until `build` is called.
|
||||
pub fn new(glob: &'a str) -> GlobBuilder<'a> {
|
||||
GlobBuilder {
|
||||
glob: glob,
|
||||
opts: GlobOptions::default(),
|
||||
}
|
||||
GlobBuilder { glob: glob, opts: GlobOptions::default() }
|
||||
}
|
||||
|
||||
/// Parses and builds the pattern.
|
||||
@@ -549,6 +581,7 @@ impl<'a> GlobBuilder<'a> {
|
||||
chars: self.glob.chars().peekable(),
|
||||
prev: None,
|
||||
cur: None,
|
||||
opts: &self.opts,
|
||||
};
|
||||
p.parse()?;
|
||||
if p.stack.is_empty() {
|
||||
@@ -581,10 +614,25 @@ impl<'a> GlobBuilder<'a> {
|
||||
}
|
||||
|
||||
/// Toggle whether a literal `/` is required to match a path separator.
|
||||
///
|
||||
/// By default this is false: `*` and `?` will match `/`.
|
||||
pub fn literal_separator(&mut self, yes: bool) -> &mut GlobBuilder<'a> {
|
||||
self.opts.literal_separator = yes;
|
||||
self
|
||||
}
|
||||
|
||||
/// When enabled, a backslash (`\`) may be used to escape
|
||||
/// special characters in a glob pattern. Additionally, this will
|
||||
/// prevent `\` from being interpreted as a path separator on all
|
||||
/// platforms.
|
||||
///
|
||||
/// This is enabled by default on platforms where `\` is not a
|
||||
/// path separator and disabled by default on platforms where `\`
|
||||
/// is a path separator.
|
||||
pub fn backslash_escape(&mut self, yes: bool) -> &mut GlobBuilder<'a> {
|
||||
self.opts.backslash_escape = yes;
|
||||
self
|
||||
}
|
||||
}
|
||||
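As a concrete illustration of the escape toggle, forcing `backslash_escape(true)` makes `\*` match a literal asterisk regardless of platform (the pattern and paths are made up for the example):

```rust
use globset::GlobBuilder;

fn main() -> Result<(), globset::Error> {
    let matcher = GlobBuilder::new(r"foo\*bar")
        .backslash_escape(true) // `\` escapes, and never acts as a separator
        .build()?
        .compile_matcher();

    assert!(matcher.is_match("foo*bar")); // the escaped `*` is literal
    assert!(!matcher.is_match("fooXbar")); // so it no longer matches any char
    Ok(())
}
```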
|
||||
impl Tokens {
|
||||
@@ -639,7 +687,7 @@ impl Tokens {
|
||||
re.push_str("(?:/?|.*/)");
|
||||
}
|
||||
Token::RecursiveSuffix => {
|
||||
re.push_str("(?:/?|/.*)");
|
||||
re.push_str("/.*");
|
||||
}
|
||||
Token::RecursiveZeroOrMore => {
|
||||
re.push_str("(?:/|/.*/)");
|
||||
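The hunk above changes how the recursive tokens translate into regex fragments (for example, `RecursiveSuffix` now emits `/.*`). To inspect the end result for a given pattern, it is enough to print the compiled form; the exact strings depend on the globset version, so none are asserted here:

```rust
use globset::Glob;

fn main() -> Result<(), globset::Error> {
    for pattern in &["**", "/**", "a/**/b", "*.rs"] {
        // Print the byte-oriented regex each glob compiles to.
        println!("{:<8} => {}", pattern, Glob::new(pattern)?.regex());
    }
    Ok(())
}
```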
@@ -710,6 +758,7 @@ struct Parser<'a> {
|
||||
chars: iter::Peekable<str::Chars<'a>>,
|
||||
prev: Option<char>,
|
||||
cur: Option<char>,
|
||||
opts: &'a GlobOptions,
|
||||
}
|
||||
|
||||
impl<'a> Parser<'a> {
|
||||
@@ -726,14 +775,8 @@ impl<'a> Parser<'a> {
|
||||
'{' => self.push_alternate()?,
|
||||
'}' => self.pop_alternate()?,
|
||||
',' => self.parse_comma()?,
|
||||
c => {
|
||||
if is_separator(c) {
|
||||
// Normalize all patterns to use / as a separator.
|
||||
self.push_token(Token::Literal('/'))?
|
||||
} else {
|
||||
self.push_token(Token::Literal(c))?
|
||||
}
|
||||
}
|
||||
'\\' => self.parse_backslash()?,
|
||||
c => self.push_token(Token::Literal(c))?,
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
@@ -786,42 +829,79 @@ impl<'a> Parser<'a> {
|
||||
}
|
||||
}
|
||||
|
||||
fn parse_backslash(&mut self) -> Result<(), Error> {
|
||||
if self.opts.backslash_escape {
|
||||
match self.bump() {
|
||||
None => Err(self.error(ErrorKind::DanglingEscape)),
|
||||
Some(c) => self.push_token(Token::Literal(c)),
|
||||
}
|
||||
} else if is_separator('\\') {
|
||||
// Normalize all patterns to use / as a separator.
|
||||
self.push_token(Token::Literal('/'))
|
||||
} else {
|
||||
self.push_token(Token::Literal('\\'))
|
||||
}
|
||||
}
|
||||
|
||||
fn parse_star(&mut self) -> Result<(), Error> {
|
||||
let prev = self.prev;
|
||||
if self.chars.peek() != Some(&'*') {
|
||||
if self.peek() != Some('*') {
|
||||
self.push_token(Token::ZeroOrMore)?;
|
||||
return Ok(());
|
||||
}
|
||||
assert!(self.bump() == Some('*'));
|
||||
if !self.have_tokens()? {
|
||||
if !self.peek().map_or(true, is_separator) {
|
||||
self.push_token(Token::ZeroOrMore)?;
|
||||
self.push_token(Token::ZeroOrMore)?;
|
||||
} else {
|
||||
self.push_token(Token::RecursivePrefix)?;
|
||||
let next = self.bump();
|
||||
if !next.map(is_separator).unwrap_or(true) {
|
||||
return Err(self.error(ErrorKind::InvalidRecursive));
|
||||
assert!(self.bump().map_or(true, is_separator));
|
||||
}
|
||||
return Ok(());
|
||||
}
|
||||
self.pop_token()?;
|
||||
|
||||
if !prev.map(is_separator).unwrap_or(false) {
|
||||
if self.stack.len() <= 1
|
||||
|| (prev != Some(',') && prev != Some('{')) {
|
||||
return Err(self.error(ErrorKind::InvalidRecursive));
|
||||
|| (prev != Some(',') && prev != Some('{'))
|
||||
{
|
||||
self.push_token(Token::ZeroOrMore)?;
|
||||
self.push_token(Token::ZeroOrMore)?;
|
||||
return Ok(());
|
||||
}
|
||||
}
|
||||
match self.chars.peek() {
|
||||
let is_suffix = match self.peek() {
|
||||
None => {
|
||||
assert!(self.bump().is_none());
|
||||
self.push_token(Token::RecursiveSuffix)
|
||||
true
|
||||
}
|
||||
Some(&',') | Some(&'}') if self.stack.len() >= 2 => {
|
||||
self.push_token(Token::RecursiveSuffix)
|
||||
}
|
||||
Some(&c) if is_separator(c) => {
|
||||
Some(',') | Some('}') if self.stack.len() >= 2 => true,
|
||||
Some(c) if is_separator(c) => {
|
||||
assert!(self.bump().map(is_separator).unwrap_or(false));
|
||||
self.push_token(Token::RecursiveZeroOrMore)
|
||||
false
|
||||
}
|
||||
_ => Err(self.error(ErrorKind::InvalidRecursive)),
|
||||
_ => {
|
||||
self.push_token(Token::ZeroOrMore)?;
|
||||
self.push_token(Token::ZeroOrMore)?;
|
||||
return Ok(());
|
||||
}
|
||||
};
|
||||
match self.pop_token()? {
|
||||
Token::RecursivePrefix => {
|
||||
self.push_token(Token::RecursivePrefix)?;
|
||||
}
|
||||
Token::RecursiveSuffix => {
|
||||
self.push_token(Token::RecursiveSuffix)?;
|
||||
}
|
||||
_ => {
|
||||
if is_suffix {
|
||||
self.push_token(Token::RecursiveSuffix)?;
|
||||
} else {
|
||||
self.push_token(Token::RecursiveZeroOrMore)?;
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn parse_class(&mut self) -> Result<(), Error> {
|
||||
@@ -885,7 +965,10 @@ impl<'a> Parser<'a> {
|
||||
// invariant: in_range is only set when there is
|
||||
// already at least one character seen.
|
||||
add_to_last_range(
|
||||
&self.glob, ranges.last_mut().unwrap(), c)?;
|
||||
&self.glob,
|
||||
ranges.last_mut().unwrap(),
|
||||
c,
|
||||
)?;
|
||||
} else {
|
||||
ranges.push((c, c));
|
||||
}
|
||||
@@ -899,10 +982,7 @@ impl<'a> Parser<'a> {
|
||||
// it as a literal.
|
||||
ranges.push(('-', '-'));
|
||||
}
|
||||
self.push_token(Token::Class {
|
||||
negated: negated,
|
||||
ranges: ranges,
|
||||
})
|
||||
self.push_token(Token::Class { negated: negated, ranges: ranges })
|
||||
}
|
||||
|
||||
fn bump(&mut self) -> Option<char> {
|
||||
@@ -910,6 +990,10 @@ impl<'a> Parser<'a> {
|
||||
self.cur = self.chars.next();
|
||||
self.cur
|
||||
}
|
||||
|
||||
fn peek(&mut self) -> Option<char> {
|
||||
self.chars.peek().map(|&ch| ch)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
@@ -927,14 +1011,15 @@ fn ends_with(needle: &[u8], haystack: &[u8]) -> bool {
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use {GlobSetBuilder, ErrorKind};
|
||||
use super::{Glob, GlobBuilder, Token};
|
||||
use super::Token::*;
|
||||
use super::{Glob, GlobBuilder, Token};
|
||||
use crate::{ErrorKind, GlobSetBuilder};
|
||||
|
||||
#[derive(Clone, Copy, Debug, Default)]
|
||||
struct Options {
|
||||
casei: bool,
|
||||
litsep: bool,
|
||||
casei: Option<bool>,
|
||||
litsep: Option<bool>,
|
||||
bsesc: Option<bool>,
|
||||
}
|
||||
|
||||
macro_rules! syntax {
|
||||
@@ -944,7 +1029,7 @@ mod tests {
|
||||
let pat = Glob::new($pat).unwrap();
|
||||
assert_eq!($tokens, pat.tokens.0);
|
||||
}
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
macro_rules! syntaxerr {
|
||||
@@ -954,7 +1039,7 @@ mod tests {
|
||||
let err = Glob::new($pat).unwrap_err();
|
||||
assert_eq!(&$err, err.kind());
|
||||
}
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
macro_rules! toregex {
|
||||
@@ -964,11 +1049,17 @@ mod tests {
|
||||
($name:ident, $pat:expr, $re:expr, $options:expr) => {
|
||||
#[test]
|
||||
fn $name() {
|
||||
let pat = GlobBuilder::new($pat)
|
||||
.case_insensitive($options.casei)
|
||||
.literal_separator($options.litsep)
|
||||
.build()
|
||||
.unwrap();
|
||||
let mut builder = GlobBuilder::new($pat);
|
||||
if let Some(casei) = $options.casei {
|
||||
builder.case_insensitive(casei);
|
||||
}
|
||||
if let Some(litsep) = $options.litsep {
|
||||
builder.literal_separator(litsep);
|
||||
}
|
||||
if let Some(bsesc) = $options.bsesc {
|
||||
builder.backslash_escape(bsesc);
|
||||
}
|
||||
let pat = builder.build().unwrap();
|
||||
assert_eq!(format!("(?-u){}", $re), pat.regex());
|
||||
}
|
||||
};
|
||||
@@ -981,11 +1072,17 @@ mod tests {
|
||||
($name:ident, $pat:expr, $path:expr, $options:expr) => {
|
||||
#[test]
|
||||
fn $name() {
|
||||
let pat = GlobBuilder::new($pat)
|
||||
.case_insensitive($options.casei)
|
||||
.literal_separator($options.litsep)
|
||||
.build()
|
||||
.unwrap();
|
||||
let mut builder = GlobBuilder::new($pat);
|
||||
if let Some(casei) = $options.casei {
|
||||
builder.case_insensitive(casei);
|
||||
}
|
||||
if let Some(litsep) = $options.litsep {
|
||||
builder.literal_separator(litsep);
|
||||
}
|
||||
if let Some(bsesc) = $options.bsesc {
|
||||
builder.backslash_escape(bsesc);
|
||||
}
|
||||
let pat = builder.build().unwrap();
|
||||
let matcher = pat.compile_matcher();
|
||||
let strategic = pat.compile_strategic_matcher();
|
||||
let set = GlobSetBuilder::new().add(pat).build().unwrap();
|
||||
@@ -1003,11 +1100,17 @@ mod tests {
|
||||
($name:ident, $pat:expr, $path:expr, $options:expr) => {
|
||||
#[test]
|
||||
fn $name() {
|
||||
let pat = GlobBuilder::new($pat)
|
||||
.case_insensitive($options.casei)
|
||||
.literal_separator($options.litsep)
|
||||
.build()
|
||||
.unwrap();
|
||||
let mut builder = GlobBuilder::new($pat);
|
||||
if let Some(casei) = $options.casei {
|
||||
builder.case_insensitive(casei);
|
||||
}
|
||||
if let Some(litsep) = $options.litsep {
|
||||
builder.literal_separator(litsep);
|
||||
}
|
||||
if let Some(bsesc) = $options.bsesc {
|
||||
builder.backslash_escape(bsesc);
|
||||
}
|
||||
let pat = builder.build().unwrap();
|
||||
let matcher = pat.compile_matcher();
|
||||
let strategic = pat.compile_strategic_matcher();
|
||||
let set = GlobSetBuilder::new().add(pat).build().unwrap();
|
||||
@@ -1018,7 +1121,9 @@ mod tests {
|
||||
};
|
||||
}
|
||||
|
||||
fn s(string: &str) -> String { string.to_string() }
|
||||
fn s(string: &str) -> String {
|
||||
string.to_string()
|
||||
}
|
||||
|
||||
fn class(s: char, e: char) -> Token {
|
||||
Class { negated: false, ranges: vec![(s, e)] }
|
||||
@@ -1042,16 +1147,20 @@ mod tests {
|
||||
syntax!(any2, "a?b", vec![Literal('a'), Any, Literal('b')]);
|
||||
syntax!(seq1, "*", vec![ZeroOrMore]);
|
||||
syntax!(seq2, "a*b", vec![Literal('a'), ZeroOrMore, Literal('b')]);
|
||||
syntax!(seq3, "*a*b*", vec![
|
||||
ZeroOrMore, Literal('a'), ZeroOrMore, Literal('b'), ZeroOrMore,
|
||||
]);
|
||||
syntax!(
|
||||
seq3,
|
||||
"*a*b*",
|
||||
vec![ZeroOrMore, Literal('a'), ZeroOrMore, Literal('b'), ZeroOrMore,]
|
||||
);
|
||||
syntax!(rseq1, "**", vec![RecursivePrefix]);
|
||||
syntax!(rseq2, "**/", vec![RecursivePrefix]);
|
||||
syntax!(rseq3, "/**", vec![RecursiveSuffix]);
|
||||
syntax!(rseq4, "/**/", vec![RecursiveZeroOrMore]);
|
||||
syntax!(rseq5, "a/**/b", vec![
|
||||
Literal('a'), RecursiveZeroOrMore, Literal('b'),
|
||||
]);
|
||||
syntax!(
|
||||
rseq5,
|
||||
"a/**/b",
|
||||
vec![Literal('a'), RecursiveZeroOrMore, Literal('b'),]
|
||||
);
|
||||
syntax!(cls1, "[a]", vec![class('a', 'a')]);
|
||||
syntax!(cls2, "[!a]", vec![classn('a', 'a')]);
|
||||
syntax!(cls3, "[a-z]", vec![class('a', 'z')]);
|
||||
@@ -1063,9 +1172,11 @@ mod tests {
|
||||
syntax!(cls9, "[a-]", vec![rclass(&[('a', 'a'), ('-', '-')])]);
|
||||
syntax!(cls10, "[-a-z]", vec![rclass(&[('-', '-'), ('a', 'z')])]);
|
||||
syntax!(cls11, "[a-z-]", vec![rclass(&[('a', 'z'), ('-', '-')])]);
|
||||
syntax!(cls12, "[-a-z-]", vec![
|
||||
rclass(&[('-', '-'), ('a', 'z'), ('-', '-')]),
|
||||
]);
|
||||
syntax!(
|
||||
cls12,
|
||||
"[-a-z-]",
|
||||
vec![rclass(&[('-', '-'), ('a', 'z'), ('-', '-')]),]
|
||||
);
|
||||
syntax!(cls13, "[]-z]", vec![class(']', 'z')]);
|
||||
syntax!(cls14, "[--z]", vec![class('-', 'z')]);
|
||||
syntax!(cls15, "[ --]", vec![class(' ', '-')]);
|
||||
@@ -1076,13 +1187,6 @@ mod tests {
|
||||
syntax!(cls20, "[^a]", vec![classn('a', 'a')]);
|
||||
syntax!(cls21, "[^a-z]", vec![classn('a', 'z')]);
|
||||
|
||||
syntaxerr!(err_rseq1, "a**", ErrorKind::InvalidRecursive);
|
||||
syntaxerr!(err_rseq2, "**a", ErrorKind::InvalidRecursive);
|
||||
syntaxerr!(err_rseq3, "a**b", ErrorKind::InvalidRecursive);
|
||||
syntaxerr!(err_rseq4, "***", ErrorKind::InvalidRecursive);
|
||||
syntaxerr!(err_rseq5, "/a**", ErrorKind::InvalidRecursive);
|
||||
syntaxerr!(err_rseq6, "/**a", ErrorKind::InvalidRecursive);
|
||||
syntaxerr!(err_rseq7, "/a**b", ErrorKind::InvalidRecursive);
|
||||
syntaxerr!(err_unclosed1, "[", ErrorKind::UnclosedClass);
|
||||
syntaxerr!(err_unclosed2, "[]", ErrorKind::UnclosedClass);
|
||||
syntaxerr!(err_unclosed3, "[!", ErrorKind::UnclosedClass);
|
||||
@@ -1090,14 +1194,14 @@ mod tests {
|
||||
syntaxerr!(err_range1, "[z-a]", ErrorKind::InvalidRange('z', 'a'));
|
||||
syntaxerr!(err_range2, "[z--]", ErrorKind::InvalidRange('z', '-'));
|
||||
|
||||
const CASEI: Options = Options {
|
||||
casei: true,
|
||||
litsep: false,
|
||||
};
|
||||
const SLASHLIT: Options = Options {
|
||||
casei: false,
|
||||
litsep: true,
|
||||
};
|
||||
const CASEI: Options =
|
||||
Options { casei: Some(true), litsep: None, bsesc: None };
|
||||
const SLASHLIT: Options =
|
||||
Options { casei: None, litsep: Some(true), bsesc: None };
|
||||
const NOBSESC: Options =
|
||||
Options { casei: None, litsep: None, bsesc: Some(false) };
|
||||
const BSESC: Options =
|
||||
Options { casei: None, litsep: None, bsesc: Some(true) };
|
||||
|
||||
toregex!(re_casei, "a", "(?i)^a$", &CASEI);
|
||||
|
||||
@@ -1114,8 +1218,30 @@ mod tests {
|
||||
toregex!(re8, "[*]", r"^[\*]$");
|
||||
toregex!(re9, "[+]", r"^[\+]$");
|
||||
toregex!(re10, "+", r"^\+$");
|
||||
toregex!(re11, "**", r"^.*$");
|
||||
toregex!(re12, "☃", r"^\xe2\x98\x83$");
|
||||
toregex!(re11, "☃", r"^\xe2\x98\x83$");
|
||||
toregex!(re12, "**", r"^.*$");
|
||||
toregex!(re13, "**/", r"^.*$");
|
||||
toregex!(re14, "**/*", r"^(?:/?|.*/).*$");
|
||||
toregex!(re15, "**/**", r"^.*$");
|
||||
toregex!(re16, "**/**/*", r"^(?:/?|.*/).*$");
|
||||
toregex!(re17, "**/**/**", r"^.*$");
|
||||
toregex!(re18, "**/**/**/*", r"^(?:/?|.*/).*$");
|
||||
toregex!(re19, "a/**", r"^a/.*$");
|
||||
toregex!(re20, "a/**/**", r"^a/.*$");
|
||||
toregex!(re21, "a/**/**/**", r"^a/.*$");
|
||||
toregex!(re22, "a/**/b", r"^a(?:/|/.*/)b$");
|
||||
toregex!(re23, "a/**/**/b", r"^a(?:/|/.*/)b$");
|
||||
toregex!(re24, "a/**/**/**/b", r"^a(?:/|/.*/)b$");
|
||||
toregex!(re25, "**/b", r"^(?:/?|.*/)b$");
|
||||
toregex!(re26, "**/**/b", r"^(?:/?|.*/)b$");
|
||||
toregex!(re27, "**/**/**/b", r"^(?:/?|.*/)b$");
|
||||
toregex!(re28, "a**", r"^a.*.*$");
|
||||
toregex!(re29, "**a", r"^.*.*a$");
|
||||
toregex!(re30, "a**b", r"^a.*.*b$");
|
||||
toregex!(re31, "***", r"^.*.*.*$");
|
||||
toregex!(re32, "/a**", r"^/a.*.*$");
|
||||
toregex!(re33, "/**a", r"^/.*.*a$");
|
||||
toregex!(re34, "/a**b", r"^/a.*.*b$");
|
||||
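The expected strings in these `toregex!` cases come straight from `Glob::regex()`, which includes the `(?-u)` prefix the macro prepends to its expectations. As a quick illustrative check of one translation above (not part of the test suite), assuming default builder settings:

```rust
use globset::Glob;

fn main() {
    // "a/**/b" allows either a direct "a/b" or any number of intermediate
    // directories, matching the re22 expectation above.
    let glob = Glob::new("a/**/b").unwrap();
    assert_eq!(glob.regex(), r"(?-u)^a(?:/|/.*/)b$");
}
```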
|
||||
matches!(match1, "a", "a");
|
||||
matches!(match2, "a*b", "a_b");
|
||||
@@ -1148,11 +1274,12 @@ mod tests {
|
||||
matches!(matchrec18, "/**/test", "/test");
|
||||
matches!(matchrec19, "**/.*", ".abc");
|
||||
matches!(matchrec20, "**/.*", "abc/.abc");
|
||||
matches!(matchrec21, ".*/**", ".abc");
|
||||
matches!(matchrec21, "**/foo/bar", "foo/bar");
|
||||
matches!(matchrec22, ".*/**", ".abc/abc");
|
||||
matches!(matchrec23, "foo/**", "foo");
|
||||
matches!(matchrec24, "**/foo/bar", "foo/bar");
|
||||
matches!(matchrec25, "some/*/needle.txt", "some/one/needle.txt");
|
||||
matches!(matchrec23, "test/**", "test/");
|
||||
matches!(matchrec24, "test/**", "test/one");
|
||||
matches!(matchrec25, "test/**", "test/one/two");
|
||||
matches!(matchrec26, "some/*/needle.txt", "some/one/needle.txt");
|
||||
|
||||
matches!(matchrange1, "a[0-9]b", "a0b");
|
||||
matches!(matchrange2, "a[0-9]b", "a9b");
|
||||
@@ -1173,8 +1300,11 @@ mod tests {
|
||||
matches!(matchpat4, "*hello.txt", "some\\path\\to\\hello.txt");
|
||||
matches!(matchpat5, "*hello.txt", "/an/absolute/path/to/hello.txt");
|
||||
matches!(matchpat6, "*some/path/to/hello.txt", "some/path/to/hello.txt");
|
||||
matches!(matchpat7, "*some/path/to/hello.txt",
|
||||
"a/bigger/some/path/to/hello.txt");
|
||||
matches!(
|
||||
matchpat7,
|
||||
"*some/path/to/hello.txt",
|
||||
"a/bigger/some/path/to/hello.txt"
|
||||
);
|
||||
|
||||
matches!(matchescape, "_[[]_[]]_[?]_[*]_!_", "_[_]_?_*_!_");
|
||||
|
||||
@@ -1209,6 +1339,17 @@ mod tests {
|
||||
#[cfg(not(unix))]
|
||||
matches!(matchslash5, "abc\\def", "abc/def", SLASHLIT);
|
||||
|
||||
matches!(matchbackslash1, "\\[", "[", BSESC);
|
||||
matches!(matchbackslash2, "\\?", "?", BSESC);
|
||||
matches!(matchbackslash3, "\\*", "*", BSESC);
|
||||
matches!(matchbackslash4, "\\[a-z]", "\\a", NOBSESC);
|
||||
matches!(matchbackslash5, "\\?", "\\a", NOBSESC);
|
||||
matches!(matchbackslash6, "\\*", "\\\\", NOBSESC);
|
||||
#[cfg(unix)]
|
||||
matches!(matchbackslash7, "\\a", "a");
|
||||
#[cfg(not(unix))]
|
||||
matches!(matchbackslash8, "\\a", "/a");
|
||||
|
||||
nmatches!(matchnot1, "a*b*c", "abcd");
|
||||
nmatches!(matchnot2, "abc*abc*abc", "abcabcabcabcabcabcabca");
|
||||
nmatches!(matchnot3, "some/**/needle.txt", "some/other/notthis.txt");
|
||||
@@ -1226,40 +1367,65 @@ mod tests {
|
||||
nmatches!(matchnot15, "[!-]", "-");
|
||||
nmatches!(matchnot16, "*hello.txt", "hello.txt-and-then-some");
|
||||
nmatches!(matchnot17, "*hello.txt", "goodbye.txt");
|
||||
nmatches!(matchnot18, "*some/path/to/hello.txt",
|
||||
"some/path/to/hello.txt-and-then-some");
|
||||
nmatches!(matchnot19, "*some/path/to/hello.txt",
|
||||
"some/other/path/to/hello.txt");
|
||||
nmatches!(
|
||||
matchnot18,
|
||||
"*some/path/to/hello.txt",
|
||||
"some/path/to/hello.txt-and-then-some"
|
||||
);
|
||||
nmatches!(
|
||||
matchnot19,
|
||||
"*some/path/to/hello.txt",
|
||||
"some/other/path/to/hello.txt"
|
||||
);
|
||||
nmatches!(matchnot20, "a", "foo/a");
|
||||
nmatches!(matchnot21, "./foo", "foo");
|
||||
nmatches!(matchnot22, "**/foo", "foofoo");
|
||||
nmatches!(matchnot23, "**/foo/bar", "foofoo/bar");
|
||||
nmatches!(matchnot24, "/*.c", "mozilla-sha1/sha1.c");
|
||||
nmatches!(matchnot25, "*.c", "mozilla-sha1/sha1.c", SLASHLIT);
|
||||
nmatches!(matchnot26, "**/m4/ltoptions.m4",
|
||||
"csharp/src/packages/repositories.config", SLASHLIT);
|
||||
nmatches!(
|
||||
matchnot26,
|
||||
"**/m4/ltoptions.m4",
|
||||
"csharp/src/packages/repositories.config",
|
||||
SLASHLIT
|
||||
);
|
||||
nmatches!(matchnot27, "a[^0-9]b", "a0b");
|
||||
nmatches!(matchnot28, "a[^0-9]b", "a9b");
|
||||
nmatches!(matchnot29, "[^-]", "-");
|
||||
nmatches!(matchnot30, "some/*/needle.txt", "some/needle.txt");
|
||||
nmatches!(
|
||||
matchrec31,
|
||||
"some/*/needle.txt", "some/one/two/needle.txt", SLASHLIT);
|
||||
"some/*/needle.txt",
|
||||
"some/one/two/needle.txt",
|
||||
SLASHLIT
|
||||
);
|
||||
nmatches!(
|
||||
matchrec32,
|
||||
"some/*/needle.txt", "some/one/two/three/needle.txt", SLASHLIT);
|
||||
"some/*/needle.txt",
|
||||
"some/one/two/three/needle.txt",
|
||||
SLASHLIT
|
||||
);
|
||||
nmatches!(matchrec33, ".*/**", ".abc");
|
||||
nmatches!(matchrec34, "foo/**", "foo");
|
||||
|
||||
macro_rules! extract {
|
||||
($which:ident, $name:ident, $pat:expr, $expect:expr) => {
|
||||
extract!($which, $name, $pat, $expect, Options::default());
|
||||
};
|
||||
($which:ident, $name:ident, $pat:expr, $expect:expr, $opts:expr) => {
|
||||
($which:ident, $name:ident, $pat:expr, $expect:expr, $options:expr) => {
|
||||
#[test]
|
||||
fn $name() {
|
||||
let pat = GlobBuilder::new($pat)
|
||||
.case_insensitive($opts.casei)
|
||||
.literal_separator($opts.litsep)
|
||||
.build().unwrap();
|
||||
let mut builder = GlobBuilder::new($pat);
|
||||
if let Some(casei) = $options.casei {
|
||||
builder.case_insensitive(casei);
|
||||
}
|
||||
if let Some(litsep) = $options.litsep {
|
||||
builder.literal_separator(litsep);
|
||||
}
|
||||
if let Some(bsesc) = $options.bsesc {
|
||||
builder.backslash_escape(bsesc);
|
||||
}
|
||||
let pat = builder.build().unwrap();
|
||||
assert_eq!($expect, pat.$which());
|
||||
}
|
||||
};
|
||||
@@ -1302,19 +1468,27 @@ mod tests {
|
||||
literal!(extract_lit7, "foo/bar", Some(s("foo/bar")));
|
||||
literal!(extract_lit8, "**/foo/bar", None);
|
||||
|
||||
basetokens!(extract_basetoks1, "**/foo", Some(&*vec![
|
||||
Literal('f'), Literal('o'), Literal('o'),
|
||||
]));
|
||||
basetokens!(
|
||||
extract_basetoks1,
|
||||
"**/foo",
|
||||
Some(&*vec![Literal('f'), Literal('o'), Literal('o'),])
|
||||
);
|
||||
basetokens!(extract_basetoks2, "**/foo", None, CASEI);
|
||||
basetokens!(extract_basetoks3, "**/foo", Some(&*vec![
|
||||
Literal('f'), Literal('o'), Literal('o'),
|
||||
]), SLASHLIT);
|
||||
basetokens!(
|
||||
extract_basetoks3,
|
||||
"**/foo",
|
||||
Some(&*vec![Literal('f'), Literal('o'), Literal('o'),]),
|
||||
SLASHLIT
|
||||
);
|
||||
basetokens!(extract_basetoks4, "*foo", None, SLASHLIT);
|
||||
basetokens!(extract_basetoks5, "*foo", None);
|
||||
basetokens!(extract_basetoks6, "**/fo*o", None);
|
||||
basetokens!(extract_basetoks7, "**/fo*o", Some(&*vec![
|
||||
Literal('f'), Literal('o'), ZeroOrMore, Literal('o'),
|
||||
]), SLASHLIT);
|
||||
basetokens!(
|
||||
extract_basetoks7,
|
||||
"**/fo*o",
|
||||
Some(&*vec![Literal('f'), Literal('o'), ZeroOrMore, Literal('o'),]),
|
||||
SLASHLIT
|
||||
);
|
||||
|
||||
ext!(extract_ext1, "**/*.rs", Some(s(".rs")));
|
||||
ext!(extract_ext2, "**/*.rs.bak", None);
|
||||
@@ -1337,7 +1511,7 @@ mod tests {
|
||||
prefix!(extract_prefix1, "/foo", Some(s("/foo")));
|
||||
prefix!(extract_prefix2, "/foo/*", Some(s("/foo/")));
|
||||
prefix!(extract_prefix3, "**/foo", None);
|
||||
prefix!(extract_prefix4, "foo/**", None);
|
||||
prefix!(extract_prefix4, "foo/**", Some(s("foo/")));
|
||||
|
||||
suffix!(extract_suffix1, "**/foo/bar", Some((s("/foo/bar"), true)));
|
||||
suffix!(extract_suffix2, "*/foo/bar", Some((s("/foo/bar"), false)));
|
||||
@@ -91,6 +91,11 @@ Standard Unix-style glob syntax is supported:
|
||||
`[!ab]` to match any character except for `a` and `b`.
|
||||
* Metacharacters such as `*` and `?` can be escaped with character class
|
||||
notation. e.g., `[*]` matches `*`.
|
||||
* When backslash escapes are enabled, a backslash (`\`) will escape all meta
|
||||
characters in a glob. If it precedes a non-meta character, then the backslash is
|
||||
ignored. A `\\` will match a literal `\\`. Note that this mode is only
|
||||
enabled on Unix platforms by default, but can be enabled on any platform
|
||||
via the `backslash_escape` setting on `Glob`.
|
||||
|
||||
A `GlobBuilder` can be used to prevent wildcards from matching path separators,
|
||||
or to enable case insensitive matching.
|
||||
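As a hedged sketch of the builder settings just described (not part of this diff), the following uses the public `GlobBuilder` API shown in the patch to combine case insensitivity, literal path separators, and backslash escaping:

```rust
use globset::GlobBuilder;

fn main() -> Result<(), globset::Error> {
    let matcher = GlobBuilder::new("src/**/*.RS")
        .case_insensitive(true) // ".RS" also matches ".rs"
        .literal_separator(true) // "*" no longer matches "/"
        .backslash_escape(true) // "\\*" would match a literal "*"
        .build()?
        .compile_matcher();

    assert!(matcher.is_match("src/glob/mod.rs"));
    assert!(!matcher.is_match("srcmain.rs")); // no "src/" component
    Ok(())
}
```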
@@ -98,34 +103,38 @@ or to enable case insensitive matching.
|
||||
|
||||
#![deny(missing_docs)]
|
||||
|
||||
extern crate aho_corasick;
|
||||
extern crate fnv;
|
||||
#[macro_use]
|
||||
extern crate log;
|
||||
extern crate memchr;
|
||||
extern crate regex;
|
||||
|
||||
use std::borrow::Cow;
|
||||
use std::collections::{BTreeMap, HashMap};
|
||||
use std::error::Error as StdError;
|
||||
use std::ffi::OsStr;
|
||||
use std::fmt;
|
||||
use std::hash;
|
||||
use std::path::Path;
|
||||
use std::str;
|
||||
|
||||
use aho_corasick::{Automaton, AcAutomaton, FullAcAutomaton};
|
||||
use aho_corasick::AhoCorasick;
|
||||
use bstr::{ByteSlice, ByteVec, B};
|
||||
use regex::bytes::{Regex, RegexBuilder, RegexSet};
|
||||
|
||||
use pathutil::{
|
||||
file_name, file_name_ext, normalize_path, os_str_bytes, path_bytes,
|
||||
};
|
||||
use glob::MatchStrategy;
|
||||
pub use glob::{Glob, GlobBuilder, GlobMatcher};
|
||||
use crate::glob::MatchStrategy;
|
||||
pub use crate::glob::{Glob, GlobBuilder, GlobMatcher};
|
||||
use crate::pathutil::{file_name, file_name_ext, normalize_path};
|
||||
|
||||
mod glob;
|
||||
mod pathutil;
|
||||
|
||||
#[cfg(feature = "serde1")]
|
||||
mod serde_impl;
|
||||
|
||||
#[cfg(feature = "log")]
|
||||
macro_rules! debug {
|
||||
($($token:tt)*) => (::log::debug!($($token)*);)
|
||||
}
|
||||
|
||||
#[cfg(not(feature = "log"))]
|
||||
macro_rules! debug {
|
||||
($($token:tt)*) => {};
|
||||
}
|
||||
|
||||
/// Represents an error that can occur when parsing a glob pattern.
|
||||
#[derive(Clone, Debug, Eq, PartialEq)]
|
||||
pub struct Error {
|
||||
@@ -138,8 +147,13 @@ pub struct Error {
|
||||
/// The kind of error that can occur when parsing a glob pattern.
|
||||
#[derive(Clone, Debug, Eq, PartialEq)]
|
||||
pub enum ErrorKind {
|
||||
/// Occurs when a use of `**` is invalid. Namely, `**` can only appear
|
||||
/// adjacent to a path separator, or the beginning/end of a glob.
|
||||
/// **DEPRECATED**.
|
||||
///
|
||||
/// This error used to occur for consistency with git's glob specification,
|
||||
/// but the specification now accepts all uses of `**`. When `**` does not
|
||||
/// appear adjacent to a path separator or at the beginning/end of a glob,
|
||||
/// it is now treated as two consecutive `*` patterns. As such, this error
|
||||
/// is no longer used.
|
||||
InvalidRecursive,
|
||||
/// Occurs when a character class (e.g., `[abc]`) is not closed.
|
||||
UnclosedClass,
|
||||
@@ -154,8 +168,17 @@ pub enum ErrorKind {
|
||||
/// Occurs when an alternating group is nested inside another alternating
|
||||
/// group, e.g., `{{a,b},{c,d}}`.
|
||||
NestedAlternates,
|
||||
/// Occurs when an unescaped '\' is found at the end of a glob.
|
||||
DanglingEscape,
|
||||
/// An error associated with parsing or compiling a regex.
|
||||
Regex(String),
|
||||
/// Hints that destructuring should not be exhaustive.
|
||||
///
|
||||
/// This enum may grow additional variants, so this makes sure clients
|
||||
/// don't count on exhaustive matching. (Otherwise, adding a new variant
|
||||
/// could break existing code.)
|
||||
#[doc(hidden)]
|
||||
__Nonexhaustive,
|
||||
}
|
||||
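A short illustrative example (not from the patch) of how these error kinds surface to callers through the public API shown here:

```rust
use globset::{ErrorKind, Glob};

fn main() {
    // "[a" is missing its closing ']', so parsing fails.
    let err = Glob::new("[a").unwrap_err();
    assert_eq!(err.kind(), &ErrorKind::UnclosedClass);
    // The Display impl below includes the offending glob when it is known.
    println!("{}", err);
}
```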
|
||||
impl StdError for Error {
|
||||
@@ -185,9 +208,7 @@ impl ErrorKind {
|
||||
ErrorKind::UnclosedClass => {
|
||||
"unclosed character class; missing ']'"
|
||||
}
|
||||
ErrorKind::InvalidRange(_, _) => {
|
||||
"invalid character range"
|
||||
}
|
||||
ErrorKind::InvalidRange(_, _) => "invalid character range",
|
||||
ErrorKind::UnopenedAlternates => {
|
||||
"unopened alternate group; missing '{' \
|
||||
(maybe escape '}' with '[}]'?)"
|
||||
@@ -199,13 +220,15 @@ impl ErrorKind {
|
||||
ErrorKind::NestedAlternates => {
|
||||
"nested alternate groups are not allowed"
|
||||
}
|
||||
ErrorKind::DanglingEscape => "dangling '\\'",
|
||||
ErrorKind::Regex(ref err) => err,
|
||||
ErrorKind::__Nonexhaustive => unreachable!(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Display for Error {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
match self.glob {
|
||||
None => self.kind.fmt(f),
|
||||
Some(ref glob) => {
|
||||
@@ -216,19 +239,19 @@ impl fmt::Display for Error {
|
||||
}
|
||||
|
||||
impl fmt::Display for ErrorKind {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
match *self {
|
||||
ErrorKind::InvalidRecursive
|
||||
| ErrorKind::UnclosedClass
|
||||
| ErrorKind::UnopenedAlternates
|
||||
| ErrorKind::UnclosedAlternates
|
||||
| ErrorKind::NestedAlternates
|
||||
| ErrorKind::Regex(_) => {
|
||||
write!(f, "{}", self.description())
|
||||
}
|
||||
| ErrorKind::DanglingEscape
|
||||
| ErrorKind::Regex(_) => write!(f, "{}", self.description()),
|
||||
ErrorKind::InvalidRange(s, e) => {
|
||||
write!(f, "invalid range; '{}' > '{}'", s, e)
|
||||
}
|
||||
ErrorKind::__Nonexhaustive => unreachable!(),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -239,21 +262,20 @@ fn new_regex(pat: &str) -> Result<Regex, Error> {
|
||||
.size_limit(10 * (1 << 20))
|
||||
.dfa_size_limit(10 * (1 << 20))
|
||||
.build()
|
||||
.map_err(|err| {
|
||||
Error {
|
||||
.map_err(|err| Error {
|
||||
glob: Some(pat.to_string()),
|
||||
kind: ErrorKind::Regex(err.to_string()),
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
fn new_regex_set<I, S>(pats: I) -> Result<RegexSet, Error>
|
||||
where S: AsRef<str>, I: IntoIterator<Item=S> {
|
||||
RegexSet::new(pats).map_err(|err| {
|
||||
Error {
|
||||
where
|
||||
S: AsRef<str>,
|
||||
I: IntoIterator<Item = S>,
|
||||
{
|
||||
RegexSet::new(pats).map_err(|err| Error {
|
||||
glob: None,
|
||||
kind: ErrorKind::Regex(err.to_string()),
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
@@ -268,12 +290,20 @@ pub struct GlobSet {
|
||||
}
|
||||
|
||||
impl GlobSet {
|
||||
/// Create an empty `GlobSet`. An empty set matches nothing.
|
||||
#[inline]
|
||||
pub fn empty() -> GlobSet {
|
||||
GlobSet { len: 0, strats: vec![] }
|
||||
}
|
||||
|
||||
/// Returns true if this set is empty, and therefore matches nothing.
|
||||
#[inline]
|
||||
pub fn is_empty(&self) -> bool {
|
||||
self.len == 0
|
||||
}
|
||||
|
||||
/// Returns the number of globs in this set.
|
||||
#[inline]
|
||||
pub fn len(&self) -> usize {
|
||||
self.len
|
||||
}
|
||||
@@ -287,7 +317,7 @@ impl GlobSet {
|
||||
///
|
||||
/// This takes a Candidate as input, which can be used to amortize the
|
||||
/// cost of preparing a path for matching.
|
||||
pub fn is_match_candidate(&self, path: &Candidate) -> bool {
|
||||
pub fn is_match_candidate(&self, path: &Candidate<'_>) -> bool {
|
||||
if self.is_empty() {
|
||||
return false;
|
||||
}
|
||||
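To make the amortization point concrete, here is an illustrative sketch (not part of the diff) that prepares a `Candidate` once and reuses it for both the boolean and the index-collecting queries, using only the public items visible in this patch (`Glob`, `GlobSetBuilder`, `Candidate`):

```rust
use globset::{Candidate, Glob, GlobSetBuilder};

fn main() -> Result<(), globset::Error> {
    let mut builder = GlobSetBuilder::new();
    builder.add(Glob::new("**/*.rs")?);
    builder.add(Glob::new("Cargo.toml")?);
    let set = builder.build()?;

    // Normalize and split the path once; every strategy in the set then
    // reuses the prepared path, basename and extension.
    let candidate = Candidate::new("crates/globset/src/lib.rs");
    assert!(set.is_match_candidate(&candidate));

    let mut hits = Vec::new();
    set.matches_candidate_into(&candidate, &mut hits);
    assert_eq!(hits, vec![0]); // only "**/*.rs" (index 0) matched
    Ok(())
}
```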
@@ -310,7 +340,7 @@ impl GlobSet {
|
||||
///
|
||||
/// This takes a Candidate as input, which can be used to amortize the
|
||||
/// cost of preparing a path for matching.
|
||||
pub fn matches_candidate(&self, path: &Candidate) -> Vec<usize> {
|
||||
pub fn matches_candidate(&self, path: &Candidate<'_>) -> Vec<usize> {
|
||||
let mut into = vec![];
|
||||
if self.is_empty() {
|
||||
return into;
|
||||
@@ -322,7 +352,7 @@ impl GlobSet {
|
||||
/// Adds the sequence number of every glob pattern that matches the given
|
||||
/// path to the vec given.
|
||||
///
|
||||
/// `into` is is cleared before matching begins, and contains the set of
|
||||
/// `into` is cleared before matching begins, and contains the set of
|
||||
/// sequence numbers (in ascending order) after matching ends. If no globs
|
||||
/// were matched, then `into` will be empty.
|
||||
pub fn matches_into<P: AsRef<Path>>(
|
||||
@@ -336,7 +366,7 @@ impl GlobSet {
|
||||
/// Adds the sequence number of every glob pattern that matches the given
|
||||
/// path to the vec given.
|
||||
///
|
||||
/// `into` is is cleared before matching begins, and contains the set of
|
||||
/// `into` is cleared before matching begins, and contains the set of
|
||||
/// sequence numbers (in ascending order) after matching ends. If no globs
|
||||
/// were matched, then `into` will be empty.
|
||||
///
|
||||
@@ -344,7 +374,7 @@ impl GlobSet {
|
||||
/// cost of preparing a path for matching.
|
||||
pub fn matches_candidate_into(
|
||||
&self,
|
||||
path: &Candidate,
|
||||
path: &Candidate<'_>,
|
||||
into: &mut Vec<usize>,
|
||||
) {
|
||||
into.clear();
|
||||
@@ -398,11 +428,17 @@ impl GlobSet {
|
||||
}
|
||||
}
|
||||
}
|
||||
debug!("built glob set; {} literals, {} basenames, {} extensions, \
|
||||
debug!(
|
||||
"built glob set; {} literals, {} basenames, {} extensions, \
|
||||
{} prefixes, {} suffixes, {} required extensions, {} regexes",
|
||||
lits.0.len(), base_lits.0.len(), exts.0.len(),
|
||||
prefixes.literals.len(), suffixes.literals.len(),
|
||||
required_exts.0.len(), regexes.literals.len());
|
||||
lits.0.len(),
|
||||
base_lits.0.len(),
|
||||
exts.0.len(),
|
||||
prefixes.literals.len(),
|
||||
suffixes.literals.len(),
|
||||
required_exts.0.len(),
|
||||
regexes.literals.len()
|
||||
);
|
||||
Ok(GlobSet {
|
||||
len: pats.len(),
|
||||
strats: vec![
|
||||
@@ -412,15 +448,24 @@ impl GlobSet {
|
||||
GlobSetMatchStrategy::Suffix(suffixes.suffix()),
|
||||
GlobSetMatchStrategy::Prefix(prefixes.prefix()),
|
||||
GlobSetMatchStrategy::RequiredExtension(
|
||||
required_exts.build()?),
|
||||
required_exts.build()?,
|
||||
),
|
||||
GlobSetMatchStrategy::Regex(regexes.regex_set()?),
|
||||
],
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for GlobSet {
|
||||
/// Create a default empty GlobSet.
|
||||
fn default() -> Self {
|
||||
GlobSet::empty()
|
||||
}
|
||||
}
|
||||
|
||||
/// GlobSetBuilder builds a group of patterns that can be used to
|
||||
/// simultaneously match a file path.
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct GlobSetBuilder {
|
||||
pats: Vec<Glob>,
|
||||
}
|
||||
@@ -441,7 +486,6 @@ impl GlobSetBuilder {
|
||||
}
|
||||
|
||||
/// Add a new pattern to this set.
|
||||
#[allow(dead_code)]
|
||||
pub fn add(&mut self, pat: Glob) -> &mut GlobSetBuilder {
|
||||
self.pats.push(pat);
|
||||
self
|
||||
@@ -464,13 +508,10 @@ pub struct Candidate<'a> {
|
||||
impl<'a> Candidate<'a> {
|
||||
/// Create a new candidate for matching from the given path.
|
||||
pub fn new<P: AsRef<Path> + ?Sized>(path: &'a P) -> Candidate<'a> {
|
||||
let path = path.as_ref();
|
||||
let basename = file_name(path).unwrap_or(OsStr::new(""));
|
||||
Candidate {
|
||||
path: normalize_path(path_bytes(path)),
|
||||
basename: os_str_bytes(basename),
|
||||
ext: file_name_ext(basename).unwrap_or(Cow::Borrowed(b"")),
|
||||
}
|
||||
let path = normalize_path(Vec::from_path_lossy(path.as_ref()));
|
||||
let basename = file_name(&path).unwrap_or(Cow::Borrowed(B("")));
|
||||
let ext = file_name_ext(&basename).unwrap_or(Cow::Borrowed(B("")));
|
||||
Candidate { path: path, basename: basename, ext: ext }
|
||||
}
|
||||
|
||||
fn path_prefix(&self, max: usize) -> &[u8] {
|
||||
@@ -502,7 +543,7 @@ enum GlobSetMatchStrategy {
|
||||
}
|
||||
|
||||
impl GlobSetMatchStrategy {
|
||||
fn is_match(&self, candidate: &Candidate) -> bool {
|
||||
fn is_match(&self, candidate: &Candidate<'_>) -> bool {
|
||||
use self::GlobSetMatchStrategy::*;
|
||||
match *self {
|
||||
Literal(ref s) => s.is_match(candidate),
|
||||
@@ -515,7 +556,11 @@ impl GlobSetMatchStrategy {
|
||||
}
|
||||
}
|
||||
|
||||
fn matches_into(&self, candidate: &Candidate, matches: &mut Vec<usize>) {
|
||||
fn matches_into(
|
||||
&self,
|
||||
candidate: &Candidate<'_>,
|
||||
matches: &mut Vec<usize>,
|
||||
) {
|
||||
use self::GlobSetMatchStrategy::*;
|
||||
match *self {
|
||||
Literal(ref s) => s.matches_into(candidate, matches),
|
||||
@@ -541,13 +586,17 @@ impl LiteralStrategy {
|
||||
self.0.entry(lit.into_bytes()).or_insert(vec![]).push(global_index);
|
||||
}
|
||||
|
||||
fn is_match(&self, candidate: &Candidate) -> bool {
|
||||
self.0.contains_key(&*candidate.path)
|
||||
fn is_match(&self, candidate: &Candidate<'_>) -> bool {
|
||||
self.0.contains_key(candidate.path.as_bytes())
|
||||
}
|
||||
|
||||
#[inline(never)]
|
||||
fn matches_into(&self, candidate: &Candidate, matches: &mut Vec<usize>) {
|
||||
if let Some(hits) = self.0.get(&*candidate.path) {
|
||||
fn matches_into(
|
||||
&self,
|
||||
candidate: &Candidate<'_>,
|
||||
matches: &mut Vec<usize>,
|
||||
) {
|
||||
if let Some(hits) = self.0.get(candidate.path.as_bytes()) {
|
||||
matches.extend(hits);
|
||||
}
|
||||
}
|
||||
@@ -565,19 +614,23 @@ impl BasenameLiteralStrategy {
|
||||
self.0.entry(lit.into_bytes()).or_insert(vec![]).push(global_index);
|
||||
}
|
||||
|
||||
fn is_match(&self, candidate: &Candidate) -> bool {
|
||||
fn is_match(&self, candidate: &Candidate<'_>) -> bool {
|
||||
if candidate.basename.is_empty() {
|
||||
return false;
|
||||
}
|
||||
self.0.contains_key(&*candidate.basename)
|
||||
self.0.contains_key(candidate.basename.as_bytes())
|
||||
}
|
||||
|
||||
#[inline(never)]
|
||||
fn matches_into(&self, candidate: &Candidate, matches: &mut Vec<usize>) {
|
||||
fn matches_into(
|
||||
&self,
|
||||
candidate: &Candidate<'_>,
|
||||
matches: &mut Vec<usize>,
|
||||
) {
|
||||
if candidate.basename.is_empty() {
|
||||
return;
|
||||
}
|
||||
if let Some(hits) = self.0.get(&*candidate.basename) {
|
||||
if let Some(hits) = self.0.get(candidate.basename.as_bytes()) {
|
||||
matches.extend(hits);
|
||||
}
|
||||
}
|
||||
@@ -595,19 +648,23 @@ impl ExtensionStrategy {
|
||||
self.0.entry(ext.into_bytes()).or_insert(vec![]).push(global_index);
|
||||
}
|
||||
|
||||
fn is_match(&self, candidate: &Candidate) -> bool {
|
||||
fn is_match(&self, candidate: &Candidate<'_>) -> bool {
|
||||
if candidate.ext.is_empty() {
|
||||
return false;
|
||||
}
|
||||
self.0.contains_key(&*candidate.ext)
|
||||
self.0.contains_key(candidate.ext.as_bytes())
|
||||
}
|
||||
|
||||
#[inline(never)]
|
||||
fn matches_into(&self, candidate: &Candidate, matches: &mut Vec<usize>) {
|
||||
fn matches_into(
|
||||
&self,
|
||||
candidate: &Candidate<'_>,
|
||||
matches: &mut Vec<usize>,
|
||||
) {
|
||||
if candidate.ext.is_empty() {
|
||||
return;
|
||||
}
|
||||
if let Some(hits) = self.0.get(&*candidate.ext) {
|
||||
if let Some(hits) = self.0.get(candidate.ext.as_bytes()) {
|
||||
matches.extend(hits);
|
||||
}
|
||||
}
|
||||
@@ -615,27 +672,31 @@ impl ExtensionStrategy {
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
struct PrefixStrategy {
|
||||
matcher: FullAcAutomaton<Vec<u8>>,
|
||||
matcher: AhoCorasick,
|
||||
map: Vec<usize>,
|
||||
longest: usize,
|
||||
}
|
||||
|
||||
impl PrefixStrategy {
|
||||
fn is_match(&self, candidate: &Candidate) -> bool {
|
||||
fn is_match(&self, candidate: &Candidate<'_>) -> bool {
|
||||
let path = candidate.path_prefix(self.longest);
|
||||
for m in self.matcher.find_overlapping(path) {
|
||||
if m.start == 0 {
|
||||
for m in self.matcher.find_overlapping_iter(path) {
|
||||
if m.start() == 0 {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
false
|
||||
}
|
||||
|
||||
fn matches_into(&self, candidate: &Candidate, matches: &mut Vec<usize>) {
|
||||
fn matches_into(
|
||||
&self,
|
||||
candidate: &Candidate<'_>,
|
||||
matches: &mut Vec<usize>,
|
||||
) {
|
||||
let path = candidate.path_prefix(self.longest);
|
||||
for m in self.matcher.find_overlapping(path) {
|
||||
if m.start == 0 {
|
||||
matches.push(self.map[m.pati]);
|
||||
for m in self.matcher.find_overlapping_iter(path) {
|
||||
if m.start() == 0 {
|
||||
matches.push(self.map[m.pattern()]);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -643,27 +704,31 @@ impl PrefixStrategy {
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
struct SuffixStrategy {
|
||||
matcher: FullAcAutomaton<Vec<u8>>,
|
||||
matcher: AhoCorasick,
|
||||
map: Vec<usize>,
|
||||
longest: usize,
|
||||
}
|
||||
|
||||
impl SuffixStrategy {
|
||||
fn is_match(&self, candidate: &Candidate) -> bool {
|
||||
fn is_match(&self, candidate: &Candidate<'_>) -> bool {
|
||||
let path = candidate.path_suffix(self.longest);
|
||||
for m in self.matcher.find_overlapping(path) {
|
||||
if m.end == path.len() {
|
||||
for m in self.matcher.find_overlapping_iter(path) {
|
||||
if m.end() == path.len() {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
false
|
||||
}
|
||||
|
||||
fn matches_into(&self, candidate: &Candidate, matches: &mut Vec<usize>) {
|
||||
fn matches_into(
|
||||
&self,
|
||||
candidate: &Candidate<'_>,
|
||||
matches: &mut Vec<usize>,
|
||||
) {
|
||||
let path = candidate.path_suffix(self.longest);
|
||||
for m in self.matcher.find_overlapping(path) {
|
||||
if m.end == path.len() {
|
||||
matches.push(self.map[m.pati]);
|
||||
for m in self.matcher.find_overlapping_iter(path) {
|
||||
if m.end() == path.len() {
|
||||
matches.push(self.map[m.pattern()]);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -673,15 +738,15 @@ impl SuffixStrategy {
|
||||
struct RequiredExtensionStrategy(HashMap<Vec<u8>, Vec<(usize, Regex)>, Fnv>);
|
||||
|
||||
impl RequiredExtensionStrategy {
|
||||
fn is_match(&self, candidate: &Candidate) -> bool {
|
||||
fn is_match(&self, candidate: &Candidate<'_>) -> bool {
|
||||
if candidate.ext.is_empty() {
|
||||
return false;
|
||||
}
|
||||
match self.0.get(&*candidate.ext) {
|
||||
match self.0.get(candidate.ext.as_bytes()) {
|
||||
None => false,
|
||||
Some(regexes) => {
|
||||
for &(_, ref re) in regexes {
|
||||
if re.is_match(&*candidate.path) {
|
||||
if re.is_match(candidate.path.as_bytes()) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
@@ -691,13 +756,17 @@ impl RequiredExtensionStrategy {
|
||||
}
|
||||
|
||||
#[inline(never)]
|
||||
fn matches_into(&self, candidate: &Candidate, matches: &mut Vec<usize>) {
|
||||
fn matches_into(
|
||||
&self,
|
||||
candidate: &Candidate<'_>,
|
||||
matches: &mut Vec<usize>,
|
||||
) {
|
||||
if candidate.ext.is_empty() {
|
||||
return;
|
||||
}
|
||||
if let Some(regexes) = self.0.get(&*candidate.ext) {
|
||||
if let Some(regexes) = self.0.get(candidate.ext.as_bytes()) {
|
||||
for &(global_index, ref re) in regexes {
|
||||
if re.is_match(&*candidate.path) {
|
||||
if re.is_match(candidate.path.as_bytes()) {
|
||||
matches.push(global_index);
|
||||
}
|
||||
}
|
||||
@@ -712,12 +781,16 @@ struct RegexSetStrategy {
|
||||
}
|
||||
|
||||
impl RegexSetStrategy {
|
||||
fn is_match(&self, candidate: &Candidate) -> bool {
|
||||
self.matcher.is_match(&*candidate.path)
|
||||
fn is_match(&self, candidate: &Candidate<'_>) -> bool {
|
||||
self.matcher.is_match(candidate.path.as_bytes())
|
||||
}
|
||||
|
||||
fn matches_into(&self, candidate: &Candidate, matches: &mut Vec<usize>) {
|
||||
for i in self.matcher.matches(&*candidate.path) {
|
||||
fn matches_into(
|
||||
&self,
|
||||
candidate: &Candidate<'_>,
|
||||
matches: &mut Vec<usize>,
|
||||
) {
|
||||
for i in self.matcher.matches(candidate.path.as_bytes()) {
|
||||
matches.push(self.map[i]);
|
||||
}
|
||||
}
|
||||
@@ -732,11 +805,7 @@ struct MultiStrategyBuilder {
|
||||
|
||||
impl MultiStrategyBuilder {
|
||||
fn new() -> MultiStrategyBuilder {
|
||||
MultiStrategyBuilder {
|
||||
literals: vec![],
|
||||
map: vec![],
|
||||
longest: 0,
|
||||
}
|
||||
MultiStrategyBuilder { literals: vec![], map: vec![], longest: 0 }
|
||||
}
|
||||
|
||||
fn add(&mut self, global_index: usize, literal: String) {
|
||||
@@ -748,18 +817,16 @@ impl MultiStrategyBuilder {
|
||||
}
|
||||
|
||||
fn prefix(self) -> PrefixStrategy {
|
||||
let it = self.literals.into_iter().map(|s| s.into_bytes());
|
||||
PrefixStrategy {
|
||||
matcher: AcAutomaton::new(it).into_full(),
|
||||
matcher: AhoCorasick::new_auto_configured(&self.literals),
|
||||
map: self.map,
|
||||
longest: self.longest,
|
||||
}
|
||||
}
|
||||
|
||||
fn suffix(self) -> SuffixStrategy {
|
||||
let it = self.literals.into_iter().map(|s| s.into_bytes());
|
||||
SuffixStrategy {
|
||||
matcher: AcAutomaton::new(it).into_full(),
|
||||
matcher: AhoCorasick::new_auto_configured(&self.literals),
|
||||
map: self.map,
|
||||
longest: self.longest,
|
||||
}
|
||||
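The move from `AcAutomaton`/`FullAcAutomaton` to `AhoCorasick` above keeps the same trick: overlapping matches anchored at an end of the haystack, where a hit starting at offset 0 is a prefix match and a hit ending at the haystack length is a suffix match. A rough sketch of that idea, assuming the aho-corasick 0.7 API used in this diff:

```rust
use aho_corasick::AhoCorasick;

fn main() {
    let prefixes = vec!["foo/".to_string(), "foo/bar/".to_string()];
    let ac = AhoCorasick::new_auto_configured(&prefixes);

    let path = b"foo/bar/baz.rs";
    // Overlapping search so both "foo/" and "foo/bar/" are reported; only
    // hits starting at offset 0 count as prefix matches.
    let hits: Vec<usize> = ac
        .find_overlapping_iter(path)
        .filter(|m| m.start() == 0)
        .map(|m| m.pattern())
        .collect();
    assert_eq!(hits, vec![0, 1]);
}
```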
@@ -805,8 +872,8 @@ impl RequiredExtensionStrategyBuilder {
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::GlobSetBuilder;
|
||||
use glob::Glob;
|
||||
use super::{GlobSet, GlobSetBuilder};
|
||||
use crate::glob::Glob;
|
||||
|
||||
#[test]
|
||||
fn set_works() {
|
||||
@@ -835,4 +902,11 @@ mod tests {
|
||||
assert!(!set.is_match(""));
|
||||
assert!(!set.is_match("a"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn default_set_is_empty_works() {
|
||||
let set: GlobSet = Default::default();
|
||||
assert!(!set.is_match(""));
|
||||
assert!(!set.is_match("a"));
|
||||
}
|
||||
}
|
||||
@@ -1,41 +1,26 @@
|
||||
use std::borrow::Cow;
|
||||
use std::ffi::OsStr;
|
||||
use std::path::Path;
|
||||
|
||||
use bstr::{ByteSlice, ByteVec};
|
||||
|
||||
/// The final component of the path, if it is a normal file.
|
||||
///
|
||||
/// If the path terminates in ., .., or consists solely of a root or prefix,
|
||||
/// file_name will return None.
|
||||
#[cfg(unix)]
|
||||
pub fn file_name<'a, P: AsRef<Path> + ?Sized>(
|
||||
path: &'a P,
|
||||
) -> Option<&'a OsStr> {
|
||||
use std::os::unix::ffi::OsStrExt;
|
||||
use memchr::memrchr;
|
||||
|
||||
let path = path.as_ref().as_os_str().as_bytes();
|
||||
pub fn file_name<'a>(path: &Cow<'a, [u8]>) -> Option<Cow<'a, [u8]>> {
|
||||
if path.is_empty() {
|
||||
return None;
|
||||
} else if path.len() == 1 && path[0] == b'.' {
|
||||
return None;
|
||||
} else if path.last() == Some(&b'.') {
|
||||
return None;
|
||||
} else if path.len() >= 2 && &path[path.len() - 2..] == &b".."[..] {
|
||||
} else if path.last_byte() == Some(b'.') {
|
||||
return None;
|
||||
}
|
||||
let last_slash = memrchr(b'/', path).map(|i| i + 1).unwrap_or(0);
|
||||
Some(OsStr::from_bytes(&path[last_slash..]))
|
||||
}
|
||||
|
||||
/// The final component of the path, if it is a normal file.
|
||||
///
|
||||
/// If the path terminates in ., .., or consists solely of a root or prefix,
|
||||
/// file_name will return None.
|
||||
#[cfg(not(unix))]
|
||||
pub fn file_name<'a, P: AsRef<Path> + ?Sized>(
|
||||
path: &'a P,
|
||||
) -> Option<&'a OsStr> {
|
||||
path.as_ref().file_name()
|
||||
let last_slash = path.rfind_byte(b'/').map(|i| i + 1).unwrap_or(0);
|
||||
Some(match *path {
|
||||
Cow::Borrowed(path) => Cow::Borrowed(&path[last_slash..]),
|
||||
Cow::Owned(ref path) => {
|
||||
let mut path = path.clone();
|
||||
path.drain_bytes(..last_slash);
|
||||
Cow::Owned(path)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
/// Return a file extension given a path's file name.
|
||||
@@ -54,59 +39,28 @@ pub fn file_name<'a, P: AsRef<Path> + ?Sized>(
|
||||
/// a pattern like `*.rs` is obviously trying to match files with a `rs`
|
||||
/// extension, but it also matches files like `.rs`, which doesn't have an
|
||||
/// extension according to std::path::Path::extension.
|
||||
pub fn file_name_ext(name: &OsStr) -> Option<Cow<[u8]>> {
|
||||
pub fn file_name_ext<'a>(name: &Cow<'a, [u8]>) -> Option<Cow<'a, [u8]>> {
|
||||
if name.is_empty() {
|
||||
return None;
|
||||
}
|
||||
let name = os_str_bytes(name);
|
||||
let last_dot_at = {
|
||||
let result = name
|
||||
.iter().enumerate().rev()
|
||||
.find(|&(_, &b)| b == b'.')
|
||||
.map(|(i, _)| i);
|
||||
match result {
|
||||
let last_dot_at = match name.rfind_byte(b'.') {
|
||||
None => return None,
|
||||
Some(i) => i,
|
||||
}
|
||||
};
|
||||
Some(match name {
|
||||
Some(match *name {
|
||||
Cow::Borrowed(name) => Cow::Borrowed(&name[last_dot_at..]),
|
||||
Cow::Owned(mut name) => {
|
||||
name.drain(..last_dot_at);
|
||||
Cow::Owned(ref name) => {
|
||||
let mut name = name.clone();
|
||||
name.drain_bytes(..last_dot_at);
|
||||
Cow::Owned(name)
|
||||
}
|
||||
})
|
||||
}
|
||||
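To make the doc comment's point concrete: unlike `std::path::Path::extension`, this helper treats a name consisting only of `.rs` as having the extension `.rs`. A hypothetical in-module test sketch (hypothetical because `pathutil` is private to the crate):

```rust
// Assumes it lives next to the existing tests in pathutil.rs, where
// `file_name_ext` is in scope.
#[test]
fn ext_includes_leading_dot() {
    use std::borrow::Cow;

    let hidden = Cow::Borrowed(&b".rs"[..]);
    assert_eq!(Some(Cow::Borrowed(&b".rs"[..])), file_name_ext(&hidden));

    let normal = Cow::Borrowed(&b"lib.rs"[..]);
    assert_eq!(Some(Cow::Borrowed(&b".rs"[..])), file_name_ext(&normal));

    // No dot at all means no extension.
    let none = Cow::Borrowed(&b"Makefile"[..]);
    assert_eq!(None, file_name_ext(&none));
}
```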
|
||||
/// Return raw bytes of a path, transcoded to UTF-8 if necessary.
|
||||
pub fn path_bytes(path: &Path) -> Cow<[u8]> {
|
||||
os_str_bytes(path.as_os_str())
|
||||
}
|
||||
|
||||
/// Return the raw bytes of the given OS string, possibly transcoded to UTF-8.
|
||||
#[cfg(unix)]
|
||||
pub fn os_str_bytes(s: &OsStr) -> Cow<[u8]> {
|
||||
use std::os::unix::ffi::OsStrExt;
|
||||
Cow::Borrowed(s.as_bytes())
|
||||
}
|
||||
|
||||
/// Return the raw bytes of the given OS string, possibly transcoded to UTF-8.
|
||||
#[cfg(not(unix))]
|
||||
pub fn os_str_bytes(s: &OsStr) -> Cow<[u8]> {
|
||||
// TODO(burntsushi): On Windows, OS strings are WTF-8, which is a superset
|
||||
// of UTF-8, so even if we could get at the raw bytes, they wouldn't
|
||||
// be useful. We *must* convert to UTF-8 before doing path matching.
|
||||
// Unfortunate, but necessary.
|
||||
match s.to_string_lossy() {
|
||||
Cow::Owned(s) => Cow::Owned(s.into_bytes()),
|
||||
Cow::Borrowed(s) => Cow::Borrowed(s.as_bytes()),
|
||||
}
|
||||
}
|
||||
|
||||
/// Normalizes a path to use `/` as a separator everywhere, even on platforms
|
||||
/// that recognize other characters as separators.
|
||||
#[cfg(unix)]
|
||||
pub fn normalize_path(path: Cow<[u8]>) -> Cow<[u8]> {
|
||||
pub fn normalize_path(path: Cow<'_, [u8]>) -> Cow<'_, [u8]> {
|
||||
// UNIX only uses /, so we're good.
|
||||
path
|
||||
}
|
||||
@@ -129,7 +83,8 @@ pub fn normalize_path(mut path: Cow<[u8]>) -> Cow<[u8]> {
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::borrow::Cow;
|
||||
use std::ffi::OsStr;
|
||||
|
||||
use bstr::{ByteVec, B};
|
||||
|
||||
use super::{file_name_ext, normalize_path};
|
||||
|
||||
@@ -137,8 +92,9 @@ mod tests {
|
||||
($name:ident, $file_name:expr, $ext:expr) => {
|
||||
#[test]
|
||||
fn $name() {
|
||||
let got = file_name_ext(OsStr::new($file_name));
|
||||
assert_eq!($ext.map(|s| Cow::Borrowed(s.as_bytes())), got);
|
||||
let bs = Vec::from($file_name);
|
||||
let got = file_name_ext(&Cow::Owned(bs));
|
||||
assert_eq!($ext.map(|s| Cow::Borrowed(B(s))), got);
|
||||
}
|
||||
};
|
||||
}
|
||||
@@ -153,7 +109,8 @@ mod tests {
|
||||
($name:ident, $path:expr, $expected:expr) => {
|
||||
#[test]
|
||||
fn $name() {
|
||||
let got = normalize_path(Cow::Owned($path.to_vec()));
|
||||
let bs = Vec::from_slice($path);
|
||||
let got = normalize_path(Cow::Owned(bs));
|
||||
assert_eq!($expected.to_vec(), got.into_owned());
|
||||
}
|
||||
};
|
||||
crates/globset/src/serde_impl.rs (new file)
@@ -0,0 +1,38 @@
|
||||
use serde::de::Error;
|
||||
use serde::{Deserialize, Deserializer, Serialize, Serializer};
|
||||
|
||||
use crate::Glob;
|
||||
|
||||
impl Serialize for Glob {
|
||||
fn serialize<S: Serializer>(
|
||||
&self,
|
||||
serializer: S,
|
||||
) -> Result<S::Ok, S::Error> {
|
||||
serializer.serialize_str(self.glob())
|
||||
}
|
||||
}
|
||||
|
||||
impl<'de> Deserialize<'de> for Glob {
|
||||
fn deserialize<D: Deserializer<'de>>(
|
||||
deserializer: D,
|
||||
) -> Result<Self, D::Error> {
|
||||
let glob = <&str as Deserialize>::deserialize(deserializer)?;
|
||||
Glob::new(glob).map_err(D::Error::custom)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use Glob;
|
||||
|
||||
#[test]
|
||||
fn glob_json_works() {
|
||||
let test_glob = Glob::new("src/**/*.rs").unwrap();
|
||||
|
||||
let ser = serde_json::to_string(&test_glob).unwrap();
|
||||
assert_eq!(ser, "\"src/**/*.rs\"");
|
||||
|
||||
let de: Glob = serde_json::from_str(&ser).unwrap();
|
||||
assert_eq!(test_glob, de);
|
||||
}
|
||||
}
|
||||
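Because the new `serde_impl` serializes a `Glob` as its pattern string and deserializes by re-parsing it, a `Glob` can sit directly inside a caller's own config types. An illustrative sketch, assuming the `serde1` feature added in this patch is enabled and that `serde` (with derive) and `serde_json` are available to the caller:

```rust
use globset::Glob;
use serde::Deserialize;

#[derive(Deserialize)]
struct Config {
    // Each string is re-parsed into a Glob on deserialize; an invalid
    // pattern becomes a serde error via D::Error::custom.
    include: Vec<Glob>,
}

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let raw = r#"{ "include": ["src/**/*.rs", "Cargo.toml"] }"#;
    let config: Config = serde_json::from_str(raw)?;
    assert_eq!(config.include[0].glob(), "src/**/*.rs");
    Ok(())
}
```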
crates/grep/Cargo.toml (new file)
@@ -0,0 +1,33 @@
|
||||
[package]
|
||||
name = "grep"
|
||||
version = "0.2.10" #:version
|
||||
authors = ["Andrew Gallant <jamslam@gmail.com>"]
|
||||
description = """
|
||||
Fast line oriented regex searching as a library.
|
||||
"""
|
||||
documentation = "https://docs.rs/grep"
|
||||
homepage = "https://github.com/BurntSushi/ripgrep/tree/master/crates/grep"
|
||||
repository = "https://github.com/BurntSushi/ripgrep/tree/master/crates/grep"
|
||||
readme = "README.md"
|
||||
keywords = ["regex", "grep", "egrep", "search", "pattern"]
|
||||
license = "Unlicense OR MIT"
|
||||
edition = "2018"
|
||||
|
||||
[dependencies]
|
||||
grep-cli = { version = "0.1.7", path = "../cli" }
|
||||
grep-matcher = { version = "0.1.6", path = "../matcher" }
|
||||
grep-pcre2 = { version = "0.1.6", path = "../pcre2", optional = true }
|
||||
grep-printer = { version = "0.1.6", path = "../printer" }
|
||||
grep-regex = { version = "0.1.11", path = "../regex" }
|
||||
grep-searcher = { version = "0.1.11", path = "../searcher" }
|
||||
|
||||
[dev-dependencies]
|
||||
termcolor = "1.0.4"
|
||||
walkdir = "2.2.7"
|
||||
|
||||
[features]
|
||||
simd-accel = ["grep-searcher/simd-accel"]
|
||||
pcre2 = ["grep-pcre2"]
|
||||
|
||||
# This feature is DEPRECATED. Runtime dispatch is used for SIMD now.
|
||||
avx-accel = []
|
||||
crates/grep/README.md (new file)
@@ -0,0 +1,34 @@
|
||||
grep
|
||||
----
|
||||
ripgrep, as a library.
|
||||
|
||||
[](https://github.com/BurntSushi/ripgrep/actions)
|
||||
[](https://crates.io/crates/grep)
|
||||
|
||||
Dual-licensed under MIT or the [UNLICENSE](https://unlicense.org/).
|
||||
|
||||
|
||||
### Documentation
|
||||
|
||||
[https://docs.rs/grep](https://docs.rs/grep)
|
||||
|
||||
NOTE: This crate isn't ready for wide use yet. Ambitious individuals can
|
||||
probably piece together the parts, but there is no high level documentation
|
||||
describing how all of the pieces fit together.
|
||||
|
||||
|
||||
### Usage
|
||||
|
||||
Add this to your `Cargo.toml`:
|
||||
|
||||
```toml
|
||||
[dependencies]
|
||||
grep = "0.2"
|
||||
```
|
||||
|
||||
|
||||
### Features
|
||||
|
||||
This crate provides a `pcre2` feature (disabled by default) which, when
|
||||
enabled, re-exports the `grep-pcre2` crate as an alternative `Matcher`
|
||||
implementation to the standard `grep-regex` implementation.
|
||||
crates/grep/examples/simplegrep.rs (new file)
@@ -0,0 +1,68 @@
|
||||
use std::env;
|
||||
use std::error::Error;
|
||||
use std::ffi::OsString;
|
||||
use std::process;
|
||||
|
||||
use grep::cli;
|
||||
use grep::printer::{ColorSpecs, StandardBuilder};
|
||||
use grep::regex::RegexMatcher;
|
||||
use grep::searcher::{BinaryDetection, SearcherBuilder};
|
||||
use termcolor::ColorChoice;
|
||||
use walkdir::WalkDir;
|
||||
|
||||
fn main() {
|
||||
if let Err(err) = try_main() {
|
||||
eprintln!("{}", err);
|
||||
process::exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
fn try_main() -> Result<(), Box<dyn Error>> {
|
||||
let mut args: Vec<OsString> = env::args_os().collect();
|
||||
if args.len() < 2 {
|
||||
return Err("Usage: simplegrep <pattern> [<path> ...]".into());
|
||||
}
|
||||
if args.len() == 2 {
|
||||
args.push(OsString::from("./"));
|
||||
}
|
||||
search(cli::pattern_from_os(&args[1])?, &args[2..])
|
||||
}
|
||||
|
||||
fn search(pattern: &str, paths: &[OsString]) -> Result<(), Box<dyn Error>> {
|
||||
let matcher = RegexMatcher::new_line_matcher(&pattern)?;
|
||||
let mut searcher = SearcherBuilder::new()
|
||||
.binary_detection(BinaryDetection::quit(b'\x00'))
|
||||
.line_number(false)
|
||||
.build();
|
||||
let mut printer = StandardBuilder::new()
|
||||
.color_specs(ColorSpecs::default_with_color())
|
||||
.build(cli::stdout(if cli::is_tty_stdout() {
|
||||
ColorChoice::Auto
|
||||
} else {
|
||||
ColorChoice::Never
|
||||
}));
|
||||
|
||||
for path in paths {
|
||||
for result in WalkDir::new(path) {
|
||||
let dent = match result {
|
||||
Ok(dent) => dent,
|
||||
Err(err) => {
|
||||
eprintln!("{}", err);
|
||||
continue;
|
||||
}
|
||||
};
|
||||
if !dent.file_type().is_file() {
|
||||
continue;
|
||||
}
|
||||
let result = searcher.search_path(
|
||||
&matcher,
|
||||
dent.path(),
|
||||
printer.sink_with_path(&matcher, dent.path()),
|
||||
);
|
||||
if let Err(err) = result {
|
||||
eprintln!("{}: {}", dent.path().display(), err);
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
crates/grep/src/lib.rs (new file)
@@ -0,0 +1,23 @@
|
||||
/*!
|
||||
ripgrep, as a library.
|
||||
|
||||
This library is intended to provide a high level facade to the crates that
|
||||
make up ripgrep's core searching routines. However, there is no high level
|
||||
documentation available yet guiding users on how to fit all of the pieces
|
||||
together.
|
||||
|
||||
Every public API item in the constituent crates is documented, but examples
|
||||
are sparse.
|
||||
|
||||
A cookbook and a guide are planned.
|
||||
*/
|
||||
|
||||
#![deny(missing_docs)]
|
||||
|
||||
pub extern crate grep_cli as cli;
|
||||
pub extern crate grep_matcher as matcher;
|
||||
#[cfg(feature = "pcre2")]
|
||||
pub extern crate grep_pcre2 as pcre2;
|
||||
pub extern crate grep_printer as printer;
|
||||
pub extern crate grep_regex as regex;
|
||||
pub extern crate grep_searcher as searcher;
|
||||
@@ -1,39 +1,38 @@
|
||||
[package]
|
||||
name = "ignore"
|
||||
version = "0.3.1" #:version
|
||||
version = "0.4.19" #:version
|
||||
authors = ["Andrew Gallant <jamslam@gmail.com>"]
|
||||
description = """
|
||||
A fast library for efficiently matching ignore files such as `.gitignore`
|
||||
against file paths.
|
||||
"""
|
||||
documentation = "https://docs.rs/ignore"
|
||||
homepage = "https://github.com/BurntSushi/ripgrep/tree/master/ignore"
|
||||
repository = "https://github.com/BurntSushi/ripgrep/tree/master/ignore"
|
||||
homepage = "https://github.com/BurntSushi/ripgrep/tree/master/crates/ignore"
|
||||
repository = "https://github.com/BurntSushi/ripgrep/tree/master/crates/ignore"
|
||||
readme = "README.md"
|
||||
keywords = ["glob", "ignore", "gitignore", "pattern", "file"]
|
||||
license = "Unlicense/MIT"
|
||||
license = "Unlicense OR MIT"
|
||||
edition = "2018"
|
||||
|
||||
[lib]
|
||||
name = "ignore"
|
||||
bench = false
|
||||
|
||||
[dependencies]
|
||||
crossbeam = "0.3"
|
||||
globset = { version = "0.3.0", path = "../globset" }
|
||||
lazy_static = "1"
|
||||
log = "0.4"
|
||||
memchr = "2"
|
||||
regex = "0.2.1"
|
||||
same-file = "1"
|
||||
thread_local = "0.3.2"
|
||||
walkdir = "2"
|
||||
globset = { version = "0.4.10", path = "../globset" }
|
||||
lazy_static = "1.1"
|
||||
log = "0.4.5"
|
||||
memchr = "2.1"
|
||||
regex = "1.1"
|
||||
same-file = "1.0.4"
|
||||
thread_local = "1"
|
||||
walkdir = "2.2.7"
|
||||
|
||||
[target.'cfg(windows)'.dependencies.winapi]
|
||||
version = "0.3"
|
||||
features = ["std", "winnt"]
|
||||
[target.'cfg(windows)'.dependencies.winapi-util]
|
||||
version = "0.1.2"
|
||||
|
||||
[dev-dependencies]
|
||||
tempdir = "0.3.5"
|
||||
crossbeam-channel = "0.5.0"
|
||||
|
||||
[features]
|
||||
simd-accel = ["globset/simd-accel"]
|
||||
@@ -4,11 +4,10 @@ The ignore crate provides a fast recursive directory iterator that respects
|
||||
various filters such as globs, file types and `.gitignore` files. This crate
|
||||
also provides lower level direct access to gitignore and file type matchers.
|
||||
|
||||
[](https://travis-ci.org/BurntSushi/ripgrep)
|
||||
[](https://ci.appveyor.com/project/BurntSushi/ripgrep)
|
||||
[](https://github.com/BurntSushi/ripgrep/actions)
|
||||
[](https://crates.io/crates/ignore)
|
||||
|
||||
Dual-licensed under MIT or the [UNLICENSE](http://unlicense.org).
|
||||
Dual-licensed under MIT or the [UNLICENSE](https://unlicense.org/).
|
||||
|
||||
### Documentation
|
||||
|
||||
@@ -20,13 +19,7 @@ Add this to your `Cargo.toml`:
|
||||
|
||||
```toml
|
||||
[dependencies]
|
||||
ignore = "0.3"
|
||||
```
|
||||
|
||||
and this to your crate root:
|
||||
|
||||
```rust
|
||||
extern crate ignore;
|
||||
ignore = "0.4"
|
||||
```
|
||||
|
||||
### Example
|
||||
@@ -1,17 +1,8 @@
|
||||
#![allow(dead_code, unused_imports, unused_mut, unused_variables)]
|
||||
|
||||
extern crate crossbeam;
|
||||
extern crate ignore;
|
||||
extern crate walkdir;
|
||||
|
||||
use std::env;
|
||||
use std::io::{self, Write};
|
||||
use std::path::Path;
|
||||
use std::sync::Arc;
|
||||
use std::sync::atomic::{AtomicUsize, Ordering};
|
||||
use std::thread;
|
||||
|
||||
use crossbeam::sync::MsQueue;
|
||||
use ignore::WalkBuilder;
|
||||
use walkdir::WalkDir;
|
||||
|
||||
@@ -19,7 +10,7 @@ fn main() {
|
||||
let mut path = env::args().nth(1).unwrap();
|
||||
let mut parallel = false;
|
||||
let mut simple = false;
|
||||
let queue: Arc<MsQueue<Option<DirEntry>>> = Arc::new(MsQueue::new());
|
||||
let (tx, rx) = crossbeam_channel::bounded::<DirEntry>(100);
|
||||
if path == "parallel" {
|
||||
path = env::args().nth(2).unwrap();
|
||||
parallel = true;
|
||||
@@ -28,10 +19,9 @@ fn main() {
|
||||
simple = true;
|
||||
}
|
||||
|
||||
let stdout_queue = queue.clone();
|
||||
let stdout_thread = thread::spawn(move || {
|
||||
let mut stdout = io::BufWriter::new(io::stdout());
|
||||
while let Some(dent) = stdout_queue.pop() {
|
||||
for dent in rx {
|
||||
write_path(&mut stdout, dent.path());
|
||||
}
|
||||
});
|
||||
@@ -39,28 +29,26 @@ fn main() {
|
||||
if parallel {
|
||||
let walker = WalkBuilder::new(path).threads(6).build_parallel();
|
||||
walker.run(|| {
|
||||
let queue = queue.clone();
|
||||
let tx = tx.clone();
|
||||
Box::new(move |result| {
|
||||
use ignore::WalkState::*;
|
||||
|
||||
queue.push(Some(DirEntry::Y(result.unwrap())));
|
||||
tx.send(DirEntry::Y(result.unwrap())).unwrap();
|
||||
Continue
|
||||
})
|
||||
});
|
||||
} else if simple {
|
||||
let mut stdout = io::BufWriter::new(io::stdout());
|
||||
let walker = WalkDir::new(path);
|
||||
for result in walker {
|
||||
queue.push(Some(DirEntry::X(result.unwrap())));
|
||||
tx.send(DirEntry::X(result.unwrap())).unwrap();
|
||||
}
|
||||
} else {
|
||||
let mut stdout = io::BufWriter::new(io::stdout());
|
||||
let walker = WalkBuilder::new(path).build();
|
||||
for result in walker {
|
||||
queue.push(Some(DirEntry::Y(result.unwrap())));
|
||||
tx.send(DirEntry::Y(result.unwrap())).unwrap();
|
||||
}
|
||||
}
|
||||
queue.push(None);
|
||||
drop(tx);
|
||||
stdout_thread.join().unwrap();
|
||||
}
|
||||
|
||||
crates/ignore/src/default_types.rs (new file)
@@ -0,0 +1,316 @@
|
||||
/// This list represents the default file types that ripgrep ships with. In
|
||||
/// general, any file format is fair game, although it should generally be
|
||||
/// limited to reasonably popular open formats. For other cases, you can add
|
||||
/// types to each invocation of ripgrep with the '--type-add' flag.
|
||||
///
|
||||
/// If you would like to add or improve this list, please file a PR:
|
||||
/// <https://github.com/BurntSushi/ripgrep>.
|
||||
///
|
||||
/// Please try to keep this list sorted lexicographically and wrapped to 79
|
||||
/// columns (inclusive).
|
||||
#[rustfmt::skip]
|
||||
pub const DEFAULT_TYPES: &[(&str, &[&str])] = &[
|
||||
("agda", &["*.agda", "*.lagda"]),
|
||||
("aidl", &["*.aidl"]),
|
||||
("amake", &["*.mk", "*.bp"]),
|
||||
("asciidoc", &["*.adoc", "*.asc", "*.asciidoc"]),
|
||||
("asm", &["*.asm", "*.s", "*.S"]),
|
||||
("asp", &[
|
||||
"*.aspx", "*.aspx.cs", "*.aspx.vb", "*.ascx", "*.ascx.cs",
|
||||
"*.ascx.vb", "*.asp"
|
||||
]),
|
||||
("ats", &["*.ats", "*.dats", "*.sats", "*.hats"]),
|
||||
("avro", &["*.avdl", "*.avpr", "*.avsc"]),
|
||||
("awk", &["*.awk"]),
|
||||
("bazel", &[
|
||||
"*.bazel", "*.bzl", "*.BUILD", "*.bazelrc", "BUILD", "MODULE.bazel",
|
||||
"WORKSPACE", "WORKSPACE.bazel",
|
||||
]),
|
||||
("bitbake", &["*.bb", "*.bbappend", "*.bbclass", "*.conf", "*.inc"]),
|
||||
("brotli", &["*.br"]),
|
||||
("buildstream", &["*.bst"]),
|
||||
("bzip2", &["*.bz2", "*.tbz2"]),
|
||||
("c", &["*.[chH]", "*.[chH].in", "*.cats"]),
|
||||
("cabal", &["*.cabal"]),
|
||||
("candid", &["*.did"]),
|
||||
("carp", &["*.carp"]),
|
||||
("cbor", &["*.cbor"]),
|
||||
("ceylon", &["*.ceylon"]),
|
||||
("clojure", &["*.clj", "*.cljc", "*.cljs", "*.cljx"]),
|
||||
("cmake", &["*.cmake", "CMakeLists.txt"]),
|
||||
("coffeescript", &["*.coffee"]),
|
||||
("config", &["*.cfg", "*.conf", "*.config", "*.ini"]),
|
||||
("coq", &["*.v"]),
|
||||
("cpp", &[
|
||||
"*.[ChH]", "*.cc", "*.[ch]pp", "*.[ch]xx", "*.hh", "*.inl",
|
||||
"*.[ChH].in", "*.cc.in", "*.[ch]pp.in", "*.[ch]xx.in", "*.hh.in",
|
||||
]),
|
||||
("creole", &["*.creole"]),
|
||||
("crystal", &["Projectfile", "*.cr", "*.ecr", "shard.yml"]),
|
||||
("cs", &["*.cs"]),
|
||||
("csharp", &["*.cs"]),
|
||||
("cshtml", &["*.cshtml"]),
|
||||
("css", &["*.css", "*.scss"]),
|
||||
("csv", &["*.csv"]),
|
||||
("cuda", &["*.cu", "*.cuh"]),
|
||||
("cython", &["*.pyx", "*.pxi", "*.pxd"]),
|
||||
("d", &["*.d"]),
|
||||
("dart", &["*.dart"]),
|
||||
("devicetree", &["*.dts", "*.dtsi"]),
|
||||
("dhall", &["*.dhall"]),
|
||||
("diff", &["*.patch", "*.diff"]),
|
||||
("docker", &["*Dockerfile*"]),
|
||||
("dts", &["*.dts", "*.dtsi"]),
|
||||
("dvc", &["Dvcfile", "*.dvc"]),
|
||||
("ebuild", &["*.ebuild"]),
|
||||
("edn", &["*.edn"]),
|
||||
("elisp", &["*.el"]),
|
||||
("elixir", &["*.ex", "*.eex", "*.exs"]),
|
||||
("elm", &["*.elm"]),
|
||||
("erb", &["*.erb"]),
|
||||
("erlang", &["*.erl", "*.hrl"]),
|
||||
("fennel", &["*.fnl"]),
|
||||
("fidl", &["*.fidl"]),
|
||||
("fish", &["*.fish"]),
|
||||
("flatbuffers", &["*.fbs"]),
|
||||
("fortran", &[
|
||||
"*.f", "*.F", "*.f77", "*.F77", "*.pfo",
|
||||
"*.f90", "*.F90", "*.f95", "*.F95",
|
||||
]),
|
||||
("fsharp", &["*.fs", "*.fsx", "*.fsi"]),
|
||||
("fut", &["*.fut"]),
|
||||
("gap", &["*.g", "*.gap", "*.gi", "*.gd", "*.tst"]),
|
||||
("gn", &["*.gn", "*.gni"]),
|
||||
("go", &["*.go"]),
|
||||
("gradle", &["*.gradle"]),
|
||||
("groovy", &["*.groovy", "*.gradle"]),
|
||||
("gzip", &["*.gz", "*.tgz"]),
|
||||
("h", &["*.h", "*.hh", "*.hpp"]),
|
||||
("haml", &["*.haml"]),
|
||||
("hare", &["*.ha"]),
|
||||
("haskell", &["*.hs", "*.lhs", "*.cpphs", "*.c2hs", "*.hsc"]),
|
||||
("hbs", &["*.hbs"]),
|
||||
("hs", &["*.hs", "*.lhs"]),
|
||||
("html", &["*.htm", "*.html", "*.ejs"]),
|
||||
("hy", &["*.hy"]),
|
||||
("idris", &["*.idr", "*.lidr"]),
|
||||
("janet", &["*.janet"]),
|
||||
("java", &["*.java", "*.jsp", "*.jspx", "*.properties"]),
|
||||
("jinja", &["*.j2", "*.jinja", "*.jinja2"]),
|
||||
("jl", &["*.jl"]),
|
||||
("js", &["*.js", "*.jsx", "*.vue", "*.cjs", "*.mjs"]),
|
||||
("json", &["*.json", "composer.lock"]),
|
||||
("jsonl", &["*.jsonl"]),
|
||||
("julia", &["*.jl"]),
|
||||
("jupyter", &["*.ipynb", "*.jpynb"]),
|
||||
("k", &["*.k"]),
|
||||
("kotlin", &["*.kt", "*.kts"]),
|
||||
("less", &["*.less"]),
|
||||
("license", &[
|
||||
// General
|
||||
"COPYING", "COPYING[.-]*",
|
||||
"COPYRIGHT", "COPYRIGHT[.-]*",
|
||||
"EULA", "EULA[.-]*",
|
||||
"licen[cs]e", "licen[cs]e.*",
|
||||
"LICEN[CS]E", "LICEN[CS]E[.-]*", "*[.-]LICEN[CS]E*",
|
||||
"NOTICE", "NOTICE[.-]*",
|
||||
"PATENTS", "PATENTS[.-]*",
|
||||
"UNLICEN[CS]E", "UNLICEN[CS]E[.-]*",
|
||||
// GPL (gpl.txt, etc.)
|
||||
"agpl[.-]*",
|
||||
"gpl[.-]*",
|
||||
"lgpl[.-]*",
|
||||
// Other license-specific (APACHE-2.0.txt, etc.)
|
||||
"AGPL-*[0-9]*",
|
||||
"APACHE-*[0-9]*",
|
||||
"BSD-*[0-9]*",
|
||||
"CC-BY-*",
|
||||
"GFDL-*[0-9]*",
|
||||
"GNU-*[0-9]*",
|
||||
"GPL-*[0-9]*",
|
||||
"LGPL-*[0-9]*",
|
||||
"MIT-*[0-9]*",
|
||||
"MPL-*[0-9]*",
|
||||
"OFL-*[0-9]*",
|
||||
]),
|
||||
("lilypond", &["*.ly", "*.ily"]),
|
||||
("lisp", &["*.el", "*.jl", "*.lisp", "*.lsp", "*.sc", "*.scm"]),
|
||||
("lock", &["*.lock", "package-lock.json"]),
|
||||
("log", &["*.log"]),
|
||||
("lua", &["*.lua"]),
|
||||
("lz4", &["*.lz4"]),
|
||||
("lzma", &["*.lzma"]),
|
||||
("m4", &["*.ac", "*.m4"]),
|
||||
("make", &[
|
||||
"[Gg][Nn][Uu]makefile", "[Mm]akefile",
|
||||
"[Gg][Nn][Uu]makefile.am", "[Mm]akefile.am",
|
||||
"[Gg][Nn][Uu]makefile.in", "[Mm]akefile.in",
|
||||
"*.mk", "*.mak"
|
||||
]),
|
||||
("mako", &["*.mako", "*.mao"]),
|
||||
("man", &["*.[0-9lnpx]", "*.[0-9][cEFMmpSx]"]),
|
||||
("markdown", &["*.markdown", "*.md", "*.mdown", "*.mkd", "*.mkdn"]),
|
||||
("matlab", &["*.m"]),
|
||||
("md", &["*.markdown", "*.md", "*.mdown", "*.mkd", "*.mkdn"]),
|
||||
("meson", &["meson.build", "meson_options.txt"]),
|
||||
("minified", &["*.min.html", "*.min.css", "*.min.js"]),
|
||||
("mint", &["*.mint"]),
|
||||
("mk", &["mkfile"]),
|
||||
("ml", &["*.ml"]),
|
||||
("motoko", &["*.mo"]),
|
||||
("msbuild", &[
|
||||
"*.csproj", "*.fsproj", "*.vcxproj", "*.proj", "*.props", "*.targets",
|
||||
]),
|
||||
("nim", &["*.nim", "*.nimf", "*.nimble", "*.nims"]),
|
||||
("nix", &["*.nix"]),
|
||||
("objc", &["*.h", "*.m"]),
|
||||
("objcpp", &["*.h", "*.mm"]),
|
||||
("ocaml", &["*.ml", "*.mli", "*.mll", "*.mly"]),
|
||||
("org", &["*.org", "*.org_archive"]),
|
||||
("pants", &["BUILD"]),
|
||||
("pascal", &["*.pas", "*.dpr", "*.lpr", "*.pp", "*.inc"]),
|
||||
("pdf", &["*.pdf"]),
|
||||
("perl", &["*.perl", "*.pl", "*.PL", "*.plh", "*.plx", "*.pm", "*.t"]),
|
||||
("php", &[
|
||||
// note that PHP 6 doesn't exist
|
||||
// See: https://wiki.php.net/rfc/php6
|
||||
"*.php", "*.php3", "*.php4", "*.php5", "*.php7", "*.php8",
|
||||
"*.pht", "*.phtml"
|
||||
]),
|
||||
("po", &["*.po"]),
|
||||
("pod", &["*.pod"]),
|
||||
("postscript", &["*.eps", "*.ps"]),
|
||||
("protobuf", &["*.proto"]),
|
||||
("ps", &["*.cdxml", "*.ps1", "*.ps1xml", "*.psd1", "*.psm1"]),
|
||||
("puppet", &["*.epp", "*.erb", "*.pp", "*.rb"]),
|
||||
("purs", &["*.purs"]),
|
||||
("py", &["*.py"]),
|
||||
("qmake", &["*.pro", "*.pri", "*.prf"]),
|
||||
("qml", &["*.qml"]),
|
||||
("r", &["*.R", "*.r", "*.Rmd", "*.Rnw"]),
|
||||
("racket", &["*.rkt"]),
|
||||
("rdoc", &["*.rdoc"]),
|
||||
("readme", &["README*", "*README"]),
|
||||
("reasonml", &["*.re", "*.rei"]),
|
||||
("red", &["*.r", "*.red", "*.reds"]),
|
||||
("rescript", &["*.res", "*.resi"]),
|
||||
("robot", &["*.robot"]),
|
||||
("rst", &["*.rst"]),
|
||||
("ruby", &[
|
||||
// Idiomatic files
|
||||
"config.ru", "Gemfile", ".irbrc", "Rakefile",
|
||||
// Extensions
|
||||
"*.gemspec", "*.rb", "*.rbw"
|
||||
]),
|
||||
("rust", &["*.rs"]),
|
||||
("sass", &["*.sass", "*.scss"]),
|
||||
("scala", &["*.scala", "*.sbt"]),
|
||||
("sh", &[
|
||||
// Portable/misc. init files
|
||||
".login", ".logout", ".profile", "profile",
|
||||
// bash-specific init files
|
||||
".bash_login", "bash_login",
|
||||
".bash_logout", "bash_logout",
|
||||
".bash_profile", "bash_profile",
|
||||
".bashrc", "bashrc", "*.bashrc",
|
||||
// csh-specific init files
|
||||
".cshrc", "*.cshrc",
|
||||
// ksh-specific init files
|
||||
".kshrc", "*.kshrc",
|
||||
// tcsh-specific init files
|
||||
".tcshrc",
|
||||
// zsh-specific init files
|
||||
".zshenv", "zshenv",
|
||||
".zlogin", "zlogin",
|
||||
".zlogout", "zlogout",
|
||||
".zprofile", "zprofile",
|
||||
".zshrc", "zshrc",
|
||||
// Extensions
|
||||
"*.bash", "*.csh", "*.ksh", "*.sh", "*.tcsh", "*.zsh",
|
||||
]),
|
||||
("slim", &["*.skim", "*.slim", "*.slime"]),
|
||||
("smarty", &["*.tpl"]),
|
||||
("sml", &["*.sml", "*.sig"]),
|
||||
("solidity", &["*.sol"]),
|
||||
("soy", &["*.soy"]),
|
||||
("spark", &["*.spark"]),
|
||||
("spec", &["*.spec"]),
|
||||
("sql", &["*.sql", "*.psql"]),
|
||||
("stylus", &["*.styl"]),
|
||||
("sv", &["*.v", "*.vg", "*.sv", "*.svh", "*.h"]),
|
||||
("svg", &["*.svg"]),
|
||||
("swift", &["*.swift"]),
|
||||
("swig", &["*.def", "*.i"]),
|
||||
("systemd", &[
|
||||
"*.automount", "*.conf", "*.device", "*.link", "*.mount", "*.path",
|
||||
"*.scope", "*.service", "*.slice", "*.socket", "*.swap", "*.target",
|
||||
"*.timer",
|
||||
]),
|
||||
("taskpaper", &["*.taskpaper"]),
|
||||
("tcl", &["*.tcl"]),
|
||||
("tex", &["*.tex", "*.ltx", "*.cls", "*.sty", "*.bib", "*.dtx", "*.ins"]),
|
||||
("texinfo", &["*.texi"]),
|
||||
("textile", &["*.textile"]),
|
||||
("tf", &["*.tf"]),
|
||||
("thrift", &["*.thrift"]),
|
||||
("toml", &["*.toml", "Cargo.lock"]),
|
||||
("ts", &["*.ts", "*.tsx", "*.cts", "*.mts"]),
|
||||
("twig", &["*.twig"]),
|
||||
("txt", &["*.txt"]),
|
||||
("typoscript", &["*.typoscript", "*.ts"]),
|
||||
("vala", &["*.vala"]),
|
||||
("vb", &["*.vb"]),
|
||||
("vcl", &["*.vcl"]),
|
||||
("verilog", &["*.v", "*.vh", "*.sv", "*.svh"]),
|
||||
("vhdl", &["*.vhd", "*.vhdl"]),
|
||||
("vim", &[
|
||||
"*.vim", ".vimrc", ".gvimrc", "vimrc", "gvimrc", "_vimrc", "_gvimrc",
|
||||
]),
|
||||
("vimscript", &[
|
||||
"*.vim", ".vimrc", ".gvimrc", "vimrc", "gvimrc", "_vimrc", "_gvimrc",
|
||||
]),
|
||||
("webidl", &["*.idl", "*.webidl", "*.widl"]),
|
||||
("wiki", &["*.mediawiki", "*.wiki"]),
|
||||
("xml", &[
|
||||
"*.xml", "*.xml.dist", "*.dtd", "*.xsl", "*.xslt", "*.xsd", "*.xjb",
|
||||
"*.rng", "*.sch", "*.xhtml",
|
||||
]),
|
||||
("xz", &["*.xz", "*.txz"]),
|
||||
("yacc", &["*.y"]),
|
||||
("yaml", &["*.yaml", "*.yml"]),
|
||||
("yang", &["*.yang"]),
|
||||
("z", &["*.Z"]),
|
||||
("zig", &["*.zig"]),
|
||||
("zsh", &[
|
||||
".zshenv", "zshenv",
|
||||
".zlogin", "zlogin",
|
||||
".zlogout", "zlogout",
|
||||
".zprofile", "zprofile",
|
||||
".zshrc", "zshrc",
|
||||
"*.zsh",
|
||||
]),
|
||||
("zstd", &["*.zst", "*.zstd"]),
|
||||
];
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::DEFAULT_TYPES;

    #[test]
    fn default_types_are_sorted() {
        let mut names = DEFAULT_TYPES.iter().map(|(name, _exts)| name);

        let Some(mut previous_name) = names.next() else { return; };

        for name in names {
            assert!(
                name > previous_name,
                r#""{}" should be sorted before "{}" in `DEFAULT_TYPES`"#,
                name,
                previous_name
            );

            previous_name = name;
        }
    }
}

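For readers skimming the table above, here is a minimal, hypothetical sketch of how a `(name, globs)` table with this shape could be queried for a single type. The exact element types are an assumption; only the tuple shape is taken from the `default_types_are_sorted` test.

// Hypothetical helper, not part of the diff. Assumes the table is a slice of
// (&str, &[&str]) pairs, as the (name, exts) destructuring above suggests.
fn globs_for<'a>(
    types: &[(&'a str, &'a [&'a str])],
    name: &str,
) -> Option<&'a [&'a str]> {
    // The table is kept sorted by name (the test enforces this), so a binary
    // search would also work; a linear scan keeps the sketch simple.
    for &(n, globs) in types {
        if n == name {
            return Some(globs);
        }
    }
    None
}
// e.g. globs_for(DEFAULT_TYPES, "rust") would yield the ["*.rs"] entry,
// assuming DEFAULT_TYPES has the type sketched above.
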
@@ -14,15 +14,18 @@
// well.

use std::collections::HashMap;
use std::ffi::{OsString, OsStr};
use std::ffi::{OsStr, OsString};
use std::fs::{File, FileType};
use std::io::{self, BufRead};
use std::path::{Path, PathBuf};
use std::sync::{Arc, RwLock};

use gitignore::{self, Gitignore, GitignoreBuilder};
use pathutil::{is_hidden, strip_prefix};
use overrides::{self, Override};
use types::{self, Types};
use {Error, Match, PartialErrorBuilder};
use crate::gitignore::{self, Gitignore, GitignoreBuilder};
use crate::overrides::{self, Override};
use crate::pathutil::{is_hidden, strip_prefix};
use crate::types::{self, Types};
use crate::walk::DirEntry;
use crate::{Error, Match, PartialErrorBuilder};

/// IgnoreMatch represents information about where a match came from when using
/// the `Ignore` matcher.
@@ -65,19 +68,19 @@ struct IgnoreOptions {
hidden: bool,
/// Whether to read .ignore files.
ignore: bool,
/// Whether to respect any ignore files in parent directories.
parents: bool,
/// Whether to read git's global gitignore file.
git_global: bool,
/// Whether to read .gitignore files.
git_ignore: bool,
/// Whether to read .git/info/exclude files.
git_exclude: bool,
}

impl IgnoreOptions {
/// Returns true if at least one type of ignore rules should be matched.
fn has_any_ignore_options(&self) -> bool {
self.ignore || self.git_global || self.git_ignore || self.git_exclude
}
/// Whether to ignore files case insensitively
ignore_case_insensitive: bool,
/// Whether a git repository must be present in order to apply any
/// git-related ignore rules.
require_git: bool,
}

/// Ignore is a matcher useful for recursively walking one or more directories.
@@ -158,6 +161,15 @@ impl Ignore {
&self,
path: P,
) -> (Ignore, Option<Error>) {
if !self.0.opts.parents
&& !self.0.opts.git_ignore
&& !self.0.opts.git_exclude
&& !self.0.opts.git_global
{
// If we never need info from parent directories, then don't do
// anything.
return (self.clone(), None);
}
if !self.is_root() {
panic!("Ignore::add_parents called on non-root matcher");
}
@@ -190,6 +202,12 @@ impl Ignore {
errs.maybe_push(err);
igtmp.is_absolute_parent = true;
igtmp.absolute_base = Some(absolute_base.clone());
igtmp.has_git =
if self.0.opts.require_git && self.0.opts.git_ignore {
parent.join(".git").exists()
} else {
false
};
ig = Ignore(Arc::new(igtmp));
compiled.insert(parent.as_os_str().to_os_string(), ig.clone());
}
@@ -214,37 +232,71 @@ impl Ignore {

/// Like add_child, but takes a full path and returns an IgnoreInner.
fn add_child_path(&self, dir: &Path) -> (IgnoreInner, Option<Error>) {
let mut errs = PartialErrorBuilder::default();
let custom_ig_matcher =
let git_type = if self.0.opts.require_git
&& (self.0.opts.git_ignore || self.0.opts.git_exclude)
{
let (m, err) =
create_gitignore(&dir, &self.0.custom_ignore_filenames);
dir.join(".git").metadata().ok().map(|md| md.file_type())
} else {
None
};
let has_git = git_type.map(|_| true).unwrap_or(false);

let mut errs = PartialErrorBuilder::default();
let custom_ig_matcher = if self.0.custom_ignore_filenames.is_empty() {
Gitignore::empty()
} else {
let (m, err) = create_gitignore(
&dir,
&dir,
&self.0.custom_ignore_filenames,
self.0.opts.ignore_case_insensitive,
);
errs.maybe_push(err);
m
};
let ig_matcher =
if !self.0.opts.ignore {
let ig_matcher = if !self.0.opts.ignore {
Gitignore::empty()
} else {
let (m, err) = create_gitignore(&dir, &[".ignore"]);
let (m, err) = create_gitignore(
&dir,
&dir,
&[".ignore"],
self.0.opts.ignore_case_insensitive,
);
errs.maybe_push(err);
m
};
let gi_matcher =
if !self.0.opts.git_ignore {
let gi_matcher = if !self.0.opts.git_ignore {
Gitignore::empty()
} else {
let (m, err) = create_gitignore(&dir, &[".gitignore"]);
let (m, err) = create_gitignore(
&dir,
&dir,
&[".gitignore"],
self.0.opts.ignore_case_insensitive,
);
errs.maybe_push(err);
m
};
let gi_exclude_matcher =
if !self.0.opts.git_exclude {
let gi_exclude_matcher = if !self.0.opts.git_exclude {
Gitignore::empty()
} else {
let (m, err) = create_gitignore(&dir, &[".git/info/exclude"]);
match resolve_git_commondir(dir, git_type) {
Ok(git_dir) => {
let (m, err) = create_gitignore(
&dir,
&git_dir,
&["info/exclude"],
self.0.opts.ignore_case_insensitive,
);
errs.maybe_push(err);
m
}
Err(err) => {
errs.maybe_push(err);
Gitignore::empty()
}
}
};
let ig = IgnoreInner {
compiled: self.0.compiled.clone(),
@@ -261,17 +313,44 @@ impl Ignore {
git_global_matcher: self.0.git_global_matcher.clone(),
git_ignore_matcher: gi_matcher,
git_exclude_matcher: gi_exclude_matcher,
has_git: dir.join(".git").is_dir(),
has_git,
opts: self.0.opts,
};
(ig, errs.into_error_option())
}

/// Returns true if at least one type of ignore rule should be matched.
fn has_any_ignore_rules(&self) -> bool {
let opts = self.0.opts;
let has_custom_ignore_files =
!self.0.custom_ignore_filenames.is_empty();
let has_explicit_ignores = !self.0.explicit_ignores.is_empty();

opts.ignore
|| opts.git_global
|| opts.git_ignore
|| opts.git_exclude
|| has_custom_ignore_files
|| has_explicit_ignores
}

/// Like `matched`, but works with a directory entry instead.
pub fn matched_dir_entry<'a>(
&'a self,
dent: &DirEntry,
) -> Match<IgnoreMatch<'a>> {
let m = self.matched(dent.path(), dent.is_dir());
if m.is_none() && self.0.opts.hidden && is_hidden(dent) {
return Match::Ignore(IgnoreMatch::hidden());
}
m
}

/// Returns a match indicating whether the given file path should be
/// ignored or not.
///
/// The match contains information about its origin.
pub fn matched<'a, P: AsRef<Path>>(
fn matched<'a, P: AsRef<Path>>(
&'a self,
path: P,
is_dir: bool,
@@ -287,15 +366,17 @@ impl Ignore {
// return that result immediately. Overrides have the highest
// precedence.
if !self.0.overrides.is_empty() {
let mat =
self.0.overrides.matched(path, is_dir)
let mat = self
.0
.overrides
.matched(path, is_dir)
.map(IgnoreMatch::overrides);
if !mat.is_none() {
return mat;
}
}
let mut whitelisted = Match::None;
if self.0.opts.has_any_ignore_options() {
if self.has_any_ignore_rules() {
let mat = self.matched_ignore(path, is_dir);
if mat.is_ignore() {
return mat;
@@ -312,9 +393,6 @@ impl Ignore {
whitelisted = mat;
}
}
if whitelisted.is_none() && self.0.opts.hidden && is_hidden(path) {
return Match::Ignore(IgnoreMatch::hidden());
}
whitelisted
}

@@ -325,72 +403,102 @@ impl Ignore {
path: &Path,
is_dir: bool,
) -> Match<IgnoreMatch<'a>> {
let (mut m_custom_ignore, mut m_ignore, mut m_gi, mut m_gi_exclude, mut m_explicit) =
(Match::None, Match::None, Match::None, Match::None, Match::None);
let (
mut m_custom_ignore,
mut m_ignore,
mut m_gi,
mut m_gi_exclude,
mut m_explicit,
) = (Match::None, Match::None, Match::None, Match::None, Match::None);
let any_git =
!self.0.opts.require_git || self.parents().any(|ig| ig.0.has_git);
let mut saw_git = false;
for ig in self.parents().take_while(|ig| !ig.0.is_absolute_parent) {
if m_custom_ignore.is_none() {
m_custom_ignore =
ig.0.custom_ignore_matcher.matched(path, is_dir)
ig.0.custom_ignore_matcher
.matched(path, is_dir)
.map(IgnoreMatch::gitignore);
}
if m_ignore.is_none() {
m_ignore =
ig.0.ignore_matcher.matched(path, is_dir)
ig.0.ignore_matcher
.matched(path, is_dir)
.map(IgnoreMatch::gitignore);
}
if !saw_git && m_gi.is_none() {
if any_git && !saw_git && m_gi.is_none() {
m_gi =
ig.0.git_ignore_matcher.matched(path, is_dir)
ig.0.git_ignore_matcher
.matched(path, is_dir)
.map(IgnoreMatch::gitignore);
}
if !saw_git && m_gi_exclude.is_none() {
if any_git && !saw_git && m_gi_exclude.is_none() {
m_gi_exclude =
ig.0.git_exclude_matcher.matched(path, is_dir)
ig.0.git_exclude_matcher
.matched(path, is_dir)
.map(IgnoreMatch::gitignore);
}
saw_git = saw_git || ig.0.has_git;
}
if self.0.opts.parents {
if let Some(abs_parent_path) = self.absolute_base() {
let path = abs_parent_path.join(path);
for ig in self.parents().skip_while(|ig|!ig.0.is_absolute_parent) {
for ig in
self.parents().skip_while(|ig| !ig.0.is_absolute_parent)
{
if m_custom_ignore.is_none() {
m_custom_ignore =
ig.0.custom_ignore_matcher.matched(&path, is_dir)
ig.0.custom_ignore_matcher
.matched(&path, is_dir)
.map(IgnoreMatch::gitignore);
}
if m_ignore.is_none() {
m_ignore =
ig.0.ignore_matcher.matched(&path, is_dir)
ig.0.ignore_matcher
.matched(&path, is_dir)
.map(IgnoreMatch::gitignore);
}
if !saw_git && m_gi.is_none() {
if any_git && !saw_git && m_gi.is_none() {
m_gi =
ig.0.git_ignore_matcher.matched(&path, is_dir)
ig.0.git_ignore_matcher
.matched(&path, is_dir)
.map(IgnoreMatch::gitignore);
}
if !saw_git && m_gi_exclude.is_none() {
if any_git && !saw_git && m_gi_exclude.is_none() {
m_gi_exclude =
ig.0.git_exclude_matcher.matched(&path, is_dir)
ig.0.git_exclude_matcher
.matched(&path, is_dir)
.map(IgnoreMatch::gitignore);
}
saw_git = saw_git || ig.0.has_git;
}
}
}
for gi in self.0.explicit_ignores.iter().rev() {
if !m_explicit.is_none() {
break;
}
m_explicit = gi.matched(&path, is_dir).map(IgnoreMatch::gitignore);
}
let m_global = self.0.git_global_matcher.matched(&path, is_dir)
.map(IgnoreMatch::gitignore);
let m_global = if any_git {
self.0
.git_global_matcher
.matched(&path, is_dir)
.map(IgnoreMatch::gitignore)
} else {
Match::None
};

m_custom_ignore.or(m_ignore).or(m_gi).or(m_gi_exclude).or(m_global).or(m_explicit)
m_custom_ignore
.or(m_ignore)
.or(m_gi)
.or(m_gi_exclude)
.or(m_global)
.or(m_explicit)
}

/// Returns an iterator over parent ignore matchers, including this one.
pub fn parents(&self) -> Parents {
pub fn parents(&self) -> Parents<'_> {
Parents(Some(self))
}

@@ -452,9 +560,12 @@ impl IgnoreBuilder {
opts: IgnoreOptions {
hidden: true,
ignore: true,
parents: true,
git_global: true,
git_ignore: true,
git_exclude: true,
ignore_case_insensitive: false,
require_git: true,
},
}
}
@@ -464,13 +575,16 @@ impl IgnoreBuilder {
/// The matcher returned won't match anything until ignore rules from
/// directories are added to it.
pub fn build(&self) -> Ignore {
let git_global_matcher =
if !self.opts.git_global {
let git_global_matcher = if !self.opts.git_global {
Gitignore::empty()
} else {
let (gi, err) = Gitignore::global();
let mut builder = GitignoreBuilder::new("");
builder
.case_insensitive(self.opts.ignore_case_insensitive)
.unwrap();
let (gi, err) = builder.build_global();
if let Some(err) = err {
debug!("{}", err);
log::debug!("{}", err);
}
gi
};
@@ -484,7 +598,9 @@ impl IgnoreBuilder {
is_absolute_parent: true,
absolute_base: None,
explicit_ignores: Arc::new(self.explicit_ignores.clone()),
custom_ignore_filenames: Arc::new(self.custom_ignore_filenames.clone()),
custom_ignore_filenames: Arc::new(
self.custom_ignore_filenames.clone(),
),
custom_ignore_matcher: Gitignore::empty(),
ignore_matcher: Gitignore::empty(),
git_global_matcher: Arc::new(git_global_matcher),
@@ -529,7 +645,7 @@ impl IgnoreBuilder {
/// later names.
pub fn add_custom_ignore_filename<S: AsRef<OsStr>>(
&mut self,
file_name: S
file_name: S,
) -> &mut IgnoreBuilder {
self.custom_ignore_filenames.push(file_name.as_ref().to_os_string());
self
@@ -554,6 +670,17 @@ impl IgnoreBuilder {
self
}

/// Enables reading ignore files from parent directories.
///
/// If this is enabled, then .gitignore files in parent directories of each
/// file path given are respected. Otherwise, they are ignored.
///
/// This is enabled by default.
pub fn parents(&mut self, yes: bool) -> &mut IgnoreBuilder {
self.opts.parents = yes;
self
}

/// Add a global gitignore matcher.
///
/// Its precedence is lower than both normal `.gitignore` files and
@@ -588,24 +715,64 @@ impl IgnoreBuilder {
self.opts.git_exclude = yes;
self
}

/// Whether a git repository is required to apply git-related ignore
/// rules (global rules, .gitignore and local exclude rules).
///
/// When disabled, git-related ignore rules are applied even when searching
/// outside a git repository.
pub fn require_git(&mut self, yes: bool) -> &mut IgnoreBuilder {
self.opts.require_git = yes;
self
}

/// Process ignore files case insensitively
///
/// This is disabled by default.
pub fn ignore_case_insensitive(
&mut self,
yes: bool,
) -> &mut IgnoreBuilder {
self.opts.ignore_case_insensitive = yes;
self
}
}

/// Creates a new gitignore matcher for the directory given.
///
/// Ignore globs are extracted from each of the file names in `dir` in the
/// order given (earlier names have lower precedence than later names).
/// The matcher is meant to match files below `dir`.
/// Ignore globs are extracted from each of the file names relative to
/// `dir_for_ignorefile` in the order given (earlier names have lower
/// precedence than later names).
///
/// I/O errors are ignored.
pub fn create_gitignore<T: AsRef<OsStr>>(
dir: &Path,
dir_for_ignorefile: &Path,
names: &[T],
case_insensitive: bool,
) -> (Gitignore, Option<Error>) {
let mut builder = GitignoreBuilder::new(dir);
let mut errs = PartialErrorBuilder::default();
builder.case_insensitive(case_insensitive).unwrap();
for name in names {
let gipath = dir.join(name.as_ref());
let gipath = dir_for_ignorefile.join(name.as_ref());
// This check is not necessary, but is added for performance. Namely,
// a simple stat call checking for existence can often be just a bit
// quicker than actually trying to open a file. Since the number of
// directories without ignore files likely greatly exceeds the number
// with ignore files, this check generally makes sense.
//
// However, until demonstrated otherwise, we speculatively do not do
// this on Windows since Windows is notorious for having slow file
// system operations. Namely, it's not clear whether this analysis
// makes sense on Windows.
//
// For more details: https://github.com/BurntSushi/ripgrep/pull/1381
if cfg!(windows) || gipath.exists() {
errs.maybe_push_ignore_io(builder.add(gipath));
}
}
let gi = match builder.build() {
Ok(gi) => gi,
Err(err) => {
@@ -616,17 +783,70 @@ pub fn create_gitignore<T: AsRef<OsStr>>(
(gi, errs.into_error_option())
}

/// Find the GIT_COMMON_DIR for the given git worktree.
///
/// This is the directory that may contain a private ignore file
/// "info/exclude". Unlike git, this function does *not* read environment
/// variables GIT_DIR and GIT_COMMON_DIR, because it is not clear how to use
/// them when multiple repositories are searched.
///
/// Some I/O errors are ignored.
fn resolve_git_commondir(
dir: &Path,
git_type: Option<FileType>,
) -> Result<PathBuf, Option<Error>> {
let git_dir_path = || dir.join(".git");
let git_dir = git_dir_path();
if !git_type.map_or(false, |ft| ft.is_file()) {
return Ok(git_dir);
}
let file = match File::open(git_dir) {
Ok(file) => io::BufReader::new(file),
Err(err) => {
return Err(Some(Error::Io(err).with_path(git_dir_path())));
}
};
let dot_git_line = match file.lines().next() {
Some(Ok(line)) => line,
Some(Err(err)) => {
return Err(Some(Error::Io(err).with_path(git_dir_path())));
}
None => return Err(None),
};
if !dot_git_line.starts_with("gitdir: ") {
return Err(None);
}
let real_git_dir = PathBuf::from(&dot_git_line["gitdir: ".len()..]);
let git_commondir_file = || real_git_dir.join("commondir");
let file = match File::open(git_commondir_file()) {
Ok(file) => io::BufReader::new(file),
Err(_) => return Err(None),
};
let commondir_line = match file.lines().next() {
Some(Ok(line)) => line,
Some(Err(err)) => {
return Err(Some(Error::Io(err).with_path(git_commondir_file())));
}
None => return Err(None),
};
let commondir_abs = if commondir_line.starts_with(".") {
real_git_dir.join(commondir_line) // relative commondir
} else {
PathBuf::from(commondir_line)
};
Ok(commondir_abs)
}

#[cfg(test)]
mod tests {
use std::fs::{self, File};
use std::io::Write;
use std::path::Path;

use tempdir::TempDir;

use dir::IgnoreBuilder;
use gitignore::Gitignore;
use Error;
use crate::dir::IgnoreBuilder;
use crate::gitignore::Gitignore;
use crate::tests::TempDir;
use crate::Error;

fn wfile<P: AsRef<Path>>(path: P, contents: &str) {
let mut file = File::create(path).unwrap();
@@ -644,15 +864,19 @@ mod tests {
}
}

fn tmpdir() -> TempDir {
TempDir::new().unwrap()
}

#[test]
fn explicit_ignore() {
let td = TempDir::new("ignore-test-").unwrap();
let td = tmpdir();
wfile(td.path().join("not-an-ignore"), "foo\n!bar");

let (gi, err) = Gitignore::new(td.path().join("not-an-ignore"));
assert!(err.is_none());
let (ig, err) = IgnoreBuilder::new()
.add_ignore(gi).build().add_child(td.path());
let (ig, err) =
IgnoreBuilder::new().add_ignore(gi).build().add_child(td.path());
assert!(err.is_none());
assert!(ig.matched("foo", false).is_ignore());
assert!(ig.matched("bar", false).is_whitelist());
@@ -661,7 +885,7 @@ mod tests {

#[test]
fn git_exclude() {
let td = TempDir::new("ignore-test-").unwrap();
let td = tmpdir();
mkdirp(td.path().join(".git/info"));
wfile(td.path().join(".git/info/exclude"), "foo\n!bar");

@@ -674,7 +898,8 @@ mod tests {

#[test]
fn gitignore() {
let td = TempDir::new("ignore-test-").unwrap();
let td = tmpdir();
mkdirp(td.path().join(".git"));
wfile(td.path().join(".gitignore"), "foo\n!bar");

let (ig, err) = IgnoreBuilder::new().build().add_child(td.path());
@@ -684,9 +909,36 @@ mod tests {
assert!(ig.matched("baz", false).is_none());
}

#[test]
fn gitignore_no_git() {
let td = tmpdir();
wfile(td.path().join(".gitignore"), "foo\n!bar");

let (ig, err) = IgnoreBuilder::new().build().add_child(td.path());
assert!(err.is_none());
assert!(ig.matched("foo", false).is_none());
assert!(ig.matched("bar", false).is_none());
assert!(ig.matched("baz", false).is_none());
}

#[test]
fn gitignore_allowed_no_git() {
let td = tmpdir();
wfile(td.path().join(".gitignore"), "foo\n!bar");

let (ig, err) = IgnoreBuilder::new()
.require_git(false)
.build()
.add_child(td.path());
assert!(err.is_none());
assert!(ig.matched("foo", false).is_ignore());
assert!(ig.matched("bar", false).is_whitelist());
assert!(ig.matched("baz", false).is_none());
}

#[test]
fn ignore() {
let td = TempDir::new("ignore-test-").unwrap();
let td = tmpdir();
wfile(td.path().join(".ignore"), "foo\n!bar");

let (ig, err) = IgnoreBuilder::new().build().add_child(td.path());
@@ -698,13 +950,14 @@ mod tests {

#[test]
fn custom_ignore() {
let td = TempDir::new("ignore-test-").unwrap();
let td = tmpdir();
let custom_ignore = ".customignore";
wfile(td.path().join(custom_ignore), "foo\n!bar");

let (ig, err) = IgnoreBuilder::new()
.add_custom_ignore_filename(custom_ignore)
.build().add_child(td.path());
.build()
.add_child(td.path());
assert!(err.is_none());
assert!(ig.matched("foo", false).is_ignore());
assert!(ig.matched("bar", false).is_whitelist());
@@ -714,14 +967,15 @@ mod tests {
// Tests that a custom ignore file will override an .ignore.
#[test]
fn custom_ignore_over_ignore() {
let td = TempDir::new("ignore-test-").unwrap();
let td = tmpdir();
let custom_ignore = ".customignore";
wfile(td.path().join(".ignore"), "foo");
wfile(td.path().join(custom_ignore), "!foo");

let (ig, err) = IgnoreBuilder::new()
.add_custom_ignore_filename(custom_ignore)
.build().add_child(td.path());
.build()
.add_child(td.path());
assert!(err.is_none());
assert!(ig.matched("foo", false).is_whitelist());
}
@@ -729,7 +983,7 @@ mod tests {
// Tests that earlier custom ignore files have lower precedence than later.
#[test]
fn custom_ignore_precedence() {
let td = TempDir::new("ignore-test-").unwrap();
let td = tmpdir();
let custom_ignore1 = ".customignore1";
let custom_ignore2 = ".customignore2";
wfile(td.path().join(custom_ignore1), "foo");
@@ -738,7 +992,8 @@ mod tests {
let (ig, err) = IgnoreBuilder::new()
.add_custom_ignore_filename(custom_ignore1)
.add_custom_ignore_filename(custom_ignore2)
.build().add_child(td.path());
.build()
.add_child(td.path());
assert!(err.is_none());
assert!(ig.matched("foo", false).is_whitelist());
}
@@ -746,7 +1001,7 @@ mod tests {
// Tests that an .ignore will override a .gitignore.
#[test]
fn ignore_over_gitignore() {
let td = TempDir::new("ignore-test-").unwrap();
let td = tmpdir();
wfile(td.path().join(".gitignore"), "foo");
wfile(td.path().join(".ignore"), "!foo");

@@ -758,7 +1013,7 @@ mod tests {
// Tests that exclude has lower precedent than both .ignore and .gitignore.
#[test]
fn exclude_lowest() {
let td = TempDir::new("ignore-test-").unwrap();
let td = tmpdir();
wfile(td.path().join(".gitignore"), "!foo");
wfile(td.path().join(".ignore"), "!bar");
mkdirp(td.path().join(".git/info"));
@@ -773,8 +1028,8 @@ mod tests {

#[test]
fn errored() {
let td = TempDir::new("ignore-test-").unwrap();
wfile(td.path().join(".gitignore"), "f**oo");
let td = tmpdir();
wfile(td.path().join(".gitignore"), "{foo");

let (_, err) = IgnoreBuilder::new().build().add_child(td.path());
assert!(err.is_some());
@@ -782,9 +1037,9 @@ mod tests {

#[test]
fn errored_both() {
let td = TempDir::new("ignore-test-").unwrap();
wfile(td.path().join(".gitignore"), "f**oo");
wfile(td.path().join(".ignore"), "fo**o");
let td = tmpdir();
wfile(td.path().join(".gitignore"), "{foo");
wfile(td.path().join(".ignore"), "{bar");

let (_, err) = IgnoreBuilder::new().build().add_child(td.path());
assert_eq!(2, partial(err.expect("an error")).len());
@@ -792,8 +1047,9 @@ mod tests {

#[test]
fn errored_partial() {
let td = TempDir::new("ignore-test-").unwrap();
wfile(td.path().join(".gitignore"), "f**oo\nbar");
let td = tmpdir();
mkdirp(td.path().join(".git"));
wfile(td.path().join(".gitignore"), "{foo\nbar");

let (ig, err) = IgnoreBuilder::new().build().add_child(td.path());
assert!(err.is_some());
@@ -802,8 +1058,8 @@ mod tests {

#[test]
fn errored_partial_and_ignore() {
let td = TempDir::new("ignore-test-").unwrap();
wfile(td.path().join(".gitignore"), "f**oo\nbar");
let td = tmpdir();
wfile(td.path().join(".gitignore"), "{foo\nbar");
wfile(td.path().join(".ignore"), "!bar");

let (ig, err) = IgnoreBuilder::new().build().add_child(td.path());
@@ -813,7 +1069,7 @@ mod tests {

#[test]
fn not_present_empty() {
let td = TempDir::new("ignore-test-").unwrap();
let td = tmpdir();

let (_, err) = IgnoreBuilder::new().build().add_child(td.path());
assert!(err.is_none());
@@ -823,7 +1079,7 @@ mod tests {
fn stops_at_git_dir() {
// This tests that .gitignore files beyond a .git barrier aren't
// matched, but .ignore files are.
let td = TempDir::new("ignore-test-").unwrap();
let td = tmpdir();
mkdirp(td.path().join(".git"));
mkdirp(td.path().join("foo/.git"));
wfile(td.path().join(".gitignore"), "foo");
@@ -844,7 +1100,7 @@ mod tests {

#[test]
fn absolute_parent() {
let td = TempDir::new("ignore-test-").unwrap();
let td = tmpdir();
mkdirp(td.path().join(".git"));
mkdirp(td.path().join("foo"));
wfile(td.path().join(".gitignore"), "bar");
@@ -867,7 +1123,7 @@ mod tests {

#[test]
fn absolute_parent_anchored() {
let td = TempDir::new("ignore-test-").unwrap();
let td = tmpdir();
mkdirp(td.path().join(".git"));
mkdirp(td.path().join("src/llvm"));
wfile(td.path().join(".gitignore"), "/llvm/\nfoo");
@@ -884,4 +1140,49 @@ mod tests {
assert!(ig2.matched("foo", false).is_ignore());
assert!(ig2.matched("src/foo", false).is_ignore());
}

#[test]
fn git_info_exclude_in_linked_worktree() {
let td = tmpdir();
let git_dir = td.path().join(".git");
mkdirp(git_dir.join("info"));
wfile(git_dir.join("info/exclude"), "ignore_me");
mkdirp(git_dir.join("worktrees/linked-worktree"));
let commondir_path =
|| git_dir.join("worktrees/linked-worktree/commondir");
mkdirp(td.path().join("linked-worktree"));
let worktree_git_dir_abs = format!(
"gitdir: {}",
git_dir.join("worktrees/linked-worktree").to_str().unwrap(),
);
wfile(td.path().join("linked-worktree/.git"), &worktree_git_dir_abs);

// relative commondir
wfile(commondir_path(), "../..");
let ib = IgnoreBuilder::new().build();
let (ignore, err) = ib.add_child(td.path().join("linked-worktree"));
assert!(err.is_none());
assert!(ignore.matched("ignore_me", false).is_ignore());

// absolute commondir
wfile(commondir_path(), git_dir.to_str().unwrap());
let (ignore, err) = ib.add_child(td.path().join("linked-worktree"));
assert!(err.is_none());
assert!(ignore.matched("ignore_me", false).is_ignore());

// missing commondir file
assert!(fs::remove_file(commondir_path()).is_ok());
let (_, err) = ib.add_child(td.path().join("linked-worktree"));
// We squash the error in this case, because it occurs in repositories
// that are not linked worktrees but have submodules.
assert!(err.is_none());

wfile(td.path().join("linked-worktree/.git"), "garbage");
let (_, err) = ib.add_child(td.path().join("linked-worktree"));
assert!(err.is_none());

wfile(td.path().join("linked-worktree/.git"), "gitdir: garbage");
let (_, err) = ib.add_child(td.path().join("linked-worktree"));
assert!(err.is_none());
}
}

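As a usage sketch grounded in the tests above (and not part of the diff itself), this is how the builder is typically exercised: build an `Ignore` matcher rooted at a directory, then query names relative to it. The directory layout (a .gitignore containing "foo" and "!bar") is assumed to have been created beforehand, e.g. with the `wfile`/`mkdirp` helpers shown above.

// Crate-internal sketch in the style of the tests above.
fn example(dir: &std::path::Path) {
    let (ig, err) = IgnoreBuilder::new()
        .require_git(false) // apply .gitignore rules even without a .git dir
        .build()
        .add_child(dir);
    assert!(err.is_none());
    assert!(ig.matched("foo", false).is_ignore());
    assert!(ig.matched("bar", false).is_whitelist());
}
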
@@ -19,8 +19,8 @@ use globset::{Candidate, GlobBuilder, GlobSet, GlobSetBuilder};
use regex::bytes::Regex;
use thread_local::ThreadLocal;

use pathutil::{is_file_name, strip_prefix};
use {Error, Match, PartialErrorBuilder};
use crate::pathutil::{is_file_name, strip_prefix};
use crate::{Error, Match, PartialErrorBuilder};

/// Glob represents a single glob in a gitignore file.
///
@@ -66,6 +66,11 @@ impl Glob {
pub fn is_only_dir(&self) -> bool {
self.is_only_dir
}

/// Returns true if and only if this glob has a `**/` prefix.
fn has_doublestar_prefix(&self) -> bool {
self.actual.starts_with("**/") || self.actual == "**"
}
}

/// Gitignore is a matcher for the globs in one or more gitignore files
@@ -77,7 +82,7 @@ pub struct Gitignore {
globs: Vec<Glob>,
num_ignores: u64,
num_whitelists: u64,
matches: Arc<ThreadLocal<RefCell<Vec<usize>>>>,
matches: Option<Arc<ThreadLocal<RefCell<Vec<usize>>>>>,
}

impl Gitignore {
@@ -121,23 +126,21 @@ impl Gitignore {
/// `$XDG_CONFIG_HOME/git/ignore` is read. If `$XDG_CONFIG_HOME` is not
/// set or is empty, then `$HOME/.config/git/ignore` is used instead.
pub fn global() -> (Gitignore, Option<Error>) {
match gitconfig_excludes_path() {
None => (Gitignore::empty(), None),
Some(path) => {
if !path.is_file() {
(Gitignore::empty(), None)
} else {
Gitignore::new(path)
}
}
}
GitignoreBuilder::new("").build_global()
}

/// Creates a new empty gitignore matcher that never matches anything.
///
/// Its path is empty.
pub fn empty() -> Gitignore {
GitignoreBuilder::new("").build().unwrap()
Gitignore {
set: GlobSet::empty(),
root: PathBuf::from(""),
globs: vec![],
num_ignores: 0,
num_whitelists: 0,
matches: None,
}
}

/// Returns the directory containing this gitignore matcher.
@@ -207,6 +210,11 @@ impl Gitignore {
/// determined by a common suffix of the directory containing this
/// gitignore) is stripped. If there is no common suffix/prefix overlap,
/// then `path` is assumed to be relative to this matcher.
///
/// # Panics
///
/// This method panics if the given file path is not under the root path
/// of this matcher.
pub fn matched_path_or_any_parents<P: AsRef<Path>>(
&self,
path: P,
@@ -216,10 +224,8 @@ impl Gitignore {
return Match::None;
}
let mut path = self.strip(path.as_ref());
debug_assert!(
!path.has_root(),
"path is expect to be under the root"
);
assert!(!path.has_root(), "path is expected to be under the root");

match self.matched_stripped(path, is_dir) {
Match::None => (), // walk up
a_match => return a_match,
@@ -243,7 +249,7 @@ impl Gitignore {
return Match::None;
}
let path = path.as_ref();
let _matches = self.matches.get_default();
let _matches = self.matches.as_ref().unwrap().get_or_default();
let mut matches = _matches.borrow_mut();
let candidate = Candidate::new(path);
self.set.matches_candidate_into(&candidate, &mut *matches);
@@ -278,7 +284,10 @@ impl Gitignore {
// BUT, a file name might not have any directory components to it,
// in which case, we don't want to accidentally strip any part of the
// file name.
if !is_file_name(path) {
//
// As an additional special case, if the root is just `.`, then we
// shouldn't try to strip anything, e.g., when path begins with a `.`.
if self.root != Path::new(".") && !is_file_name(path) {
if let Some(p) = strip_prefix(&self.root, path) {
path = p;
// If we're left with a leading slash, get rid of it.
@@ -292,6 +301,7 @@ impl Gitignore {
}

/// Builds a matcher for a single set of globs from a .gitignore file.
#[derive(Clone, Debug)]
pub struct GitignoreBuilder {
builder: GlobSetBuilder,
root: PathBuf,
@@ -322,23 +332,50 @@ impl GitignoreBuilder {
pub fn build(&self) -> Result<Gitignore, Error> {
let nignore = self.globs.iter().filter(|g| !g.is_whitelist()).count();
let nwhite = self.globs.iter().filter(|g| g.is_whitelist()).count();
let set =
self.builder.build().map_err(|err| {
Error::Glob {
glob: None,
err: err.to_string(),
}
})?;
let set = self
.builder
.build()
.map_err(|err| Error::Glob { glob: None, err: err.to_string() })?;
Ok(Gitignore {
set: set,
root: self.root.clone(),
globs: self.globs.clone(),
num_ignores: nignore as u64,
num_whitelists: nwhite as u64,
matches: Arc::new(ThreadLocal::default()),
matches: Some(Arc::new(ThreadLocal::default())),
})
}

/// Build a global gitignore matcher using the configuration in this
/// builder.
///
/// This consumes ownership of the builder unlike `build` because it
/// must mutate the builder to add the global gitignore globs.
///
/// Note that this ignores the path given to this builder's constructor
/// and instead derives the path automatically from git's global
/// configuration.
pub fn build_global(mut self) -> (Gitignore, Option<Error>) {
match gitconfig_excludes_path() {
None => (Gitignore::empty(), None),
Some(path) => {
if !path.is_file() {
(Gitignore::empty(), None)
} else {
let mut errs = PartialErrorBuilder::default();
errs.maybe_push_ignore_io(self.add(path));
match self.build() {
Ok(gi) => (gi, errs.into_error_option()),
Err(err) => {
errs.push(err);
(Gitignore::empty(), errs.into_error_option())
}
}
}
}
}
}

/// Add each glob from the file path given.
///
/// The file given should be formatted as a `gitignore` file.
@@ -399,6 +436,8 @@ impl GitignoreBuilder {
from: Option<PathBuf>,
mut line: &str,
) -> Result<&mut GitignoreBuilder, Error> {
#![allow(deprecated)]

if line.starts_with("#") {
return Ok(self);
}
@@ -415,7 +454,6 @@ impl GitignoreBuilder {
is_whitelist: false,
is_only_dir: false,
};
let mut literal_separator = false;
let mut is_absolute = false;
if line.starts_with("\\!") || line.starts_with("\\#") {
line = &line[1..];
@@ -430,31 +468,28 @@ impl GitignoreBuilder {
// then the glob can only match the beginning of a path
// (relative to the location of gitignore). We achieve this by
// simply banning wildcards from matching /.
literal_separator = true;
line = &line[1..];
is_absolute = true;
}
}
// If it ends with a slash, then this should only match directories,
// but the slash should otherwise not be used while globbing.
if let Some((i, c)) = line.char_indices().rev().nth(0) {
if c == '/' {
if line.as_bytes().last() == Some(&b'/') {
glob.is_only_dir = true;
line = &line[..i];
line = &line[..line.len() - 1];
// If the slash was escaped, then remove the escape.
// See: https://github.com/BurntSushi/ripgrep/issues/2236
if line.as_bytes().last() == Some(&b'\\') {
line = &line[..line.len() - 1];
}
}
// If there is a literal slash, then we note that so that globbing
// doesn't let wildcards match slashes.
glob.actual = line.to_string();
if is_absolute || line.chars().any(|c| c == '/') {
literal_separator = true;
}
// If there was a slash, then this is a glob that must match the entire
// path name. Otherwise, we should let it match anywhere, so use a **/
// prefix.
if !literal_separator {
// If there is a literal slash, then this is a glob that must match the
// entire path name. Otherwise, we should let it match anywhere, so use
// a **/ prefix.
if !is_absolute && !line.chars().any(|c| c == '/') {
// ... but only if we don't already have a **/ prefix.
if !(glob.actual.starts_with("**/") || (glob.actual == "**" && glob.is_only_dir)) {
if !glob.has_doublestar_prefix() {
glob.actual = format!("**/{}", glob.actual);
}
}
@@ -464,16 +499,14 @@ impl GitignoreBuilder {
if glob.actual.ends_with("/**") {
glob.actual = format!("{}/*", glob.actual);
}
let parsed =
GlobBuilder::new(&glob.actual)
.literal_separator(literal_separator)
let parsed = GlobBuilder::new(&glob.actual)
.literal_separator(true)
.case_insensitive(self.case_insensitive)
.backslash_escape(true)
.build()
.map_err(|err| {
Error::Glob {
.map_err(|err| Error::Glob {
glob: Some(glob.original.clone()),
err: err.kind().to_string(),
}
})?;
self.builder.add(parsed);
self.globs.push(glob);
@@ -482,10 +515,16 @@ impl GitignoreBuilder {

/// Toggle whether the globs should be matched case insensitively or not.
///
/// When this option is changed, only globs added after the change will be
/// affected.
///
/// This is disabled by default.
pub fn case_insensitive(
&mut self, yes: bool
&mut self,
yes: bool,
) -> Result<&mut GitignoreBuilder, Error> {
// TODO: This should not return a `Result`. Fix this in the next semver
// release.
self.case_insensitive = yes;
Ok(self)
}
@@ -495,16 +534,27 @@ impl GitignoreBuilder {
///
/// Note that the file path returned may not exist.
fn gitconfig_excludes_path() -> Option<PathBuf> {
gitconfig_contents()
.and_then(|data| parse_excludes_file(&data))
.or_else(excludes_file_default)
// git supports $HOME/.gitconfig and $XDG_CONFIG_HOME/git/config. Notably,
// both can be active at the same time, where $HOME/.gitconfig takes
// precedent. So if $HOME/.gitconfig defines a `core.excludesFile`, then
// we're done.
match gitconfig_home_contents().and_then(|x| parse_excludes_file(&x)) {
Some(path) => return Some(path),
None => {}
}
match gitconfig_xdg_contents().and_then(|x| parse_excludes_file(&x)) {
Some(path) => return Some(path),
None => {}
}
excludes_file_default()
}

/// Returns the file contents of git's global config file, if one exists.
fn gitconfig_contents() -> Option<Vec<u8>> {
let home = match env::var_os("HOME") {
/// Returns the file contents of git's global config file, if one exists, in
/// the user's home directory.
fn gitconfig_home_contents() -> Option<Vec<u8>> {
let home = match home_dir() {
None => return None,
Some(home) => PathBuf::from(home),
Some(home) => home,
};
let mut file = match File::open(home.join(".gitconfig")) {
Err(_) => return None,
@@ -514,13 +564,28 @@ fn gitconfig_contents() -> Option<Vec<u8>> {
file.read_to_end(&mut contents).ok().map(|_| contents)
}

/// Returns the file contents of git's global config file, if one exists, in
/// the user's XDG_CONFIG_HOME directory.
fn gitconfig_xdg_contents() -> Option<Vec<u8>> {
let path = env::var_os("XDG_CONFIG_HOME")
.and_then(|x| if x.is_empty() { None } else { Some(PathBuf::from(x)) })
.or_else(|| home_dir().map(|p| p.join(".config")))
.map(|x| x.join("git/config"));
let mut file = match path.and_then(|p| File::open(p).ok()) {
None => return None,
Some(file) => io::BufReader::new(file),
};
let mut contents = vec![];
file.read_to_end(&mut contents).ok().map(|_| contents)
}

/// Returns the default file path for a global .gitignore file.
///
/// Specifically, this respects XDG_CONFIG_HOME.
fn excludes_file_default() -> Option<PathBuf> {
env::var_os("XDG_CONFIG_HOME")
.and_then(|x| if x.is_empty() { None } else { Some(PathBuf::from(x)) })
.or_else(|| env::home_dir().map(|p| p.join(".config")))
.or_else(|| home_dir().map(|p| p.join(".config")))
.map(|x| x.join("git/ignore"))
}

@@ -530,9 +595,9 @@ fn parse_excludes_file(data: &[u8]) -> Option<PathBuf> {
// N.B. This is the lazy approach, and isn't technically correct, but
// probably works in more circumstances. I guess we would ideally have
// a full INI parser. Yuck.
lazy_static! {
static ref RE: Regex = Regex::new(
r"(?ium)^\s*excludesfile\s*=\s*(.+)\s*$").unwrap();
lazy_static::lazy_static! {
static ref RE: Regex =
Regex::new(r"(?im)^\s*excludesfile\s*=\s*(.+)\s*$").unwrap();
};
let caps = match RE.captures(data) {
None => return None,
@@ -543,17 +608,26 @@ fn parse_excludes_file(data: &[u8]) -> Option<PathBuf> {

/// Expands ~ in file paths to the value of $HOME.
fn expand_tilde(path: &str) -> String {
let home = match env::var("HOME") {
Err(_) => return path.to_string(),
Ok(home) => home,
let home = match home_dir() {
None => return path.to_string(),
Some(home) => home.to_string_lossy().into_owned(),
};
path.replace("~", &home)
}

/// Returns the location of the user's home directory.
fn home_dir() -> Option<PathBuf> {
// We're fine with using env::home_dir for now. Its bugs are, IMO, pretty
// minor corner cases. We should still probably eventually migrate to
// the `dirs` crate to get a proper implementation.
#![allow(deprecated)]
env::home_dir()
}

#[cfg(test)]
mod tests {
use std::path::Path;
use super::{Gitignore, GitignoreBuilder};
use std::path::Path;

fn gi_from_str<P: AsRef<Path>>(root: P, s: &str) -> Gitignore {
let mut builder = GitignoreBuilder::new(root);
@@ -620,6 +694,19 @@ mod tests {
ignored!(ig29, ROOT, "node_modules/ ", "node_modules", true);
ignored!(ig30, ROOT, "**/", "foo/bar", true);
ignored!(ig31, ROOT, "path1/*", "path1/foo");
ignored!(ig32, ROOT, ".a/b", ".a/b");
ignored!(ig33, "./", ".a/b", ".a/b");
ignored!(ig34, ".", ".a/b", ".a/b");
ignored!(ig35, "./.", ".a/b", ".a/b");
ignored!(ig36, "././", ".a/b", ".a/b");
ignored!(ig37, "././.", ".a/b", ".a/b");
ignored!(ig38, ROOT, "\\[", "[");
ignored!(ig39, ROOT, "\\?", "?");
ignored!(ig40, ROOT, "\\*", "*");
ignored!(ig41, ROOT, "\\a", "a");
ignored!(ig42, ROOT, "s*.rs", "sfoo.rs");
ignored!(ig43, ROOT, "**", "foo.rs");
ignored!(ig44, ROOT, "**/**/*", "a/foo.rs");

not_ignored!(ignot1, ROOT, "amonths", "months");
not_ignored!(ignot2, ROOT, "monthsa", "months");
@@ -635,12 +722,16 @@ mod tests {
not_ignored!(ignot12, ROOT, "\n\n\n", "foo");
not_ignored!(ignot13, ROOT, "foo/**", "foo", true);
not_ignored!(
ignot14, "./third_party/protobuf", "m4/ltoptions.m4",
"./third_party/protobuf/csharp/src/packages/repositories.config");
ignot14,
"./third_party/protobuf",
"m4/ltoptions.m4",
"./third_party/protobuf/csharp/src/packages/repositories.config"
);
not_ignored!(ignot15, ROOT, "!/bar", "foo/bar");
not_ignored!(ignot16, ROOT, "*\n!**/", "foo", true);
not_ignored!(ignot17, ROOT, "src/*.rs", "src/grep/src/main.rs");
not_ignored!(ignot18, ROOT, "path1/*", "path2/path1/foo");
not_ignored!(ignot19, ROOT, "s*.rs", "src/foo.rs");

fn bytes(s: &str) -> Vec<u8> {
s.to_string().into_bytes()
@@ -679,9 +770,12 @@ mod tests {
#[test]
fn case_insensitive() {
let gi = GitignoreBuilder::new(ROOT)
.case_insensitive(true).unwrap()
.add_str(None, "*.html").unwrap()
.build().unwrap();
.case_insensitive(true)
.unwrap()
.add_str(None, "*.html")
.unwrap()
.build()
.unwrap();
assert!(gi.matched("foo.html", false).is_ignore());
assert!(gi.matched("foo.HTML", false).is_ignore());
assert!(!gi.matched("foo.htm", false).is_ignore());
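A short usage sketch in the same style as the `case_insensitive` test above (it is not part of the diff): build a `Gitignore` from glob lines added in memory and query it. The root path "/" merely stands in for wherever the hypothetical .gitignore would live.

// Sketch only; mirrors the builder API exercised by the tests above.
fn example() {
    let mut builder = GitignoreBuilder::new("/");
    builder.add_str(None, "*.log").unwrap();
    builder.add_str(None, "!keep.log").unwrap();
    let gi = builder.build().unwrap();
    // Later globs win, so the whitelist line overrides the ignore line.
    assert!(gi.matched("debug.log", false).is_ignore());
    assert!(gi.matched("keep.log", false).is_whitelist());
}
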
@@ -46,33 +46,21 @@ See the documentation for `WalkBuilder` for many other options.
|
||||
|
||||
#![deny(missing_docs)]
|
||||
|
||||
extern crate crossbeam;
|
||||
extern crate globset;
|
||||
#[macro_use]
|
||||
extern crate lazy_static;
|
||||
#[macro_use]
|
||||
extern crate log;
|
||||
extern crate memchr;
|
||||
extern crate regex;
|
||||
extern crate same_file;
|
||||
#[cfg(test)]
|
||||
extern crate tempdir;
|
||||
extern crate thread_local;
|
||||
extern crate walkdir;
|
||||
#[cfg(windows)]
|
||||
extern crate winapi;
|
||||
|
||||
use std::error;
|
||||
use std::fmt;
|
||||
use std::io;
|
||||
use std::path::{Path, PathBuf};
|
||||
|
||||
pub use walk::{DirEntry, Walk, WalkBuilder, WalkParallel, WalkState};
|
||||
pub use crate::walk::{
|
||||
DirEntry, ParallelVisitor, ParallelVisitorBuilder, Walk, WalkBuilder,
|
||||
WalkParallel, WalkState,
|
||||
};
|
||||
|
||||
mod default_types;
|
||||
mod dir;
|
||||
pub mod gitignore;
|
||||
mod pathutil;
|
||||
pub mod overrides;
|
||||
mod pathutil;
|
||||
pub mod types;
|
||||
mod walk;
|
||||
|
||||
@@ -132,6 +120,38 @@ pub enum Error {
|
||||
InvalidDefinition,
|
||||
}
|
||||
|
||||
impl Clone for Error {
|
||||
fn clone(&self) -> Error {
|
||||
match *self {
|
||||
Error::Partial(ref errs) => Error::Partial(errs.clone()),
|
||||
Error::WithLineNumber { line, ref err } => {
|
||||
Error::WithLineNumber { line: line, err: err.clone() }
|
||||
}
|
||||
Error::WithPath { ref path, ref err } => {
|
||||
Error::WithPath { path: path.clone(), err: err.clone() }
|
||||
}
|
||||
Error::WithDepth { depth, ref err } => {
|
||||
Error::WithDepth { depth: depth, err: err.clone() }
|
||||
}
|
||||
Error::Loop { ref ancestor, ref child } => Error::Loop {
|
||||
ancestor: ancestor.clone(),
|
||||
child: child.clone(),
|
||||
},
|
||||
Error::Io(ref err) => match err.raw_os_error() {
|
||||
Some(e) => Error::Io(io::Error::from_raw_os_error(e)),
|
||||
None => Error::Io(io::Error::new(err.kind(), err.to_string())),
|
||||
},
|
||||
Error::Glob { ref glob, ref err } => {
|
||||
Error::Glob { glob: glob.clone(), err: err.clone() }
|
||||
}
|
||||
Error::UnrecognizedFileType(ref err) => {
|
||||
Error::UnrecognizedFileType(err.clone())
|
||||
}
|
||||
Error::InvalidDefinition => Error::InvalidDefinition,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Error {
|
||||
/// Returns true if this is a partial error.
|
||||
///
|
||||
@@ -163,6 +183,71 @@ impl Error {
        }
    }

    /// Inspect the original [`io::Error`] if there is one.
    ///
    /// [`None`] is returned if the [`Error`] doesn't correspond to an
    /// [`io::Error`]. This might happen, for example, when the error was
    /// produced because a cycle was found in the directory tree while
    /// following symbolic links.
    ///
    /// This method returns a borrowed value that is bound to the lifetime of the [`Error`]. To
    /// obtain an owned value, the [`into_io_error`] can be used instead.
    ///
    /// > This is the original [`io::Error`] and is _not_ the same as
    /// > [`impl From<Error> for std::io::Error`][impl] which contains additional context about the
    /// error.
    ///
    /// [`None`]: https://doc.rust-lang.org/stable/std/option/enum.Option.html#variant.None
    /// [`io::Error`]: https://doc.rust-lang.org/stable/std/io/struct.Error.html
    /// [`From`]: https://doc.rust-lang.org/stable/std/convert/trait.From.html
    /// [`Error`]: struct.Error.html
    /// [`into_io_error`]: struct.Error.html#method.into_io_error
    /// [impl]: struct.Error.html#impl-From%3CError%3E
    pub fn io_error(&self) -> Option<&std::io::Error> {
        match *self {
            Error::Partial(ref errs) => {
                if errs.len() == 1 {
                    errs[0].io_error()
                } else {
                    None
                }
            }
            Error::WithLineNumber { ref err, .. } => err.io_error(),
            Error::WithPath { ref err, .. } => err.io_error(),
            Error::WithDepth { ref err, .. } => err.io_error(),
            Error::Loop { .. } => None,
            Error::Io(ref err) => Some(err),
            Error::Glob { .. } => None,
            Error::UnrecognizedFileType(_) => None,
            Error::InvalidDefinition => None,
        }
    }

    /// Similar to [`io_error`] except consumes self to convert to the original
    /// [`io::Error`] if one exists.
    ///
    /// [`io_error`]: struct.Error.html#method.io_error
    /// [`io::Error`]: https://doc.rust-lang.org/stable/std/io/struct.Error.html
    pub fn into_io_error(self) -> Option<std::io::Error> {
        match self {
            Error::Partial(mut errs) => {
                if errs.len() == 1 {
                    errs.remove(0).into_io_error()
                } else {
                    None
                }
            }
            Error::WithLineNumber { err, .. } => err.into_io_error(),
            Error::WithPath { err, .. } => err.into_io_error(),
            Error::WithDepth { err, .. } => err.into_io_error(),
            Error::Loop { .. } => None,
            Error::Io(err) => Some(err),
            Error::Glob { .. } => None,
            Error::UnrecognizedFileType(_) => None,
            Error::InvalidDefinition => None,
        }
    }

    /// Returns a depth associated with recursively walking a directory (if
    /// this error was generated from a recursive directory iterator).
    pub fn depth(&self) -> Option<usize> {
@@ -183,19 +268,14 @@ impl Error {

    /// Turn an error into a tagged error with the given depth.
    fn with_depth(self, depth: usize) -> Error {
        Error::WithDepth {
            depth: depth,
            err: Box::new(self),
        }
        Error::WithDepth { depth: depth, err: Box::new(self) }
    }

    /// Turn an error into a tagged error with the given file path and line
    /// number. If path is empty, then it is omitted from the error.
    fn tagged<P: AsRef<Path>>(self, path: P, lineno: u64) -> Error {
        let errline = Error::WithLineNumber {
            line: lineno,
            err: Box::new(self),
        };
        let errline =
            Error::WithLineNumber { line: lineno, err: Box::new(self) };
        if path.as_ref().as_os_str().is_empty() {
            return errline;
        }
@@ -217,16 +297,14 @@ impl Error {
        let path = err.path().map(|p| p.to_path_buf());
        let mut ig_err = Error::Io(io::Error::from(err));
        if let Some(path) = path {
            ig_err = Error::WithPath {
                path: path,
                err: Box::new(ig_err),
            };
            ig_err = Error::WithPath { path: path, err: Box::new(ig_err) };
        }
        ig_err
    }
}
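To make the new `io_error`/`into_io_error` accessors concrete, here is a hedged usage sketch. It assumes errors surface as `ignore::Error` values from the crate's walk iterator; nothing in it is added by this commit.

```rust
use std::io::ErrorKind;

use ignore::WalkBuilder;

fn main() {
    for result in WalkBuilder::new("./").build() {
        if let Err(err) = result {
            // Borrow the original io::Error, if any, to branch on its kind.
            match err.io_error().map(|e| e.kind()) {
                Some(ErrorKind::PermissionDenied) => {
                    eprintln!("skipping unreadable entry: {}", err);
                }
                _ => eprintln!("walk error: {}", err),
            }
        }
    }
}
```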

impl error::Error for Error {
    #[allow(deprecated)]
    fn description(&self) -> &str {
        match *self {
            Error::Partial(_) => "partial error",
@@ -243,7 +321,7 @@ impl error::Error for Error {
}

impl fmt::Display for Error {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match *self {
            Error::Partial(ref errs) => {
                let msgs: Vec<String> =
@@ -257,11 +335,13 @@ impl fmt::Display for Error {
                write!(f, "{}: {}", path.display(), err)
            }
            Error::WithDepth { ref err, .. } => err.fmt(f),
            Error::Loop { ref ancestor, ref child } => {
                write!(f, "File system loop found: \
            Error::Loop { ref ancestor, ref child } => write!(
                f,
                "File system loop found: \
                       {} points to an ancestor {}",
                       child.display(), ancestor.display())
            }
                child.display(),
                ancestor.display()
            ),
            Error::Io(ref err) => err.fmt(f),
            Error::Glob { glob: None, ref err } => write!(f, "{}", err),
            Error::Glob { glob: Some(ref glob), ref err } => {
@@ -270,10 +350,11 @@ impl fmt::Display for Error {
            Error::UnrecognizedFileType(ref ty) => {
                write!(f, "unrecognized file type: {}", ty)
            }
            Error::InvalidDefinition => {
                write!(f, "invalid definition (format is type:glob, e.g., \
                           html:*.html)")
            }
            Error::InvalidDefinition => write!(
                f,
                "invalid definition (format is type:glob, e.g., \
                 html:*.html)"
            ),
        }
    }
}
@@ -404,3 +485,66 @@ impl<T> Match<T> {
        }
    }
}

#[cfg(test)]
mod tests {
    use std::env;
    use std::error;
    use std::fs;
    use std::path::{Path, PathBuf};
    use std::result;

    /// A convenient result type alias.
    pub type Result<T> =
        result::Result<T, Box<dyn error::Error + Send + Sync>>;

    macro_rules! err {
        ($($tt:tt)*) => {
            Box::<dyn error::Error + Send + Sync>::from(format!($($tt)*))
        }
    }

    /// A simple wrapper for creating a temporary directory that is
    /// automatically deleted when it's dropped.
    ///
    /// We use this in lieu of tempfile because tempfile brings in too many
    /// dependencies.
    #[derive(Debug)]
    pub struct TempDir(PathBuf);

    impl Drop for TempDir {
        fn drop(&mut self) {
            fs::remove_dir_all(&self.0).unwrap();
        }
    }

    impl TempDir {
        /// Create a new empty temporary directory under the system's configured
        /// temporary directory.
        pub fn new() -> Result<TempDir> {
            use std::sync::atomic::{AtomicUsize, Ordering};

            static TRIES: usize = 100;
            static COUNTER: AtomicUsize = AtomicUsize::new(0);

            let tmpdir = env::temp_dir();
            for _ in 0..TRIES {
                let count = COUNTER.fetch_add(1, Ordering::SeqCst);
                let path = tmpdir.join("rust-ignore").join(count.to_string());
                if path.is_dir() {
                    continue;
                }
                fs::create_dir_all(&path).map_err(|e| {
                    err!("failed to create {}: {}", path.display(), e)
                })?;
                return Ok(TempDir(path));
            }
            Err(err!("failed to create temp dir after {} tries", TRIES))
        }

        /// Return the underlying path to this temporary directory.
        pub fn path(&self) -> &Path {
            &self.0
        }
    }
}
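As a side note, a hedged sketch of how a test inside this module might lean on the `TempDir` helper above; the test name and file contents are made up for illustration.

```rust
#[test]
fn scratch_dir_roundtrip() -> Result<()> {
    // The directory is removed when `tmp` is dropped at the end of the test.
    let tmp = TempDir::new()?;
    let file = tmp.path().join("example.txt");
    fs::write(&file, b"hello")?;
    assert!(file.exists());
    Ok(())
}
```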
@@ -6,8 +6,8 @@ line tools.

use std::path::Path;

use gitignore::{self, Gitignore, GitignoreBuilder};
use {Error, Match};
use crate::gitignore::{self, Gitignore, GitignoreBuilder};
use crate::{Error, Match};

/// Glob represents a single glob in an override matcher.
///
@@ -115,9 +115,7 @@ impl OverrideBuilder {
    ///
    /// Matching is done relative to the directory path provided.
    pub fn new<P: AsRef<Path>>(path: P) -> OverrideBuilder {
        OverrideBuilder {
            builder: GitignoreBuilder::new(path),
        }
        OverrideBuilder { builder: GitignoreBuilder::new(path) }
    }

    /// Builds a new override matcher from the globs added so far.
@@ -140,10 +138,15 @@ impl OverrideBuilder {

    /// Toggle whether the globs should be matched case insensitively or not.
    ///
    /// When this option is changed, only globs added after the change will be affected.
    ///
    /// This is disabled by default.
    pub fn case_insensitive(
        &mut self, yes: bool
        &mut self,
        yes: bool,
    ) -> Result<&mut OverrideBuilder, Error> {
        // TODO: This should not return a `Result`. Fix this in the next semver
        // release.
        self.builder.case_insensitive(yes)?;
        Ok(self)
    }
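For orientation, a hedged sketch of the builder in use. The ordering matters because, as documented above, case insensitivity only applies to globs added after the toggle; the root path and glob are illustrative.

```rust
use ignore::overrides::{Override, OverrideBuilder};
use ignore::Error;

fn html_overrides() -> Result<Override, Error> {
    let mut builder = OverrideBuilder::new("./");
    // Flip case insensitivity before adding globs; globs added earlier keep
    // whatever setting was active at the time they were added.
    builder.case_insensitive(true)?;
    builder.add("*.html")?;
    builder.build()
}
```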
@@ -235,9 +238,12 @@ mod tests {
    #[test]
    fn case_insensitive() {
        let ov = OverrideBuilder::new(ROOT)
            .case_insensitive(true).unwrap()
            .add("*.html").unwrap()
            .build().unwrap();
            .case_insensitive(true)
            .unwrap()
            .add("*.html")
            .unwrap()
            .build()
            .unwrap();
        assert!(ov.matched("foo.html", false).is_whitelist());
        assert!(ov.matched("foo.HTML", false).is_whitelist());
        assert!(ov.matched("foo.htm", false).is_ignore());
@@ -246,9 +252,8 @@ mod tests {

    #[test]
    fn default_case_sensitive() {
        let ov = OverrideBuilder::new(ROOT)
            .add("*.html").unwrap()
            .build().unwrap();
        let ov =
            OverrideBuilder::new(ROOT).add("*.html").unwrap().build().unwrap();
        assert!(ov.matched("foo.html", false).is_whitelist());
        assert!(ov.matched("foo.HTML", false).is_ignore());
        assert!(ov.matched("foo.htm", false).is_ignore());
@@ -1,22 +1,56 @@
use std::ffi::OsStr;
use std::path::Path;

/// Returns true if and only if this file path is considered to be hidden.
use crate::walk::DirEntry;

/// Returns true if and only if this entry is considered to be hidden.
///
/// This only returns true if the base name of the path starts with a `.`.
///
/// On Unix, this implements a more optimized check.
#[cfg(unix)]
pub fn is_hidden<P: AsRef<Path>>(path: P) -> bool {
pub fn is_hidden(dent: &DirEntry) -> bool {
    use std::os::unix::ffi::OsStrExt;

    if let Some(name) = file_name(path.as_ref()) {
    if let Some(name) = file_name(dent.path()) {
        name.as_bytes().get(0) == Some(&b'.')
    } else {
        false
    }
}

/// Returns true if and only if this file path is considered to be hidden.
#[cfg(not(unix))]
pub fn is_hidden<P: AsRef<Path>>(path: P) -> bool {
    if let Some(name) = file_name(path.as_ref()) {
/// Returns true if and only if this entry is considered to be hidden.
///
/// On Windows, this returns true if one of the following is true:
///
/// * The base name of the path starts with a `.`.
/// * The file attributes have the `HIDDEN` property set.
#[cfg(windows)]
pub fn is_hidden(dent: &DirEntry) -> bool {
    use std::os::windows::fs::MetadataExt;
    use winapi_util::file;

    // This looks like we're doing an extra stat call, but on Windows, the
    // directory traverser reuses the metadata retrieved from each directory
    // entry and stores it on the DirEntry itself. So this is "free."
    if let Ok(md) = dent.metadata() {
        if file::is_hidden(md.file_attributes() as u64) {
            return true;
        }
    }
    if let Some(name) = file_name(dent.path()) {
        name.to_str().map(|s| s.starts_with(".")).unwrap_or(false)
    } else {
        false
    }
}

/// Returns true if and only if this entry is considered to be hidden.
///
/// This only returns true if the base name of the path starts with a `.`.
#[cfg(not(any(unix, windows)))]
pub fn is_hidden(dent: &DirEntry) -> bool {
    if let Some(name) = file_name(dent.path()) {
        name.to_str().map(|s| s.starts_with(".")).unwrap_or(false)
    } else {
        false
@@ -57,8 +91,8 @@ pub fn strip_prefix<'a, P: AsRef<Path> + ?Sized>(
/// the empty string.
#[cfg(unix)]
pub fn is_file_name<P: AsRef<Path>>(path: P) -> bool {
    use std::os::unix::ffi::OsStrExt;
    use memchr::memchr;
    use std::os::unix::ffi::OsStrExt;

    let path = path.as_ref().as_os_str().as_bytes();
    memchr(b'/', path).is_none()
@@ -79,8 +113,8 @@ pub fn is_file_name<P: AsRef<Path>>(path: P) -> bool {
pub fn file_name<'a, P: AsRef<Path> + ?Sized>(
    path: &'a P,
) -> Option<&'a OsStr> {
    use std::os::unix::ffi::OsStrExt;
    use memchr::memrchr;
    use std::os::unix::ffi::OsStrExt;

    let path = path.as_ref().as_os_str().as_bytes();
    if path.is_empty() {
@@ -93,204 +93,9 @@ use globset::{GlobBuilder, GlobSet, GlobSetBuilder};
use regex::Regex;
use thread_local::ThreadLocal;

use pathutil::file_name;
use {Error, Match};

const DEFAULT_TYPES: &'static [(&'static str, &'static [&'static str])] = &[
    ("agda", &["*.agda", "*.lagda"]),
    ("asciidoc", &["*.adoc", "*.asc", "*.asciidoc"]),
    ("asm", &["*.asm", "*.s", "*.S"]),
    ("avro", &["*.avdl", "*.avpr", "*.avsc"]),
    ("awk", &["*.awk"]),
    ("bitbake", &["*.bb", "*.bbappend", "*.bbclass", "*.conf", "*.inc"]),
    ("bzip2", &["*.bz2"]),
    ("c", &["*.c", "*.h", "*.H"]),
    ("cabal", &["*.cabal"]),
    ("cbor", &["*.cbor"]),
    ("ceylon", &["*.ceylon"]),
    ("clojure", &["*.clj", "*.cljc", "*.cljs", "*.cljx"]),
    ("cmake", &["*.cmake", "CMakeLists.txt"]),
    ("coffeescript", &["*.coffee"]),
    ("creole", &["*.creole"]),
    ("config", &["*.cfg", "*.conf", "*.config", "*.ini"]),
("cpp", &[
|
||||
"*.C", "*.cc", "*.cpp", "*.cxx",
|
||||
"*.h", "*.H", "*.hh", "*.hpp", "*.hxx", "*.inl",
|
||||
]),
|
||||
("crystal", &["Projectfile", "*.cr"]),
|
||||
("cs", &["*.cs"]),
|
||||
("csharp", &["*.cs"]),
|
||||
("cshtml", &["*.cshtml"]),
|
||||
("css", &["*.css", "*.scss"]),
|
||||
("cython", &["*.pyx"]),
|
||||
("dart", &["*.dart"]),
|
||||
("d", &["*.d"]),
|
||||
("docker", &["*Dockerfile*"]),
|
||||
("elisp", &["*.el"]),
|
||||
("elixir", &["*.ex", "*.eex", "*.exs"]),
|
||||
("elm", &["*.elm"]),
|
||||
("erlang", &["*.erl", "*.hrl"]),
|
||||
("fish", &["*.fish"]),
|
||||
("fortran", &[
|
||||
"*.f", "*.F", "*.f77", "*.F77", "*.pfo",
|
||||
"*.f90", "*.F90", "*.f95", "*.F95",
|
||||
]),
|
||||
("fsharp", &["*.fs", "*.fsx", "*.fsi"]),
|
||||
("gn", &["*.gn", "*.gni"]),
|
||||
("go", &["*.go"]),
|
||||
("gzip", &["*.gz"]),
|
||||
("groovy", &["*.groovy", "*.gradle"]),
|
||||
("h", &["*.h", "*.hpp"]),
|
||||
("hbs", &["*.hbs"]),
|
||||
("haskell", &["*.hs", "*.lhs"]),
|
||||
("html", &["*.htm", "*.html", "*.ejs"]),
|
||||
("java", &["*.java"]),
|
||||
("jinja", &["*.j2", "*.jinja", "*.jinja2"]),
|
||||
("js", &[
|
||||
"*.js", "*.jsx", "*.vue",
|
||||
]),
|
||||
("json", &["*.json", "composer.lock"]),
|
||||
("jsonl", &["*.jsonl"]),
|
||||
("julia", &["*.jl"]),
|
||||
("jupyter", &["*.ipynb", "*.jpynb"]),
|
||||
("jl", &["*.jl"]),
|
||||
("kotlin", &["*.kt", "*.kts"]),
|
||||
("less", &["*.less"]),
|
||||
("license", &[
|
||||
// General
|
||||
"COPYING", "COPYING[.-]*",
|
||||
"COPYRIGHT", "COPYRIGHT[.-]*",
|
||||
"EULA", "EULA[.-]*",
|
||||
"licen[cs]e", "licen[cs]e.*",
|
||||
"LICEN[CS]E", "LICEN[CS]E[.-]*", "*[.-]LICEN[CS]E*",
|
||||
"NOTICE", "NOTICE[.-]*",
|
||||
"PATENTS", "PATENTS[.-]*",
|
||||
"UNLICEN[CS]E", "UNLICEN[CS]E[.-]*",
|
||||
// GPL (gpl.txt, etc.)
|
||||
"agpl[.-]*",
|
||||
"gpl[.-]*",
|
||||
"lgpl[.-]*",
|
||||
// Other license-specific (APACHE-2.0.txt, etc.)
|
||||
"AGPL-*[0-9]*",
|
||||
"APACHE-*[0-9]*",
|
||||
"BSD-*[0-9]*",
|
||||
"CC-BY-*",
|
||||
"GFDL-*[0-9]*",
|
||||
"GNU-*[0-9]*",
|
||||
"GPL-*[0-9]*",
|
||||
"LGPL-*[0-9]*",
|
||||
"MIT-*[0-9]*",
|
||||
"MPL-*[0-9]*",
|
||||
"OFL-*[0-9]*",
|
||||
]),
|
||||
("lisp", &["*.el", "*.jl", "*.lisp", "*.lsp", "*.sc", "*.scm"]),
|
||||
("log", &["*.log"]),
|
||||
("lua", &["*.lua"]),
|
||||
("lzma", &["*.lzma"]),
|
||||
("m4", &["*.ac", "*.m4"]),
|
||||
("make", &[
|
||||
"gnumakefile", "Gnumakefile", "GNUmakefile",
|
||||
"makefile", "Makefile",
|
||||
"*.mk", "*.mak"
|
||||
]),
|
||||
("markdown", &["*.markdown", "*.md", "*.mdown", "*.mkdn"]),
|
||||
("md", &["*.markdown", "*.md", "*.mdown", "*.mkdn"]),
|
||||
("man", &["*.[0-9lnpx]", "*.[0-9][cEFMmpSx]"]),
|
||||
("matlab", &["*.m"]),
|
||||
("mk", &["mkfile"]),
|
||||
("ml", &["*.ml"]),
|
||||
("msbuild", &[
|
||||
"*.csproj", "*.fsproj", "*.vcxproj", "*.proj", "*.props", "*.targets"
|
||||
]),
|
||||
("nim", &["*.nim"]),
|
||||
("nix", &["*.nix"]),
|
||||
("objc", &["*.h", "*.m"]),
|
||||
("objcpp", &["*.h", "*.mm"]),
|
||||
("ocaml", &["*.ml", "*.mli", "*.mll", "*.mly"]),
|
||||
("org", &["*.org"]),
|
||||
("perl", &["*.perl", "*.pl", "*.PL", "*.plh", "*.plx", "*.pm", "*.t"]),
|
||||
("pdf", &["*.pdf"]),
|
||||
("php", &["*.php", "*.php3", "*.php4", "*.php5", "*.phtml"]),
|
||||
("pod", &["*.pod"]),
|
||||
("protobuf", &["*.proto"]),
|
||||
("ps", &["*.cdxml", "*.ps1", "*.ps1xml", "*.psd1", "*.psm1"]),
|
||||
("purs", &["*.purs"]),
|
||||
("py", &["*.py"]),
|
||||
("qmake", &["*.pro", "*.pri", "*.prf"]),
|
||||
("readme", &["README*", "*README"]),
|
||||
("r", &["*.R", "*.r", "*.Rmd", "*.Rnw"]),
|
||||
("rdoc", &["*.rdoc"]),
|
||||
("rst", &["*.rst"]),
|
||||
("ruby", &["Gemfile", "*.gemspec", ".irbrc", "Rakefile", "*.rb"]),
|
||||
("rust", &["*.rs"]),
|
||||
("sass", &["*.sass", "*.scss"]),
|
||||
("scala", &["*.scala"]),
|
||||
("sh", &[
|
||||
// Portable/misc. init files
|
||||
".login", ".logout", ".profile", "profile",
|
||||
// bash-specific init files
|
||||
".bash_login", "bash_login",
|
||||
".bash_logout", "bash_logout",
|
||||
".bash_profile", "bash_profile",
|
||||
".bashrc", "bashrc", "*.bashrc",
|
||||
// csh-specific init files
|
||||
".cshrc", "*.cshrc",
|
||||
// ksh-specific init files
|
||||
".kshrc", "*.kshrc",
|
||||
// tcsh-specific init files
|
||||
".tcshrc",
|
||||
// zsh-specific init files
|
||||
".zshenv", "zshenv",
|
||||
".zlogin", "zlogin",
|
||||
".zlogout", "zlogout",
|
||||
".zprofile", "zprofile",
|
||||
".zshrc", "zshrc",
|
||||
// Extensions
|
||||
"*.bash", "*.csh", "*.ksh", "*.sh", "*.tcsh", "*.zsh",
|
||||
]),
|
||||
("smarty", &["*.tpl"]),
|
||||
("sml", &["*.sml", "*.sig"]),
|
||||
("soy", &["*.soy"]),
|
||||
("spark", &["*.spark"]),
|
||||
("sql", &["*.sql", "*.psql"]),
|
||||
("stylus", &["*.styl"]),
|
||||
("sv", &["*.v", "*.vg", "*.sv", "*.svh", "*.h"]),
|
||||
("svg", &["*.svg"]),
|
||||
("swift", &["*.swift"]),
|
||||
("swig", &["*.def", "*.i"]),
|
||||
("systemd", &[
|
||||
"*.automount", "*.conf", "*.device", "*.link", "*.mount", "*.path",
|
||||
"*.scope", "*.service", "*.slice", "*.socket", "*.swap", "*.target",
|
||||
"*.timer",
|
||||
]),
|
||||
("taskpaper", &["*.taskpaper"]),
|
||||
("tcl", &["*.tcl"]),
|
||||
("tex", &["*.tex", "*.ltx", "*.cls", "*.sty", "*.bib"]),
|
||||
("textile", &["*.textile"]),
|
||||
("tf", &["*.tf"]),
|
||||
("ts", &["*.ts", "*.tsx"]),
|
||||
("txt", &["*.txt"]),
|
||||
("toml", &["*.toml", "Cargo.lock"]),
|
||||
("twig", &["*.twig"]),
|
||||
("vala", &["*.vala"]),
|
||||
("vb", &["*.vb"]),
|
||||
("vim", &["*.vim"]),
|
||||
("vimscript", &["*.vim"]),
|
||||
("wiki", &["*.mediawiki", "*.wiki"]),
|
||||
("webidl", &["*.idl", "*.webidl", "*.widl"]),
|
||||
("xml", &["*.xml", "*.xml.dist"]),
|
||||
("xz", &["*.xz"]),
|
||||
("yacc", &["*.y"]),
|
||||
("yaml", &["*.yaml", "*.yml"]),
|
||||
("zsh", &[
|
||||
".zshenv", "zshenv",
|
||||
".zlogin", "zlogin",
|
||||
".zlogout", "zlogout",
|
||||
".zprofile", "zprofile",
|
||||
".zshrc", "zshrc",
|
||||
"*.zsh",
|
||||
]),
|
||||
];
|
||||
use crate::default_types::DEFAULT_TYPES;
use crate::pathutil::file_name;
use crate::{Error, Match};

/// Glob represents a single glob in a set of file type definitions.
///
@@ -317,17 +122,23 @@ enum GlobInner<'a> {
    Matched {
        /// The file type definition which provided the glob.
        def: &'a FileTypeDef,
        /// The index of the glob that matched inside the file type definition.
        which: usize,
        /// Whether the selection was negated or not.
        negated: bool,
    }
    },
}

impl<'a> Glob<'a> {
    fn unmatched() -> Glob<'a> {
        Glob(GlobInner::UnmatchedIgnore)
    }

    /// Return the file type definition that matched, if one exists. A file type
    /// definition always exists when a specific definition matches a file
    /// path.
    pub fn file_type_def(&self) -> Option<&FileTypeDef> {
        match self {
            Glob(GlobInner::UnmatchedIgnore) => None,
            Glob(GlobInner::Matched { def, .. }) => Some(def),
        }
    }
}

/// A single file type definition.
@@ -472,17 +283,13 @@ impl Types {
                return Match::None;
            }
        };
        let mut matches = self.matches.get_default().borrow_mut();
        let mut matches = self.matches.get_or_default().borrow_mut();
        self.set.matches_into(name, &mut *matches);
        // The highest precedent match is the last one.
        if let Some(&i) = matches.last() {
            let (isel, iglob) = self.glob_to_selection[i];
            let (isel, _) = self.glob_to_selection[i];
            let sel = &self.selections[isel];
            let glob = Glob(GlobInner::Matched {
                def: sel.inner(),
                which: iglob,
                negated: sel.is_negated(),
            });
            let glob = Glob(GlobInner::Matched { def: sel.inner() });
            return if sel.is_negated() {
                Match::Ignore(glob)
            } else {
@@ -511,10 +318,7 @@ impl TypesBuilder {
    /// of default type definitions can be added with `add_defaults`, and
    /// additional type definitions can be added with `select` and `negate`.
    pub fn new() -> TypesBuilder {
        TypesBuilder {
            types: HashMap::new(),
            selections: vec![],
        }
        TypesBuilder { types: HashMap::new(), selections: vec![] }
    }

    /// Build the current set of file type definitions *and* selections into
@@ -539,19 +343,18 @@ impl TypesBuilder {
                    GlobBuilder::new(glob)
                        .literal_separator(true)
                        .build()
                        .map_err(|err| {
                            Error::Glob {
                        .map_err(|err| Error::Glob {
                            glob: Some(glob.to_string()),
                            err: err.kind().to_string(),
                            }
                        })?);
                        })?,
                );
                glob_to_selection.push((isel, iglob));
            }
            selections.push(selection.clone().map(move |_| def));
        }
        let set = build_set.build().map_err(|err| {
            Error::Glob { glob: None, err: err.to_string() }
        })?;
        let set = build_set
            .build()
            .map_err(|err| Error::Glob { glob: None, err: err.to_string() })?;
        Ok(Types {
            defs: defs,
            selections: selections,
@@ -616,16 +419,21 @@ impl TypesBuilder {
    /// If `name` is `all` or otherwise contains any character that is not a
    /// Unicode letter or number, then an error is returned.
    pub fn add(&mut self, name: &str, glob: &str) -> Result<(), Error> {
        lazy_static! {
        lazy_static::lazy_static! {
            static ref RE: Regex = Regex::new(r"^[\pL\pN]+$").unwrap();
        };
        if name == "all" || !RE.is_match(name) {
            return Err(Error::InvalidDefinition);
        }
        let (key, glob) = (name.to_string(), glob.to_string());
        self.types.entry(key).or_insert_with(|| {
            FileTypeDef { name: name.to_string(), globs: vec![] }
        }).globs.push(glob);
        self.types
            .entry(key)
            .or_insert_with(|| FileTypeDef {
                name: name.to_string(),
                globs: vec![],
            })
            .globs
            .push(glob);
        Ok(())
    }

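A hedged sketch of the public entry points around `add`: as documented above, a custom type name must consist solely of Unicode letters and numbers, and selecting the type makes matching paths whitelisted. The crate-level usage is an assumption for illustration, not something introduced by this commit.

```rust
use ignore::types::TypesBuilder;

fn main() -> Result<(), ignore::Error> {
    let mut builder = TypesBuilder::new();
    builder.add_defaults();
    // Custom names must be Unicode letters/numbers only; "all" is reserved.
    builder.add("foo", "*.foo")?;
    builder.select("foo");
    let types = builder.build()?;
    assert!(types.matched("bar.foo", false).is_whitelist());
    Ok(())
}
```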
@@ -652,7 +460,10 @@ impl TypesBuilder {
            3 => {
                let name = parts[0];
                let types_string = parts[2];
                if name.is_empty() || parts[1] != "include" || types_string.is_empty() {
                if name.is_empty()
                    || parts[1] != "include"
                    || types_string.is_empty()
                {
                    return Err(Error::InvalidDefinition);
                }
                let types = types_string.split(',');
@@ -662,14 +473,15 @@ impl TypesBuilder {
                    return Err(Error::InvalidDefinition);
                }
                for type_name in types {
                    let globs = self.types.get(type_name).unwrap().globs.clone();
                    let globs =
                        self.types.get(type_name).unwrap().globs.clone();
                    for glob in globs {
                        self.add(name, &glob)?;
                    }
                }
                Ok(())
            }
            _ => Err(Error::InvalidDefinition)
            _ => Err(Error::InvalidDefinition),
        }
    }

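The branch above parses the colon-delimited `add_def` format. A hedged sketch of both accepted shapes follows; the type names are illustrative.

```rust
use ignore::types::TypesBuilder;

fn main() -> Result<(), ignore::Error> {
    let mut builder = TypesBuilder::new();
    // Simple form: "name:glob".
    builder.add_def("html:*.html")?;
    builder.add_def("rust:*.rs")?;
    // Include form: "name:include:type1,type2" composes existing types.
    builder.add_def("web:include:html,rust")?;
    builder.select("web");
    let types = builder.build()?;
    assert!(types.matched("index.html", false).is_whitelist());
    Ok(())
}
```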
@@ -726,7 +538,7 @@ mod tests {
            "rust:*.rs",
            "js:*.js",
            "foo:*.{rs,foo}",
            "combo:include:html,rust"
            "combo:include:html,rust",
        ]
    }

@@ -760,7 +572,7 @@ mod tests {
            "combo:include:html,python",
            // Bad format
            "combo:foobar:html,rust",
            ""
            "",
        ];
        for def in bad_defs {
            assert!(btypes.add_def(def).is_err());